mirror of
https://github.com/kevinveenbirkenbach/computer-playbook.git
synced 2025-09-08 03:07:14 +02:00
Compare commits
22 Commits
4f8ce598a9
...
master
Author | SHA1 | Date | |
---|---|---|---|
445c94788e | |||
aac9704e8b | |||
a57a5f8828 | |||
90843726de | |||
d25da76117 | |||
d48a1b3c0a | |||
2839d2e1a4 | |||
00c99e58e9 | |||
904040589e | |||
9f3d300bca | |||
9e253a2d09 | |||
49120b0dcf | |||
b6f91ab9d3 | |||
77e8e7ed7e | |||
32bc17e0c3 | |||
e294637cb6 | |||
577767bed6 | |||
e77f8da510 | |||
4738b263ec | |||
0a588023a7 | |||
d2fa90774b | |||
0e72dcbe36 |
@@ -15,7 +15,7 @@ Follow these guides to install and configure Infinito.Nexus:
|
||||
- **Networking & VPN** - Configure `WireGuard`, `OpenVPN`, and `Nginx Reverse Proxy`.
|
||||
|
||||
## Managing & Updating Infinito.Nexus 🔄
|
||||
- Regularly update services using `update-docker`, `update-pacman`, or `update-apt`.
|
||||
- Regularly update services using `update-pacman`, or `update-apt`.
|
||||
- Monitor system health with `sys-ctl-hlth-btrfs`, `sys-ctl-hlth-webserver`, and `sys-ctl-hlth-docker-container`.
|
||||
- Automate system maintenance with `sys-lock`, `sys-ctl-cln-bkps`, and `sys-ctl-rpr-docker-hard`.
|
||||
|
||||
|
21
filter_plugins/volume_path.py
Normal file
21
filter_plugins/volume_path.py
Normal file
@@ -0,0 +1,21 @@
|
||||
from ansible.errors import AnsibleFilterError
|
||||
|
||||
def docker_volume_path(volume_name: str) -> str:
    """
    Return the absolute filesystem path of a named Docker volume.

    Example:
        "akaunting_data" -> "/var/lib/docker/volumes/akaunting_data/_data/"

    Raises:
        AnsibleFilterError: if *volume_name* is empty or not a string.
    """
    if not isinstance(volume_name, str) or not volume_name:
        raise AnsibleFilterError(f"Invalid volume name: {volume_name}")
    return f"/var/lib/docker/volumes/{volume_name}/_data/"
|
||||
|
||||
class FilterModule(object):
    """Expose Docker volume path helpers as Ansible filters."""

    def filters(self):
        """Return the mapping of filter names to implementations."""
        return {"docker_volume_path": docker_volume_path}
|
@@ -12,7 +12,6 @@ SYS_SERVICE_BACKUP_RMT_2_LOC: "{{ 'svc-bkp-rmt-2-loc' | get_se
|
||||
SYS_SERVICE_BACKUP_DOCKER_2_LOC: "{{ 'sys-ctl-bkp-docker-2-loc' | get_service_name(SOFTWARE_NAME) }}"
|
||||
SYS_SERVICE_REPAIR_DOCKER_SOFT: "{{ 'sys-ctl-rpr-docker-soft' | get_service_name(SOFTWARE_NAME) }}"
|
||||
SYS_SERVICE_REPAIR_DOCKER_HARD: "{{ 'sys-ctl-rpr-docker-hard' | get_service_name(SOFTWARE_NAME) }}"
|
||||
SYS_SERVICE_UPDATE_DOCKER: "{{ 'update-docker' | get_service_name(SOFTWARE_NAME) }}"
|
||||
|
||||
## On Failure
|
||||
SYS_SERVICE_ON_FAILURE_COMPOSE: "{{ ('sys-ctl-alm-compose@') | get_service_name(SOFTWARE_NAME, False) }}%n.service"
|
||||
@@ -46,8 +45,7 @@ SYS_SERVICE_GROUP_MANIPULATION: >
|
||||
SYS_SERVICE_GROUP_CLEANUP +
|
||||
SYS_SERVICE_GROUP_REPAIR +
|
||||
SYS_SERVICE_GROUP_OPTIMIZATION +
|
||||
SYS_SERVICE_GROUP_MAINTANANCE +
|
||||
[ SYS_SERVICE_UPDATE_DOCKER ]
|
||||
SYS_SERVICE_GROUP_MAINTANANCE
|
||||
) | sort
|
||||
}}
|
||||
|
||||
|
@@ -100,6 +100,8 @@ defaults_networks:
|
||||
subnet: 192.168.103.192/28
|
||||
web-app-magento:
|
||||
subnet: 192.168.103.208/28
|
||||
web-app-bridgy-fed:
|
||||
subnet: 192.168.103.224/28
|
||||
|
||||
# /24 Networks / 254 Usable Clients
|
||||
web-app-bigbluebutton:
|
||||
|
@@ -74,6 +74,7 @@ ports:
|
||||
web-app-chess: 8050
|
||||
web-app-bluesky_view: 8051
|
||||
web-app-magento: 8052
|
||||
web-app-bridgy-fed: 8053
|
||||
web-app-bigbluebutton: 48087 # This port is predefined by bbb. @todo Try to change this to a 8XXX port
|
||||
public:
|
||||
# The following ports should be changed to 22 on the subdomain via stream mapping
|
||||
|
@@ -43,3 +43,7 @@
|
||||
chdir: "{{ PKGMGR_INSTALL_PATH }}"
|
||||
executable: /bin/bash
|
||||
become: true
|
||||
|
||||
- name: "Update all repositories with pkgmgr"
|
||||
command: "pkgmgr pull --all"
|
||||
when: MODE_UPDATE | bool
|
@@ -8,10 +8,10 @@
|
||||
path: "{{ docker_compose.directories.env }}"
|
||||
state: directory
|
||||
mode: "0755"
|
||||
- name: "For '{{ application_id }}': Create {{database_env}}"
|
||||
- name: "For '{{ application_id }}': Create {{ database_env }}"
|
||||
template:
|
||||
src: "env/{{database_type}}.env.j2"
|
||||
dest: "{{database_env}}"
|
||||
src: "env/{{ database_type }}.env.j2"
|
||||
dest: "{{ database_env }}"
|
||||
notify: docker compose up
|
||||
when: not applications | get_app_conf(application_id, 'features.central_database', False)
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
# I don't know why this include leads to the application_id in vars/main.yml of the database role not being used
|
||||
# This is the behaviour which I want, but I'm still wondering why ;)
|
||||
include_role:
|
||||
name: "svc-db-{{database_type}}"
|
||||
name: "svc-db-{{ database_type }}"
|
||||
when: applications | get_app_conf(application_id, 'features.central_database', False)
|
||||
|
||||
- name: "For '{{ application_id }}': Add Entry for Backup Procedure"
|
||||
|
@@ -5,10 +5,10 @@
|
||||
container_name: {{ application_id | get_entity_name }}-database
|
||||
logging:
|
||||
driver: journald
|
||||
image: mariadb
|
||||
image: {{ database_image }}:{{ database_version }}
|
||||
restart: {{ DOCKER_RESTART_POLICY }}
|
||||
env_file:
|
||||
- {{database_env}}
|
||||
- {{ database_env }}
|
||||
command: "--transaction-isolation=READ-COMMITTED --binlog-format=ROW"
|
||||
volumes:
|
||||
- database:/var/lib/mysql
|
||||
|
@@ -2,13 +2,13 @@
|
||||
|
||||
{% if not applications | get_app_conf(application_id, 'features.central_database', False) %}
|
||||
{{ database_host }}:
|
||||
image: postgres:{{ applications['svc-db-postgres'].version}}-alpine
|
||||
image: {{ database_image }}:{{ database_version }}
|
||||
container_name: {{ application_id | get_entity_name }}-database
|
||||
env_file:
|
||||
- {{database_env}}
|
||||
- {{ database_env }}
|
||||
restart: {{ DOCKER_RESTART_POLICY }}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U {{ database_name }}"]
|
||||
test: ["CMD-SHELL", "pg_isready -U {{ database_username }}"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 6
|
||||
|
@@ -1,20 +1,23 @@
|
||||
# Helper variables
|
||||
_dbtype: "{{ (database_type | d('') | trim) }}"
|
||||
_database_id: "{{ ('svc-db-' ~ _dbtype) if _dbtype else '' }}"
|
||||
_database_central_name: "{{ (applications | get_app_conf(_database_id, 'docker.services.' ~ _dbtype ~ '.name', False, '')) if _dbtype else '' }}"
|
||||
_database_consumer_id: "{{ database_application_id | d(application_id) }}"
|
||||
_database_consumer_entity_name: "{{ _database_consumer_id | get_entity_name }}"
|
||||
_database_central_enabled: "{{ (applications | get_app_conf(_database_consumer_id, 'features.central_database', False)) if _dbtype else False }}"
|
||||
_dbtype: "{{ (database_type | d('') | trim) }}"
|
||||
_database_id: "{{ ('svc-db-' ~ _dbtype) if _dbtype else '' }}"
|
||||
_database_central_name: "{{ (applications | get_app_conf(_database_id, 'docker.services.' ~ _dbtype ~ '.name', False, '')) if _dbtype else '' }}"
|
||||
_database_consumer_id: "{{ database_application_id | d(application_id) }}"
|
||||
_database_consumer_entity_name: "{{ _database_consumer_id | get_entity_name }}"
|
||||
_database_central_enabled: "{{ (applications | get_app_conf(_database_consumer_id, 'features.central_database', False)) if _dbtype else False }}"
|
||||
_database_default_version: "{{ applications | get_app_conf(_database_id, 'docker.services.' ~ _dbtype ~ '.version') }}"
|
||||
|
||||
# Definition
|
||||
|
||||
database_name: "{{ _database_consumer_entity_name }}"
|
||||
database_instance: "{{ _database_central_name if _database_central_enabled else database_name }}" # This could lead to bugs at dedicated database @todo cleanup
|
||||
database_host: "{{ _database_central_name if _database_central_enabled else 'database' }}" # This could lead to bugs at dedicated database @todo cleanup
|
||||
database_username: "{{ _database_consumer_entity_name }}"
|
||||
database_password: "{{ applications | get_app_conf(_database_consumer_id, 'credentials.database_password', true) }}"
|
||||
database_port: "{{ (ports.localhost.database[_database_id] | d('')) if _dbtype else '' }}"
|
||||
database_env: "{{ docker_compose.directories.env }}{{ database_type }}.env"
|
||||
database_url_jdbc: "jdbc:{{ database_type if database_type == 'mariadb' else 'postgresql' }}://{{ database_host }}:{{ database_port }}/{{ database_name }}"
|
||||
database_url_full: "{{ database_type }}://{{ database_username }}:{{ database_password }}@{{ database_host }}:{{ database_port }}/{{ database_name }}"
|
||||
database_volume: "{{ _database_consumer_entity_name ~ '_' if not _database_central_enabled }}{{ database_host }}"
|
||||
database_name: "{{ _database_consumer_entity_name }}"
|
||||
database_instance: "{{ _database_central_name if _database_central_enabled else database_name }}" # This could lead to bugs at dedicated database @todo cleanup
|
||||
database_host: "{{ _database_central_name if _database_central_enabled else 'database' }}" # This could lead to bugs at dedicated database @todo cleanup
|
||||
database_username: "{{ _database_consumer_entity_name }}"
|
||||
database_password: "{{ applications | get_app_conf(_database_consumer_id, 'credentials.database_password', true) }}"
|
||||
database_port: "{{ (ports.localhost.database[_database_id] | d('')) if _dbtype else '' }}"
|
||||
database_env: "{{ docker_compose.directories.env }}{{ database_type }}.env"
|
||||
database_url_jdbc: "jdbc:{{ database_type if database_type == 'mariadb' else 'postgresql' }}://{{ database_host }}:{{ database_port }}/{{ database_name }}"
|
||||
database_url_full: "{{ database_type }}://{{ database_username }}:{{ database_password }}@{{ database_host }}:{{ database_port }}/{{ database_name }}"
|
||||
database_volume: "{{ _database_consumer_entity_name ~ '_' if not _database_central_enabled }}{{ database_host }}"
|
||||
database_image: "{{ _dbtype }}"
|
||||
database_version: "{{ applications | get_app_conf( _database_consumer_id, 'docker.services.database.version', False, _database_default_version) }}"
|
||||
|
@@ -14,13 +14,6 @@
|
||||
name: update-apt
|
||||
when: ansible_distribution == "Debian"
|
||||
|
||||
- name: "Update Docker Images"
|
||||
include_role:
|
||||
name: update-docker
|
||||
when:
|
||||
- docker_compose_directory_stat.stat.exists
|
||||
- run_once_update_docker is not defined
|
||||
|
||||
- name: "Check if yay is installed"
|
||||
command: which yay
|
||||
register: yay_installed
|
||||
@@ -51,7 +44,3 @@
|
||||
register: pkgmgr_available
|
||||
failed_when: false
|
||||
|
||||
- name: "Update all repositories using pkgmgr"
|
||||
include_role:
|
||||
name: update-pkgmgr
|
||||
when: pkgmgr_available.rc == 0
|
||||
|
@@ -1,27 +0,0 @@
|
||||
# Update Docker
|
||||
|
||||
## Description
|
||||
|
||||
This role updates Docker Compose instances by checking for changes in Docker image digests and applying updates if necessary. It utilizes a Python script to handle git pulls and Docker image pulls, and rebuilds containers when changes are detected.
|
||||
|
||||
## Overview
|
||||
|
||||
The role performs the following:
|
||||
- Deploys a Python script to check for Docker image updates.
|
||||
- Configures a systemd service to run the update script.
|
||||
- Restarts the Docker update service upon configuration changes.
|
||||
- Supports additional procedures for specific Docker applications (e.g., Discourse, Mastodon, Nextcloud).
|
||||
|
||||
## Purpose
|
||||
|
||||
The role is designed to ensure that Docker images remain current by automatically detecting changes and rebuilding containers as needed. This helps maintain a secure and efficient container environment.
|
||||
|
||||
## Features
|
||||
|
||||
- **Docker Image Monitoring:** Checks for changes in image digests.
|
||||
- **Automated Updates:** Pulls new images and rebuilds containers when necessary.
|
||||
- **Service Management:** Configures and restarts a systemd service to handle updates.
|
||||
- **Application-Specific Procedures:** Includes hooks for updating specific Docker applications.
|
||||
|
||||
## Credits 📝
|
||||
It was created with the help of ChatGPT. The conversation is available [here](https://chat.openai.com/share/165418b8-25fa-433b-baca-caded941e22a)
|
@@ -1,27 +0,0 @@
|
||||
galaxy_info:
|
||||
author: "Kevin Veen-Birkenbach"
|
||||
description: "Updates Docker Compose instances by detecting changes in Docker image digests and rebuilding containers when necessary. This role automates Docker image pulls and container rebuilds."
|
||||
license: "Infinito.Nexus NonCommercial License"
|
||||
license_url: "https://s.infinito.nexus/license"
|
||||
company: |
|
||||
Kevin Veen-Birkenbach
|
||||
Consulting & Coaching Solutions
|
||||
https://www.veen.world
|
||||
min_ansible_version: "2.9"
|
||||
platforms:
|
||||
- name: Archlinux
|
||||
versions:
|
||||
- rolling
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- all
|
||||
galaxy_tags:
|
||||
- docker
|
||||
- update
|
||||
- compose
|
||||
- images
|
||||
- systemd
|
||||
- maintenance
|
||||
repository: "https://s.infinito.nexus/code"
|
||||
issue_tracker_url: "https://s.infinito.nexus/issues"
|
||||
documentation: "https://docs.infinito.nexus"
|
@@ -1,20 +0,0 @@
|
||||
- name: Include dependency 'sys-lock'
|
||||
include_role:
|
||||
name: sys-lock
|
||||
when: run_once_sys_lock is not defined
|
||||
|
||||
- name: "start {{ 'sys-ctl-bkp-docker-2-loc-everything' | get_service_name(SOFTWARE_NAME) }}"
|
||||
systemd:
|
||||
name: "{{ 'sys-ctl-bkp-docker-2-loc-everything' | get_service_name(SOFTWARE_NAME) }}"
|
||||
state: started
|
||||
when:
|
||||
- MODE_BACKUP | bool
|
||||
|
||||
- include_role:
|
||||
name: sys-service
|
||||
vars:
|
||||
system_service_restarted: true
|
||||
system_service_timer_enabled: false
|
||||
system_service_tpl_on_failure: "{{ SYS_SERVICE_ON_FAILURE_COMPOSE }}"
|
||||
system_service_tpl_exec_start: "{{ system_service_script_exec }} {{ PATH_DOCKER_COMPOSE_INSTANCES }}"
|
||||
system_service_tpl_exec_start_pre: "/usr/bin/python {{ PATH_SYSTEM_LOCK_SCRIPT }} {{ SYS_SERVICE_GROUP_MANIPULATION | join(' ') }} --ignore {{ SYS_SERVICE_GROUP_CLEANUP | join(' ') }} {{ 'update-docker' | get_service_name(SOFTWARE_NAME) }} --timeout '{{ SYS_TIMEOUT_DOCKER_UPDATE }}'"
|
@@ -1,4 +0,0 @@
|
||||
- block:
|
||||
- include_tasks: 01_core.yml
|
||||
- include_tasks: utils/run_once.yml
|
||||
when: run_once_update_docker is not defined
|
@@ -1,217 +0,0 @@
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
def run_command(command):
    """
    Run *command* in a shell, echoing its combined stdout/stderr live.

    The output is also collected so that, on a non-zero exit status, a
    subprocess.CalledProcessError is raised carrying the exit code, the
    executed command, and the full output (as bytes) for debugging.
    """
    proc = None
    try:
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        collected = []
        for raw_line in iter(proc.stdout.readline, b''):
            text = raw_line.decode()
            collected.append(text)
            sys.stdout.write(text)

        status = proc.wait()
        if status:
            raise subprocess.CalledProcessError(
                status, command, output=''.join(collected).encode()
            )
    finally:
        # Close the pipe even when Popen itself failed part-way.
        if proc and proc.stdout:
            proc.stdout.close()
|
||||
|
||||
def git_pull():
    """
    Bring the git repository in the current working directory up to date.

    Compares the local HEAD revision against its upstream and performs a
    ``git pull`` (via run_command) only when they differ.

    Returns:
        bool: True if a pull was performed, False if already up to date.

    Raises:
        Exception: if resolving the local or upstream revision fails
            (the git command exited non-zero).
    """
    print("Checking if the git repository is up to date.")

    def _revision(ref, which, fallback):
        # Resolve *ref* to a commit hash, surfacing git's stderr on failure.
        proc = subprocess.run(f"git rev-parse {ref}", shell=True, capture_output=True)
        if proc.returncode != 0:
            error_msg = proc.stderr.decode().strip() or fallback
            raise Exception(f"Failed to retrieve {which} git revision: {error_msg}")
        return proc.stdout.decode().strip()

    local = _revision("@", "local", "Unknown error while retrieving local revision.")
    remote = _revision("@{u}", "remote", "Unknown error while retrieving remote revision.")

    if local == remote:
        print("Repository is already up to date.")
        return False

    print("Repository is not up to date. Performing git pull.")
    run_command("git pull")
    return True
|
||||
|
||||
{% raw %}
|
||||
def get_image_digests(directory):
    """
    Collect the image digests of all Docker images belonging to the
    compose project named after *directory*.

    Returns:
        dict: mapping "repository:tag" -> digest; empty if no image matches.
    """
    compose_project = os.path.basename(directory)
    try:
        raw = subprocess.check_output(
            f'docker images --format "{{{{.Repository}}}}:{{{{.Tag}}}}@{{{{.Digest}}}}" | grep {compose_project}',
            shell=True
        ).decode().strip()
    except subprocess.CalledProcessError as e:
        # grep exits with 1 when nothing matches; that simply means
        # the project has no images yet.
        if e.returncode == 1:
            return {}
        raise  # Other errors are still raised
    return dict(line.split('@') for line in raw.splitlines() if line)
|
||||
{% endraw %}
|
||||
|
||||
def is_any_service_up():
    """
    Check whether the current Docker Compose project has running services.

    Returns:
        bool: True if ``docker-compose ps -q`` prints at least one
        container ID, False otherwise.

    Fix: the original merged stderr into stdout, so any error text from
    docker-compose (e.g. "command not found", daemon unreachable) was
    counted as container IDs and misreported services as "up".  stderr is
    now discarded so only real container IDs reach the check.
    """
    process = subprocess.Popen(
        "docker-compose ps -q",
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
    )
    output, _ = process.communicate()
    service_ids = output.decode().strip().splitlines()
    return bool(service_ids)
|
||||
|
||||
def pull_docker_images():
    """
    Pull the latest images for the current Docker Compose project.

    Returns:
        bool: True when the images cannot be pulled and must be built
        from source ("pull access denied" / "must be built from source"),
        otherwise False.

    Raises:
        subprocess.CalledProcessError: for any other pull failure.
    """
    print("Pulling docker images.")
    try:
        run_command("docker-compose pull")
    except subprocess.CalledProcessError as e:
        message = e.output.decode()
        if "pull access denied" not in message and "must be built from source" not in message:
            print("Failed to pull images with unexpected error.")
            raise
        print("Need to build the image from source.")
        return True
    return False
|
||||
|
||||
def update_docker(directory):
    """
    Refresh the Docker images of the compose project in *directory* and
    rebuild/restart its containers when image digests changed or a build
    from source is required.
    """
    print(f"Checking for updates to Docker images in {directory}.")
    digests_before = get_image_digests(directory)
    must_build = pull_docker_images()
    digests_after = get_image_digests(directory)

    if digests_before != digests_after:
        print("Changes detected in image digests. Rebuilding containers.")
        must_build = True

    if not must_build:
        print("Docker images are up to date. No rebuild necessary.")
        return

    # This probably just rebuilds the Dockerfile image if there is a
    # change in the other docker compose containers.
    run_command("docker-compose build --pull")
    start_docker(directory)
|
||||
|
||||
def update_discourse(directory):
    """
    Updates Discourse by running the rebuild command on the launcher script.

    NOTE: this function body is Jinja2-templated ("{{ ... }}" expressions are
    rendered by Ansible before deployment); the literals below must stay intact.
    The stop -> rm -> network-connect -> rebuild order is significant.
    """
    # The Discourse git checkout lives under <directory>/services/<repository>.
    docker_repository_directory = os.path.join(directory, "services", "{{ applications | get_app_conf('web-app-discourse','repository') }}")
    print(f"Using path {docker_repository_directory } to pull discourse repository.")
    os.chdir(docker_repository_directory )
    if git_pull():
        print("Start Discourse update procedure.")
        # Stop and remove the running container before the launcher rebuild.
        update_procedure("docker stop {{ applications | get_app_conf('web-app-discourse','docker.services.discourse.name') }}")
        update_procedure("docker rm {{ applications | get_app_conf('web-app-discourse','docker.services.discourse.name') }}")
        try:
            # Ensure the Discourse network is connected to the central postgres network.
            update_procedure("docker network connect {{ applications | get_app_conf('web-app-discourse','docker.network') }} {{ applications | get_app_conf('svc-db-postgres', 'docker.network') }}")
        except subprocess.CalledProcessError as e:
            error_message = e.output.decode()
            if "already exists" in error_message or "is already connected" in error_message:
                # The network link is already in place; not an error.
                print("Network connection already exists. Skipping...")
            else:
                raise
        update_procedure("./launcher rebuild {{ applications | get_app_conf('web-app-discourse','docker.services.discourse.name') }}")
    else:
        print("Discourse update skipped. No changes in git repository.")
|
||||
|
||||
def upgrade_listmonk():
    """Run the Listmonk schema upgrade non-interactively."""
    print("Starting Listmonk upgrade.")
    # Pipe "y" to auto-confirm the upgrade prompt; -T disables TTY allocation.
    run_command('echo "y" | docker compose run -T application ./listmonk --upgrade')
    print("Upgrade complete.")
|
||||
|
||||
def update_procedure(command):
    """
    Execute *command* with up to three attempts, sleeping 60 seconds
    between failures; re-raises the last error if every attempt fails.
    """
    max_attempts = 3
    attempt = 0
    while True:
        try:
            run_command(command)
            return  # success: stop retrying
        except subprocess.CalledProcessError:
            attempt += 1
            if attempt >= max_attempts:
                print("All attempts to update have failed.")
                raise  # re-raise after exhausting all attempts
            print(f"Attempt {attempt} failed, retrying in 60 seconds...")
            time.sleep(60)
|
||||
|
||||
def start_docker(directory):
    """
    Force-recreate the compose services in *directory*, but only when at
    least one service is already running; otherwise leave it stopped.
    """
    if not is_any_service_up():
        print(f"Skipped starting. No service is up in {directory}.")
        return
    print(f"Restarting containers in {directory}.")
    run_command("docker-compose up -d --force-recreate")
|
||||
|
||||
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Please provide the path to the parent directory as a parameter.")
        sys.exit(1)

    parent_directory = sys.argv[1]
    # Walk every compose-instance directory below the given parent.
    for dir_entry in os.scandir(parent_directory):
        if not dir_entry.is_dir():
            continue
        dir_path = dir_entry.path
        print(f"Checking for updates in: {dir_path}")
        os.chdir(dir_path)

        # Pull the git repository if one exists directly in the instance dir.
        # @deprecated: remove once all docker applications store their
        # repositories under the correct folder path.
        if os.path.isdir(os.path.join(dir_path, ".git")):
            print("DEPRECATED: Docker .git repositories should be saved under /opt/docker/{instance}/services/{repository_name} ")
            git_pull()

        app = os.path.basename(dir_path)
        if app == "matrix":
            # No autoupdate for matrix is possible at the moment, because the
            # role has to be executed every time; the update happens inside
            # the role itself. @todo implement in future
            pass
        else:
            # Pull and update docker images
            update_docker(dir_path)

        # The following instances need additional update and upgrade procedures
        if app == "discourse":
            update_discourse(dir_path)
        elif app == "listmonk":
            upgrade_listmonk()

        # @todo implement dedicated procedure for bluesky
        # @todo implement dedicated procedure for taiga
|
@@ -1,2 +0,0 @@
|
||||
application_id: update-docker
|
||||
system_service_id: "{{ application_id }}"
|
@@ -1,23 +0,0 @@
|
||||
# Update Pip Packages
|
||||
|
||||
## Description
|
||||
|
||||
This Ansible role automatically updates all installed Python Pip packages to their latest versions.
|
||||
|
||||
## Overview
|
||||
|
||||
The role performs the following:
|
||||
- Executes a command to retrieve all installed Python Pip packages.
|
||||
- Updates each package individually to its latest available version.
|
||||
- Ensures a smooth and automated Python environment maintenance process.
|
||||
|
||||
## Purpose
|
||||
|
||||
Ensures Python packages remain up-to-date, improving security and functionality.
|
||||
|
||||
## Features
|
||||
|
||||
- **Automatic Updates:** Automates the process of upgrading Python packages.
|
||||
- **Platform Independent:** Works on Linux, macOS, and Windows environments.
|
||||
- **Ansible Integration:** Easy to include in larger playbooks or maintenance routines.
|
||||
|
@@ -1,25 +0,0 @@
|
||||
galaxy_info:
|
||||
author: "Kevin Veen-Birkenbach"
|
||||
license: "Infinito.Nexus NonCommercial License"
|
||||
license_url: "https://s.infinito.nexus/license"
|
||||
company: |
|
||||
Kevin Veen-Birkenbach
|
||||
Consulting & Coaching Solutions
|
||||
https://www.veen.world
|
||||
description: "Automatically updates all Python Pip packages to their latest available versions."
|
||||
min_ansible_version: "2.9"
|
||||
platforms:
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- all
|
||||
- name: Archlinux
|
||||
versions:
|
||||
- rolling
|
||||
- name: Debian
|
||||
versions:
|
||||
- all
|
||||
galaxy_tags:
|
||||
- python
|
||||
- pip
|
||||
- update
|
||||
- maintenance
|
@@ -1,9 +0,0 @@
|
||||
- block:
|
||||
- name: Include dependency 'dev-python-pip'
|
||||
include_role:
|
||||
name: dev-python-pip
|
||||
when: run_once_dev_python_pip is not defined
|
||||
- include_tasks: utils/run_once.yml
|
||||
vars:
|
||||
flush_handlers: false
|
||||
when: run_once_update_pip is not defined
|
@@ -1 +0,0 @@
|
||||
application_id: update-pip
|
@@ -1,27 +0,0 @@
|
||||
# Update pkgmgr
|
||||
|
||||
## Description
|
||||
|
||||
This role checks if the [package manager](https://github.com/kevinveenbirkenbach/package-manager) is available on the system. If so, it runs `pkgmgr update --all` to update all repositories managed by the `pkgmgr`.
|
||||
|
||||
## Overview
|
||||
|
||||
This role performs the following tasks:
|
||||
- Checks if the `pkgmgr` command is available.
|
||||
- If available, runs `pkgmgr update --all` to update all repositories.
|
||||
|
||||
## Purpose
|
||||
|
||||
The purpose of this role is to simplify system updates by using the `pkgmgr` package manager to handle all repository updates with a single command.
|
||||
|
||||
## Features
|
||||
|
||||
- **Conditional Execution**: Runs only if the `pkgmgr` command is found on the system.
|
||||
- **Automated Updates**: Automatically runs `pkgmgr update --all` to update all repositories.
|
||||
|
||||
## License
|
||||
|
||||
Infinito.Nexus NonCommercial License
|
||||
[Learn More](https://s.infinito.nexus/license)
|
||||
|
||||
|
@@ -1,2 +0,0 @@
|
||||
# Todos
|
||||
- Activate update again. Atm not possible, because it pulls all repos
|
@@ -1,3 +0,0 @@
|
||||
# run_once_update_pkgmgr: deactivated
|
||||
#- name: "Update all repositories with pkgmgr"
|
||||
# command: "pkgmgr update --all"
|
@@ -1 +0,0 @@
|
||||
application_id: update-pkgmgr
|
@@ -23,6 +23,6 @@ AKAUNTING_COMPANY_NAME: "{{ applications | get_app_conf(application_
|
||||
AKAUNTING_COMPANY_EMAIL: "{{ applications | get_app_conf(application_id, 'company.email') }}"
|
||||
AKAUNTING_ADMIN_EMAIL: "{{ applications | get_app_conf(application_id, 'setup_admin_email') }}"
|
||||
AKAUNTING_ADMIN_PASSWORD: "{{ applications | get_app_conf(application_id, 'credentials.setup_admin_password') }}"
|
||||
AKAUNTING_SETUP_MARKER: "/var/lib/docker/volumes/{{ AKAUNTING_VOLUME }}/_data/.akaunting_installed"
|
||||
AKAUNTING_SETUP_MARKER: "{{ [ (AKAUNTING_VOLUME | docker_volume_path), '.akaunting_installed' ] | path_join }}"
|
||||
AKAUNTING_APP_KEY: "{{ applications | get_app_conf(application_id, 'credentials.app_key') }}"
|
||||
AKAUNTING_CACHE_DRIVER: "{{ 'redis' if applications | get_app_conf(application_id, 'docker.services.redis.enabled') else 'file' }}"
|
@@ -5,6 +5,7 @@ features:
|
||||
central_database: false
|
||||
logout: true
|
||||
server:
|
||||
config_upstream_url: "https://ip.bsky.app/config"
|
||||
domains:
|
||||
canonical:
|
||||
web: "bskyweb.{{ PRIMARY_DOMAIN }}"
|
||||
@@ -22,6 +23,7 @@ server:
|
||||
- https://statsigapi.net
|
||||
- https://ip.bsky.app
|
||||
- https://video.bsky.app
|
||||
- https://bsky.app
|
||||
- wss://bsky.network
|
||||
- wss://*.bsky.app
|
||||
media-src:
|
||||
@@ -33,7 +35,7 @@ docker:
|
||||
database:
|
||||
enabled: false
|
||||
web:
|
||||
enabled: true # @see https://github.com/bluesky-social/social-app
|
||||
enabled: true # @see https://github.com/bluesky-social/social-app
|
||||
view:
|
||||
enabled: false
|
||||
pds:
|
||||
|
@@ -3,6 +3,19 @@
|
||||
repo: "https://github.com/bluesky-social/social-app.git"
|
||||
dest: "{{ BLUESKY_SOCIAL_APP_DIR }}"
|
||||
version: "main"
|
||||
force: true
|
||||
notify:
|
||||
- docker compose up
|
||||
- docker compose build
|
||||
- docker compose build
|
||||
|
||||
- name: Force BAPP_CONFIG_URL to same-origin /config
|
||||
ansible.builtin.replace:
|
||||
path: "{{ BLUESKY_SOCIAL_APP_DIR }}/src/state/geolocation.tsx"
|
||||
regexp: '^\s*const\s+BAPP_CONFIG_URL\s*=\s*.*$'
|
||||
replace: "const BAPP_CONFIG_URL = '/config'"
|
||||
|
||||
- name: Force IPCC_URL to same-origin /ipcc
|
||||
ansible.builtin.replace:
|
||||
path: "{{ BLUESKY_SOCIAL_APP_DIR }}/src/state/geolocation.tsx"
|
||||
regexp: '^\s*const\s+IPCC_URL\s*=\s*.*$'
|
||||
replace: "const IPCC_URL = '/ipcc'"
|
||||
|
@@ -15,8 +15,9 @@
|
||||
include_role:
|
||||
name: sys-stk-front-proxy
|
||||
vars:
|
||||
domain: "{{ BLUESKY_WEB_DOMAIN }}"
|
||||
http_port: "{{ BLUESKY_WEB_PORT }}"
|
||||
domain: "{{ BLUESKY_WEB_DOMAIN }}"
|
||||
http_port: "{{ BLUESKY_WEB_PORT }}"
|
||||
proxy_extra_configuration: "{{ BLUESKY_FRONT_PROXY_CONTENT }}"
|
||||
when: BLUESKY_WEB_ENABLED | bool
|
||||
|
||||
- name: "Include front proxy for {{ BLUESKY_VIEW_DOMAIN }}:{{ BLUESKY_VIEW_PORT }}"
|
||||
|
@@ -1,21 +1,30 @@
|
||||
# General
|
||||
PDS_HOSTNAME="{{ BLUESKY_API_DOMAIN }}"
|
||||
PDS_ADMIN_EMAIL="{{ BLUESKY_ADMIN_EMAIL }}"
|
||||
PDS_SERVICE_DID="did:web:{{ BLUESKY_API_DOMAIN }}"
|
||||
|
||||
# See https://mattdyson.org/blog/2024/11/self-hosting-bluesky-pds/
|
||||
PDS_SERVICE_HANDLE_DOMAINS=".{{ PRIMARY_DOMAIN }}"
|
||||
PDS_JWT_SECRET="{{ BLUESKY_JWT_SECRET }}"
|
||||
PDS_ADMIN_PASSWORD="{{ BLUESKY_ADMIN_PASSWORD }}"
|
||||
PDS_PLC_ROTATION_KEY_K256_PRIVATE_KEY_HEX="{{ BLUESKY_ROTATION_KEY }}"
|
||||
PDS_CRAWLERS=https://bsky.network
|
||||
PDS_EMAIL_SMTP_URL=smtps://{{ users['no-reply'].email }}:{{ users['no-reply'].mailu_token }}@{{ SYSTEM_EMAIL.HOST }}:{{ SYSTEM_EMAIL.PORT }}/
|
||||
PDS_EMAIL_FROM_ADDRESS={{ users['no-reply'].email }}
|
||||
LOG_ENABLED={{ MODE_DEBUG | string | lower }}
|
||||
PDS_BLOBSTORE_DISK_LOCATION={{ BLUESKY_PDS_BLOBSTORE_LOCATION }}
|
||||
PDS_DATA_DIRECTORY={{ BLUESKY_PDS_DATA_DIR }}
|
||||
PDS_BLOB_UPLOAD_LIMIT=52428800
|
||||
PDS_DID_PLC_URL=https://plc.directory
|
||||
|
||||
# See https://mattdyson.org/blog/2024/11/self-hosting-bluesky-pds/
|
||||
PDS_SERVICE_HANDLE_DOMAINS=".{{ PRIMARY_DOMAIN }}"
|
||||
PDS_SERVICE_DID="did:web:{{ BLUESKY_API_DOMAIN }}"
|
||||
|
||||
# Email
|
||||
PDS_ADMIN_EMAIL="{{ BLUESKY_ADMIN_EMAIL }}"
|
||||
PDS_EMAIL_SMTP_URL=smtps://{{ users['no-reply'].email }}:{{ users['no-reply'].mailu_token }}@{{ SYSTEM_EMAIL.HOST }}:{{ SYSTEM_EMAIL.PORT }}/
|
||||
PDS_EMAIL_FROM_ADDRESS={{ users['no-reply'].email }}
|
||||
|
||||
# Credentials
|
||||
PDS_JWT_SECRET="{{ BLUESKY_JWT_SECRET }}"
|
||||
PDS_ADMIN_PASSWORD="{{ BLUESKY_ADMIN_PASSWORD }}"
|
||||
PDS_PLC_ROTATION_KEY_K256_PRIVATE_KEY_HEX="{{ BLUESKY_ROTATION_KEY }}"
|
||||
|
||||
# View
|
||||
PDS_BSKY_APP_VIEW_URL={{ BLUESKY_VIEW_URL }}
|
||||
PDS_BSKY_APP_VIEW_DID={{ BLUESKY_VIEW_DID }}
|
||||
|
||||
# Report
|
||||
PDS_REPORT_SERVICE_URL=https://mod.bsky.app
|
||||
PDS_REPORT_SERVICE_DID=did:plc:ar7c4by46qjdydhdevvrndac
|
||||
|
29
roles/web-app-bluesky/templates/extra_locations.conf.j2
Normal file
29
roles/web-app-bluesky/templates/extra_locations.conf.j2
Normal file
@@ -0,0 +1,29 @@
|
||||
# Injected by web-app-bluesky (same pattern as web-app-yourls)
|
||||
# Exposes a same-origin /config to avoid CORS when the social-app fetches config.
|
||||
location = /config {
|
||||
proxy_pass {{ BLUESKY_CONFIG_UPSTREAM_URL }};
|
||||
# Nur Hostname extrahieren:
|
||||
set $up_host "{{ BLUESKY_CONFIG_UPSTREAM_URL | regex_replace('^https?://', '') | regex_replace('/.*$', '') }}";
|
||||
proxy_set_header Host $up_host;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Connection "";
|
||||
proxy_ssl_server_name on;
|
||||
|
||||
# Make response clearly same-origin for browsers
|
||||
proxy_hide_header Access-Control-Allow-Origin;
|
||||
add_header Access-Control-Allow-Origin $scheme://$host always;
|
||||
add_header Vary Origin always;
|
||||
}
|
||||
|
||||
location = /ipcc {
|
||||
proxy_pass https://bsky.app/ipcc;
|
||||
set $up_host "bsky.app";
|
||||
proxy_set_header Host $up_host;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Connection "";
|
||||
proxy_ssl_server_name on;
|
||||
|
||||
proxy_hide_header Access-Control-Allow-Origin;
|
||||
add_header Access-Control-Allow-Origin $scheme://$host always;
|
||||
add_header Vary Origin always;
|
||||
}
|
@@ -43,3 +43,6 @@ BLUESKY_ROTATION_KEY: "{{ applications | get_app_conf(application_id,
|
||||
BLUESKY_ADMIN_EMAIL: "{{ users.administrator.email }}"
|
||||
BLUESKY_ADMIN_PASSWORD: "{{ users.administrator.password }}"
|
||||
|
||||
# Front proxy
|
||||
BLUESKY_FRONT_PROXY_CONTENT: "{{ lookup('template', 'extra_locations.conf.j2') }}"
|
||||
BLUESKY_CONFIG_UPSTREAM_URL: "{{ applications | get_app_conf(application_id, 'server.config_upstream_url') }}"
|
||||
|
25
roles/web-app-bridgy-fed/README.md
Normal file
25
roles/web-app-bridgy-fed/README.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Bridgy Fed
|
||||
|
||||
## Description
|
||||
Bridgy Fed bridges ActivityPub (Fediverse), ATProto/Bluesky, and IndieWeb (webmentions/mf2). It mirrors identities and interactions across networks.
|
||||
|
||||
## Overview
|
||||
This role builds and runs Bridgy Fed as a Docker container and (optionally) starts a Datastore-mode Firestore emulator as a sidecar. It exposes HTTP locally for a front proxy.
|
||||
|
||||
Upstream docs & dev notes:
|
||||
- User & developer docs: https://fed.brid.gy and https://bridgy-fed.readthedocs.io/
|
||||
- Source: https://github.com/snarfed/bridgy-fed
|
||||
- Local run (reference): `flask run -p 8080` with BRIDGY_APPVIEW_HOST/BRIDGY_PLC_HOST/BRIDGY_BGS_HOST/BRIDGY_PDS_HOST set, and Datastore emulator envs
|
||||
|
||||
## Features
|
||||
- Dockerized Flask app (gunicorn)
|
||||
- Optional Firestore emulator (Datastore mode) sidecar
|
||||
- Front proxy integration via `sys-stk-front-proxy`
|
||||
|
||||
## Quick start
|
||||
1) Set domains and ports in inventory.
|
||||
2) Enable/disable the emulator in `config/main.yml`.
|
||||
3) Run the role; your front proxy will publish the app.
|
||||
|
||||
## Notes
|
||||
- Emulator is **not** for production; it’s in-memory unless you mount a volume/configure import/export.
|
29
roles/web-app-bridgy-fed/config/main.yml
Normal file
29
roles/web-app-bridgy-fed/config/main.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
features:
|
||||
matomo: true
|
||||
css: true
|
||||
desktop: true
|
||||
central_database: false
|
||||
logout: false
|
||||
oidc: false
|
||||
|
||||
server:
|
||||
domains:
|
||||
canonical:
|
||||
- "bridgyfed.{{ PRIMARY_DOMAIN }}"
|
||||
csp:
|
||||
whitelist: {}
|
||||
flags: {}
|
||||
|
||||
docker:
|
||||
services:
|
||||
database:
|
||||
enabled: false
|
||||
application:
|
||||
image: "python"
|
||||
version: "3.12-bookworm"
|
||||
name: "web-app-bridgy-fed"
|
||||
repository: "https://github.com/snarfed/bridgy-fed.git"
|
||||
branch: "main"
|
||||
rbac:
|
||||
roles: {}
|
||||
|
49
roles/web-app-bridgy-fed/files/Dockerfile
Normal file
49
roles/web-app-bridgy-fed/files/Dockerfile
Normal file
@@ -0,0 +1,49 @@
|
||||
# Runtime image for Bridgy Fed (Flask) with a build step that clones upstream
|
||||
ARG PY_BASE="python:3.12-bookworm"
|
||||
FROM ${PY_BASE} AS build
|
||||
|
||||
ARG BRIDGY_REPO_URL
|
||||
ARG BRIDGY_REPO_BRANCH
|
||||
|
||||
# System deps: git, build tools, curl for healthchecks, and gunicorn
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git build-essential curl ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
RUN git clone --depth=1 --branch "${BRIDGY_REPO_BRANCH}" "${BRIDGY_REPO_URL}" ./
|
||||
|
||||
# Python deps
|
||||
RUN pip install --upgrade pip && \
|
||||
pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Create oauth_dropins static symlink (upstream expects this)
|
||||
RUN python - <<'PY'\n\
|
||||
import oauth_dropins, pathlib, os\n\
|
||||
target = pathlib.Path(oauth_dropins.__file__).parent / 'static'\n\
|
||||
link = pathlib.Path('/app/oauth_dropins_static')\n\
|
||||
try:\n\
|
||||
if link.exists() or link.is_symlink():\n\
|
||||
link.unlink()\n\
|
||||
os.symlink(str(target), str(link))\n\
|
||||
except FileExistsError:\n\
|
||||
pass\n\
|
||||
print('Symlinked oauth_dropins_static ->', target)\n\
|
||||
PY
|
||||
|
||||
# Final stage
|
||||
FROM ${PY_BASE}
|
||||
|
||||
ARG CONTAINER_PORT
|
||||
ENV PORT=${CONTAINER_PORT}
|
||||
|
||||
WORKDIR /app
|
||||
COPY --from=build /app /app
|
||||
|
||||
# Non-root good practice
|
||||
RUN useradd -r -m -d /nonroot appuser && chown -R appuser:appuser /app
|
||||
USER appuser
|
||||
|
||||
EXPOSE ${PORT}
|
||||
# Upstream flask app entry: 'flask_app:app'
|
||||
CMD ["sh", "-lc", "exec gunicorn -w 2 -k gthread -b 0.0.0.0:${PORT} flask_app:app"]
|
@@ -1,24 +1,22 @@
|
||||
---
|
||||
galaxy_info:
|
||||
author: "Kevin Veen-Birkenbach"
|
||||
description: "Checks if the pkgmgr command is available and runs 'pkgmgr update --all' to update all repositories."
|
||||
description: "Bridgy Fed: bridge between ActivityPub (Fediverse), ATProto/Bluesky and IndieWeb."
|
||||
license: "Infinito.Nexus NonCommercial License"
|
||||
license_url: "https://s.infinito.nexus/license"
|
||||
company: |
|
||||
Kevin Veen-Birkenbach
|
||||
Consulting & Coaching Solutions
|
||||
https://www.veen.world
|
||||
min_ansible_version: "2.9"
|
||||
platforms:
|
||||
- name: Linux
|
||||
versions:
|
||||
- all
|
||||
galaxy_tags:
|
||||
- update
|
||||
- pkgmgr
|
||||
- pkgmgr
|
||||
- system
|
||||
- activitypub
|
||||
- bluesky
|
||||
- atproto
|
||||
- fediverse
|
||||
- bridge
|
||||
repository: "https://s.infinito.nexus/code"
|
||||
issue_tracker_url: "https://s.infinito.nexus/issues"
|
||||
documentation: "https://docs.infinito.nexus"
|
||||
dependencies: []
|
||||
documentation: "https://fed.brid.gy/docs"
|
||||
logo:
|
||||
class: "fa-solid fa-bridge"
|
||||
dependencies: []
|
0
roles/web-app-bridgy-fed/schema/main.yml
Normal file
0
roles/web-app-bridgy-fed/schema/main.yml
Normal file
9
roles/web-app-bridgy-fed/tasks/01_core.yml
Normal file
9
roles/web-app-bridgy-fed/tasks/01_core.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
- name: "Load docker and front proxy for {{ application_id }}"
|
||||
include_role:
|
||||
name: sys-stk-full-stateless
|
||||
|
||||
- name: "Include front proxy for {{ container_hostname}}:{{ ports.localhost.http[application_id] }}"
|
||||
include_role:
|
||||
name: sys-stk-front-proxy
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
3
roles/web-app-bridgy-fed/tasks/main.yml
Normal file
3
roles/web-app-bridgy-fed/tasks/main.yml
Normal file
@@ -0,0 +1,3 @@
|
||||
- name: "Include core routines for '{{ application_id }}'"
|
||||
include_tasks: "01_core.yml"
|
||||
when: run_once_web_app_bridgy_fed is not defined
|
20
roles/web-app-bridgy-fed/templates/docker-compose.yml.j2
Normal file
20
roles/web-app-bridgy-fed/templates/docker-compose.yml.j2
Normal file
@@ -0,0 +1,20 @@
|
||||
{% include 'roles/docker-compose/templates/base.yml.j2' %}
|
||||
|
||||
application:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
BRIDGY_REPO_URL: "{{ BRIDGY_REPO_URL }}"
|
||||
BRIDGY_REPO_BRANCH: "{{ BRIDGY_REPO_BRANCH }}"
|
||||
CONTAINER_PORT: "{{ container_port | string }}"
|
||||
image: "{{ BRIDGY_IMAGE }}:{{ BRIDGY_VERSION }}"
|
||||
container_name: "{{ BRIDGY_CONTAINER }}"
|
||||
hostname: "{{ container_hostname }}"
|
||||
ports:
|
||||
- "127.0.0.1:{{ http_port }}:{{ container_port }}"
|
||||
{% include 'roles/docker-container/templates/healthcheck/tcp.yml.j2' %}
|
||||
{% include 'roles/docker-container/templates/base.yml.j2' %}
|
||||
{% include 'roles/docker-container/templates/networks.yml.j2' %}
|
||||
|
||||
{% include 'roles/docker-compose/templates/networks.yml.j2' %}
|
13
roles/web-app-bridgy-fed/templates/env.j2
Normal file
13
roles/web-app-bridgy-fed/templates/env.j2
Normal file
@@ -0,0 +1,13 @@
|
||||
# Flask / Gunicorn basics
|
||||
FLASK_ENV="{{ ENVIRONMENT }}"
|
||||
PORT="{{ container_port }}"
|
||||
BRIDGY_ADMIN_EMAIL="{{ BRIDGY_ADMIN_EMAIL }}"
|
||||
|
||||
# Bridgy Fed upstream knobs (see README @ GitHub)
|
||||
BRIDGY_APPVIEW_HOST="{{ BRIDGY_APPVIEW_HOST }}"
|
||||
BRIDGY_PLC_HOST="{{ BRIDGY_PLC_HOST }}"
|
||||
BRIDGY_BGS_HOST="{{ BRIDGY_BGS_HOST }}"
|
||||
BRIDGY_PDS_HOST="{{ BRIDGY_PDS_HOST }}"
|
||||
|
||||
# Optional:
|
||||
# GUNICORN_CMD_ARGS="--log-level info"
|
25
roles/web-app-bridgy-fed/vars/main.yml
Normal file
25
roles/web-app-bridgy-fed/vars/main.yml
Normal file
@@ -0,0 +1,25 @@
|
||||
# General
|
||||
application_id: "web-app-bridgy-fed"
|
||||
|
||||
# Container
|
||||
container_port: 8080
|
||||
domain: "{{ container_hostname }}"
|
||||
http_port: "{{ ports.localhost.http[application_id] }}"
|
||||
container_hostname: "{{ domains | get_domain(application_id) }}"
|
||||
|
||||
# App container
|
||||
BRIDGY_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.application.name') }}"
|
||||
BRIDGY_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.application.image') }}"
|
||||
BRIDGY_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.application.version')}}"
|
||||
|
||||
BRIDGY_ADMIN_EMAIL: "{{ users.administrator.email }}"
|
||||
|
||||
# Source
|
||||
BRIDGY_REPO_URL: "{{ applications | get_app_conf(application_id, 'docker.services.application.repository') }}"
|
||||
BRIDGY_REPO_BRANCH: "{{ applications | get_app_conf(application_id, 'docker.services.application.branch') }}"
|
||||
|
||||
# Runtime env defaults for Bridgy Fed (see upstream README)
|
||||
BRIDGY_APPVIEW_HOST: "api.bsky.app"
|
||||
BRIDGY_PLC_HOST: "plc.directory"
|
||||
BRIDGY_BGS_HOST: "bsky.network"
|
||||
BRIDGY_PDS_HOST: "atproto.brid.gy"
|
@@ -77,23 +77,16 @@
|
||||
}}
|
||||
include_tasks: _update.yml
|
||||
|
||||
- name: "Update REALM mail settings"
|
||||
- name: "Update REALM mail settings from realm dictionary (SPOT)"
|
||||
include_tasks: _update.yml
|
||||
vars:
|
||||
kc_object_kind: "realm"
|
||||
kc_object_kind: "realm"
|
||||
kc_lookup_field: "id"
|
||||
kc_lookup_value: "{{ KEYCLOAK_REALM }}"
|
||||
kc_desired:
|
||||
smtpServer:
|
||||
from: "no-reply@{{ DEFAULT_SYSTEM_EMAIL.DOMAIN }}"
|
||||
fromDisplayName: "{{ SOFTWARE_NAME | default('Infinito.Nexus') }}"
|
||||
host: "{{ DEFAULT_SYSTEM_EMAIL.HOST }}"
|
||||
port: "{{ DEFAULT_SYSTEM_EMAIL.PORT }}"
|
||||
# Keycloak expects strings "true"/"false"
|
||||
ssl: "{{ 'true' if not DEFAULT_SYSTEM_EMAIL.START_TLS and DEFAULT_SYSTEM_EMAIL.TLS else 'false' }}"
|
||||
starttls: "{{ 'true' if DEFAULT_SYSTEM_EMAIL.START_TLS else 'false' }}"
|
||||
user: "{{ DEFAULT_SYSTEM_EMAIL.USER | default('') }}"
|
||||
password: "{{ DEFAULT_SYSTEM_EMAIL.PASSWORD | default('') }}"
|
||||
smtpServer: "{{ KEYCLOAK_DICTIONARY_REALM.smtpServer | default({}, true) }}"
|
||||
kc_merge_path: "smtpServer"
|
||||
no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
|
||||
|
||||
- include_tasks: 05_rbac_client_scope.yml
|
||||
|
||||
|
@@ -1443,20 +1443,7 @@
|
||||
"xXSSProtection": "1; mode=block",
|
||||
"strictTransportSecurity": "max-age=31536000; includeSubDomains"
|
||||
},
|
||||
"smtpServer": {
|
||||
"password": "{{ users['no-reply'].mailu_token }}",
|
||||
"replyToDisplayName": "",
|
||||
"starttls": "{{ SYSTEM_EMAIL.START_TLS | lower }}",
|
||||
"auth": "true",
|
||||
"port": "{{ SYSTEM_EMAIL.PORT }}",
|
||||
"replyTo": "",
|
||||
"host": "{{ SYSTEM_EMAIL.HOST }}",
|
||||
"from": "{{ users['no-reply'].email }}",
|
||||
"fromDisplayName": "Keycloak Authentification System - {{ KEYCLOAK_DOMAIN | upper }}",
|
||||
"envelopeFrom": "",
|
||||
"ssl": "true",
|
||||
"user": "{{ users['no-reply'].email }}"
|
||||
},
|
||||
{%- include "smtp_server.json.j2" -%},
|
||||
"eventsEnabled": false,
|
||||
"eventsListeners": [
|
||||
"jboss-logging"
|
||||
|
14
roles/web-app-keycloak/templates/import/smtp_server.json.j2
Normal file
14
roles/web-app-keycloak/templates/import/smtp_server.json.j2
Normal file
@@ -0,0 +1,14 @@
|
||||
"smtpServer": {
|
||||
"password": "{{ users['no-reply'].mailu_token }}",
|
||||
"replyToDisplayName": "",
|
||||
"starttls": "{{ SYSTEM_EMAIL.START_TLS | lower }}",
|
||||
"auth": "true",
|
||||
"port": "{{ SYSTEM_EMAIL.PORT }}",
|
||||
"replyTo": "",
|
||||
"host": "{{ SYSTEM_EMAIL.HOST }}",
|
||||
"from": "{{ users['no-reply'].email }}",
|
||||
"fromDisplayName": "Keycloak Authentication System - {{ KEYCLOAK_DOMAIN | upper }}",
|
||||
"envelopeFrom": "",
|
||||
"ssl": "{{ (SYSTEM_EMAIL.TLS and not SYSTEM_EMAIL.START_TLS) | ternary('true','false') }}",
|
||||
"user": "{{ users['no-reply'].email }}"
|
||||
}
|
@@ -30,6 +30,14 @@
|
||||
chdir: "{{ docker_compose.directories.instance }}"
|
||||
when: "'No relations found.' in db_tables.stdout"
|
||||
|
||||
- name: "Listmonk | run DB/schema upgrade (non-interactive)"
|
||||
ansible.builtin.shell: |
|
||||
set -o pipefail
|
||||
echo "y" | docker compose run -T application ./listmonk --upgrade
|
||||
args:
|
||||
chdir: "{{ docker_compose.directories.instance }}"
|
||||
when: MODE_UPDATE | bool
|
||||
|
||||
- name: Build OIDC settings JSON
|
||||
set_fact:
|
||||
oidc_settings_json: >-
|
||||
@@ -73,3 +81,4 @@
|
||||
no_log: "{{ MASK_CREDENTIALS_IN_LOGS | bool }}"
|
||||
async: "{{ ASYNC_TIME if ASYNC_ENABLED | bool else omit }}"
|
||||
poll: "{{ ASYNC_POLL if ASYNC_ENABLED | bool else omit }}"
|
||||
|
||||
|
@@ -1,55 +0,0 @@
|
||||
# Administration
|
||||
|
||||
## 🗑️ Cleanup (Remove Instance & Volumes)
|
||||
```bash
|
||||
cd {{ PATH_DOCKER_COMPOSE_INSTANCES }}magento/
|
||||
docker compose down
|
||||
docker volume rm magento_data
|
||||
cd {{ PATH_DOCKER_COMPOSE_INSTANCES }} && rm -vR {{ PATH_DOCKER_COMPOSE_INSTANCES }}magento
|
||||
```
|
||||
|
||||
## 🔍 Access Container Shell
|
||||
```bash
|
||||
docker compose exec -it application /bin/bash
|
||||
```
|
||||
|
||||
## 🧰 Common Magento CLI Tasks
|
||||
```bash
|
||||
# Reindex
|
||||
docker compose exec -it application bin/magento indexer:reindex
|
||||
|
||||
# Flush caches
|
||||
docker compose exec -it application bin/magento cache:flush
|
||||
|
||||
# Enable maintenance mode
|
||||
docker compose exec -it application bin/magento maintenance:enable
|
||||
|
||||
# Disable maintenance mode
|
||||
docker compose exec -it application bin/magento maintenance:disable
|
||||
|
||||
# Recompile DI (when switching modes)
|
||||
docker compose exec -it application bin/magento setup:di:compile
|
||||
|
||||
# Deploy static content (example for English/German)
|
||||
docker compose exec -it application bin/magento setup:static-content:deploy en_US de_DE -f
|
||||
```
|
||||
|
||||
## 🚀 Performance
|
||||
```bash
|
||||
# Production mode
|
||||
docker compose exec -it application bin/magento deploy:mode:set production
|
||||
|
||||
# Developer mode
|
||||
docker compose exec -it application bin/magento deploy:mode:set developer
|
||||
```
|
||||
|
||||
## 🔐 Admin User
|
||||
```bash
|
||||
# Create another admin (example)
|
||||
docker compose exec -it application bin/magento admin:user:create \
|
||||
--admin-user="admin2" \
|
||||
--admin-password="ChangeMe_12345" \
|
||||
--admin-email="{{ users.administrator.email }}" \
|
||||
--admin-firstname="Admin" \
|
||||
--admin-lastname="User"
|
||||
```
|
@@ -1,30 +0,0 @@
|
||||
# ⚙️ Installation & First Run
|
||||
|
||||
## 1) Prepare DNS & Ports
|
||||
Ensure a canonical domain is mapped (e.g. `shop.{{ PRIMARY_DOMAIN }}`) and a free localhost port in `group_vars/all/10_ports.yml`:
|
||||
```
|
||||
web-app-magento: 80xx
|
||||
```
|
||||
|
||||
## 2) Seed Credentials
|
||||
Provide (at minimum) an admin password in your inventory (vault recommended):
|
||||
```yaml
|
||||
applications:
|
||||
web-app-magento:
|
||||
credentials:
|
||||
admin_password: "use-a-strong-secret"
|
||||
```
|
||||
The admin username/email are taken from `users.administrator.*`.
|
||||
|
||||
## 3) Deploy
|
||||
Run the Infinito.Nexus playbook for your host(s). The role will:
|
||||
- Start OpenSearch (single node)
|
||||
- Start MariaDB (if `central_database` is disabled, the app-local DB is used instead)
|
||||
- Start Magento application container
|
||||
- Wire environment via `templates/env.j2`
|
||||
|
||||
## 4) Verify
|
||||
Open your domain (e.g. `https://shop.{{ PRIMARY_DOMAIN }}`) and complete any remaining onboarding steps in the admin panel.
|
||||
|
||||
**Admin Panel:** `{{ domains | get_url('web-app-magento', WEB_PROTOCOL) }}/admin`
|
||||
(Default path can vary; set a custom `ADMINURI` later via `bin/magento setup:config:set` if desired.)
|
2
roles/web-app-magento/TODO.md
Normal file
2
roles/web-app-magento/TODO.md
Normal file
@@ -0,0 +1,2 @@
|
||||
# To-dos
|
||||
- Finish implementation
|
@@ -1,19 +0,0 @@
|
||||
# 🔼 Upgrade
|
||||
|
||||
> Always back up the database and the `magento_data` volume before upgrades.
|
||||
|
||||
1. Update images/versions in the application config (`roles/web-app-magento/config/main.yml` or inventory overrides).
|
||||
2. Recreate containers:
|
||||
```bash
|
||||
cd {{ PATH_DOCKER_COMPOSE_INSTANCES }}magento/
|
||||
docker compose pull
|
||||
docker compose up -d --remove-orphans
|
||||
```
|
||||
3. Run upgrade routines:
|
||||
```bash
|
||||
docker compose exec -it application bin/magento maintenance:enable
|
||||
docker compose exec -it application bin/magento setup:upgrade
|
||||
docker compose exec -it application bin/magento setup:di:compile
|
||||
docker compose exec -it application bin/magento cache:flush
|
||||
docker compose exec -it application bin/magento maintenance:disable
|
||||
```
|
@@ -1,15 +0,0 @@
|
||||
# 👤 User Administration
|
||||
|
||||
- Access the admin panel at:
|
||||
`{{ domains | get_url('web-app-magento', WEB_PROTOCOL) }}/admin`
|
||||
*(or your custom admin path if configured)*
|
||||
|
||||
- New admin accounts can be created via the web UI or CLI:
|
||||
```bash
|
||||
docker compose exec -it application bin/magento admin:user:create \
|
||||
--admin-user="john" \
|
||||
--admin-password="SuperSecret_12345" \
|
||||
--admin-email="john@example.com" \
|
||||
--admin-firstname="John" \
|
||||
--admin-lastname="Doe"
|
||||
```
|
@@ -2,7 +2,7 @@ features:
|
||||
matomo: true
|
||||
css: true
|
||||
desktop: true
|
||||
central_database: true # Use platform DB (recommended). If false, app-local DB container is enabled.
|
||||
central_database: false # Impossible to use central database due to strict database checking
|
||||
oidc: false # Magento SSO via OIDC requires extensions; not wired by default
|
||||
logout: true
|
||||
ldap: false
|
||||
@@ -15,23 +15,29 @@ server:
|
||||
- "shop.{{ PRIMARY_DOMAIN }}"
|
||||
aliases:
|
||||
- "magento.{{ PRIMARY_DOMAIN }}"
|
||||
|
||||
docker:
|
||||
services:
|
||||
application:
|
||||
image: "bitnami/magento"
|
||||
version: "latest"
|
||||
name: "magento"
|
||||
php:
|
||||
image: "markoshust/magento-php"
|
||||
version: "8.2-fpm"
|
||||
name: "magento-php"
|
||||
backup:
|
||||
no_stop_required: true
|
||||
nginx:
|
||||
image: "markoshust/magento-nginx"
|
||||
version: "latest"
|
||||
name: "magento-nginx"
|
||||
backup:
|
||||
no_stop_required: true
|
||||
database:
|
||||
enabled: true # Central DB recommended; if disabled, app-local DB is created
|
||||
enabled: true
|
||||
version: "11.4"
|
||||
redis:
|
||||
enabled: true
|
||||
enabled: true
|
||||
search:
|
||||
enabled: true
|
||||
image: "opensearchproject/opensearch"
|
||||
version: "latest"
|
||||
name: "magento-opensearch"
|
||||
enabled: true
|
||||
image: "opensearchproject/opensearch"
|
||||
version: "latest"
|
||||
name: "magento-opensearch"
|
||||
volumes:
|
||||
data: "magento_data"
|
||||
data: "magento_data"
|
||||
|
@@ -1,6 +1,7 @@
|
||||
credentials:
|
||||
admin_password:
|
||||
description: "Password for the initial Magento admin account"
|
||||
algorithm: "alphanumeric"
|
||||
validation:
|
||||
min_length: 12
|
||||
adobe_public_key:
|
||||
description: "Adobe/Magento Marketplace Public Key"
|
||||
algorithm: "plain"
|
||||
adobe_private_key:
|
||||
description: "Adobe/Magento Marketplace Private Key"
|
||||
algorithm: "plain"
|
||||
|
@@ -1,6 +1,51 @@
|
||||
- name: "load docker, db/redis/proxy for {{ application_id }}"
|
||||
include_role:
|
||||
name: sys-stk-full-stateful
|
||||
vars:
|
||||
docker_compose_flush_handlers: true
|
||||
|
||||
- name: flush docker service
|
||||
meta: flush_handlers
|
||||
- name: "Bootstrap Magento 2.4.8 source (exact working variant)"
|
||||
command: >
|
||||
docker exec
|
||||
-e COMPOSER_AUTH='{"http-basic":{"repo.magento.com":{"username":"{{ MAGENTO_REPO_PUBLIC_KEY }}","password":"{{ MAGENTO_REPO_PRIVATE_KEY }}"}}}'
|
||||
-e COMPOSER_HOME=/tmp/composer
|
||||
-e COMPOSER_CACHE_DIR=/tmp/composer/cache
|
||||
--user {{ MAGENTO_USER }}
|
||||
{{ MAGENTO_PHP_CONTAINER }} bash -lc 'set -e
|
||||
mkdir -p /tmp/composer/cache
|
||||
cd /var/www/html
|
||||
composer create-project --no-interaction --no-progress --repository-url=https://repo.magento.com/ magento/project-community-edition=2.4.8 .
|
||||
mkdir -p var pub/static pub/media app/etc
|
||||
chmod -R 775 var pub/static pub/media app/etc
|
||||
'
|
||||
args:
|
||||
creates: "{{ [ (MAGENTO_VOLUME | docker_volume_path), 'bin/magento' ] | path_join }}"
|
||||
|
||||
- name: "Run Magento setup:install (in container)"
|
||||
command: >
|
||||
docker exec --user {{ MAGENTO_USER }} {{ MAGENTO_PHP_CONTAINER }} bash -lc "
|
||||
cd /var/www/html && bin/magento setup:install \
|
||||
--base-url='{{ MAGENTO_URL }}/' \
|
||||
--db-host=\"$MYSQL_HOST\" \
|
||||
--db-name=\"$MYSQL_DATABASE\" \
|
||||
--db-user=\"$MYSQL_USER\" \
|
||||
--db-password=\"$MYSQL_PASSWORD\" \
|
||||
--skip-db-validation \
|
||||
--db-engine=mysql \
|
||||
--search-engine='opensearch' \
|
||||
--opensearch-host=\"$OPENSEARCH_HOST\" \
|
||||
--opensearch-port=\"$OPENSEARCH_PORT_NUMBER\" \
|
||||
--admin-firstname=\"$MAGENTO_ADMIN_FIRSTNAME\" \
|
||||
--admin-lastname=\"$MAGENTO_ADMIN_LASTNAME\" \
|
||||
--admin-email=\"$MAGENTO_ADMIN_EMAIL\" \
|
||||
--admin-user=\"$MAGENTO_ADMIN_USERNAME\" \
|
||||
--admin-password=\"$MAGENTO_ADMIN_PASSWORD\""
|
||||
args:
|
||||
creates: "{{ [ (MAGENTO_VOLUME | docker_volume_path), 'app/etc/env.php' ] | path_join }}"
|
||||
register: magento_install
|
||||
changed_when: >
|
||||
(magento_install.stdout is defined and
|
||||
('Magento installation complete' in magento_install.stdout
|
||||
or 'successfully installed' in magento_install.stdout))
|
||||
|
||||
- include_tasks: utils/run_once.yml
|
||||
|
35
roles/web-app-magento/tasks/02_runtime_conf.yml
Normal file
35
roles/web-app-magento/tasks/02_runtime_conf.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
- name: Assert required vars (no defaults anywhere)
|
||||
assert:
|
||||
that:
|
||||
- MAGENTO_DOMAIN is defined and MAGENTO_DOMAIN | length > 0
|
||||
- MAGENTO_NGINX_PORT is defined
|
||||
- MAGENTO_PHP_HOST is defined and MAGENTO_PHP_HOST | length > 0
|
||||
- MAGENTO_PHP_PORT is defined
|
||||
- docker_compose.directories.config is defined and docker_compose.directories.config | length > 0
|
||||
fail_msg: "Missing one of: MAGENTO_DOMAIN, MAGENTO_NGINX_PORT, MAGENTO_PHP_HOST, MAGENTO_PHP_PORT, docker_compose.directories.config"
|
||||
|
||||
- name: Ensure subdirs exist (config root exists already)
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: '0755'
|
||||
loop:
|
||||
- "{{ MAGENTO_NGINX_DIR }}"
|
||||
- "{{ MAGENTO_PHP_DIR }}"
|
||||
|
||||
- name: Render nginx main config (no TLS; single source of truth)
|
||||
template:
|
||||
src: "nginx.conf.j2"
|
||||
dest: "{{ MAGENTO_NGINX_CONF_PATH }}"
|
||||
mode: '0644'
|
||||
force: true
|
||||
notify: docker compose up
|
||||
|
||||
- name: Render php-fpm pool override (TCP listen; clear_env=no)
|
||||
template:
|
||||
src: "php-fpm-zz-docker.conf.j2"
|
||||
dest: "{{ MAGENTO_PHP_ZZ_CONF_PATH }}"
|
||||
mode: '0644'
|
||||
force: true
|
||||
notify: docker compose up
|
@@ -2,8 +2,3 @@
|
||||
- name: "construct {{ role_name }}"
|
||||
include_tasks: 01_core.yml
|
||||
when: run_once_web_app_magento is not defined
|
||||
|
||||
- name: run the docker magento tasks once
|
||||
set_fact:
|
||||
run_once_web_app_magento: true
|
||||
when: run_once_web_app_magento is not defined
|
||||
|
@@ -1,24 +1,40 @@
|
||||
{% include 'roles/docker-compose/templates/base.yml.j2' %}
|
||||
|
||||
application:
|
||||
{% set container_port = 8080 %}
|
||||
{% include 'roles/docker-container/templates/base.yml.j2' %}
|
||||
image: "{{ magento_image }}:{{ magento_version }}"
|
||||
container_name: "{{ magento_name }}"
|
||||
nginx:
|
||||
{% set container_port = 8000 %}
|
||||
image: "{{ MAGENTO_NGINX_IMAGE }}:{{ MAGENTO_NGINX_VERSION }}"
|
||||
container_name: "{{ MAGENTO_NGINX_CONTAINER }}"
|
||||
environment:
|
||||
PHP_HOST: "php"
|
||||
PHP_PORT: "9000"
|
||||
depends_on:
|
||||
- php
|
||||
- search
|
||||
volumes:
|
||||
- "data:/var/www/html"
|
||||
ports:
|
||||
- "127.0.0.1:{{ ports.localhost.http[application_id] }}:{{ container_port }}"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "nginx -t >/dev/null 2>&1 && { grep -q ':1F40' /proc/net/tcp || grep -q ':1F40' /proc/net/tcp6; }"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
{% include 'roles/docker-container/templates/networks.yml.j2' %}
|
||||
|
||||
php:
|
||||
{% include 'roles/docker-container/templates/base.yml.j2' %}
|
||||
image: "{{ MAGENTO_PHP_IMAGE }}:{{ MAGENTO_PHP_VERSION }}"
|
||||
container_name: "{{ MAGENTO_PHP_CONTAINER }}"
|
||||
volumes:
|
||||
- "data:/bitnami/magento"
|
||||
{% include 'roles/docker-container/templates/depends_on/dmbs_excl.yml.j2' %}
|
||||
depends_on:
|
||||
- search
|
||||
{% include 'roles/docker-container/templates/healthcheck/tcp.yml.j2' %}
|
||||
- "data:/var/www/html"
|
||||
{% include 'roles/docker-container/templates/depends_on/dmbs_incl.yml.j2' %}
|
||||
search:
|
||||
condition: service_started
|
||||
{% include 'roles/docker-container/templates/networks.yml.j2' %}
|
||||
|
||||
search:
|
||||
{% set container_port = 9200 %}
|
||||
image: "{{ magento_search_image }}:{{ magento_search_version }}"
|
||||
container_name: "{{ magento_search_name }}"
|
||||
image: "{{ MAGENTO_SEARCH_IMAGE }}:{{ MAGENTO_SEARCH_VERSION }}"
|
||||
container_name: "{{ MAGENTO_SEARCH_CONTAINER }}"
|
||||
{% include 'roles/docker-container/templates/base.yml.j2' %}
|
||||
environment:
|
||||
- discovery.type=single-node
|
||||
@@ -29,6 +45,7 @@
|
||||
|
||||
{% include 'roles/docker-compose/templates/volumes.yml.j2' %}
|
||||
data:
|
||||
name: {{ magento_data }}
|
||||
name: {{ MAGENTO_VOLUME }}
|
||||
|
||||
{% include 'roles/docker-compose/templates/networks.yml.j2' %}
|
||||
|
||||
|
@@ -3,30 +3,32 @@
|
||||
# - https://github.com/bitnami/containers/tree/main/bitnami/magento
|
||||
|
||||
# Host & URLs
|
||||
MAGENTO_HOST="{{ domains | get_domain(application_id) }}"
|
||||
MAGENTO_BASE_URL="{{ domains | get_url(application_id, WEB_PROTOCOL) }}/"
|
||||
MAGENTO_ENABLE_HTTPS={{ (WEB_PORT == 443) | string | lower }}
|
||||
MAGENTO_URL="{{ MAGENTO_URL }}"
|
||||
MAGENTO_BACKEND_FRONTNAME="admin"
|
||||
MAGENTO_USE_SECURE={{ (WEB_PORT == 443) | ternary('1','0') }}
|
||||
MAGENTO_BASE_URL_SECURE={{ (WEB_PORT == 443) | ternary('1','0') }}
|
||||
MAGENTO_USE_SECURE_ADMIN={{ (WEB_PORT == 443) | ternary('1','0') }}
|
||||
|
||||
# Admin (seed from global administrator)
|
||||
MAGENTO_USERNAME="{{ users.administrator.username }}"
|
||||
MAGENTO_PASSWORD="{{ applications | get_app_conf(application_id, 'credentials.admin_password') }}"
|
||||
MAGENTO_EMAIL="{{ users.administrator.email }}"
|
||||
MAGENTO_FIRST_NAME="{{ users.administrator.firstname | default('Admin') }}"
|
||||
MAGENTO_LAST_NAME="{{ users.administrator.lastname | default('User') }}"
|
||||
MAGENTO_ADMIN_USERNAME="{{ users.administrator.username }}"
|
||||
MAGENTO_ADMIN_PASSWORD="{{ users.administrator.password }}"
|
||||
MAGENTO_ADMIN_EMAIL="{{ users.administrator.email }}"
|
||||
MAGENTO_ADMIN_FIRSTNAME="{{ users.administrator.firstname | default('Admin') }}"
|
||||
MAGENTO_ADMIN_LASTNAME="{{ users.administrator.lastname | default('User') }}"
|
||||
|
||||
# Database (central DB preferred)
|
||||
MARIADB_HOST="{{ database_host }}"
|
||||
MARIADB_PORT_NUMBER="{{ database_port }}"
|
||||
MAGENTO_DATABASE_USER="{{ database_username }}"
|
||||
MAGENTO_DATABASE_PASSWORD="{{ database_password }}"
|
||||
MAGENTO_DATABASE_NAME="{{ database_name }}"
|
||||
MYSQL_HOST="{{ database_host }}"
|
||||
MYSQL_PORT="{{ database_port }}"
|
||||
MYSQL_USER="{{ database_username }}"
|
||||
MYSQL_PASSWORD="{{ database_password }}"
|
||||
MYSQL_DATABASE="{{ database_name }}"
|
||||
|
||||
# Search (Magento 2.4+)
|
||||
MAGENTO_SEARCH_ENGINE="opensearch"
|
||||
OPENSEARCH_HOST="search"
|
||||
OPENSEARCH_PORT_NUMBER="9200"
|
||||
OPENSEARCH_INITIAL_ADMIN_PASSWORD="{{ users.administrator.password }}"
|
||||
|
||||
# SMTP
|
||||
# SMTP (post-install you’ll wire these in Magento admin or env.php)
|
||||
SMTP_HOST="{{ SYSTEM_EMAIL.HOST }}"
|
||||
SMTP_PORT="{{ SYSTEM_EMAIL.PORT }}"
|
||||
SMTP_USER="{{ users['no-reply'].email }}"
|
||||
@@ -34,6 +36,6 @@ SMTP_PASSWORD="{{ users['no-reply'].mailu_token }}"
|
||||
SMTP_PROTOCOL={{ SYSTEM_EMAIL.TLS | ternary('tls','ssl') }}
|
||||
|
||||
# Misc
|
||||
ALLOW_EMPTY_PASSWORD="no"
|
||||
BITNAMI_DEBUG="false"
|
||||
PHP_MEMORY_LIMIT="1024M"
|
||||
PHP_MEMORY_LIMIT="768M"
|
||||
|
||||
APACHE_SERVERNAME={{ MAGENTO_DOMAIN }}
|
47
roles/web-app-magento/templates/nginx.conf.j2
Normal file
47
roles/web-app-magento/templates/nginx.conf.j2
Normal file
@@ -0,0 +1,47 @@
|
||||
worker_processes auto;
|
||||
|
||||
events { worker_connections 1024; }
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
sendfile on;
|
||||
keepalive_timeout 65;
|
||||
|
||||
access_log /dev/stdout;
|
||||
error_log /dev/stderr;
|
||||
|
||||
upstream fastcgi_backend {
|
||||
server {{ MAGENTO_PHP_HOST }}:{{ MAGENTO_PHP_PORT }};
|
||||
}
|
||||
|
||||
server {
|
||||
listen {{ MAGENTO_NGINX_PORT }};
|
||||
server_name {{ MAGENTO_DOMAIN }};
|
||||
|
||||
set $MAGE_ROOT /var/www/html;
|
||||
root $MAGE_ROOT/pub;
|
||||
index index.php;
|
||||
|
||||
location / {
|
||||
try_files $uri $uri/ /index.php?$args;
|
||||
}
|
||||
|
||||
location ~ \.php$ {
|
||||
fastcgi_split_path_info ^(.+\.php)(/.+)$;
|
||||
include fastcgi_params;
|
||||
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
|
||||
fastcgi_param PATH_INFO $fastcgi_path_info;
|
||||
fastcgi_index index.php;
|
||||
fastcgi_pass fastcgi_backend;
|
||||
fastcgi_read_timeout 300;
|
||||
fastcgi_connect_timeout 5s;
|
||||
}
|
||||
|
||||
location ~* ^/(app|var|lib|dev|update|vendor|node_modules|\.git|\.svn)/ { deny all; }
|
||||
location ~ /\. { deny all; }
|
||||
|
||||
error_page 404 403 = /errors/404.php;
|
||||
}
|
||||
}
|
15
roles/web-app-magento/templates/php-fpm-zz-docker.conf.j2
Normal file
15
roles/web-app-magento/templates/php-fpm-zz-docker.conf.j2
Normal file
@@ -0,0 +1,15 @@
|
||||
[global]
|
||||
error_log = /proc/self/fd/2
|
||||
|
||||
[www]
|
||||
listen = 0.0.0.0:{{ MAGENTO_PHP_PORT }}
|
||||
clear_env = no
|
||||
|
||||
pm = dynamic
|
||||
pm.max_children = 10
|
||||
pm.start_servers = 2
|
||||
pm.min_spare_servers = 1
|
||||
pm.max_spare_servers = 5
|
||||
|
||||
access.log = /proc/self/fd/2
|
||||
catch_workers_output = yes
|
@@ -3,15 +3,43 @@ application_id: "web-app-magento"
|
||||
database_type: "mariadb"
|
||||
|
||||
# Magento (application container)
|
||||
magento_version: "{{ applications | get_app_conf(application_id, 'docker.services.application.version', True) | default('latest') }}"
|
||||
magento_image: "{{ applications | get_app_conf(application_id, 'docker.services.application.image', True) | default('bitnami/magento') }}"
|
||||
magento_name: "{{ applications | get_app_conf(application_id, 'docker.services.application.name', True) | default('magento') }}"
|
||||
magento_data: "{{ applications | get_app_conf(application_id, 'docker.volumes.data', True) | default('magento_data') }}"
|
||||
|
||||
# Search (OpenSearch)
|
||||
magento_search_version: "{{ applications | get_app_conf(application_id, 'docker.services.search.version', True) | default('latest') }}"
|
||||
magento_search_image: "{{ applications | get_app_conf(application_id, 'docker.services.search.image', True) | default('opensearchproject/opensearch') }}"
|
||||
magento_search_name: "{{ applications | get_app_conf(application_id, 'docker.services.search.name', True) | default('magento-opensearch') }}"
|
||||
## Meta
|
||||
MAGENTO_VOLUME: "{{ applications | get_app_conf(application_id, 'docker.volumes.data') }}"
|
||||
MAGENTO_USER: "app"
|
||||
|
||||
# Docker helpers
|
||||
docker_compose_flush_handlers: true
|
||||
## Address
|
||||
MAGENTO_DOMAIN: "{{ domains | get_domain(application_id) }}"
|
||||
MAGENTO_URL: "{{ domains | get_url(application_id, WEB_PROTOCOL) }}"
|
||||
|
||||
## Runtime config paths (used by 02_runtime_conf.yml)
|
||||
# Root config directory comes from the docker_compose subsystem (already asserted)
|
||||
MAGENTO_NGINX_DIR: "{{ [ docker_compose.directories.config, 'nginx' ] | path_join }}"
|
||||
MAGENTO_PHP_DIR: "{{ [ docker_compose.directories.config, 'php' ] | path_join }}"
|
||||
MAGENTO_NGINX_CONF_PATH: "{{ [ MAGENTO_NGINX_DIR, 'nginx.conf' ] | path_join }}"
|
||||
MAGENTO_PHP_ZZ_CONF_PATH: "{{ [ MAGENTO_PHP_DIR, 'php-fpm-zz-docker.conf' ] | path_join }}"
|
||||
|
||||
## Intra-container wiring (nginx -> php-fpm) and listen port
|
||||
# These values mirror the docker-compose template (environment variables and exposed port)
|
||||
MAGENTO_PHP_HOST: "php"
|
||||
MAGENTO_PHP_PORT: 9000
|
||||
MAGENTO_NGINX_PORT: 8000
|
||||
|
||||
## Search (OpenSearch)
|
||||
MAGENTO_SEARCH_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.search.version') }}"
|
||||
MAGENTO_SEARCH_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.search.image') }}"
|
||||
MAGENTO_SEARCH_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.search.name') }}"
|
||||
|
||||
## PHP
|
||||
MAGENTO_PHP_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.php.version') }}"
|
||||
MAGENTO_PHP_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.php.image') }}"
|
||||
MAGENTO_PHP_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.php.name') }}"
|
||||
|
||||
## NGINX
|
||||
MAGENTO_NGINX_VERSION: "{{ applications | get_app_conf(application_id, 'docker.services.nginx.version') }}"
|
||||
MAGENTO_NGINX_IMAGE: "{{ applications | get_app_conf(application_id, 'docker.services.nginx.image') }}"
|
||||
MAGENTO_NGINX_CONTAINER: "{{ applications | get_app_conf(application_id, 'docker.services.nginx.name') }}"
|
||||
|
||||
## Credentials
|
||||
MAGENTO_REPO_PUBLIC_KEY: "{{ applications | get_app_conf(application_id, 'credentials.adobe_public_key') }}"
|
||||
MAGENTO_REPO_PRIVATE_KEY: "{{ applications | get_app_conf(application_id, 'credentials.adobe_private_key') }}"
|
||||
|
@@ -6,7 +6,13 @@
|
||||
LOCAL_DOMAIN={{ domains | get_domain(application_id) }}
|
||||
ALTERNATE_DOMAINS="{{ domains['web-app-mastodon'][1:] | join(',') }}"
|
||||
SINGLE_USER_MODE={{ applications | get_app_conf(application_id, 'single_user_mode') }}
|
||||
ALLOWED_PRIVATE_ADDRESSES={{ MASTODON_ALLOWED_PRIVATE_ADDRESSES }}
|
||||
ALLOWED_PRIVATE_ADDRESSES="{{ MASTODON_ALLOWED_PRIVATE_ADDRESSES }}"
|
||||
|
||||
# Debug
|
||||
{% if MODE_DEBUG | bool %}
|
||||
RAILS_LOG_LEVEL=debug
|
||||
DEBUG=*
|
||||
{% endif %}
|
||||
|
||||
# Credentials
|
||||
|
||||
|
@@ -67,7 +67,6 @@
|
||||
notify: docker compose up
|
||||
|
||||
# Pull image when update is wished.
|
||||
# @todo This should be moved to update-docker
|
||||
- name: docker compose pull
|
||||
command:
|
||||
cmd: docker-compose -p "{{ MATRIX_PROJECT }}" pull
|
||||
|
@@ -1,4 +1,4 @@
|
||||
- name: "include role for {{ application_id }} to receive certs & do modification routines for '{{ domain }}:{{ port }}'"
|
||||
- name: "include role for {{ application_id }} to receive certs & do modification routines for '{{ NEXTCLOUD_DOMAIN }}:{{ NEXTCLOUD_PORT }}'"
|
||||
include_role:
|
||||
name: sys-util-csp-cert
|
||||
|
||||
|
@@ -28,8 +28,8 @@ class TestTreeIncludeRoleDependencies(unittest.TestCase):
|
||||
"sys-ctl-hlth-csp",
|
||||
"svc-db-postgres",
|
||||
"svc-db-mysql",
|
||||
"axb", # für a{{database_type}}b → a*b
|
||||
"ayyb", # für a{{database_type}}b → a*b
|
||||
"axb", # für a{{ database_type }}b → a*b
|
||||
"ayyb", # für a{{ database_type }}b → a*b
|
||||
"literal-role", # für reinen Literalnamen
|
||||
]
|
||||
for r in self.roles_to_create:
|
||||
@@ -46,15 +46,15 @@ class TestTreeIncludeRoleDependencies(unittest.TestCase):
|
||||
|
||||
- name: Pattern with literal + var suffix
|
||||
include_role:
|
||||
name: "svc-db-{{database_type}}"
|
||||
name: "svc-db-{{ database_type }}"
|
||||
|
||||
- name: Pattern with literal prefix/suffix around var
|
||||
include_role:
|
||||
name: "a{{database_type}}b"
|
||||
name: "a{{ database_type }}b"
|
||||
|
||||
- name: Pure variable only (should be ignored)
|
||||
include_role:
|
||||
name: "{{database_type}}"
|
||||
name: "{{ database_type }}"
|
||||
|
||||
- name: Pure literal include
|
||||
include_role:
|
||||
@@ -115,10 +115,10 @@ class TestTreeIncludeRoleDependencies(unittest.TestCase):
|
||||
expected = sorted([
|
||||
"sys-ctl-hlth-webserver", # aus loop
|
||||
"sys-ctl-hlth-csp", # aus loop
|
||||
"svc-db-postgres", # aus svc-db-{{database_type}}
|
||||
"svc-db-mysql", # aus svc-db-{{database_type}}
|
||||
"axb", # aus a{{database_type}}b
|
||||
"ayyb", # aus a{{database_type}}b
|
||||
"svc-db-postgres", # aus svc-db-{{ database_type }}
|
||||
"svc-db-mysql", # aus svc-db-{{ database_type }}
|
||||
"axb", # aus a{{ database_type }}b
|
||||
"ayyb", # aus a{{ database_type }}b
|
||||
"literal-role", # reiner Literalname
|
||||
])
|
||||
|
||||
@@ -129,8 +129,8 @@ class TestTreeIncludeRoleDependencies(unittest.TestCase):
|
||||
)
|
||||
self.assertEqual(deps, expected, "include_role dependencies mismatch")
|
||||
|
||||
# Sicherstellen, dass der pure Variable-Name "{{database_type}}" NICHT aufgenommen wurde
|
||||
self.assertNotIn("{{database_type}}", deps, "pure variable include should be ignored")
|
||||
# Sicherstellen, dass der pure Variable-Name "{{ database_type }}" NICHT aufgenommen wurde
|
||||
self.assertNotIn("{{ database_type }}", deps, "pure variable include should be ignored")
|
||||
|
||||
# Sicherstellen, dass im Original-meta der Producer-Role nichts geschrieben wurde
|
||||
original_tree_path = os.path.join(self.producer_path, "meta", "tree.json")
|
||||
|
Reference in New Issue
Block a user