Compare commits

10 commits: ebeb6d5c6b...1cdad04a93

- 1cdad04a93
- 1349ce9c19
- 10f4eb5817
- c197f28013
- b2a3ae6783
- 10e58eb990
- ba94509bca
- 787c171f65
- d8eb53f096
- 150a032988
@@ -1,5 +1,4 @@
 [defaults]
-interpreter_python = /usr/bin/python3
 roles_path = ./roles
 inventory = inventory/hosts.yml
 remote_tmp = /tmp/.ansible-${USER}
@@ -14,6 +14,10 @@ network_interfaces:
   - name: lan1
     type: ethernet
     mac_address: 0a:3f:5b:1c:d2:e4
+  - name: podman-gw
+    type: bridge
+    ipv4:
+      address: "{{ podman_gw_gateway }}/10"

 # NTP servers configuration
 # -------------------------
@@ -115,24 +119,24 @@ nfs_bind_addresses:

 # Podman configuration
 # --------------------
-podman_external_networks:
-  - name: immich
-    subnet: 172.20.0.0/16
-    gateway: 172.20.0.1
+podman_gw_gateway: 100.64.0.1
+podman_gw_subnet: 100.64.0.0/10

 # PostgreSQL configuration
 # ------------------------
 postgres_admin_password: "{{ vault_postgres_admin_password }}"
-postgres_bind: "127.0.0.1"
+postgres_bind: "127.0.0.1,{{ podman_gw_gateway }}"  # Comma-separated for PostgreSQL
 postgres_firewall_allowed_sources:
   - 127.0.0.0/8
+  - "{{ podman_gw_subnet }}"

 # Valkey configuration
 # --------------------
 valkey_admin_password: "{{ vault_valkey_admin_password }}"
-valkey_bind: "127.0.0.1"
+valkey_bind: "127.0.0.1 {{ podman_gw_gateway }}"  # Space-separated for Valkey
 valkey_firewall_allowed_sources:
   - 127.0.0.0/8
+  - "{{ podman_gw_subnet }}"

 # Valkey ACL users
 valkey_acl_users:
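A quick way to check the rewired binds after a deploy; a sketch, assuming `podman_gw_gateway` renders to `100.64.0.1` as set above:

```bash
# Sketch: both services should now listen on loopback plus the podman-gw gateway
# (assumes podman_gw_gateway renders to 100.64.0.1, per the hunk above)
ss -tlnp | grep -E ':(5432|6379)'
# expect 127.0.0.1:5432, 100.64.0.1:5432, 127.0.0.1:6379, 100.64.0.1:6379
```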
@@ -12,3 +12,4 @@ all:
     ansible_user: jgarcia
     ansible_become: true
     ansible_become_method: sudo
+    ansible_python_interpreter: /usr/bin/python3
playbook.yml (16 changes)

@@ -1,16 +0,0 @@
----
-# - hosts: all
-#   become: true
-#   roles:
-#     - role: networking
-#     - role: sshd
-#     - role: disks
-#     - role: wireguard
-#     - role: zsh
-#     - role: archlinux
-#     - role: podman
-
-- hosts: pinwheel
-  become: true
-  roles:
-    - role: sshd
roles/gitea/defaults/main.yml (new file, 33 lines)

@@ -0,0 +1,33 @@
---
# Gitea version
gitea_version: "1.22"

# Network configuration
gitea_port: 3000

# Container image
gitea_image: gitea/gitea

# Data directory
gitea_data_dir: "{{ podman_projects_dir }}/gitea/data"

# Database configuration (PostgreSQL)
gitea_postgres_db_name: gitea
gitea_postgres_user: gitea
gitea_postgres_schema: gitea
# gitea_postgres_password: ""  # Required - set in inventory

# Application configuration
gitea_app_name: "Gitea"
gitea_domain: git.nas.local
gitea_root_url: "https://{{ gitea_domain }}"

# Disable SSH (HTTPS only for Git operations)
gitea_disable_ssh: true

# Disable registration
gitea_disable_registration: false

# Nginx reverse proxy configuration
gitea_nginx_enabled: true
gitea_nginx_hostname: "{{ gitea_domain }}"
roles/gitea/handlers/main.yml (new file, 15 lines)

@@ -0,0 +1,15 @@
---
- name: Reload systemd
  ansible.builtin.systemd:
    daemon_reload: true

- name: Restart gitea
  ansible.builtin.systemd:
    name: gitea
    state: restarted
    daemon_reload: true

- name: Reload nginx
  ansible.builtin.systemd:
    name: nginx
    state: reloaded
roles/gitea/meta/main.yml (new file, 4 lines)

@@ -0,0 +1,4 @@
---
dependencies:
  - role: podman
  - role: postgres
roles/gitea/tasks/main.yml (new file, 116 lines)

@@ -0,0 +1,116 @@
---
- name: Validate required passwords are set
  ansible.builtin.assert:
    that:
      - gitea_postgres_password is defined
      - gitea_postgres_password | length >= 12
    fail_msg: |
      gitea_postgres_password is required (min 12 chars).
      See roles/gitea/defaults/main.yml for configuration instructions.
    success_msg: "Password validation passed"

- name: Create PostgreSQL user for Gitea
  community.postgresql.postgresql_user:
    name: "{{ gitea_postgres_user }}"
    password: "{{ gitea_postgres_password }}"
    state: present
  become_user: "{{ postgres_admin_user }}"

- name: Create PostgreSQL database for Gitea
  community.postgresql.postgresql_db:
    name: "{{ gitea_postgres_db_name }}"
    owner: "{{ gitea_postgres_user }}"
    state: present
  become_user: "{{ postgres_admin_user }}"

- name: Grant all privileges on database to Gitea user
  community.postgresql.postgresql_privs:
    login_db: "{{ gitea_postgres_db_name }}"
    roles: "{{ gitea_postgres_user }}"
    type: database
    privs: ALL
    state: present
  become_user: "{{ postgres_admin_user }}"

- name: Ensure Gitea user has no superuser privileges
  community.postgresql.postgresql_user:
    name: "{{ gitea_postgres_user }}"
    role_attr_flags: NOSUPERUSER,NOCREATEDB,NOCREATEROLE
    state: present
  become_user: "{{ postgres_admin_user | default('postgres') }}"

- name: Create PostgreSQL schema for Gitea
  community.postgresql.postgresql_schema:
    name: "{{ gitea_postgres_schema }}"
    database: "{{ gitea_postgres_db_name }}"
    owner: "{{ gitea_postgres_user }}"
    state: present
  become_user: "{{ postgres_admin_user | default('postgres') }}"

- name: Grant schema permissions to Gitea user
  community.postgresql.postgresql_privs:
    login_db: "{{ gitea_postgres_db_name }}"
    roles: "{{ gitea_postgres_user }}"
    type: schema
    objs: "{{ gitea_postgres_schema }}"
    privs: CREATE,USAGE
    state: present
  become_user: "{{ postgres_admin_user | default('postgres') }}"

- name: Create Gitea project directory
  ansible.builtin.file:
    path: "{{ podman_projects_dir | default('/opt/podman') }}/gitea"
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0755"

- name: Create Gitea data directory
  ansible.builtin.file:
    path: "{{ gitea_data_dir }}"
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0755"

- name: Deploy Kubernetes YAML for Gitea
  ansible.builtin.template:
    src: gitea.yaml.j2
    dest: "{{ podman_projects_dir | default('/opt/podman') }}/gitea/gitea.yaml"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0644"
  notify: Restart gitea

- name: Create systemd service for Gitea
  ansible.builtin.template:
    src: gitea.service.j2
    dest: /etc/systemd/system/gitea.service
    owner: root
    group: root
    mode: "0644"
  notify: Reload systemd

- name: Enable and start Gitea service
  ansible.builtin.systemd:
    name: gitea
    enabled: true
    state: started
    daemon_reload: true

- name: Deploy nginx vhost configuration for Gitea
  ansible.builtin.template:
    src: nginx-vhost.conf.j2
    dest: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/gitea.conf"
    owner: root
    group: root
    mode: "0644"
  when: gitea_nginx_enabled
  notify: Reload nginx

- name: Remove nginx vhost configuration for Gitea
  ansible.builtin.file:
    path: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/gitea.conf"
    state: absent
  when: not gitea_nginx_enabled
  notify: Reload nginx
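A post-run spot check of the PostgreSQL objects these tasks create; a sketch that assumes the admin account is `postgres` (the `default(...)` fallback used later in the file):

```bash
# Sketch: verify the role's PostgreSQL objects (assumes admin account 'postgres')
sudo -u postgres psql -c '\du gitea'     # role should show no SUPERUSER/CREATEDB/CREATEROLE
sudo -u postgres psql -d gitea -c '\dn'  # 'gitea' schema, owned by gitea
```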
roles/gitea/templates/gitea.service.j2 (new file, 18 lines)

@@ -0,0 +1,18 @@
[Unit]
Description=Gitea Git Service
Requires=network-online.target
After=network-online.target

[Service]
Type=oneshot
RemainAfterExit=true
User={{ ansible_user }}
Group={{ ansible_user }}
WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/gitea
ExecStart=/usr/bin/podman play kube --replace gitea.yaml
ExecStop=/usr/bin/podman play kube --down gitea.yaml
Restart=on-failure
RestartSec=10

[Install]
WantedBy=multi-user.target
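Since the unit wraps `podman play kube`, the resulting pod can be inspected directly; a sketch (container names follow Podman's `<pod>-<container>` convention):

```bash
# Sketch: inspect what the oneshot unit created
systemctl status gitea
podman pod ps --filter name=gitea
podman logs gitea-server   # pod "gitea", container "server" from gitea.yaml
```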
roles/gitea/templates/gitea.yaml.j2 (new file, 54 lines)

@@ -0,0 +1,54 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: gitea
  labels:
    app: gitea
spec:
  containers:
    - name: server
      image: {{ gitea_image }}:{{ gitea_version }}
      ports:
        - containerPort: {{ gitea_port }}
          hostPort: {{ gitea_port }}
      env:
        - name: GITEA__database__DB_TYPE
          value: postgres
        - name: GITEA__database__HOST
          value: {{ immich_postgres_host | default('127.0.0.1') }}
        - name: GITEA__database__PORT
          value: "5432"
        - name: GITEA__database__NAME
          value: "{{ gitea_postgres_db_name }}"
        - name: GITEA__database__USER
          value: "{{ gitea_postgres_user }}"
        - name: GITEA__database__PASSWD
          value: "{{ gitea_postgres_password }}"
        - name: GITEA__server__DOMAIN
          value: "{{ gitea_domain }}"
        - name: GITEA__server__ROOT_URL
          value: "{{ gitea_root_url }}"
        - name: GITEA__server__HTTP_PORT
          value: "{{ gitea_port }}"
        - name: GITEA__server__DISABLE_SSH
          value: "{{ 'true' if gitea_disable_ssh else 'false' }}"
        - name: GITEA__service__DISABLE_REGISTRATION
          value: "{{ 'true' if gitea_disable_registration else 'false' }}"
      volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
          readOnly: true
        - name: gitea-data
          mountPath: /data
      restartPolicy: Always

  volumes:
    - name: localtime
      hostPath:
        path: /etc/localtime
        type: File
    - name: gitea-data
      hostPath:
        path: {{ gitea_data_dir }}
        type: Directory
roles/gitea/templates/mail.yml (new empty file)
roles/gitea/templates/nginx-vhost.conf.j2 (new file, 53 lines)

@@ -0,0 +1,53 @@
# Gitea vhost with Let's Encrypt (Certbot)
# Managed by Ansible - DO NOT EDIT MANUALLY

server {
    listen 80;
    server_name {{ gitea_nginx_hostname }};

    # Certbot webroot for ACME challenges
    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    # Redirect to HTTPS
    location / {
        return 301 https://$server_name$request_uri;
    }
}

server {
    listen 443 ssl;
    server_name {{ gitea_nginx_hostname }};

    # Let's Encrypt certificates (managed by Certbot)
    ssl_certificate /etc/letsencrypt/live/{{ gitea_nginx_hostname }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ gitea_nginx_hostname }}/privkey.pem;

    # SSL configuration
    ssl_protocols {{ nginx_ssl_protocols | default('TLSv1.3') }};
    ssl_prefer_server_ciphers on;

{% if nginx_log_backend | default('journald') == 'journald' %}
    access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_gitea;
    error_log syslog:server=unix:/dev/log,nohostname,tag=nginx_gitea;
{% else %}
    access_log /var/log/nginx/{{ gitea_nginx_hostname }}_access.log main;
    error_log /var/log/nginx/{{ gitea_nginx_hostname }}_error.log;
{% endif %}

    # Increase client max body size for large Git pushes
    client_max_body_size 512M;

    location / {
        proxy_pass http://127.0.0.1:{{ gitea_port }};
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Required for Git LFS and large repository operations
        proxy_buffering off;
        proxy_request_buffering off;
    }
}
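The vhost assumes certificates already exist under `/etc/letsencrypt/live/`; a sketch of the initial issuance matching the template's webroot, with the role-default hostname as a stand-in:

```bash
# Sketch: first-time issuance against the webroot the vhost serves
# (git.nas.local is the role default; substitute your gitea_nginx_hostname)
certbot certonly --webroot -w /var/www/certbot -d git.nas.local
nginx -t && systemctl reload nginx
```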
@@ -1,394 +1,21 @@
 # Immich Role

-This Ansible role deploys [Immich](https://immich.app/) - a high performance self-hosted photo and video management solution - using Podman with docker-compose files.
+This Ansible role deploys [Immich](https://immich.app/) - a high performance self-hosted photo and video management solution - using Podman with k8s files.

-## Requirements
-
-- Podman installed on the target system (handled by the `podman` role dependency)
-- Podman compose support (`podman compose` command available)
-- Sufficient disk space for photos/videos at the upload location
-
 ## Role Variables

 See `defaults/main.yml` for all available variables and their default values.

-### Key Configuration Requirements
-
-#### Required Passwords
+### Required Passwords

 Both passwords must be set in your inventory (min 12 characters):
 - `immich_postgres_password` - PostgreSQL database password
 - `immich_valkey_password` - Valkey/Redis password

-#### Valkey ACL Configuration
-
-**Important:** Immich requires a dedicated Valkey ACL user with specific permissions. This role provides the ACL configuration, but you must register it with the Valkey role.
-
-**Required Setup in Inventory:**
-
-Add the Immich user to your `valkey_acl_users` list in your inventory or host_vars:
-
-```yaml
-# inventory/host_vars/yourserver.yml or group_vars/all.yml
-valkey_acl_users:
-  - username: immich
-    password: "{{ immich_valkey_password }}"
-    keypattern: "immich_bull* immich_channel*"
-    commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha"
-```
-
-**ACL Breakdown:**
-- `keypattern: "immich_bull* immich_channel*"` - Restricts access to BullMQ keys used by Immich
-- `&*` - Allow all pub/sub channels (required for BullMQ job queues)
-- `-@dangerous` - Deny dangerous commands (FLUSHDB, FLUSHALL, KEYS, etc.)
-- `+@read +@write` - Allow read/write command groups
-- `+@pubsub` - Allow pub/sub commands (SUBSCRIBE, PUBLISH, etc.)
-- `+select` - Allow SELECT command (database switching)
-- `+auth +ping +info` - Connection management commands
-- `+eval +evalsha` - Lua scripting (required by BullMQ for atomic operations)
-
-**Based on:** [Immich GitHub Discussion #19727](https://github.com/immich-app/immich/discussions/19727#discussioncomment-13668749)
-
-**Security Benefits:**
-- Immich cannot access keys from other services
-- Cannot execute admin commands (FLUSHDB, CONFIG, etc.)
-- Cannot view all keys (KEYS command denied)
-- Defense-in-depth with ACL + key patterns + database numbers
-
-#### External Network Configuration
-
-Immich requires a dedicated external network to be defined in your inventory. Add this to your `host_vars` or `group_vars`:
-
-```yaml
-podman_external_networks:
-  - name: immich
-    subnet: 172.20.0.0/16
-    gateway: 172.20.0.1
-```
-
-**How it works:**
-1. Define the Immich network in `podman_external_networks` list in your inventory
-2. The `podman` role (a dependency) creates the external network before Immich deployment
-3. The Immich docker-compose file references this external network
-4. The network persists across container restarts and compose stack rebuilds
-
-## Dependencies
-
-This role depends on:
-- `podman` - Container runtime
-- `postgres` - PostgreSQL database
-- `valkey` - Redis-compatible cache (formerly Redis)
-
-**Note:** The Valkey role must be configured with the Immich ACL user (see Valkey Configuration section above) before running this role.
-
-## Example Playbook
-
-```yaml
----
-- hosts: servers
-  become: true
-  roles:
-    - role: podman
-    - role: immich
-      vars:
-        immich_postgres_password: "your-secure-postgres-password"
-        immich_valkey_password: "your-secure-valkey-password"
-        immich_upload_location: /mnt/storage/immich/upload
-        immich_timezone: America/New_York
-```
-
-**Complete Example with Valkey ACL:**
-
-In `inventory/host_vars/yourserver.yml`:
-
-```yaml
-# Podman external networks
-podman_external_networks:
-  - name: immich
-    subnet: 172.20.0.0/16
-    gateway: 172.20.0.1
-
-# Valkey admin password
-valkey_admin_password: "your-valkey-admin-password"
-
-# Valkey ACL users - register all service users here
-valkey_acl_users:
-  - username: immich
-    password: "{{ immich_valkey_password }}"
-    keypattern: "immich_bull* immich_channel*"
-    commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha"
-  # Add other services here as needed
-
-# Immich passwords
-immich_postgres_password: "your-secure-postgres-password"
-immich_valkey_password: "your-secure-valkey-password"
-```
-
-In your playbook:
-
-```yaml
----
-- hosts: servers
-  become: true
-  roles:
-    - role: valkey    # Must run first to create ACL users
-    - role: postgres
-    - role: podman
-    - role: immich
-```
-
-## Architecture
-
-The role deploys Immich using Podman containers that connect to shared system services:
-
-**Immich Containers:**
-1. **immich-server** - Main application server (exposed on configured port)
-2. **immich-machine-learning** - ML service for facial recognition and object detection
-
-**Shared System Services:**
-3. **PostgreSQL** - Database with vector extensions (from `postgres` role)
-4. **Valkey** - Redis-compatible cache (from `valkey` role)
-
-### Container Networking
-
-Both Immich containers run on a **dedicated external Podman network** with its own CIDR block. The network is created by the `podman` role as an external network, referenced in the compose file:
-
-```yaml
-networks:
-  immich:
-    external: true
-    name: immich
-```
-
-The actual network configuration (subnet: `172.20.0.0/16`, gateway: `172.20.0.1`) is handled by the podman role based on the `immich_network_*` variables.
-
-This provides:
-- **Network isolation**: Separate subnet (defined in inventory, e.g., `172.20.0.0/16`) from other containers
-- **Network persistence**: Network survives compose stack rebuilds and container recreation
-- **Named bridge**: Explicit interface naming for the network
-- **Container-to-container communication**: The server reaches the ML container via service name (`immich-machine-learning:3003`) using Docker/Podman internal DNS
-- **Container-to-host communication**: Both containers can reach PostgreSQL and Valkey on the host via `host.containers.internal:{{ podman_subnet_gateway }}`
-
-**Key Points:**
-- The network must be defined in your inventory via `podman_external_networks`
-- The network is created by the `podman` role before Immich deployment (via role dependency)
-- The Immich network has its own gateway (e.g., `172.20.0.1` as defined in inventory)
-- `extra_hosts` maps `host.containers.internal` to the **Podman default bridge gateway** (e.g., `10.88.0.1`), not the Immich network gateway
-- This allows containers to route to the host machine for PostgreSQL/Valkey access
-
-**Checking the network:**
-```bash
-# List all Podman networks
-podman network ls
-
-# Inspect the Immich network
-podman network inspect immich
-```
-
-### Data Isolation
-
-The role implements proper data isolation for both database backends:
-
-- **PostgreSQL**: Immich gets its own database (`immich`) and dedicated user (`immich`) with restricted privileges (NOSUPERUSER, NOCREATEDB, NOCREATEROLE)
-- **Valkey**: Immich uses a dedicated ACL user (`immich`) with:
-  - Dedicated password (independent from `valkey_admin_password`)
-  - Key pattern restriction (`immich_bull*` and `immich_channel*` only)
-  - Command restrictions (no admin/dangerous operations like FLUSHDB, CONFIG)
-  - Database number isolation (uses DB 0 by default, configurable)
-  - Pub/sub channel access for BullMQ job queues
-
-**Security Benefits:**
-- Each service has unique credentials
-- Compromised service cannot access other services' data
-- Cannot accidentally delete all data (FLUSHDB/FLUSHALL denied)
-- Cannot view keys from other services (KEYS command denied)
-- Defense-in-depth: ACL + key patterns + command restrictions + database numbers
-
-The compose file is deployed to `{{ podman_projects_dir }}/immich/docker-compose.yml` and managed via a systemd service.
-
-## Nginx Reverse Proxy with ACME/Let's Encrypt
-
-The role includes an Nginx vhost template with native ACME support for automatic HTTPS certificate management.
-
-**Prerequisites:**
-1. Nginx role deployed with `acme_email` configured
-2. Port 80/443 accessible from internet (for ACME HTTP-01 challenge)
-3. DNS pointing to your server
-
-**Configuration:**
-```yaml
-# Enable Nginx reverse proxy
-immich_nginx_enabled: true
-immich_nginx_hostname: "blog.hello.com"
-
-# In nginx role configuration (host_vars or group_vars)
-acme_email: "admin@carabosse.cloud"
-```
-
-**What it does:**
-- Deploys HTTPS vhost with automatic Let's Encrypt certificate
-- HTTP → HTTPS redirect
-- Proxies to Immich container on localhost
-- Handles WebSocket upgrades for live photos
-- Large file upload support (50GB max)
-
-**ACME automatic features:**
-- Certificate issuance on first deployment
-- Automatic renewal
-- HTTP-01 challenge handling
-
-## Post-Installation
-
-After deployment:
-
-1. Access Immich at:
-   - **With Nginx enabled**: `https://{{ immich_nginx_hostname }}`
-   - **Without Nginx**: `http://<host-ip>:{{ immich_port }}`
-2. Create an admin account on first login
-3. Configure mobile/desktop apps to point to your server
-
-## Management
-
-The role creates a systemd service for managing the compose stack:
-
-```bash
-# Check status
-systemctl status immich
-
-# Stop Immich
-systemctl stop immich
-
-# Start Immich
-systemctl start immich
-
-# Restart Immich
-systemctl restart immich
-
-# View logs for all containers
-cd /opt/podman/immich && podman compose logs -f
-
-# View logs for specific service
-cd /opt/podman/immich && podman compose logs -f immich-server
-```
-
-### Manual Management
-
-You can also manage containers directly with podman compose:
-
-```bash
-cd /opt/podman/immich
-
-# Start services
-podman compose up -d
-
-# Stop services
-podman compose down
-
-# Pull latest images
-podman compose pull
-
-# Recreate containers
-podman compose up -d --force-recreate
-```
-
-## Updating Immich
-
-To update to a newer version:
-
-1. Update the `immich_version` variable in your playbook or inventory
-2. Re-run the Ansible playbook
-3. The systemd service will restart with the new version
-
-Or manually:
-
-```bash
-cd /opt/podman/immich
-podman compose pull
-systemctl restart immich
-```
-
-## Storage
-
-- **Upload location**: Stores all photos, videos, and thumbnails
-- **Database location**: PostgreSQL data (not suitable for network shares)
-- **Model cache**: ML models for facial recognition
-
-Ensure adequate disk space and regular backups of these directories.
-
-## Files Deployed
-
-- `{{ podman_projects_dir }}/immich/docker-compose.yml` - Compose definition
-- `/etc/systemd/system/immich.service` - Systemd service unit
-
-## Security Considerations
-
-- **Set strong passwords** for both `immich_postgres_password` and `immich_valkey_password` (min 12 chars)
-- **Use Ansible Vault** to encrypt passwords in production:
-  ```bash
-  ansible-vault encrypt_string 'your-password' --name 'immich_postgres_password'
-  ansible-vault encrypt_string 'your-password' --name 'immich_valkey_password'
-  ```
-- **Configure Valkey ACL** properly (see Valkey Configuration section) - do not use `+@all`
-- Consider using a reverse proxy (nginx/traefik) for HTTPS
-- Restrict access via firewall rules if needed
-- Keep Immich updated by changing `immich_version` and redeploying
-
 ## Troubleshooting

-### Check service status
-```bash
-systemctl status immich
-```
-
-### View compose file
-```bash
-cat /opt/podman/immich/docker-compose.yml
-```
-
-### Check container status
-```bash
-cd /opt/podman/immich
-podman compose ps
-```
-
-### View logs
-```bash
-cd /opt/podman/immich
-podman compose logs
-```
-
 ### Valkey ACL Issues

-**Error: "NOPERM No permissions to access a channel"**
-- The Valkey ACL is missing channel permissions
-- Ensure `&*` or `+allchannels` is in the ACL commands
-- Verify ACL is properly loaded: `valkey-cli ACL LIST`
-
-**Error: "NOAUTH Authentication required"**
-- Check `immich_valkey_password` is set correctly
-- Verify the password matches in both inventory ACL config and immich vars
-
-**Error: "WRONGPASS invalid username-password pair"**
-- Ensure the Immich user is registered in `valkey_acl_users`
-- Check the Valkey ACL file was deployed: `cat /etc/valkey/users.acl`
-- Restart Valkey to reload ACL: `systemctl restart valkey`
-
-**Verify Valkey ACL Configuration:**
-```bash
-# Connect as admin
-valkey-cli
-AUTH default <valkey_admin_password>
-
-# List all ACL users
-ACL LIST
-
-# Check specific user
-ACL GETUSER immich
-
-# Monitor commands (useful for debugging permissions)
-MONITOR
-```
-
 **Test Immich user credentials:**
 ```bash
 valkey-cli

@@ -402,10 +29,4 @@ FLUSHDB
 # Should return: (error) NOPERM
 ```

-## License
-
-MIT
-
-## Author Information
-
-Created for deploying Immich on NAS systems using Podman and docker-compose.
+**Going further:** [Immich GitHub Discussion #19727](https://github.com/immich-app/immich/discussions/19727#discussioncomment-13668749)
@@ -9,13 +9,13 @@ immich_upload_location: "{{ podman_projects_dir }}/immich/data/upload"
 immich_postgres_db_name: immich
 immich_postgres_user: immich
 # immich_postgres_password: ""  # Intentionally undefined - role will fail if not set
-immich_postgres_host: postgres.local
+# immich_postgres_host: ""  # Must be set in inventory (e.g., podman_gw_gateway)
 immich_postgres_port: 5432

 # Valkey configuration (REQUIRED password - must be set explicitly)
 immich_valkey_user: immich
 # immich_valkey_password: ""  # Intentionally undefined - role will fail if not set
-immich_valkey_host: valkey.local
+# immich_valkey_host: ""  # Must be set in inventory (e.g., podman_gw_gateway)
 immich_valkey_port: 6379
 immich_valkey_db: 0  # Dedicated database number for isolation (0-15)

@@ -37,14 +37,6 @@ immich_valkey_acl:
 # Network configuration
 immich_port: 2283

-# External network configuration
-# Define in inventory via podman_external_networks list
-# Example:
-# podman_external_networks:
-#   - name: immich
-#     subnet: 172.20.0.0/16
-#     gateway: 172.20.0.1
-
 # Container images
 immich_server_image: ghcr.io/immich-app/immich-server
 immich_ml_image: ghcr.io/immich-app/immich-machine-learning
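With the `.local` defaults commented out, each host's inventory now has to pin both endpoints; a sketch reusing the `podman_gw_gateway` variable from the host_vars hunk at the top of this compare (the host_vars file name is an assumption):

```bash
# Sketch: required inventory additions (file name assumed; host "pinwheel"
# is taken from the deleted playbook.yml)
cat >> inventory/host_vars/pinwheel.yml <<'EOF'
immich_postgres_host: "{{ podman_gw_gateway }}"
immich_valkey_host: "{{ podman_gw_gateway }}"
EOF
```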
@@ -16,14 +16,14 @@
     name: "{{ immich_postgres_db_name }}"
     owner: "{{ immich_postgres_user }}"
     state: present
-  become_user: "{{ postgres_admin_user }}"
+  become_user: "{{ postgres_admin_user | default('postgres') }}"

 - name: Create PostgreSQL user for Immich
   community.postgresql.postgresql_user:
     name: "{{ immich_postgres_user }}"
     password: "{{ immich_postgres_password }}"
     state: present
-  become_user: "{{ postgres_admin_user }}"
+  become_user: "{{ postgres_admin_user | default('postgres') }}"

 - name: Grant all privileges on database to Immich user
   community.postgresql.postgresql_privs:

@@ -32,21 +32,21 @@
     type: database
     privs: ALL
     state: present
-  become_user: "{{ postgres_admin_user }}"
+  become_user: "{{ postgres_admin_user | default('postgres') }}"

 - name: Ensure Immich user has no superuser privileges
   community.postgresql.postgresql_user:
     name: "{{ immich_postgres_user }}"
     role_attr_flags: NOSUPERUSER,NOCREATEDB,NOCREATEROLE
     state: present
-  become_user: "{{ postgres_admin_user }}"
+  become_user: "{{ postgres_admin_user | default('postgres') }}"

 - name: Enable required PostgreSQL extensions in Immich database
   community.postgresql.postgresql_ext:
     name: "{{ item }}"
     login_db: "{{ immich_postgres_db_name }}"
     state: present
-  become_user: "{{ postgres_admin_user }}"
+  become_user: "{{ postgres_admin_user | default('postgres') }}"
   loop:
     - cube
     - earthdistance

@@ -60,11 +60,11 @@
     objs: public
     privs: CREATE,USAGE
     state: present
-  become_user: "{{ postgres_admin_user }}"
+  become_user: "{{ postgres_admin_user | default('postgres') }}"

 - name: Create Immich project directory
   ansible.builtin.file:
-    path: "{{ podman_projects_dir }}/immich"
+    path: "{{ podman_projects_dir | default('/opt/podman') }}/immich"
     state: directory
     owner: "{{ ansible_user }}"
     group: "{{ ansible_user }}"

@@ -80,10 +80,10 @@
   loop:
     - "{{ immich_upload_location }}"

-- name: Deploy docker-compose.yml for Immich
+- name: Deploy Kubernetes YAML for Immich
   ansible.builtin.template:
-    src: docker-compose.yml.j2
-    dest: "{{ podman_projects_dir }}/immich/docker-compose.yml"
+    src: immich.yaml.j2
+    dest: "{{ podman_projects_dir | default('/opt/podman') }}/immich/immich.yaml"
     owner: "{{ ansible_user }}"
     group: "{{ ansible_user }}"
     mode: "0644"

@@ -108,7 +108,7 @@
 - name: Deploy nginx vhost configuration for Immich
   ansible.builtin.template:
     src: nginx-vhost.conf.j2
-    dest: /etc/nginx/conf.d/immich.conf
+    dest: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/immich.conf"
     owner: root
     group: root
     mode: "0644"

@@ -117,7 +117,7 @@

 - name: Remove nginx vhost configuration for Immich
   ansible.builtin.file:
-    path: /etc/nginx/conf.d/immich.conf
+    path: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/immich.conf"
     state: absent
   when: not immich_nginx_enabled
   notify: Reload nginx
@@ -1,62 +0,0 @@
----
-services:
-  immich-server:
-    container_name: immich_server
-    image: {{ immich_server_image }}:{{ immich_version }}
-    networks:
-      - databases
-      - immich
-    extra_hosts:
-      - "{{ immich_postgres_host }}:{{ podman_subnet_gateway }}"
-      - "{{ immich_valkey_host }}:{{ podman_subnet_gateway }}"
-    volumes:
-      - /etc/localtime:/etc/localtime:ro
-      - {{ immich_upload_location }}:/data:rw,Z
-    environment:
-      DB_HOSTNAME: {{ immich_postgres_host }}
-      DB_PORT: {{ immich_postgres_port }}
-      DB_USERNAME: {{ immich_postgres_user }}
-      DB_PASSWORD: {{ immich_postgres_password }}
-      DB_DATABASE_NAME: {{ immich_postgres_db_name }}
-      REDIS_HOSTNAME: {{ immich_valkey_host }}
-      REDIS_PORT: {{ immich_valkey_port }}
-      REDIS_USERNAME: {{ immich_valkey_user }}
-      REDIS_PASSWORD: {{ immich_valkey_password }}
-      REDIS_DBINDEX: {{ immich_valkey_db }}
-      IMMICH_MACHINE_LEARNING_URL: http://immich-machine-learning:3003
-      UPLOAD_LOCATION: {{ immich_upload_location }}
-      TZ: {{ immich_timezone }}
-    ports:
-      - "{{ immich_port }}:2283"
-    restart: always
-    healthcheck:
-      test: ["CMD-SHELL", "curl -f http://localhost:2283/api/server/ping"]
-      interval: 30s
-      timeout: 10s
-      retries: 3
-      start_period: 60s
-
-  immich-machine-learning:
-    container_name: immich_machine_learning
-    image: {{ immich_ml_image }}:{{ immich_version }}
-    networks:
-      - immich
-    volumes:
-      - model-cache:/cache
-    restart: always
-    healthcheck:
-      test: ["CMD", "python", "/usr/src/healthcheck.py"]
-      interval: 30s
-      timeout: 10s
-      retries: 3
-      start_period: 60s
-
-networks:
-  databases:
-    name: podman
-    external: true
-  immich:
-    driver: bridge
-
-volumes:
-  model-cache:
@@ -6,9 +6,11 @@ After=network-online.target
 [Service]
 Type=oneshot
 RemainAfterExit=true
-WorkingDirectory={{ podman_projects_dir }}/immich
-ExecStart=/usr/bin/podman compose up -d
-ExecStop=/usr/bin/podman compose down
+User={{ ansible_user }}
+Group={{ ansible_user }}
+WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/immich
+ExecStart=/usr/bin/podman play kube --replace immich.yaml
+ExecStop=/usr/bin/podman play kube --down immich.yaml
 Restart=on-failure
 RestartSec=10

roles/immich/templates/immich.yaml.j2 (new file, 102 lines)

@@ -0,0 +1,102 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: immich
  labels:
    app: immich
  annotations:
    io.podman.annotations.network.mode: bridge
    io.podman.annotations.network.name: podman-gw
spec:
  containers:
    - name: server
      image: {{ immich_server_image }}:{{ immich_version }}
      ports:
        - containerPort: 2283
          hostPort: {{ immich_port }}
      env:
        - name: DB_HOSTNAME
          value: "{{ immich_postgres_host }}"
        - name: DB_PORT
          value: "{{ immich_postgres_port }}"
        - name: DB_USERNAME
          value: "{{ immich_postgres_user }}"
        - name: DB_PASSWORD
          value: "{{ immich_postgres_password }}"
        - name: DB_DATABASE_NAME
          value: "{{ immich_postgres_db_name }}"
        - name: REDIS_HOSTNAME
          value: "{{ immich_valkey_host }}"
        - name: REDIS_PORT
          value: "{{ immich_valkey_port }}"
        - name: REDIS_USERNAME
          value: "{{ immich_valkey_user }}"
        - name: REDIS_PASSWORD
          value: "{{ immich_valkey_password }}"
        - name: REDIS_DBINDEX
          value: "{{ immich_valkey_db }}"
        - name: IMMICH_MACHINE_LEARNING_URL
          value: http://localhost:3003
        - name: UPLOAD_LOCATION
          value: /data
        - name: TZ
          value: "{{ immich_timezone }}"
      volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
          readOnly: true
        - name: immich-data
          mountPath: /data
      livenessProbe:
        httpGet:
          path: /api/server/ping
          port: 2283
        initialDelaySeconds: 60
        periodSeconds: 30
        timeoutSeconds: 10
        failureThreshold: 3
      restartPolicy: Always

    - name: machine-learning
      image: {{ immich_ml_image }}:{{ immich_version }}
      env:
        - name: TZ
          value: "{{ immich_timezone }}"
      volumeMounts:
        - name: model-cache
          mountPath: /cache
      livenessProbe:
        exec:
          command:
            - python
            - /usr/src/healthcheck.py
        initialDelaySeconds: 60
        periodSeconds: 30
        timeoutSeconds: 10
        failureThreshold: 3
      restartPolicy: Always

  volumes:
    - name: localtime
      hostPath:
        path: /etc/localtime
        type: File
    - name: immich-data
      hostPath:
        path: {{ immich_upload_location }}
        type: Directory
    - name: model-cache
      persistentVolumeClaim:
        claimName: immich-model-cache
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: immich-model-cache
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
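When `podman play kube` brings this up, the PersistentVolumeClaim becomes a named Podman volume; a sketch:

```bash
# Sketch: the PVC materialises as a Podman volume backing the ML model cache
podman pod ps --filter name=immich
podman volume inspect immich-model-cache
```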
@@ -25,10 +25,10 @@ server {
     ssl_certificate_key /etc/letsencrypt/live/{{ immich_nginx_hostname }}/privkey.pem;

     # SSL configuration
-    ssl_protocols {{ nginx_ssl_protocols }};
-    ssl_prefer_server_ciphers {{ 'on' if nginx_ssl_prefer_server_ciphers else 'off' }};
+    ssl_protocols {{ nginx_ssl_protocols | default('TLSv1.3') }};
+    ssl_prefer_server_ciphers on;

-{% if nginx_log_backend == 'journald' %}
+{% if nginx_log_backend | default('journald') == 'journald' %}
     access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_immich;
     error_log syslog:server=unix:/dev/log,nohostname,tag=nginx_immich;
 {% else %}
@@ -1,36 +1,34 @@
 ---
 - name: Check if the interface ipv4 address is defined
+  ansible.builtin.debug:
+    msg: "Warning: iface {{ interface.name }} has no defined ipv4 address, skipping configuration"
   when: interface.ipv4.address is not defined

+- name: Process interface configuration
+  when: interface.ipv4.address is defined
   block:
-    - ansible.builtin.debug:
-        msg: "Warning: iface {{ interface.name }} has no defined ipv4 address, skipping configuration"
-    - name: Skip net-config role for {{ interface.name }}
-      ansible.builtin.meta: end_play
-
-- name: Check if the interface is already configured
-  ansible.builtin.stat:
-    path: /etc/systemd/network/20-{{ interface.name }}.network
-  register: network_file
+    - name: Create systemd-netdev file for virtual interface
+      when:
+        - interface.type is defined
+        - interface.type != 'ethernet'
+      ansible.builtin.template:
+        src: systemd.netdev.j2
+        dest: /etc/systemd/network/10-{{ interface.name }}.netdev
+        owner: root
+        group: root
+        mode: "0644"
+      register: netdev_result

-- name: What patch is needed
-  ansible.builtin.debug:
-    msg: >-
-      {%- if network_file.stat.exists == true -%}
-      iface {{ interface.name }} is already configured, no action needed.
-      {%- else -%}
-      iface {{ interface.name }} will be configured.
-      {%- endif -%}
-
-- name: Create systemd-network link file
-  when: network_file.stat.exists != true
-  ansible.builtin.template:
-    src: systemd.network.j2
-    dest: /etc/systemd/network/20-{{ interface.name }}.network
-    owner: root
-    group: root
-    mode: "0644"
+    - name: Create systemd-network configuration file
+      ansible.builtin.template:
+        src: systemd.network.j2
+        dest: /etc/systemd/network/20-{{ interface.name }}.network
+        owner: root
+        group: root
+        mode: "0644"
+      register: network_result

 - name: Notify a reload is required
   ansible.builtin.set_fact:
     network_reload_required: true
-  when: network_file.stat.exists != true
+  when: netdev_result is changed or network_result is changed
roles/net-config/templates/systemd.netdev.j2 (new file, 6 lines)

@@ -0,0 +1,6 @@
# {{ ansible_managed }}
# systemd.netdev(5)

[NetDev]
Name={{ interface.name }}
Kind={{ interface.type }}
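For the `podman-gw` bridge declared in the host_vars hunk at the top of this compare, the template should render as below; a sketch:

```bash
# Sketch: expected render for the podman-gw bridge (type: bridge in host_vars)
cat /etc/systemd/network/10-podman-gw.netdev
# [NetDev]
# Name=podman-gw
# Kind=bridge
networkctl status podman-gw
```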
@@ -11,9 +11,14 @@ RouteMetric={{ interface.ipv4.metric }}
 {% endif %}

 [Network]
+{% if interface.type is defined and interface.type == 'bridge' %}
+ConfigureWithoutCarrier=yes
+{% endif %}
+{% if interface.ipv4.nameservers is defined %}
 {% for dns in interface.ipv4.nameservers %}
 DNS={{ dns }}
 {% endfor %}
+{% endif %}

 {% if interface.ipv4.gateway is defined %}
 [Route]
@@ -1,8 +1,15 @@
 ---
+- name: Skip net-persist for non-ethernet interfaces
+  ansible.builtin.debug:
+    msg: "Skipping net-persist for {{ interface.name }} (type: {{ interface.type }})"
+  when: interface.type is defined and interface.type != 'ethernet'
+
+- name: Process ethernet interface persistence
+  when: interface.type is not defined or interface.type == 'ethernet'
+  block:
     - name: "Check {{ interface.name }} ({{ interface.mac_address }}) rule"
       ansible.builtin.set_fact:
-        interface_original_name: "{{ ansible_facts.interfaces | select('in', ansible_facts) | map('extract', ansible_facts) | selectattr('pciid', 'defined') | selectattr('macaddress',
-          'equalto', interface.mac_address) | map(attribute='device') | first }}"
+        interface_original_name: "{{ ansible_facts.interfaces | select('in', ansible_facts) | map('extract', ansible_facts) | selectattr('pciid', 'defined') | selectattr('macaddress', 'equalto', interface.mac_address) | map(attribute='device') | first }}"

     - name: What patch is needed
       ansible.builtin.debug:
@@ -1,4 +1,9 @@
 ---
+- name: Initialize network management variables
+  ansible.builtin.set_fact:
+    reboot_required: false
+    network_reload_required: false
+
 - name: Setup persistent network interface(s)
   ansible.builtin.include_role:
     name: net-persist
@@ -16,7 +16,6 @@ nginx_client_max_body_size: 100M

 # SSL configuration (volontarily omit TLSv1.2 here)
 nginx_ssl_protocols: TLSv1.3
-nginx_ssl_prefer_server_ciphers: true

 # Logging configuration
 # Backend: 'file' (traditional /var/log/nginx/*.log) or 'journald' (systemd journal)
@@ -51,11 +51,10 @@

 - name: Enable Certbot renewal timer
   ansible.builtin.systemd:
-    name: certbot-renew.timer
+    name: "{{ certbot_timer }}"
     enabled: true
     state: started
   when: acme_email is defined
-  ignore_errors: true

 - name: Ensure nginx conf.d directory exists
   ansible.builtin.file:
@@ -48,7 +48,7 @@ http {

     # SSL configuration
     ssl_protocols {{ nginx_ssl_protocols }};
-    ssl_prefer_server_ciphers {{ 'on' if nginx_ssl_prefer_server_ciphers else 'off' }};
+    ssl_prefer_server_ciphers on;

     # Load modular configuration files from the conf.d directory
     include {{ nginx_conf_dir }}/*.conf;
@@ -26,7 +26,7 @@ server {

     # SSL configuration
     ssl_protocols {{ nginx_ssl_protocols }};
-    ssl_prefer_server_ciphers {{ 'on' if nginx_ssl_prefer_server_ciphers else 'off' }};
+    ssl_prefer_server_ciphers on;

 {% if nginx_log_backend == 'journald' %}
     access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_{{ server_name | replace('.', '_') }};
@@ -1,2 +1,3 @@
 ---
 nginx_user: http
+certbot_timer: certbot-renew.timer
@@ -1,2 +1,3 @@
 ---
 nginx_user: www-data
+certbot_timer: certbot.timer
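The two vars hunks pick the distro's own renewal unit (`certbot-renew.timer` alongside `nginx_user: http` on Arch, `certbot.timer` alongside `www-data` on Debian); a quick check after a run:

```bash
# Sketch: confirm the timer selected via certbot_timer is scheduled
systemctl list-timers 'certbot*'
```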
roles/ntfy/README.md (new file, 129 lines)

@@ -0,0 +1,129 @@
# ntfy - Simple Notification Service

Deploys [ntfy](https://ntfy.sh/) - a simple HTTP-based pub-sub notification service.

## Security Model

**Secure by default:**
- `auth-default-access: deny-all` - No anonymous access
- `enable-signup: false` - No public registration
- `enable-login: true` - Authentication required
- `enable-reservations: true` - Only authenticated users can reserve topics

All notifications require authentication to send or receive.

## Configuration

### Required Variables

Set in inventory or vault:

```yaml
ntfy_admin_password: "your-secure-password-here"  # Min 12 chars
```

### Optional Variables

See [defaults/main.yml](defaults/main.yml) for all configuration options.

Key settings:

```yaml
ntfy_version: latest
ntfy_port: 8080
ntfy_base_url: http://localhost:8080
ntfy_admin_user: admin

# Nginx reverse proxy
ntfy_nginx_enabled: false
ntfy_nginx_hostname: ntfy.nas.local
```

## Usage

### Managing Users

List users:
```bash
podman exec ntfy ntfy user list
```

Add user:
```bash
podman exec ntfy ntfy user add <username>
```

Change password:
```bash
podman exec -i ntfy ntfy user change-pass <username>
```

Remove user:
```bash
podman exec ntfy ntfy user remove <username>
```

### Managing Topic Access

Grant access to topic:
```bash
podman exec ntfy ntfy access <username> <topic> <permission>
```

Permissions: `read-write`, `read-only`, `write-only`, `deny`

Example:
```bash
# Allow user to publish and subscribe to "alerts" topic
podman exec ntfy ntfy access alice alerts read-write

# Allow user to only publish to "monitoring" topic
podman exec ntfy ntfy access bob monitoring write-only
```

List access control:
```bash
podman exec ntfy ntfy access
```

### Publishing Notifications

Using curl with authentication:
```bash
curl -u admin:password -d "Backup completed" http://localhost:8080/backups
```

Using ntfy CLI:
```bash
ntfy publish --token <access-token> ntfy.nas.local mytopic "Hello World"
```

### Subscribing to Notifications

Web UI: https://ntfy.nas.local (if nginx enabled)

CLI:
```bash
ntfy subscribe --token <access-token> ntfy.nas.local mytopic
```

Mobile apps available for iOS and Android.

## Architecture

- **Container**: Podman-based deployment
- **Storage**: Persistent cache and user database
- **Networking**: Localhost binding by default
- **Reverse Proxy**: Optional nginx with HTTPS

## File Locations

- Configuration: `{{ podman_projects_dir }}/ntfy/server.yml`
- User database: `{{ ntfy_data_dir }}/user.db`
- Cache database: `{{ ntfy_cache_dir }}/cache.db`
- Attachments: `{{ ntfy_cache_dir }}/attachments/`

## Dependencies

- podman
- nginx (if `ntfy_nginx_enabled: true`)
32
roles/ntfy/defaults/main.yml
Normal file
@ -0,0 +1,32 @@
---
# Ntfy version to deploy
ntfy_version: latest

# Storage location
ntfy_data_dir: "{{ podman_projects_dir }}/ntfy/data"
ntfy_cache_dir: "{{ podman_projects_dir }}/ntfy/cache"

# Authentication configuration (REQUIRED - must be set explicitly)
# Ntfy admin user for managing topics and access control
ntfy_admin_user: admin
# ntfy_admin_password: ""  # Intentionally undefined - role will fail if not set

# Network configuration
ntfy_port: 8080

# Container image
ntfy_image: binwiederhier/ntfy

# Timezone
ntfy_timezone: UTC

# Server configuration
ntfy_base_url: http://localhost:{{ ntfy_port }}
ntfy_behind_proxy: false
ntfy_enable_signup: false  # Disable public signup for security
ntfy_enable_login: true  # Enable authentication
ntfy_enable_reservations: true  # Only authenticated users can reserve topics

# Nginx reverse proxy configuration
ntfy_nginx_enabled: false
ntfy_nginx_hostname: ntfy.nas.local
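Since the admin password is deliberately left undefined, it has to be wired in from inventory; a minimal sketch, assuming a vault-encrypted variable (the `vault_ntfy_admin_password` name is illustrative):

```yaml
# host_vars/<host>.yml (illustrative)
ntfy_admin_password: "{{ vault_ntfy_admin_password }}"
```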
15
roles/ntfy/handlers/main.yml
Normal file
@ -0,0 +1,15 @@
---
- name: Reload systemd
  ansible.builtin.systemd:
    daemon_reload: true

- name: Restart ntfy
  ansible.builtin.systemd:
    name: ntfy
    state: restarted
    daemon_reload: true

- name: Reload nginx
  ansible.builtin.systemd:
    name: nginx
    state: reloaded
3
roles/ntfy/meta/main.yml
Normal file
@ -0,0 +1,3 @@
---
dependencies:
  - role: podman
109
roles/ntfy/tasks/main.yml
Normal file
@ -0,0 +1,109 @@
---
- name: Validate required passwords are set
  ansible.builtin.assert:
    that:
      - ntfy_admin_password is defined
      - ntfy_admin_password | length >= 12
    fail_msg: |
      ntfy_admin_password is required (min 12 chars).
      See roles/ntfy/defaults/main.yml for configuration instructions.
    success_msg: "Password validation passed"

- name: Create ntfy project directory
  ansible.builtin.file:
    path: "{{ podman_projects_dir | default('/opt/podman') }}/ntfy"
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0755"

- name: Create ntfy data directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0755"
  loop:
    - "{{ ntfy_data_dir }}"
    - "{{ ntfy_cache_dir }}"

- name: Deploy ntfy server configuration
  ansible.builtin.template:
    src: server.yml.j2
    dest: "{{ podman_projects_dir | default('/opt/podman') }}/ntfy/server.yml"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0644"
  notify: Restart ntfy

- name: Deploy Kubernetes YAML for ntfy
  ansible.builtin.template:
    src: ntfy.yaml.j2
    dest: "{{ podman_projects_dir | default('/opt/podman') }}/ntfy/ntfy.yaml"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0644"
  notify: Restart ntfy

- name: Create systemd service for ntfy
  ansible.builtin.template:
    src: ntfy.service.j2
    dest: /etc/systemd/system/ntfy.service
    owner: root
    group: root
    mode: "0644"
  notify: Reload systemd

- name: Enable and start ntfy service
  ansible.builtin.systemd:
    name: ntfy
    enabled: true
    state: started
    daemon_reload: true

- name: Wait for ntfy to be ready
  ansible.builtin.wait_for:
    port: "{{ ntfy_port }}"
    host: 127.0.0.1
    timeout: 60

- name: Check if admin user already exists
  ansible.builtin.command:
    cmd: podman exec ntfy-server ntfy user list
  register: ntfy_user_list
  changed_when: false
  failed_when: false
  become_user: "{{ ansible_user }}"

- name: Create admin user in ntfy
  ansible.builtin.shell: |
    printf '%s\n%s\n' '{{ ntfy_admin_password }}' '{{ ntfy_admin_password }}' | podman exec -i ntfy-server ntfy user add --role=admin {{ ntfy_admin_user }}
  when: ntfy_admin_user not in ntfy_user_list.stdout
  register: ntfy_user_create
  changed_when: ntfy_user_create.rc == 0
  become_user: "{{ ansible_user }}"

- name: Set admin user password
  ansible.builtin.shell: |
    printf '%s\n%s\n' '{{ ntfy_admin_password }}' '{{ ntfy_admin_password }}' | podman exec -i ntfy-server ntfy user change-pass {{ ntfy_admin_user }}
  when: ntfy_admin_user in ntfy_user_list.stdout
  changed_when: false
  become_user: "{{ ansible_user }}"

- name: Deploy nginx vhost configuration for ntfy
  ansible.builtin.template:
    src: nginx-vhost.conf.j2
    dest: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/ntfy.conf"
    owner: root
    group: root
    mode: "0644"
  when: ntfy_nginx_enabled
  notify: Reload nginx

- name: Remove nginx vhost configuration for ntfy
  ansible.builtin.file:
    path: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/ntfy.conf"
    state: absent
  when: not ntfy_nginx_enabled
  notify: Reload nginx
60
roles/ntfy/templates/nginx-vhost.conf.j2
Normal file
@ -0,0 +1,60 @@
# Ntfy vhost with Let's Encrypt (Certbot)
# Managed by Ansible - DO NOT EDIT MANUALLY

server {
    listen 80;
    server_name {{ ntfy_nginx_hostname }};

    # Certbot webroot for ACME challenges
    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    # Redirect to HTTPS
    location / {
        return 301 https://$server_name$request_uri;
    }
}

server {
    listen 443 ssl;
    server_name {{ ntfy_nginx_hostname }};

    # Let's Encrypt certificates (managed by Certbot)
    ssl_certificate /etc/letsencrypt/live/{{ ntfy_nginx_hostname }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ ntfy_nginx_hostname }}/privkey.pem;

    # SSL configuration
    ssl_protocols {{ nginx_ssl_protocols | default('TLSv1.3') }};
    ssl_prefer_server_ciphers on;

{% if nginx_log_backend | default('journald') == 'journald' %}
    access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_ntfy;
    error_log syslog:server=unix:/dev/log,nohostname,tag=nginx_ntfy;
{% else %}
    access_log /var/log/nginx/{{ ntfy_nginx_hostname }}_access.log main;
    error_log /var/log/nginx/{{ ntfy_nginx_hostname }}_error.log;
{% endif %}

    client_max_body_size 20M;

    location / {
        proxy_pass http://127.0.0.1:{{ ntfy_port }};
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # WebSocket and SSE support for ntfy
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        # Buffering must be off for SSE (Server-Sent Events)
        proxy_buffering off;

        # Timeouts for long-polling connections
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
    }
}
18
roles/ntfy/templates/ntfy.service.j2
Normal file
@ -0,0 +1,18 @@
[Unit]
Description=Ntfy Notification Service
Requires=network-online.target
After=network-online.target

[Service]
Type=oneshot
RemainAfterExit=true
User={{ ansible_user }}
Group={{ ansible_user }}
WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/ntfy
ExecStart=/usr/bin/podman play kube --replace ntfy.yaml
ExecStop=/usr/bin/podman play kube --down ntfy.yaml
Restart=on-failure
RestartSec=10

[Install]
WantedBy=multi-user.target
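To sanity-check the generated unit by hand (assuming the role has already deployed `ntfy.yaml`):

```bash
sudo systemctl daemon-reload
sudo systemctl start ntfy.service
podman pod ps --filter name=ntfy    # the ntfy pod should show as Running
journalctl -u ntfy.service -n 20    # unit logs if anything went wrong
```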
57
roles/ntfy/templates/ntfy.yaml.j2
Normal file
@ -0,0 +1,57 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: ntfy
  labels:
    app: ntfy
spec:
  containers:
    - name: server
      image: {{ ntfy_image }}:{{ ntfy_version }}
      args:
        - serve
      ports:
        - containerPort: 80
          hostPort: {{ ntfy_port }}
      env:
        - name: TZ
          value: "{{ ntfy_timezone }}"
      volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
          readOnly: true
        - name: ntfy-config
          mountPath: /etc/ntfy/server.yml
          readOnly: true
        - name: ntfy-cache
          mountPath: /var/cache/ntfy
        - name: ntfy-data
          mountPath: /var/lib/ntfy
      livenessProbe:
        httpGet:
          path: /v1/health
          port: 80
        initialDelaySeconds: 40
        periodSeconds: 30
        timeoutSeconds: 10
        failureThreshold: 3
  restartPolicy: Always

  volumes:
    - name: localtime
      hostPath:
        path: /etc/localtime
        type: File
    - name: ntfy-config
      hostPath:
        path: {{ podman_projects_dir | default('/opt/podman') }}/ntfy/server.yml
        type: File
    - name: ntfy-cache
      hostPath:
        path: {{ ntfy_cache_dir }}
        type: Directory
    - name: ntfy-data
      hostPath:
        path: {{ ntfy_data_dir }}
        type: Directory
65
roles/ntfy/templates/server.yml.j2
Normal file
@ -0,0 +1,65 @@
# Ntfy server configuration
# Managed by Ansible - DO NOT EDIT MANUALLY

# Public facing base URL of the service (e.g. https://ntfy.sh)
base-url: "{{ ntfy_base_url }}"

# Listen address for the HTTP & HTTPS web server. If "listen-https" is set, you must also
# set "key-file" and "cert-file". Format: [<ip>]:<port>, e.g. "1.2.3.4:8080".
listen-http: ":80"

# If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app.
# This is optional and only required to support Android apps (which don't allow background
# tasks anymore). See https://ntfy.sh/docs/config/ for details.
# upstream-base-url: "https://ntfy.sh"

# Path to the private database file. If unset, the database is in memory.
cache-file: "/var/cache/ntfy/cache.db"

# Path to the attachment cache directory. Attachments are only stored if this is set.
attachment-cache-dir: "/var/cache/ntfy/attachments"

# If set, users and access tokens are persisted in this file. If unset, they are in-memory only.
auth-file: "/var/lib/ntfy/user.db"

# Default access level for new topics. Can be "read-write", "read-only", "write-only" or "deny-all".
# If "deny-all", no access is allowed by default and explicit ACLs must be configured.
auth-default-access: "deny-all"

# If enabled, allows users to sign up via the web app or API
enable-signup: {{ 'true' if ntfy_enable_signup else 'false' }}

# If enabled, allows users to log in via the web app or API
enable-login: {{ 'true' if ntfy_enable_login else 'false' }}

# If enabled, allows users to reserve topics via the web app or API (requires authentication)
enable-reservations: {{ 'true' if ntfy_enable_reservations else 'false' }}

# If set, the X-Forwarded-For header will be used to determine the visitor IP
behind-proxy: {{ 'true' if ntfy_behind_proxy else 'false' }}

# Interval in which keepalive messages are sent to the client. This is to prevent
# intermediaries from closing the connection for inactivity.
keepalive-interval: "45s"

# Interval in which the manager prunes old messages, deletes old attachments, and
# resets rate limiters. Note that these tasks are only executed if the interval has passed AND
# if there is traffic on the server.
manager-interval: "1m"

# Root path of the web app. Defaults to "/", which is fine for most cases.
# web-root: "/"

# Rate limiting: Number of requests allowed per visitor
visitor-request-limit-burst: 60
visitor-request-limit-replenish: "5s"

# Size limits
message-size-limit: "4096"
attachment-file-size-limit: "15M"
attachment-total-size-limit: "5G"
attachment-expiry-duration: "3h"

# Visitor limits
visitor-attachment-total-size-limit: "100M"
visitor-attachment-daily-bandwidth-limit: "500M"
@ -6,9 +6,8 @@ Installs and configures Podman for container management with support for Docker

- Installs Podman, podman-compose, and crun (OCI runtime)
- Configurable logging backend (journald or k8s-file)
- External network creation for service isolation
- Container registry search configuration
- Shared projects directory for compose files
- Shared projects directory for Kubernetes YAML files

## Container Logging

@ -22,19 +21,6 @@ Installs and configures Podman for container management

Switch via `podman_log_driver` variable.
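For instance, to switch a host to file-based logs (both values come from the role defaults shown further down):

```yaml
# group_vars/<group>.yml
podman_log_driver: k8s-file   # default is journald
```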
## External Networks

Define networks in inventory for persistent, isolated container networks:

```yaml
podman_external_networks:
  - name: immich
    subnet: 172.20.0.0/16
    gateway: 172.20.0.1
```

Networks persist across container restarts and compose rebuilds.

## Hands-on Commands

```bash
@ -53,8 +39,14 @@ podman inspect <container> | jq '.[0].HostConfig.LogConfig'
# Test configuration
podman run --rm alpine echo "OK"

# List networks
podman network ls
# Play Kubernetes YAML
podman play kube --replace /path/to/pod.yaml

# Stop pod
podman play kube --down /path/to/pod.yaml

# List pods
podman pod ls
```

## References
@ -7,14 +7,6 @@ podman_unqualified_search_registries:
- quay.io
- ghcr.io

# Podman bridge network configuration
# Define the network where containers will operate
# Leave empty to use Podman's default dynamic network assignment
# Example: "10.89.0.0/24" if you want to explicitly set it
podman_subnet: ""
# Podman bridge gateway IP (typically .1 of the bridge network)
# Used by services that need to bind to the bridge interface

# OCI Runtime
# crun (default, modern C runtime - fast) or runc (original Go runtime)
podman_runtime: crun

@ -26,14 +18,3 @@ podman_log_driver: journald
# k8s-file driver settings (only used when podman_log_driver: k8s-file)
podman_log_max_size: 10mb  # Max size per log file before rotation
podman_log_max_files: 5  # Max number of rotated log files to keep

# Each network should define: name, subnet, gateway
# podman_external_networks: []
# Example:
# podman_external_networks:
#   - name: immich
#     subnet: 172.20.0.0/16
#     gateway: 172.20.0.1
#   - name: nextcloud
#     subnet: 172.21.0.0/16
#     gateway: 172.21.0.1
@ -7,6 +7,35 @@
      - crun
    state: present

- name: Check if tun module is available
  ansible.builtin.stat:
    path: "/lib/modules/{{ ansible_kernel }}/modules.builtin"
  register: kernel_modules

- name: Load tun kernel module for rootless Podman networking
  community.general.modprobe:
    name: tun
    state: present
  when: kernel_modules.stat.exists
  register: tun_loaded
  ignore_errors: true

- name: Ensure tun module loads on boot
  ansible.builtin.copy:
    content: "tun\n"
    dest: /etc/modules-load.d/tun.conf
    owner: root
    group: root
    mode: "0644"

- name: Warn user about reboot requirement for tun module
  ansible.builtin.debug:
    msg: |
      WARNING: tun kernel module could not be loaded (kernel modules not available).
      A REBOOT IS REQUIRED for the tun module to load and enable Pasta networking.
      After reboot, rootless Podman containers will have better network performance.
  when: not kernel_modules.stat.exists or (tun_loaded is defined and tun_loaded is failed)
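A manual equivalent of what these tasks automate, for spot-checking a host:

```bash
# Is tun loaded (or built in)? Load it if not.
lsmod | grep -w tun || sudo modprobe tun

# Will it come back after a reboot?
cat /etc/modules-load.d/tun.conf
```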
- name: Enable Podman service
  ansible.builtin.systemd:
    name: podman.service

@ -35,12 +64,3 @@
    owner: root
    group: root
    mode: "0644"

- name: Create external Podman networks
  containers.podman.podman_network:
    name: "{{ item.name }}"
    subnet: "{{ item.subnet }}"
    gateway: "{{ item.gateway }}"
    state: present
  loop: "{{ podman_external_networks | default([]) }}"
  when: podman_external_networks is defined and podman_external_networks | length > 0
@ -27,3 +27,7 @@ runtime = "{{ podman_runtime }}"

# Default network backend
network_backend = "netavark"

[network]
# Default rootless network command (pasta for better performance)
default_rootless_network_cmd = "pasta"
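After rolling this out, a quick way to confirm that rootless Podman actually picked up pasta (the exact output format varies by Podman version):

```bash
podman info | grep -i -A2 pasta
```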
@ -22,16 +22,19 @@ See `CLAUDE.md` for detailed architecture documentation.

## Container Access

For containers to reach PostgreSQL, configure in inventory:
For containers to reach PostgreSQL:

PostgreSQL binds to `127.0.0.1` by default (secure, localhost-only).

Containers can reach PostgreSQL via Pasta's `--map-host-loopback` feature, which routes the container's `127.0.0.1` to the host's `127.0.0.1`.

In docker-compose files, use:

```yaml
postgres_bind: "127.0.0.1,{{ podman_subnet_gateway }}"
postgres_firewall_allowed_sources:
  - 127.0.0.0/8
  - "{{ podman_subnet }}"
```
```yaml
extra_hosts:
  - "postgres.local:127.0.0.1"
```

Containers use `host.containers.internal` as hostname.
No additional bind addresses or firewall rules needed!
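A minimal sketch to verify the mapping from inside a throwaway container, assuming PostgreSQL answers on the host's port 5432:

```bash
# Should report "accepting connections" if the loopback mapping works
podman run --rm docker.io/library/postgres:16 pg_isready -h 127.0.0.1 -p 5432
```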
## Logging Backends
110
roles/static-web/README.md
Normal file
@ -0,0 +1,110 @@
# static-web

Deploy static websites from Git repositories with Nginx.

## Features

- Clone static sites from Git repositories
- Automatic Nginx vhost configuration
- HTTPS enabled by default with Let's Encrypt
- Support for build commands (npm, hugo, jekyll, etc.)
- Subdirectory serving (for built assets)
- Static file caching
- Security headers (including HSTS for HTTPS)

## Dependencies

- nginx role (automatically included via meta/main.yml)

## Variables

See [defaults/main.yml](defaults/main.yml)

**Main configuration:**

```yaml
static_web_sites:
  "portfolio.example.fr":
    git_repo: "https://github.com/example/portfolio.git"
    git_branch: "main"  # Optional, defaults to main
    git_depth: 1  # Optional, shallow clone
    build_command: "npm install && npm run build"  # Optional
    root_dir: "dist"  # Optional, serve subdirectory
    ssl_enabled: true  # Optional, defaults to true (HTTPS)

  "blog.example.com":
    git_repo: "https://github.com/example/blog.git"
    # ssl_enabled defaults to true, set to false for HTTP only
```

## Usage

**Inventory (host_vars or group_vars):**

```yaml
static_web_sites:
  "portfolio.example.fr":
    git_repo: "https://github.com/username/portfolio.git"

  "docs.example.com":
    git_repo: "https://github.com/company/documentation.git"
    git_branch: "gh-pages"
    root_dir: "_site"
```

**Playbook:**

```yaml
- hosts: webservers
  roles:
    - static-web
```

## File Structure

Sites are deployed to `/var/www/static/<hostname>/`

Example:
```
/var/www/static/
├── portfolio.example.fr/
│   └── index.html
└── blog.example.com/
    ├── _site/   # Built assets (if root_dir specified)
    └── ...
```

## Advanced Examples

**Hugo site:**
```yaml
static_web_sites:
  "blog.example.com":
    git_repo: "https://github.com/example/hugo-blog.git"
    build_command: "hugo --minify"
    root_dir: "public"
```

**React app:**
```yaml
static_web_sites:
  "app.example.com":
    git_repo: "https://github.com/example/react-app.git"
    build_command: "npm ci && npm run build"
    root_dir: "build"
```

## Updating Sites

Re-run the playbook to pull the latest changes:

```bash
ansible-playbook -i inventory playbook.yml --tags static-web
```

## Notes

- Nginx configuration is deployed to `{{ nginx_conf_dir }}/<hostname>.conf`
- Sites are owned by the nginx user (www-data on Debian, http on Arch)
- Git clones use shallow clone (depth=1) by default for efficiency
- Build commands run as the nginx user
20
roles/static-web/defaults/main.yml
Normal file
@ -0,0 +1,20 @@
---
# Static web sites configuration
# Define sites as a dictionary with hostname as key
# Example:
# static_web_sites:
#   "portfolio.example.fr":
#     git_repo: "https://github.com/example/portfolio.git"
#     git_branch: "main"  # optional, defaults to main
#     git_depth: 1  # optional, shallow clone depth
#     build_command: ""  # optional, command to run after git clone (e.g., npm build)
#     root_dir: ""  # optional, subdirectory to serve (e.g., "dist" or "build")
#     ssl_enabled: true  # optional, enable HTTPS with Let's Encrypt (default: true)

static_web_sites: {}

# Base directory for static web sites
static_web_base_dir: /var/www/static

# Nginx user (auto-detected from nginx role)
# static_web_nginx_user: www-data  # Set by nginx role vars
5
roles/static-web/handlers/main.yml
Normal file
@ -0,0 +1,5 @@
---
- name: Reload nginx
  ansible.builtin.systemd:
    name: nginx
    state: reloaded
3
roles/static-web/meta/main.yml
Normal file
@ -0,0 +1,3 @@
---
dependencies:
  - role: nginx
67
roles/static-web/tasks/main.yml
Normal file
@ -0,0 +1,67 @@
---
- name: Load OS-specific variables for nginx
  ansible.builtin.include_vars: "{{ item }}"
  with_first_found:
    - "../../nginx/vars/{{ ansible_facts['os_family'] }}.yml"
    - "../../nginx/vars/debian.yml"

- name: Install git
  ansible.builtin.package:
    name: git
    state: present

- name: Ensure static web base directory exists
  ansible.builtin.file:
    path: "{{ static_web_base_dir }}"
    state: directory
    owner: root
    group: root
    mode: "0755"

- name: Create site directories
  ansible.builtin.file:
    path: "{{ static_web_base_dir }}/{{ item.key }}"
    state: directory
    owner: "{{ nginx_user }}"
    group: "{{ nginx_user }}"
    mode: "0755"
  loop: "{{ static_web_sites | dict2items }}"
  when: static_web_sites | length > 0

- name: Clone or update git repositories
  ansible.builtin.git:
    repo: "{{ item.value.git_repo }}"
    dest: "{{ static_web_base_dir }}/{{ item.key }}"
    version: "{{ item.value.git_branch | default('main') }}"
    depth: "{{ item.value.git_depth | default(1) }}"
    force: true
  loop: "{{ static_web_sites | dict2items }}"
  when: static_web_sites | length > 0
  become_user: "{{ nginx_user }}"
  notify: Reload nginx

- name: Run build commands if specified
  ansible.builtin.shell: "{{ item.value.build_command }}"
  args:
    chdir: "{{ static_web_base_dir }}/{{ item.key }}"
  loop: "{{ static_web_sites | dict2items }}"
  when:
    - static_web_sites | length > 0
    - item.value.build_command is defined
    - item.value.build_command | length > 0
  become_user: "{{ nginx_user }}"
  changed_when: true

- name: Deploy nginx vhost configurations
  ansible.builtin.template:
    src: nginx-vhost.conf.j2
    dest: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/{{ item.key }}.conf"
    owner: root
    group: root
    mode: "0644"
  loop: "{{ static_web_sites | dict2items }}"
  vars:
    hostname: "{{ item.key }}"
    site_config: "{{ item.value }}"
  when: static_web_sites | length > 0
  notify: Reload nginx
79
roles/static-web/templates/nginx-vhost.conf.j2
Normal file
@ -0,0 +1,79 @@
# Static web vhost for {{ hostname }}
# Managed by Ansible - DO NOT EDIT MANUALLY

server {
    listen 80;
    listen [::]:80;
    server_name {{ hostname }};

{% if site_config.ssl_enabled | default(true) %}
    # Certbot webroot for ACME challenges
    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    # Redirect to HTTPS
    location / {
        return 301 https://$server_name$request_uri;
    }
}

server {
    listen 443 ssl;
    listen [::]:443 ssl;
    server_name {{ hostname }};

    # Let's Encrypt certificates (managed by Certbot)
    ssl_certificate /etc/letsencrypt/live/{{ hostname }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ hostname }}/privkey.pem;

    # SSL configuration
    ssl_protocols {{ nginx_ssl_protocols | default('TLSv1.3') }};
    ssl_prefer_server_ciphers on;
{% endif %}

    # Document root
{% if site_config.root_dir is defined and site_config.root_dir | length > 0 %}
    root {{ static_web_base_dir }}/{{ hostname }}/{{ site_config.root_dir }};
{% else %}
    root {{ static_web_base_dir }}/{{ hostname }};
{% endif %}

    # Index files
    index index.html index.htm;

    # Security headers
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
{% if site_config.ssl_enabled | default(true) %}
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
{% endif %}

    # Logging
{% if nginx_log_backend | default('journald') == 'journald' %}
    access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_{{ hostname | replace('.', '_') | replace('-', '_') }};
    error_log syslog:server=unix:/dev/log,nohostname,tag=nginx_{{ hostname | replace('.', '_') | replace('-', '_') }};
{% else %}
    access_log /var/log/nginx/{{ hostname }}-access.log;
    error_log /var/log/nginx/{{ hostname }}-error.log;
{% endif %}

    # Main location
    location / {
        try_files $uri $uri/ =404;
    }

    # Deny access to hidden files
    location ~ /\. {
        deny all;
        access_log off;
        log_not_found off;
    }

    # Static file caching
    location ~* \.(jpg|jpeg|png|gif|ico|css|js|svg|woff|woff2|ttf|eot)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }
}
@ -165,6 +165,9 @@
[Unit]
After=wg-quick@wg0.service
Requires=wg-quick@wg0.service
# Make Unbound part of network-online.target (provides DNS)
Before=network-online.target
Wants=network-online.target
notify: Reload systemd and restart unbound

- name: Enables unbound service
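After applying, the new ordering can be checked on the host; unbound should now appear among the units tied to `network-online.target`:

```bash
systemctl list-dependencies --reverse network-online.target | grep unbound
```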
99
roles/uptime-kuma/README.md
Normal file
@ -0,0 +1,99 @@
# uptime-kuma - Self-Hosted Monitoring Tool

Deploys [Uptime Kuma](https://uptime.kuma.pet/) - a self-hosted monitoring and status page application.

## Features

- Website monitoring (HTTP/HTTPS)
- TCP port monitoring
- Ping monitoring
- DNS monitoring
- Status pages
- Notifications (Email, Discord, Slack, ntfy, etc.)
- Multi-language support
- Dark mode

## Configuration

### Optional Variables

See [defaults/main.yml](defaults/main.yml) for all configuration options.

Key settings:

```yaml
uptime_kuma_version: "2"
uptime_kuma_port: 3001
uptime_kuma_data_dir: "{{ podman_projects_dir }}/uptime-kuma/data"

# Nginx reverse proxy
uptime_kuma_nginx_enabled: false
uptime_kuma_nginx_hostname: uptime.nas.local
```

## Storage Requirements

**CRITICAL:** Uptime Kuma uses SQLite and requires local storage with POSIX file lock support.

- ✅ **Supported:** Local filesystem, Docker volumes
- ❌ **NOT Supported:** NFS, network filesystems (will cause database corruption)
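A quick hedged check that the data directory really sits on a local filesystem (path assumes the default `podman_projects_dir` of `/opt/podman`):

```bash
# Prints the filesystem type; anything like "nfs" is a red flag for SQLite
stat -f -c %T /opt/podman/uptime-kuma/data
```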
## First-Time Setup

1. Access the web UI: `https://uptime.nas.local` (if nginx enabled) or `http://localhost:3001`
2. Create admin account on first visit
3. No default credentials - account is created during initial setup

## Usage

### Adding Monitors

Web UI → Add New Monitor:
- **Monitor Type:** HTTP(s), TCP Port, Ping, DNS, etc.
- **Friendly Name:** Display name
- **URL/Host:** Target to monitor
- **Heartbeat Interval:** Check frequency (seconds)
- **Retries:** Before marking as down
- **Notifications:** Select notification endpoints

### Notification Endpoints

Web UI → Settings → Notifications:
- Email (SMTP)
- Discord, Slack, Telegram
- ntfy (recommended for local notifications)
- Webhooks
- 50+ integrations available

### Status Pages

Create public or password-protected status pages showing monitor health.

Web UI → Status Pages → New Status Page

## Integration with ntfy

If you deployed the `ntfy` role:

1. Settings → Notifications → Add
2. Type: ntfy
3. ntfy Server URL: `https://ntfy.jokester.fr`
4. Topic: `uptime-alerts`
5. Username: `admin`
6. Password: Your ntfy admin password
7. Test notification (a curl sketch for the same check follows below)
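The same test can be done from a shell before touching the UI; a minimal sketch assuming the credentials above:

```bash
# If this arrives in your ntfy app, Uptime Kuma's settings will work too
curl -u admin:<password> -d "test alert from uptime-kuma setup" https://ntfy.jokester.fr/uptime-alerts
```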
## File Locations

- Data directory: `{{ uptime_kuma_data_dir }}`
- SQLite database: `{{ uptime_kuma_data_dir }}/kuma.db`

## Dependencies

- podman
- nginx (if `uptime_kuma_nginx_enabled: true`)

## Sources

- [Install Uptime Kuma using Docker](https://uptimekuma.org/install-uptime-kuma-docker/)
- [Uptime Kuma GitHub Wiki](https://github.com/louislam/uptime-kuma/wiki)
20
roles/uptime-kuma/defaults/main.yml
Normal file
@ -0,0 +1,20 @@
---
# Uptime Kuma version to deploy
uptime_kuma_version: "2"

# Storage location (CRITICAL: Must be local storage, NOT NFS)
# NFS is NOT supported - will cause SQLite corruption
uptime_kuma_data_dir: "{{ podman_projects_dir }}/uptime-kuma/data"

# Network configuration
uptime_kuma_port: 3001

# Container image
uptime_kuma_image: louislam/uptime-kuma

# Timezone
uptime_kuma_timezone: UTC

# Nginx reverse proxy configuration
uptime_kuma_nginx_enabled: false
uptime_kuma_nginx_hostname: uptime.nas.local
15
roles/uptime-kuma/handlers/main.yml
Normal file
@ -0,0 +1,15 @@
---
- name: Reload systemd
  ansible.builtin.systemd:
    daemon_reload: true

- name: Restart uptime-kuma
  ansible.builtin.systemd:
    name: uptime-kuma
    state: restarted
    daemon_reload: true

- name: Reload nginx
  ansible.builtin.systemd:
    name: nginx
    state: reloaded
3
roles/uptime-kuma/meta/main.yml
Normal file
@ -0,0 +1,3 @@
---
dependencies:
  - role: podman
58
roles/uptime-kuma/tasks/main.yml
Normal file
@ -0,0 +1,58 @@
---
- name: Create uptime-kuma project directory
  ansible.builtin.file:
    path: "{{ podman_projects_dir | default('/opt/podman') }}/uptime-kuma"
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0755"

- name: Create uptime-kuma data directory
  ansible.builtin.file:
    path: "{{ uptime_kuma_data_dir }}"
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0755"

- name: Deploy Kubernetes YAML for uptime-kuma
  ansible.builtin.template:
    src: uptime-kuma.yaml.j2
    dest: "{{ podman_projects_dir | default('/opt/podman') }}/uptime-kuma/uptime-kuma.yaml"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0644"
  notify: Restart uptime-kuma

- name: Create systemd service for uptime-kuma
  ansible.builtin.template:
    src: uptime-kuma.service.j2
    dest: /etc/systemd/system/uptime-kuma.service
    owner: root
    group: root
    mode: "0644"
  notify: Reload systemd

- name: Enable and start uptime-kuma service
  ansible.builtin.systemd:
    name: uptime-kuma
    enabled: true
    state: started
    daemon_reload: true

- name: Deploy nginx vhost configuration for uptime-kuma
  ansible.builtin.template:
    src: nginx-vhost.conf.j2
    dest: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/uptime-kuma.conf"
    owner: root
    group: root
    mode: "0644"
  when: uptime_kuma_nginx_enabled
  notify: Reload nginx

- name: Remove nginx vhost configuration for uptime-kuma
  ansible.builtin.file:
    path: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/uptime-kuma.conf"
    state: absent
  when: not uptime_kuma_nginx_enabled
  notify: Reload nginx
13
roles/uptime-kuma/templates/docker-compose.yml.j2
Normal file
@ -0,0 +1,13 @@
---
services:
  uptime-kuma:
    container_name: uptime-kuma
    image: {{ uptime_kuma_image }}:{{ uptime_kuma_version }}
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - {{ uptime_kuma_data_dir }}:/app/data:rw,Z
    ports:
      - "{{ uptime_kuma_port }}:3001"
    restart: unless-stopped
    environment:
      TZ: {{ uptime_kuma_timezone }}
54
roles/uptime-kuma/templates/nginx-vhost.conf.j2
Normal file
@ -0,0 +1,54 @@
# Uptime Kuma vhost with Let's Encrypt (Certbot)
# Managed by Ansible - DO NOT EDIT MANUALLY

server {
    listen 80;
    server_name {{ uptime_kuma_nginx_hostname }};

    # Certbot webroot for ACME challenges
    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    # Redirect to HTTPS
    location / {
        return 301 https://$server_name$request_uri;
    }
}

server {
    listen 443 ssl;
    server_name {{ uptime_kuma_nginx_hostname }};

    # Let's Encrypt certificates (managed by Certbot)
    ssl_certificate /etc/letsencrypt/live/{{ uptime_kuma_nginx_hostname }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ uptime_kuma_nginx_hostname }}/privkey.pem;

    # SSL configuration
    ssl_protocols {{ nginx_ssl_protocols | default('TLSv1.3') }};
    ssl_prefer_server_ciphers on;

{% if nginx_log_backend | default('journald') == 'journald' %}
    access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_uptime_kuma;
    error_log syslog:server=unix:/dev/log,nohostname,tag=nginx_uptime_kuma;
{% else %}
    access_log /var/log/nginx/{{ uptime_kuma_nginx_hostname }}_access.log main;
    error_log /var/log/nginx/{{ uptime_kuma_nginx_hostname }}_error.log;
{% endif %}

    location / {
        proxy_pass http://127.0.0.1:{{ uptime_kuma_port }};
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # WebSocket support for real-time updates
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        # Disable buffering for real-time updates
        proxy_buffering off;
    }
}
18
roles/uptime-kuma/templates/uptime-kuma.service.j2
Normal file
@ -0,0 +1,18 @@
[Unit]
Description=Uptime Kuma Monitoring Service
Requires=network-online.target
After=network-online.target

[Service]
Type=oneshot
RemainAfterExit=true
User={{ ansible_user }}
Group={{ ansible_user }}
WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/uptime-kuma
ExecStart=/usr/bin/podman play kube --replace uptime-kuma.yaml
ExecStop=/usr/bin/podman play kube --down uptime-kuma.yaml
Restart=on-failure
RestartSec=10

[Install]
WantedBy=multi-user.target
34
roles/uptime-kuma/templates/uptime-kuma.yaml.j2
Normal file
@ -0,0 +1,34 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: uptime-kuma
  labels:
    app: uptime-kuma
spec:
  containers:
    - name: server
      image: {{ uptime_kuma_image }}:{{ uptime_kuma_version }}
      ports:
        - containerPort: 3001
          hostPort: {{ uptime_kuma_port }}
      env:
        - name: TZ
          value: "{{ uptime_kuma_timezone }}"
      volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
          readOnly: true
        - name: uptime-kuma-data
          mountPath: /app/data
  restartPolicy: Always

  volumes:
    - name: localtime
      hostPath:
        path: /etc/localtime
        type: File
    - name: uptime-kuma-data
      hostPath:
        path: {{ uptime_kuma_data_dir }}
        type: Directory
@ -53,11 +53,18 @@ Service users must be registered via the `valkey_acl_users` list. See the ACL Co

#### Container Access

For containers to access Valkey, set `valkey_bind` to include the Podman gateway:
Valkey binds to `127.0.0.1` by default (secure, localhost-only).

Containers can reach Valkey via Pasta's `--map-host-loopback` feature, which routes the container's `127.0.0.1` to the host's `127.0.0.1`.

In docker-compose files, use:

```yaml
valkey_bind: "127.0.0.1 {{ podman_subnet_gateway }}"
```
```yaml
extra_hosts:
  - "host.containers.internal:127.0.0.1"
```

No additional bind addresses needed!
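A minimal sketch for verifying the mapping, assuming Valkey answers on the host's port 6379 and `app`/`secret` stand in for one of the ACL users defined in the inventory:

```bash
# Expect "PONG" if the loopback mapping and ACL credentials are correct
podman run --rm docker.io/valkey/valkey:8 \
  valkey-cli -h 127.0.0.1 -p 6379 --user app --pass secret ping
```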
**System Requirements:** This role automatically configures kernel parameters (`vm.overcommit_memory=1`) and transparent hugepage settings

## Dependencies