From 10f4eb5817675477d5c2c9d4173af1110188a949 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20D=C3=A9siles?= <1536672+cdesiles@users.noreply.github.com> Date: Sun, 21 Dec 2025 22:25:57 +0100 Subject: [PATCH] fix: podman connect --- roles/immich/README.md | 385 +----------------- roles/immich/defaults/main.yml | 12 +- roles/immich/tasks/main.yml | 24 +- roles/immich/templates/docker-compose.yml.j2 | 62 --- roles/immich/templates/immich.service.j2 | 6 +- roles/immich/templates/immich.yaml.j2 | 102 +++++ roles/immich/templates/nginx-vhost.conf.j2 | 6 +- roles/nginx/defaults/main.yml | 1 - roles/nginx/templates/nginx.conf.j2 | 2 +- roles/nginx/templates/vhost-acme.conf.j2 | 2 +- roles/ntfy/tasks/main.yml | 23 +- roles/ntfy/templates/docker-compose.yml.j2 | 23 -- roles/ntfy/templates/nginx-vhost.conf.j2 | 6 +- roles/ntfy/templates/ntfy.service.j2 | 6 +- roles/ntfy/templates/ntfy.yaml.j2 | 57 +++ roles/podman/README.md | 26 +- roles/podman/defaults/main.yml | 19 - roles/podman/tasks/main.yml | 38 +- roles/podman/templates/containers.conf.j2 | 4 + roles/uptime-kuma/tasks/main.yml | 12 +- .../uptime-kuma/templates/nginx-vhost.conf.j2 | 6 +- .../templates/uptime-kuma.service.j2 | 6 +- .../uptime-kuma/templates/uptime-kuma.yaml.j2 | 34 ++ 23 files changed, 291 insertions(+), 571 deletions(-) delete mode 100644 roles/immich/templates/docker-compose.yml.j2 create mode 100644 roles/immich/templates/immich.yaml.j2 delete mode 100644 roles/ntfy/templates/docker-compose.yml.j2 create mode 100644 roles/ntfy/templates/ntfy.yaml.j2 create mode 100644 roles/uptime-kuma/templates/uptime-kuma.yaml.j2 diff --git a/roles/immich/README.md b/roles/immich/README.md index cc40f55..61a351c 100644 --- a/roles/immich/README.md +++ b/roles/immich/README.md @@ -1,394 +1,21 @@ # Immich Role -This Ansible role deploys [Immich](https://immich.app/) - a high performance self-hosted photo and video management solution - using Podman with docker-compose files. 
- -## Requirements - -- Podman installed on the target system (handled by the `podman` role dependency) -- Podman compose support (`podman compose` command available) -- Sufficient disk space for photos/videos at the upload location +This Ansible role deploys [Immich](https://immich.app/) - a high performance self-hosted photo and video management solution - using Podman with k8s files. ## Role Variables See `defaults/main.yml` for all available variables and their default values. -### Key Configuration Requirements - -#### Required Passwords +### Required Passwords Both passwords must be set in your inventory (min 12 characters): - `immich_postgres_password` - PostgreSQL database password - `immich_valkey_password` - Valkey/Redis password -#### Valkey ACL Configuration - -**Important:** Immich requires a dedicated Valkey ACL user with specific permissions. This role provides the ACL configuration, but you must register it with the Valkey role. - -**Required Setup in Inventory:** - -Add the Immich user to your `valkey_acl_users` list in your inventory or host_vars: - -```yaml -# inventory/host_vars/yourserver.yml or group_vars/all.yml -valkey_acl_users: - - username: immich - password: "{{ immich_valkey_password }}" - keypattern: "immich_bull* immich_channel*" - commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha" -``` - -**ACL Breakdown:** -- `keypattern: "immich_bull* immich_channel*"` - Restricts access to BullMQ keys used by Immich -- `&*` - Allow all pub/sub channels (required for BullMQ job queues) -- `-@dangerous` - Deny dangerous commands (FLUSHDB, FLUSHALL, KEYS, etc.) -- `+@read +@write` - Allow read/write command groups -- `+@pubsub` - Allow pub/sub commands (SUBSCRIBE, PUBLISH, etc.) 
-- `+select` - Allow SELECT command (database switching) -- `+auth +ping +info` - Connection management commands -- `+eval +evalsha` - Lua scripting (required by BullMQ for atomic operations) - -**Based on:** [Immich GitHub Discussion #19727](https://github.com/immich-app/immich/discussions/19727#discussioncomment-13668749) - -**Security Benefits:** -- Immich cannot access keys from other services -- Cannot execute admin commands (FLUSHDB, CONFIG, etc.) -- Cannot view all keys (KEYS command denied) -- Defense-in-depth with ACL + key patterns + database numbers - -#### External Network Configuration - -Immich requires a dedicated external network to be defined in your inventory. Add this to your `host_vars` or `group_vars`: - -```yaml -podman_external_networks: - - name: immich - subnet: 172.20.0.0/16 - gateway: 172.20.0.1 -``` - -**How it works:** -1. Define the Immich network in `podman_external_networks` list in your inventory -2. The `podman` role (a dependency) creates the external network before Immich deployment -3. The Immich docker-compose file references this external network -4. The network persists across container restarts and compose stack rebuilds - -## Dependencies - -This role depends on: -- `podman` - Container runtime -- `postgres` - PostgreSQL database -- `valkey` - Redis-compatible cache (formerly Redis) - -**Note:** The Valkey role must be configured with the Immich ACL user (see Valkey Configuration section above) before running this role. 
- -## Example Playbook - -```yaml ---- -- hosts: servers - become: true - roles: - - role: podman - - role: immich - vars: - immich_postgres_password: "your-secure-postgres-password" - immich_valkey_password: "your-secure-valkey-password" - immich_upload_location: /mnt/storage/immich/upload - immich_timezone: America/New_York -``` - -**Complete Example with Valkey ACL:** - -In `inventory/host_vars/yourserver.yml`: - -```yaml -# Podman external networks -podman_external_networks: - - name: immich - subnet: 172.20.0.0/16 - gateway: 172.20.0.1 - -# Valkey admin password -valkey_admin_password: "your-valkey-admin-password" - -# Valkey ACL users - register all service users here -valkey_acl_users: - - username: immich - password: "{{ immich_valkey_password }}" - keypattern: "immich_bull* immich_channel*" - commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha" - # Add other services here as needed - -# Immich passwords -immich_postgres_password: "your-secure-postgres-password" -immich_valkey_password: "your-secure-valkey-password" -``` - -In your playbook: - -```yaml ---- -- hosts: servers - become: true - roles: - - role: valkey # Must run first to create ACL users - - role: postgres - - role: podman - - role: immich -``` - -## Architecture - -The role deploys Immich using Podman containers that connect to shared system services: - -**Immich Containers:** -1. **immich-server** - Main application server (exposed on configured port) -2. **immich-machine-learning** - ML service for facial recognition and object detection - -**Shared System Services:** -3. **PostgreSQL** - Database with vector extensions (from `postgres` role) -4. **Valkey** - Redis-compatible cache (from `valkey` role) - -### Container Networking - -Both Immich containers run on a **dedicated external Podman network** with its own CIDR block. 
The network is created by the `podman` role as an external network, referenced in the compose file: - -```yaml -networks: - immich: - external: true - name: immich -``` - -The actual network configuration (subnet: `172.20.0.0/16`, gateway: `172.20.0.1`) is handled by the podman role based on the `immich_network_*` variables. - -This provides: -- **Network isolation**: Separate subnet (defined in inventory, e.g., `172.20.0.0/16`) from other containers -- **Network persistence**: Network survives compose stack rebuilds and container recreation -- **Named bridge**: Explicit interface naming for the network -- **Container-to-container communication**: The server reaches the ML container via service name (`immich-machine-learning:3003`) using Docker/Podman internal DNS -- **Container-to-host communication**: Both containers can reach PostgreSQL and Valkey on the host via `host.containers.internal:{{ podman_subnet_gateway }}` - -**Key Points:** -- The network must be defined in your inventory via `podman_external_networks` -- The network is created by the `podman` role before Immich deployment (via role dependency) -- The Immich network has its own gateway (e.g., `172.20.0.1` as defined in inventory) -- `extra_hosts` maps `host.containers.internal` to the **Podman default bridge gateway** (e.g., `10.88.0.1`), not the Immich network gateway -- This allows containers to route to the host machine for PostgreSQL/Valkey access - -**Checking the network:** -```bash -# List all Podman networks -podman network ls - -# Inspect the Immich network -podman network inspect immich -``` - -### Data Isolation - -The role implements proper data isolation for both database backends: - -- **PostgreSQL**: Immich gets its own database (`immich`) and dedicated user (`immich`) with restricted privileges (NOSUPERUSER, NOCREATEDB, NOCREATEROLE) -- **Valkey**: Immich uses a dedicated ACL user (`immich`) with: - - Dedicated password (independent from `valkey_admin_password`) - - Key pattern 
restriction (`immich_bull*` and `immich_channel*` only) - - Command restrictions (no admin/dangerous operations like FLUSHDB, CONFIG) - - Database number isolation (uses DB 0 by default, configurable) - - Pub/sub channel access for BullMQ job queues - -**Security Benefits:** -- Each service has unique credentials -- Compromised service cannot access other services' data -- Cannot accidentally delete all data (FLUSHDB/FLUSHALL denied) -- Cannot view keys from other services (KEYS command denied) -- Defense-in-depth: ACL + key patterns + command restrictions + database numbers - -The compose file is deployed to `{{ podman_projects_dir }}/immich/docker-compose.yml` and managed via a systemd service. - -## Nginx Reverse Proxy with ACME/Let's Encrypt - -The role includes an Nginx vhost template with native ACME support for automatic HTTPS certificate management. - -**Prerequisites:** -1. Nginx role deployed with `acme_email` configured -2. Port 80/443 accessible from internet (for ACME HTTP-01 challenge) -3. DNS pointing to your server - -**Configuration:** -```yaml -# Enable Nginx reverse proxy -immich_nginx_enabled: true -immich_nginx_hostname: "blog.hello.com" - -# In nginx role configuration (host_vars or group_vars) -acme_email: "admin@blog.com" -``` - -**What it does:** -- Deploys HTTPS vhost with automatic Let's Encrypt certificate -- HTTP → HTTPS redirect -- Proxies to Immich container on localhost -- Handles WebSocket upgrades for live photos -- Large file upload support (50GB max) - -**ACME automatic features:** -- Certificate issuance on first deployment -- Automatic renewal -- HTTP-01 challenge handling - -## Post-Installation - -After deployment: - -1. Access Immich at: - - **With Nginx enabled**: `https://{{ immich_nginx_hostname }}` - - **Without Nginx**: `http://:{{ immich_port }}` -2. Create an admin account on first login -3. 
Configure mobile/desktop apps to point to your server - -## Management - -The role creates a systemd service for managing the compose stack: - -```bash -# Check status -systemctl status immich - -# Stop Immich -systemctl stop immich - -# Start Immich -systemctl start immich - -# Restart Immich -systemctl restart immich - -# View logs for all containers -cd /opt/podman/immich && podman compose logs -f - -# View logs for specific service -cd /opt/podman/immich && podman compose logs -f immich-server -``` - -### Manual Management - -You can also manage containers directly with podman compose: - -```bash -cd /opt/podman/immich - -# Start services -podman compose up -d - -# Stop services -podman compose down - -# Pull latest images -podman compose pull - -# Recreate containers -podman compose up -d --force-recreate -``` - -## Updating Immich - -To update to a newer version: - -1. Update the `immich_version` variable in your playbook or inventory -2. Re-run the Ansible playbook -3. The systemd service will restart with the new version - -Or manually: - -```bash -cd /opt/podman/immich -podman compose pull -systemctl restart immich -``` - -## Storage - -- **Upload location**: Stores all photos, videos, and thumbnails -- **Database location**: PostgreSQL data (not suitable for network shares) -- **Model cache**: ML models for facial recognition - -Ensure adequate disk space and regular backups of these directories. 
- -## Files Deployed - -- `{{ podman_projects_dir }}/immich/docker-compose.yml` - Compose definition -- `/etc/systemd/system/immich.service` - Systemd service unit - -## Security Considerations - -- **Set strong passwords** for both `immich_postgres_password` and `immich_valkey_password` (min 12 chars) -- **Use Ansible Vault** to encrypt passwords in production: - ```bash - ansible-vault encrypt_string 'your-password' --name 'immich_postgres_password' - ansible-vault encrypt_string 'your-password' --name 'immich_valkey_password' - ``` -- **Configure Valkey ACL** properly (see Valkey Configuration section) - do not use `+@all` -- Consider using a reverse proxy (nginx/traefik) for HTTPS -- Restrict access via firewall rules if needed -- Keep Immich updated by changing `immich_version` and redeploying - ## Troubleshooting -### Check service status -```bash -systemctl status immich -``` - -### View compose file -```bash -cat /opt/podman/immich/docker-compose.yml -``` - -### Check container status -```bash -cd /opt/podman/immich -podman compose ps -``` - -### View logs -```bash -cd /opt/podman/immich -podman compose logs -``` - ### Valkey ACL Issues -**Error: "NOPERM No permissions to access a channel"** -- The Valkey ACL is missing channel permissions -- Ensure `&*` or `+allchannels` is in the ACL commands -- Verify ACL is properly loaded: `valkey-cli ACL LIST` - -**Error: "NOAUTH Authentication required"** -- Check `immich_valkey_password` is set correctly -- Verify the password matches in both inventory ACL config and immich vars - -**Error: "WRONGPASS invalid username-password pair"** -- Ensure the Immich user is registered in `valkey_acl_users` -- Check the Valkey ACL file was deployed: `cat /etc/valkey/users.acl` -- Restart Valkey to reload ACL: `systemctl restart valkey` - -**Verify Valkey ACL Configuration:** -```bash -# Connect as admin -valkey-cli -AUTH default - -# List all ACL users -ACL LIST - -# Check specific user -ACL GETUSER immich - -# Monitor commands 
(useful for debugging permissions) -MONITOR -``` - **Test Immich user credentials:** ```bash valkey-cli @@ -402,10 +29,4 @@ FLUSHDB # Should return: (error) NOPERM ``` -## License - -MIT - -## Author Information - -Created for deploying Immich on NAS systems using Podman and docker-compose. +**Going further:** [Immich GitHub Discussion #19727](https://github.com/immich-app/immich/discussions/19727#discussioncomment-13668749) diff --git a/roles/immich/defaults/main.yml b/roles/immich/defaults/main.yml index 3e4808f..cdfbb0d 100644 --- a/roles/immich/defaults/main.yml +++ b/roles/immich/defaults/main.yml @@ -9,13 +9,13 @@ immich_upload_location: "{{ podman_projects_dir }}/immich/data/upload" immich_postgres_db_name: immich immich_postgres_user: immich # immich_postgres_password: "" # Intentionally undefined - role will fail if not set -immich_postgres_host: postgres.local +# immich_postgres_host: "" # Must be set in inventory (e.g., podman_gw_gateway) immich_postgres_port: 5432 # Valkey configuration (REQUIRED password - must be set explicitly) immich_valkey_user: immich # immich_valkey_password: "" # Intentionally undefined - role will fail if not set -immich_valkey_host: valkey.local +# immich_valkey_host: "" # Must be set in inventory (e.g., podman_gw_gateway) immich_valkey_port: 6379 immich_valkey_db: 0 # Dedicated database number for isolation (0-15) @@ -37,14 +37,6 @@ immich_valkey_acl: # Network configuration immich_port: 2283 -# External network configuration -# Define in inventory via podman_external_networks list -# Example: -# podman_external_networks: -# - name: immich -# subnet: 172.20.0.0/16 -# gateway: 172.20.0.1 - # Container images immich_server_image: ghcr.io/immich-app/immich-server immich_ml_image: ghcr.io/immich-app/immich-machine-learning diff --git a/roles/immich/tasks/main.yml b/roles/immich/tasks/main.yml index b2f9839..3193e34 100644 --- a/roles/immich/tasks/main.yml +++ b/roles/immich/tasks/main.yml @@ -16,14 +16,14 @@ name: "{{ 
immich_postgres_db_name }}" owner: "{{ immich_postgres_user }}" state: present - become_user: "{{ postgres_admin_user }}" + become_user: "{{ postgres_admin_user | default('postgres') }}" - name: Create PostgreSQL user for Immich community.postgresql.postgresql_user: name: "{{ immich_postgres_user }}" password: "{{ immich_postgres_password }}" state: present - become_user: "{{ postgres_admin_user }}" + become_user: "{{ postgres_admin_user | default('postgres') }}" - name: Grant all privileges on database to Immich user community.postgresql.postgresql_privs: @@ -32,21 +32,21 @@ type: database privs: ALL state: present - become_user: "{{ postgres_admin_user }}" + become_user: "{{ postgres_admin_user | default('postgres') }}" - name: Ensure Immich user has no superuser privileges community.postgresql.postgresql_user: name: "{{ immich_postgres_user }}" role_attr_flags: NOSUPERUSER,NOCREATEDB,NOCREATEROLE state: present - become_user: "{{ postgres_admin_user }}" + become_user: "{{ postgres_admin_user | default('postgres') }}" - name: Enable required PostgreSQL extensions in Immich database community.postgresql.postgresql_ext: name: "{{ item }}" login_db: "{{ immich_postgres_db_name }}" state: present - become_user: "{{ postgres_admin_user }}" + become_user: "{{ postgres_admin_user | default('postgres') }}" loop: - cube - earthdistance @@ -60,11 +60,11 @@ objs: public privs: CREATE,USAGE state: present - become_user: "{{ postgres_admin_user }}" + become_user: "{{ postgres_admin_user | default('postgres') }}" - name: Create Immich project directory ansible.builtin.file: - path: "{{ podman_projects_dir }}/immich" + path: "{{ podman_projects_dir | default('/opt/podman') }}/immich" state: directory owner: "{{ ansible_user }}" group: "{{ ansible_user }}" @@ -80,10 +80,10 @@ loop: - "{{ immich_upload_location }}" -- name: Deploy docker-compose.yml for Immich +- name: Deploy Kubernetes YAML for Immich ansible.builtin.template: - src: docker-compose.yml.j2 - dest: "{{ 
podman_projects_dir }}/immich/docker-compose.yml" + src: immich.yaml.j2 + dest: "{{ podman_projects_dir | default('/opt/podman') }}/immich/immich.yaml" owner: "{{ ansible_user }}" group: "{{ ansible_user }}" mode: "0644" @@ -108,7 +108,7 @@ - name: Deploy nginx vhost configuration for Immich ansible.builtin.template: src: nginx-vhost.conf.j2 - dest: "{{ nginx_conf_dir }}/immich.conf" + dest: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/immich.conf" owner: root group: root mode: "0644" @@ -117,7 +117,7 @@ - name: Remove nginx vhost configuration for Immich ansible.builtin.file: - path: "{{ nginx_conf_dir }}/immich.conf" + path: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/immich.conf" state: absent when: not immich_nginx_enabled notify: Reload nginx diff --git a/roles/immich/templates/docker-compose.yml.j2 b/roles/immich/templates/docker-compose.yml.j2 deleted file mode 100644 index 53a2b66..0000000 --- a/roles/immich/templates/docker-compose.yml.j2 +++ /dev/null @@ -1,62 +0,0 @@ ---- -services: - immich-server: - container_name: immich_server - image: {{ immich_server_image }}:{{ immich_version }} - networks: - - databases - - immich - extra_hosts: - - "{{ immich_postgres_host }}:{{ podman_subnet_gateway }}" - - "{{ immich_valkey_host }}:{{ podman_subnet_gateway }}" - volumes: - - /etc/localtime:/etc/localtime:ro - - {{ immich_upload_location }}:/data:rw,Z - environment: - DB_HOSTNAME: {{ immich_postgres_host }} - DB_PORT: {{ immich_postgres_port }} - DB_USERNAME: {{ immich_postgres_user }} - DB_PASSWORD: {{ immich_postgres_password }} - DB_DATABASE_NAME: {{ immich_postgres_db_name }} - REDIS_HOSTNAME: {{ immich_valkey_host }} - REDIS_PORT: {{ immich_valkey_port }} - REDIS_USERNAME: {{ immich_valkey_user }} - REDIS_PASSWORD: {{ immich_valkey_password }} - REDIS_DBINDEX: {{ immich_valkey_db }} - IMMICH_MACHINE_LEARNING_URL: http://immich-machine-learning:3003 - UPLOAD_LOCATION: {{ immich_upload_location }} - TZ: {{ immich_timezone }} - ports: - - "{{ 
immich_port }}:2283" - restart: always - healthcheck: - test: ["CMD-SHELL", "curl -f http://localhost:2283/api/server/ping"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 60s - - immich-machine-learning: - container_name: immich_machine_learning - image: {{ immich_ml_image }}:{{ immich_version }} - networks: - - immich - volumes: - - model-cache:/cache - restart: always - healthcheck: - test: ["CMD", "python", "/usr/src/healthcheck.py"] - interval: 30s - timeout: 10s - retries: 3 - start_period: 60s - -networks: - databases: - name: podman - external: true - immich: - driver: bridge - -volumes: - model-cache: diff --git a/roles/immich/templates/immich.service.j2 b/roles/immich/templates/immich.service.j2 index 0fc1006..38f598c 100644 --- a/roles/immich/templates/immich.service.j2 +++ b/roles/immich/templates/immich.service.j2 @@ -8,9 +8,9 @@ Type=oneshot RemainAfterExit=true User={{ ansible_user }} Group={{ ansible_user }} -WorkingDirectory={{ podman_projects_dir }}/immich -ExecStart=/usr/bin/podman-compose up -d -ExecStop=/usr/bin/podman-compose down +WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/immich +ExecStart=/usr/bin/podman play kube --replace immich.yaml +ExecStop=/usr/bin/podman play kube --down immich.yaml Restart=on-failure RestartSec=10 diff --git a/roles/immich/templates/immich.yaml.j2 b/roles/immich/templates/immich.yaml.j2 new file mode 100644 index 0000000..4fed5a7 --- /dev/null +++ b/roles/immich/templates/immich.yaml.j2 @@ -0,0 +1,102 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: immich + labels: + app: immich + annotations: + io.podman.annotations.network.mode: bridge + io.podman.annotations.network.name: podman-gw +spec: + containers: + - name: server + image: {{ immich_server_image }}:{{ immich_version }} + ports: + - containerPort: 2283 + hostPort: {{ immich_port }} + env: + - name: DB_HOSTNAME + value: "{{ immich_postgres_host }}" + - name: DB_PORT + value: "{{ immich_postgres_port }}" + - name: 
DB_USERNAME + value: "{{ immich_postgres_user }}" + - name: DB_PASSWORD + value: "{{ immich_postgres_password }}" + - name: DB_DATABASE_NAME + value: "{{ immich_postgres_db_name }}" + - name: REDIS_HOSTNAME + value: "{{ immich_valkey_host }}" + - name: REDIS_PORT + value: "{{ immich_valkey_port }}" + - name: REDIS_USERNAME + value: "{{ immich_valkey_user }}" + - name: REDIS_PASSWORD + value: "{{ immich_valkey_password }}" + - name: REDIS_DBINDEX + value: "{{ immich_valkey_db }}" + - name: IMMICH_MACHINE_LEARNING_URL + value: http://localhost:3003 + - name: UPLOAD_LOCATION + value: /data + - name: TZ + value: "{{ immich_timezone }}" + volumeMounts: + - name: localtime + mountPath: /etc/localtime + readOnly: true + - name: immich-data + mountPath: /data + livenessProbe: + httpGet: + path: /api/server/ping + port: 2283 + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 + restartPolicy: Always + + - name: machine-learning + image: {{ immich_ml_image }}:{{ immich_version }} + env: + - name: TZ + value: "{{ immich_timezone }}" + volumeMounts: + - name: model-cache + mountPath: /cache + livenessProbe: + exec: + command: + - python + - /usr/src/healthcheck.py + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 + restartPolicy: Always + + volumes: + - name: localtime + hostPath: + path: /etc/localtime + type: File + - name: immich-data + hostPath: + path: {{ immich_upload_location }} + type: Directory + - name: model-cache + persistentVolumeClaim: + claimName: immich-model-cache +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: immich-model-cache +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi diff --git a/roles/immich/templates/nginx-vhost.conf.j2 b/roles/immich/templates/nginx-vhost.conf.j2 index 7f6acfe..f9ad6ac 100644 --- a/roles/immich/templates/nginx-vhost.conf.j2 +++ b/roles/immich/templates/nginx-vhost.conf.j2 @@ -25,10 +25,10 @@ server { 
ssl_certificate_key /etc/letsencrypt/live/{{ immich_nginx_hostname }}/privkey.pem; # SSL configuration - ssl_protocols {{ nginx_ssl_protocols }}; - ssl_prefer_server_ciphers {{ 'on' if nginx_ssl_prefer_server_ciphers else 'off' }}; + ssl_protocols {{ nginx_ssl_protocols | default('TLSv1.3') }}; + ssl_prefer_server_ciphers on; -{% if nginx_log_backend == 'journald' %} +{% if nginx_log_backend | default('journald') == 'journald' %} access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_immich; error_log syslog:server=unix:/dev/log,nohostname,tag=nginx_immich; {% else %} diff --git a/roles/nginx/defaults/main.yml b/roles/nginx/defaults/main.yml index 3274c33..c41cddd 100644 --- a/roles/nginx/defaults/main.yml +++ b/roles/nginx/defaults/main.yml @@ -16,7 +16,6 @@ nginx_client_max_body_size: 100M # SSL configuration (volontarily omit TLSv1.2 here) nginx_ssl_protocols: TLSv1.3 -nginx_ssl_prefer_server_ciphers: true # Logging configuration # Backend: 'file' (traditional /var/log/nginx/*.log) or 'journald' (systemd journal) diff --git a/roles/nginx/templates/nginx.conf.j2 b/roles/nginx/templates/nginx.conf.j2 index c982165..5bc716b 100644 --- a/roles/nginx/templates/nginx.conf.j2 +++ b/roles/nginx/templates/nginx.conf.j2 @@ -48,7 +48,7 @@ http { # SSL configuration ssl_protocols {{ nginx_ssl_protocols }}; - ssl_prefer_server_ciphers {{ 'on' if nginx_ssl_prefer_server_ciphers else 'off' }}; + ssl_prefer_server_ciphers on; # Load modular configuration files from the conf.d directory include {{ nginx_conf_dir }}/*.conf; diff --git a/roles/nginx/templates/vhost-acme.conf.j2 b/roles/nginx/templates/vhost-acme.conf.j2 index d1615c9..4a8d426 100644 --- a/roles/nginx/templates/vhost-acme.conf.j2 +++ b/roles/nginx/templates/vhost-acme.conf.j2 @@ -26,7 +26,7 @@ server { # SSL configuration ssl_protocols {{ nginx_ssl_protocols }}; - ssl_prefer_server_ciphers {{ 'on' if nginx_ssl_prefer_server_ciphers else 'off' }}; + ssl_prefer_server_ciphers on; {% if nginx_log_backend == 
'journald' %} access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_{{ server_name | replace('.', '_') }}; diff --git a/roles/ntfy/tasks/main.yml b/roles/ntfy/tasks/main.yml index 8e5e0ee..8cf64d1 100644 --- a/roles/ntfy/tasks/main.yml +++ b/roles/ntfy/tasks/main.yml @@ -11,7 +11,7 @@ - name: Create ntfy project directory ansible.builtin.file: - path: "{{ podman_projects_dir }}/ntfy" + path: "{{ podman_projects_dir | default('/opt/podman') }}/ntfy" state: directory owner: "{{ ansible_user }}" group: "{{ ansible_user }}" @@ -31,16 +31,16 @@ - name: Deploy ntfy server configuration ansible.builtin.template: src: server.yml.j2 - dest: "{{ podman_projects_dir }}/ntfy/server.yml" + dest: "{{ podman_projects_dir | default('/opt/podman') }}/ntfy/server.yml" owner: "{{ ansible_user }}" group: "{{ ansible_user }}" mode: "0644" notify: Restart ntfy -- name: Deploy docker-compose.yml for ntfy +- name: Deploy Kubernetes YAML for ntfy ansible.builtin.template: - src: docker-compose.yml.j2 - dest: "{{ podman_projects_dir }}/ntfy/docker-compose.yml" + src: ntfy.yaml.j2 + dest: "{{ podman_projects_dir | default('/opt/podman') }}/ntfy/ntfy.yaml" owner: "{{ ansible_user }}" group: "{{ ansible_user }}" mode: "0644" @@ -70,28 +70,31 @@ - name: Check if admin user already exists ansible.builtin.command: - cmd: podman exec ntfy ntfy user list + cmd: podman exec ntfy-server ntfy user list register: ntfy_user_list changed_when: false failed_when: false + become_user: "{{ ansible_user }}" - name: Create admin user in ntfy ansible.builtin.shell: | - printf '%s\n%s\n' '{{ ntfy_admin_password }}' '{{ ntfy_admin_password }}' | podman exec -i ntfy ntfy user add --role=admin {{ ntfy_admin_user }} + printf '%s\n%s\n' '{{ ntfy_admin_password }}' '{{ ntfy_admin_password }}' | podman exec -i ntfy-server ntfy user add --role=admin {{ ntfy_admin_user }} when: ntfy_admin_user not in ntfy_user_list.stdout register: ntfy_user_create changed_when: ntfy_user_create.rc == 0 + become_user: "{{ 
ansible_user }}"
 - name: Set admin user password
   ansible.builtin.shell: |
-    printf '%s\n%s\n' '{{ ntfy_admin_password }}' '{{ ntfy_admin_password }}' | podman exec -i ntfy ntfy user change-pass {{ ntfy_admin_user }}
+    printf '%s\n%s\n' '{{ ntfy_admin_password }}' '{{ ntfy_admin_password }}' | podman exec -i ntfy-server ntfy user change-pass {{ ntfy_admin_user }}
   when: ntfy_admin_user in ntfy_user_list.stdout
   changed_when: false
+  become_user: "{{ ansible_user }}"
 
 - name: Deploy nginx vhost configuration for ntfy
   ansible.builtin.template:
     src: nginx-vhost.conf.j2
-    dest: "{{ nginx_conf_dir }}/ntfy.conf"
+    dest: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/ntfy.conf"
     owner: root
     group: root
     mode: "0644"
@@ -100,7 +103,7 @@
 
 - name: Remove nginx vhost configuration for ntfy
   ansible.builtin.file:
-    path: "{{ nginx_conf_dir }}/ntfy.conf"
+    path: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/ntfy.conf"
     state: absent
   when: not ntfy_nginx_enabled
   notify: Reload nginx
diff --git a/roles/ntfy/templates/docker-compose.yml.j2 b/roles/ntfy/templates/docker-compose.yml.j2
deleted file mode 100644
index 75e37fa..0000000
--- a/roles/ntfy/templates/docker-compose.yml.j2
+++ /dev/null
@@ -1,23 +0,0 @@
----
-services:
-  ntfy:
-    container_name: ntfy
-    image: {{ ntfy_image }}:{{ ntfy_version }}
-    command:
-      - serve
-    volumes:
-      - /etc/localtime:/etc/localtime:ro
-      - {{ podman_projects_dir }}/ntfy/server.yml:/etc/ntfy/server.yml:ro
-      - {{ ntfy_cache_dir }}:/var/cache/ntfy:rw,Z
-      - {{ ntfy_data_dir }}:/var/lib/ntfy:rw,Z
-    ports:
-      - "{{ ntfy_port }}:80"
-    restart: always
-    healthcheck:
-      test: ["CMD-SHELL", "wget -q --tries=1 http://localhost:80/v1/health -O - | grep -Eo '\"healthy\"\\s*:\\s*true' || exit 1"]
-      interval: 30s
-      timeout: 10s
-      retries: 3
-      start_period: 40s
-    environment:
-      TZ: {{ ntfy_timezone }}
diff --git a/roles/ntfy/templates/nginx-vhost.conf.j2 b/roles/ntfy/templates/nginx-vhost.conf.j2
index 5e09b6c..e5bef40 100644
--- a/roles/ntfy/templates/nginx-vhost.conf.j2
+++ b/roles/ntfy/templates/nginx-vhost.conf.j2
@@ -25,10 +25,10 @@ server {
     ssl_certificate_key /etc/letsencrypt/live/{{ ntfy_nginx_hostname }}/privkey.pem;
 
     # SSL configuration
-    ssl_protocols {{ nginx_ssl_protocols }};
-    ssl_prefer_server_ciphers {{ 'on' if nginx_ssl_prefer_server_ciphers else 'off' }};
+    ssl_protocols {{ nginx_ssl_protocols | default('TLSv1.3') }};
+    ssl_prefer_server_ciphers on;
 
-{% if nginx_log_backend == 'journald' %}
+{% if nginx_log_backend | default('journald') == 'journald' %}
     access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_ntfy;
     error_log syslog:server=unix:/dev/log,nohostname,tag=nginx_ntfy;
 {% else %}
diff --git a/roles/ntfy/templates/ntfy.service.j2 b/roles/ntfy/templates/ntfy.service.j2
index 6072e2e..dec3d39 100644
--- a/roles/ntfy/templates/ntfy.service.j2
+++ b/roles/ntfy/templates/ntfy.service.j2
@@ -8,9 +8,9 @@ Type=oneshot
 RemainAfterExit=true
 User={{ ansible_user }}
 Group={{ ansible_user }}
-WorkingDirectory={{ podman_projects_dir }}/ntfy
-ExecStart=/usr/bin/podman-compose up -d
-ExecStop=/usr/bin/podman-compose down
+WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/ntfy
+ExecStart=/usr/bin/podman play kube --replace ntfy.yaml
+ExecStop=/usr/bin/podman play kube --down ntfy.yaml
 
 Restart=on-failure
 RestartSec=10
diff --git a/roles/ntfy/templates/ntfy.yaml.j2 b/roles/ntfy/templates/ntfy.yaml.j2
new file mode 100644
index 0000000..94e38ce
--- /dev/null
+++ b/roles/ntfy/templates/ntfy.yaml.j2
@@ -0,0 +1,57 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: ntfy
+  labels:
+    app: ntfy
+spec:
+  containers:
+    - name: server
+      image: {{ ntfy_image }}:{{ ntfy_version }}
+      args:
+        - serve
+      ports:
+        - containerPort: 80
+          hostPort: {{ ntfy_port }}
+      env:
+        - name: TZ
+          value: "{{ ntfy_timezone }}"
+      volumeMounts:
+        - name: localtime
+          mountPath: /etc/localtime
+          readOnly: true
+        - name: ntfy-config
+          mountPath: /etc/ntfy/server.yml
+          readOnly: true
+        - name: ntfy-cache
+          mountPath: /var/cache/ntfy
+        - name: ntfy-data
+          mountPath: /var/lib/ntfy
+      livenessProbe:
+        httpGet:
+          path: /v1/health
+          port: 80
+        initialDelaySeconds: 40
+        periodSeconds: 30
+        timeoutSeconds: 10
+        failureThreshold: 3
+  restartPolicy: Always
+
+  volumes:
+    - name: localtime
+      hostPath:
+        path: /etc/localtime
+        type: File
+    - name: ntfy-config
+      hostPath:
+        path: {{ podman_projects_dir | default('/opt/podman') }}/ntfy/server.yml
+        type: File
+    - name: ntfy-cache
+      hostPath:
+        path: {{ ntfy_cache_dir }}
+        type: Directory
+    - name: ntfy-data
+      hostPath:
+        path: {{ ntfy_data_dir }}
+        type: Directory
diff --git a/roles/podman/README.md b/roles/podman/README.md
index f301158..41aeae7 100644
--- a/roles/podman/README.md
+++ b/roles/podman/README.md
@@ -6,9 +6,8 @@ Installs and configures Podman for container management with support for Docker
 
 - Installs Podman, podman-compose, and crun (OCI runtime)
 - Configurable logging backend (journald or k8s-file)
-- External network creation for service isolation
 - Container registry search configuration
-- Shared projects directory for compose files
+- Shared projects directory for Kubernetes YAML files
 
 ## Container Logging
 
@@ -22,19 +21,6 @@
 
 Switch via `podman_log_driver` variable.
 
-## External Networks
-
-Define networks in inventory for persistent, isolated container networks:
-
-```yaml
-podman_external_networks:
-  - name: immich
-    subnet: 172.20.0.0/16
-    gateway: 172.20.0.1
-```
-
-Networks persist across container restarts and compose rebuilds.
-
 ## Hands-on Commands
 
 ```bash
@@ -53,8 +39,14 @@ podman inspect | jq '.[0].HostConfig.LogConfig'
# Test configuration
podman run --rm alpine echo "OK"
 
-# List networks
-podman network ls
+# Play Kubernetes YAML
+podman play kube --replace /path/to/pod.yaml
+
+# Stop pod
+podman play kube --down /path/to/pod.yaml
+
+# List pods
+podman pod ls
 ```
 
 ## References
diff --git a/roles/podman/defaults/main.yml b/roles/podman/defaults/main.yml
index 7e1718a..8b46050 100644
--- a/roles/podman/defaults/main.yml
+++ b/roles/podman/defaults/main.yml
@@ -7,14 +7,6 @@ podman_unqualified_search_registries:
   - quay.io
   - ghcr.io
 
-# Podman bridge network configuration
-# Define the network where containers will operate
-# Leave empty to use Podman's default dynamic network assignment
-# Example: "10.89.0.0/24" if you want to explicitly set it
-podman_subnet: ""
-# Podman bridge gateway IP (typically .1 of the bridge network)
-# Used by services that need to bind to the bridge interface
-
 # OCI Runtime
 # crun (default, modern C runtime - fast) or runc (original Go runtime)
 podman_runtime: crun
@@ -26,14 +18,3 @@ podman_log_driver: journald
 # k8s-file driver settings (only used when podman_log_driver: k8s-file)
 podman_log_max_size: 10mb # Max size per log file before rotation
 podman_log_max_files: 5 # Max number of rotated log files to keep
-
-# Each network should define: name, subnet, gateway
-# podman_external_networks: []
-# Example:
-# podman_external_networks:
-#   - name: immich
-#     subnet: 172.20.0.0/16
-#     gateway: 172.20.0.1
-#   - name: nextcloud
-#     subnet: 172.21.0.0/16
-#     gateway: 172.21.0.1
diff --git a/roles/podman/tasks/main.yml b/roles/podman/tasks/main.yml
index 3bcf7a6..ca34f50 100644
--- a/roles/podman/tasks/main.yml
+++ b/roles/podman/tasks/main.yml
@@ -7,6 +7,35 @@
       - crun
     state: present
 
+- name: Check if tun module is available
+  ansible.builtin.stat:
+    path: "/lib/modules/{{ ansible_kernel }}/modules.builtin"
+  register: kernel_modules
+
+- name: Load tun kernel module for rootless Podman networking
+  community.general.modprobe:
+    name: tun
+    state: present
+  when: kernel_modules.stat.exists
+  register: tun_loaded
+  ignore_errors: true
+
+- name: Ensure tun module loads on boot
+  ansible.builtin.copy:
+    content: "tun\n"
+    dest: /etc/modules-load.d/tun.conf
+    owner: root
+    group: root
+    mode: "0644"
+
+- name: Warn user about reboot requirement for tun module
+  ansible.builtin.debug:
+    msg: |
+      WARNING: tun kernel module could not be loaded (kernel modules not available).
+      A REBOOT IS REQUIRED for the tun module to load and enable Pasta networking.
+      After reboot, rootless Podman containers will have better network performance.
+  when: not kernel_modules.stat.exists or (tun_loaded is defined and tun_loaded is failed)
+
 - name: Enable Podman service
   ansible.builtin.systemd:
     name: podman.service
@@ -35,12 +64,3 @@
     owner: root
     group: root
     mode: "0644"
-
-- name: Create external Podman networks
-  containers.podman.podman_network:
-    name: "{{ item.name }}"
-    subnet: "{{ item.subnet }}"
-    gateway: "{{ item.gateway }}"
-    state: present
-  loop: "{{ podman_external_networks | default([]) }}"
-  when: podman_external_networks is defined and podman_external_networks | length > 0
diff --git a/roles/podman/templates/containers.conf.j2 b/roles/podman/templates/containers.conf.j2
index ffd4141..8fa0270 100644
--- a/roles/podman/templates/containers.conf.j2
+++ b/roles/podman/templates/containers.conf.j2
@@ -27,3 +27,7 @@ runtime = "{{ podman_runtime }}"
 
 # Default network backend
 network_backend = "netavark"
+
+[network]
+# Default rootless network command (pasta for better performance)
+default_rootless_network_cmd = "pasta"
diff --git a/roles/uptime-kuma/tasks/main.yml b/roles/uptime-kuma/tasks/main.yml
index c78089c..d481caa 100644
--- a/roles/uptime-kuma/tasks/main.yml
+++ b/roles/uptime-kuma/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: Create uptime-kuma project directory
   ansible.builtin.file:
-    path: "{{ podman_projects_dir }}/uptime-kuma"
+    path: "{{ podman_projects_dir | default('/opt/podman') }}/uptime-kuma"
     state: directory
     owner: "{{ ansible_user }}"
     group: "{{ ansible_user }}"
@@ -15,10 +15,10 @@
     group: "{{ ansible_user }}"
     mode: "0755"
 
-- name: Deploy docker-compose.yml for uptime-kuma
+- name: Deploy Kubernetes YAML for uptime-kuma
   ansible.builtin.template:
-    src: docker-compose.yml.j2
-    dest: "{{ podman_projects_dir }}/uptime-kuma/docker-compose.yml"
+    src: uptime-kuma.yaml.j2
+    dest: "{{ podman_projects_dir | default('/opt/podman') }}/uptime-kuma/uptime-kuma.yaml"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0644"
@@ -43,7 +43,7 @@
 - name: Deploy nginx vhost configuration for uptime-kuma
   ansible.builtin.template:
     src: nginx-vhost.conf.j2
-    dest: "{{ nginx_conf_dir }}/uptime-kuma.conf"
+    dest: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/uptime-kuma.conf"
     owner: root
     group: root
     mode: "0644"
@@ -52,7 +52,7 @@
 
 - name: Remove nginx vhost configuration for uptime-kuma
   ansible.builtin.file:
-    path: "{{ nginx_conf_dir }}/uptime-kuma.conf"
+    path: "{{ nginx_conf_dir | default('/etc/nginx/conf.d') }}/uptime-kuma.conf"
     state: absent
   when: not uptime_kuma_nginx_enabled
   notify: Reload nginx
diff --git a/roles/uptime-kuma/templates/nginx-vhost.conf.j2 b/roles/uptime-kuma/templates/nginx-vhost.conf.j2
index 95ac72b..4b5c13e 100644
--- a/roles/uptime-kuma/templates/nginx-vhost.conf.j2
+++ b/roles/uptime-kuma/templates/nginx-vhost.conf.j2
@@ -25,10 +25,10 @@ server {
     ssl_certificate_key /etc/letsencrypt/live/{{ uptime_kuma_nginx_hostname }}/privkey.pem;
 
     # SSL configuration
-    ssl_protocols {{ nginx_ssl_protocols }};
-    ssl_prefer_server_ciphers {{ 'on' if nginx_ssl_prefer_server_ciphers else 'off' }};
+    ssl_protocols {{ nginx_ssl_protocols | default('TLSv1.3') }};
+    ssl_prefer_server_ciphers on;
 
-{% if nginx_log_backend == 'journald' %}
+{% if nginx_log_backend | default('journald') == 'journald' %}
    access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_uptime_kuma;
    error_log syslog:server=unix:/dev/log,nohostname,tag=nginx_uptime_kuma;
 {% else %}
diff --git a/roles/uptime-kuma/templates/uptime-kuma.service.j2 b/roles/uptime-kuma/templates/uptime-kuma.service.j2
index 46d8fce..9f53399 100644
--- a/roles/uptime-kuma/templates/uptime-kuma.service.j2
+++ b/roles/uptime-kuma/templates/uptime-kuma.service.j2
@@ -8,9 +8,9 @@ Type=oneshot
 RemainAfterExit=true
 User={{ ansible_user }}
 Group={{ ansible_user }}
-WorkingDirectory={{ podman_projects_dir }}/uptime-kuma
-ExecStart=/usr/bin/podman-compose up -d
-ExecStop=/usr/bin/podman-compose down
+WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/uptime-kuma
+ExecStart=/usr/bin/podman play kube --replace uptime-kuma.yaml
+ExecStop=/usr/bin/podman play kube --down uptime-kuma.yaml
 
 Restart=on-failure
 RestartSec=10
diff --git a/roles/uptime-kuma/templates/uptime-kuma.yaml.j2 b/roles/uptime-kuma/templates/uptime-kuma.yaml.j2
new file mode 100644
index 0000000..191bb7b
--- /dev/null
+++ b/roles/uptime-kuma/templates/uptime-kuma.yaml.j2
@@ -0,0 +1,34 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: uptime-kuma
+  labels:
+    app: uptime-kuma
+spec:
+  containers:
+    - name: server
+      image: {{ uptime_kuma_image }}:{{ uptime_kuma_version }}
+      ports:
+        - containerPort: 3001
+          hostPort: {{ uptime_kuma_port }}
+      env:
+        - name: TZ
+          value: "{{ uptime_kuma_timezone }}"
+      volumeMounts:
+        - name: localtime
+          mountPath: /etc/localtime
+          readOnly: true
+        - name: uptime-kuma-data
+          mountPath: /app/data
+  restartPolicy: Always
+
+  volumes:
+    - name: localtime
+      hostPath:
+        path: /etc/localtime
+        type: File
+    - name: uptime-kuma-data
+      hostPath:
+        path: {{ uptime_kuma_data_dir }}
+        type: Directory