fix: user systemd

Clément Désiles 2025-12-23 09:08:43 +01:00
parent 1cdad04a93
commit 229f9f6b5d
21 changed files with 299 additions and 162 deletions

.gitignore (vendored, 1 addition)
View File

@@ -4,6 +4,7 @@ inventory/*
 inventory/host_vars/*
 !inventory/host_vars/example.yml
 inventory_data/
+playbook.yml
 playbooks/*
 !playbooks/example.yml
 TODO.md

View File

@@ -3,11 +3,15 @@
   ansible.builtin.systemd:
     daemon_reload: true
 
+- name: Reload systemd user
+  ansible.builtin.command: "systemctl --user daemon-reload"
+  become: true
+  become_user: "{{ ansible_user }}"
+
 - name: Restart gitea
-  ansible.builtin.systemd:
-    name: gitea
-    state: restarted
-    daemon_reload: true
+  ansible.builtin.command: "systemctl --user restart gitea.service"
+  become: true
+  become_user: "{{ ansible_user }}"
 
 - name: Reload nginx
   ansible.builtin.systemd:
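
One caveat the new handler pattern leaves open (an observation, not part of this commit): `systemctl --user` must reach the target user's D-Bus session instance, which `become_user` alone does not always provide when no login session exists. A minimal sketch of a guard, reusing the UID from the same `getent` lookup the task files already perform:

```yaml
# Sketch only (not in this commit): point the handler at the user's runtime
# directory so systemctl --user can find the session bus under become.
- name: Reload systemd user
  ansible.builtin.command: "systemctl --user daemon-reload"
  become: true
  become_user: "{{ ansible_user }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ getent_passwd[ansible_user][1] }}"
```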

View File

@@ -82,21 +82,39 @@
     mode: "0644"
   notify: Restart gitea
 
-- name: Create systemd service for Gitea
+- name: Get home directory for {{ ansible_user }}
+  ansible.builtin.getent:
+    database: passwd
+    key: "{{ ansible_user }}"
+
+- name: Set user home directory fact
+  ansible.builtin.set_fact:
+    user_home_dir: "{{ getent_passwd[ansible_user][4] }}"
+
+- name: Create systemd user directory for Gitea
+  ansible.builtin.file:
+    path: "{{ user_home_dir }}/.config/systemd/user"
+    state: directory
+    owner: "{{ ansible_user }}"
+    group: "{{ ansible_user }}"
+    mode: "0755"
+
+- name: Create systemd service for Gitea (user scope)
   ansible.builtin.template:
     src: gitea.service.j2
-    dest: /etc/systemd/system/gitea.service
-    owner: root
-    group: root
+    dest: "{{ user_home_dir }}/.config/systemd/user/gitea.service"
+    owner: "{{ ansible_user }}"
+    group: "{{ ansible_user }}"
     mode: "0644"
-  notify: Reload systemd
+  notify: Reload systemd user
 
-- name: Enable and start Gitea service
-  ansible.builtin.systemd:
-    name: gitea
-    enabled: true
-    state: started
-    daemon_reload: true
+- name: Enable lingering for user {{ ansible_user }}
+  ansible.builtin.command: "loginctl enable-linger {{ ansible_user }}"
+  when: ansible_user != 'root'
+
+- name: Enable and start Gitea service (user scope)
+  ansible.builtin.command: "systemctl --user enable --now gitea.service"
+  become_user: "{{ ansible_user }}"
 
 - name: Deploy nginx vhost configuration for Gitea
   ansible.builtin.template:
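
A small refinement the lingering task invites (hypothetical, not in the commit): `loginctl enable-linger` reports "changed" on every run. systemd records lingering as a flag file, which makes a natural `creates:` guard, assuming the default path `/var/lib/systemd/linger/`:

```yaml
# Hypothetical idempotency guard for the task above.
- name: Enable lingering for user {{ ansible_user }}
  ansible.builtin.command: "loginctl enable-linger {{ ansible_user }}"
  args:
    creates: "/var/lib/systemd/linger/{{ ansible_user }}"
  when: ansible_user != 'root'
```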

View File

@@ -1,13 +1,9 @@
 [Unit]
 Description=Gitea Git Service
-Requires=network-online.target
-After=network-online.target
 
 [Service]
 Type=oneshot
 RemainAfterExit=true
-User={{ ansible_user }}
-Group={{ ansible_user }}
 WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/gitea
 ExecStart=/usr/bin/podman play kube --replace gitea.yaml
 ExecStop=/usr/bin/podman play kube --down gitea.yaml
@@ -15,4 +11,4 @@ Restart=on-failure
 RestartSec=10
 
 [Install]
-WantedBy=multi-user.target
+WantedBy=default.target
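
The `WantedBy` change is load-bearing: a user manager has no `multi-user.target`, so `default.target` (the target a user instance reaches at startup) is what `systemctl --user enable` hooks the unit onto. A hedged post-deploy check, run on the host as the service user:

```bash
# Assumed verification flow; substitute the unit name per service.
loginctl show-user "$USER" --property=Linger   # expect Linger=yes
systemctl --user status gitea.service
journalctl --user -u gitea.service -f
```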

View File

@@ -9,6 +9,7 @@ See `defaults/main.yml` for all available variables and their default values.
 
 ### Required Passwords
+
 Both passwords must be set in your inventory (min 12 characters):
 
 - `immich_postgres_password` - PostgreSQL database password
 - `immich_valkey_password` - Valkey/Redis password
@@ -17,6 +18,7 @@ Both passwords must be set in your inventory (min 12 characters):
 ### Valkey ACL Issues
 
 **Test Immich user credentials:**
+
 ```bash
 valkey-cli
 AUTH immich <immich_valkey_password>

View File

@@ -3,11 +3,15 @@
   ansible.builtin.systemd:
     daemon_reload: true
 
+- name: Reload systemd user
+  ansible.builtin.command: "systemctl --user daemon-reload"
+  become: true
+  become_user: "{{ ansible_user }}"
+
 - name: Restart Immich
-  ansible.builtin.systemd:
-    name: immich
-    state: restarted
-    daemon_reload: true
+  ansible.builtin.command: "systemctl --user restart immich.service"
+  become: true
+  become_user: "{{ ansible_user }}"
 
 - name: Reload nginx
   ansible.builtin.systemd:

View File

@@ -89,21 +89,39 @@
     mode: "0644"
   notify: Restart Immich
 
-- name: Create systemd service for Immich
+- name: Get home directory for {{ ansible_user }}
+  ansible.builtin.getent:
+    database: passwd
+    key: "{{ ansible_user }}"
+
+- name: Set user home directory fact
+  ansible.builtin.set_fact:
+    user_home_dir: "{{ getent_passwd[ansible_user][4] }}"
+
+- name: Create systemd user directory for Immich
+  ansible.builtin.file:
+    path: "{{ user_home_dir }}/.config/systemd/user"
+    state: directory
+    owner: "{{ ansible_user }}"
+    group: "{{ ansible_user }}"
+    mode: "0755"
+
+- name: Create systemd service for Immich (user scope)
   ansible.builtin.template:
     src: immich.service.j2
-    dest: /etc/systemd/system/immich.service
-    owner: root
-    group: root
+    dest: "{{ user_home_dir }}/.config/systemd/user/immich.service"
+    owner: "{{ ansible_user }}"
+    group: "{{ ansible_user }}"
     mode: "0644"
-  notify: Reload systemd
+  notify: Reload systemd user
 
-- name: Enable and start Immich service
-  ansible.builtin.systemd:
-    name: immich
-    enabled: true
-    state: started
-    daemon_reload: true
+- name: Enable lingering for user {{ ansible_user }}
+  ansible.builtin.command: "loginctl enable-linger {{ ansible_user }}"
+  when: ansible_user != 'root'
+
+- name: Enable and start Immich service (user scope)
+  ansible.builtin.command: "systemctl --user enable --now immich.service"
+  become_user: "{{ ansible_user }}"
 
 - name: Deploy nginx vhost configuration for Immich
   ansible.builtin.template:

View File

@@ -1,13 +1,9 @@
 [Unit]
 Description=Immich Media Server
-Requires=network-online.target
-After=network-online.target
 
 [Service]
 Type=oneshot
 RemainAfterExit=true
-User={{ ansible_user }}
-Group={{ ansible_user }}
 WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/immich
 ExecStart=/usr/bin/podman play kube --replace immich.yaml
 ExecStop=/usr/bin/podman play kube --down immich.yaml
@@ -15,4 +11,4 @@ Restart=on-failure
 RestartSec=10
 
 [Install]
-WantedBy=multi-user.target
+WantedBy=default.target

View File

@@ -17,19 +17,20 @@ Installs and configures Nginx as a reverse proxy for web applications with modular vhost configuration.
 Each service role should deploy its own vhost config:
 
 **In service role tasks:**
+
 ```yaml
 - name: Deploy nginx vhost
   ansible.builtin.template:
-      src: nginx-vhost.conf.j2
-      dest: /etc/nginx/conf.d/myservice.conf
-      validate: nginx -t
+    src: nginx-vhost.conf.j2
+    dest: /etc/nginx/conf.d/myservice.conf
+    validate: nginx -t
   when: myservice_nginx_enabled
   notify: Reload nginx
 
 - name: Remove nginx vhost when disabled
   ansible.builtin.file:
-      path: /etc/nginx/conf.d/myservice.conf
-      state: absent
+    path: /etc/nginx/conf.d/myservice.conf
+    state: absent
   when: not myservice_nginx_enabled
   notify: Reload nginx
 ```
@@ -39,15 +40,17 @@ Each service role should deploy its own vhost config:
 Forward TCP traffic from this Nginx instance to services on other hosts using the `stream` module (layer 4 proxy).
 
 **Configuration:**
+
 ```yaml
 nginx_forwarder:
-  "blog.hello.com":
-    forward_to: "my.host.lan"
-    http: true    # Forward port 80 (default: true)
-    https: true   # Forward port 443 (default: true)
+  "blog.hello.com":
+    forward_to: "my.host.lan"
+    http: true # Forward port 80 (default: true)
+    https: true # Forward port 443 (default: true)
 ```
 
 **How it works:**
+
 - **Stream-based TCP proxy** (layer 4, not HTTP layer 7)
 - No protocol inspection - just forwards raw TCP packets
 - **HTTPS passes through encrypted** - backend host handles TLS termination
@@ -56,6 +59,7 @@ nginx_forwarder:
 **Use case:** Omega (gateway) forwards all traffic to Andromeda (internal server) that handles its own TLS certificates.
 
 **Important notes:**
+
 - Stream configs deployed to `/etc/nginx/streams.d/`
 - No HTTP logging (stream doesn't understand HTTP protocol)
 - No X-Forwarded-For headers (transparent TCP forwarding)
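
For orientation, the rendered stream config for the `blog.hello.com` example might look roughly like the sketch below; the real output comes from the role's template, so treat the layout as an assumption:

```
# Hypothetical rendering of one forwarder entry (sketch only)
stream {
    server {
        listen 80;
        proxy_pass my.host.lan:80;   # raw TCP forward
    }
    server {
        listen 443;
        proxy_pass my.host.lan:443;  # TLS passes through; backend terminates
    }
}
```
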
@@ -64,10 +68,12 @@ nginx_forwarder:
 
 ## Logging Backends
 
 **journald (default):**
+
 - Logs sent to systemd journal via syslog
 - View: `journalctl -u nginx -f`
 
 **file:**
+
 - Traditional `/var/log/nginx/*.log` files
 - Automatic logrotate configuration

View File

@@ -5,6 +5,7 @@ Deploys [ntfy](https://ntfy.sh/) - a simple HTTP-based pub-sub notification service.
 ## Security Model
 
 **Secure by default:**
+
 - `auth-default-access: deny-all` - No anonymous access
 - `enable-signup: false` - No public registration
 - `enable-login: true` - Authentication required
@@ -19,7 +20,7 @@ All notifications require authentication to send or receive.
 Set in inventory or vault:
 
 ```yaml
-ntfy_admin_password: "your-secure-password-here"  # Min 12 chars
+ntfy_admin_password: "your-secure-password-here" # Min 12 chars
 ```
 
 ### Optional Variables
@@ -44,21 +45,25 @@ ntfy_nginx_hostname: ntfy.nas.local
 ### Managing Users
 
 List users:
+
 ```bash
 podman exec ntfy ntfy user list
 ```
 
 Add user:
+
 ```bash
 podman exec ntfy ntfy user add <username>
 ```
 
 Change password:
+
 ```bash
 podman exec -i ntfy ntfy user change-pass <username>
 ```
 
 Remove user:
+
 ```bash
 podman exec ntfy ntfy user remove <username>
 ```
@@ -66,6 +71,7 @@ podman exec ntfy ntfy user remove <username>
 ### Managing Topic Access
 
 Grant access to topic:
+
 ```bash
 podman exec ntfy ntfy access <username> <topic> <permission>
 ```
@@ -73,6 +79,7 @@ podman exec ntfy ntfy access <username> <topic> <permission>
 Permissions: `read-write`, `read-only`, `write-only`, `deny`
 
 Example:
+
 ```bash
 # Allow user to publish and subscribe to "alerts" topic
 podman exec ntfy ntfy access alice alerts read-write
@@ -82,6 +89,7 @@ podman exec ntfy ntfy access bob monitoring write-only
 ```
 
 List access control:
+
 ```bash
 podman exec ntfy ntfy access
 ```
@@ -89,11 +97,13 @@ podman exec ntfy ntfy access
 ### Publishing Notifications
 
 Using curl with authentication:
+
 ```bash
 curl -u admin:password -d "Backup completed" http://localhost:8080/backups
 ```
 
 Using ntfy CLI:
+
 ```bash
 ntfy publish --token <access-token> ntfy.nas.local mytopic "Hello World"
 ```
@@ -103,6 +113,7 @@ ntfy publish --token <access-token> ntfy.nas.local mytopic "Hello World"
 Web UI: https://ntfy.nas.local (if nginx enabled)
 
 CLI:
+
 ```bash
 ntfy subscribe --token <access-token> ntfy.nas.local mytopic
 ```

View File

@@ -23,9 +23,9 @@ ntfy_timezone: UTC
 # Server configuration
 ntfy_base_url: http://localhost:{{ ntfy_port }}
 ntfy_behind_proxy: false
-ntfy_enable_signup: false        # Disable public signup for security
-ntfy_enable_login: true          # Enable authentication
-ntfy_enable_reservations: true   # Only authenticated users can reserve topics
+ntfy_enable_signup: false # Disable public signup for security
+ntfy_enable_login: true # Enable authentication
+ntfy_enable_reservations: true # Only authenticated users can reserve topics
 
 # Nginx reverse proxy configuration
 ntfy_nginx_enabled: false

View File

@@ -3,11 +3,15 @@
   ansible.builtin.systemd:
     daemon_reload: true
 
+- name: Reload systemd user
+  ansible.builtin.command: "systemctl --user daemon-reload"
+  become: true
+  become_user: "{{ ansible_user }}"
+
 - name: Restart ntfy
-  ansible.builtin.systemd:
-    name: ntfy
-    state: restarted
-    daemon_reload: true
+  ansible.builtin.command: "systemctl --user restart ntfy.service"
+  become: true
+  become_user: "{{ ansible_user }}"
 
 - name: Reload nginx
   ansible.builtin.systemd:

View File

@@ -46,21 +46,39 @@
     mode: "0644"
   notify: Restart ntfy
 
-- name: Create systemd service for ntfy
+- name: Get home directory for {{ ansible_user }}
+  ansible.builtin.getent:
+    database: passwd
+    key: "{{ ansible_user }}"
+
+- name: Set user home directory fact
+  ansible.builtin.set_fact:
+    user_home_dir: "{{ getent_passwd[ansible_user][4] }}"
+
+- name: Create systemd user directory for ntfy
+  ansible.builtin.file:
+    path: "{{ user_home_dir }}/.config/systemd/user"
+    state: directory
+    owner: "{{ ansible_user }}"
+    group: "{{ ansible_user }}"
+    mode: "0755"
+
+- name: Create systemd service for ntfy (user scope)
   ansible.builtin.template:
     src: ntfy.service.j2
-    dest: /etc/systemd/system/ntfy.service
-    owner: root
-    group: root
+    dest: "{{ user_home_dir }}/.config/systemd/user/ntfy.service"
+    owner: "{{ ansible_user }}"
+    group: "{{ ansible_user }}"
     mode: "0644"
-  notify: Reload systemd
+  notify: Reload systemd user
 
-- name: Enable and start ntfy service
-  ansible.builtin.systemd:
-    name: ntfy
-    enabled: true
-    state: started
-    daemon_reload: true
+- name: Enable lingering for user {{ ansible_user }}
+  ansible.builtin.command: "loginctl enable-linger {{ ansible_user }}"
+  when: ansible_user != 'root'
+
+- name: Enable and start ntfy service (user scope)
+  ansible.builtin.command: "systemctl --user enable --now ntfy.service"
+  become_user: "{{ ansible_user }}"
 
 - name: Wait for ntfy to be ready
   ansible.builtin.wait_for:

View File

@@ -1,13 +1,9 @@
 [Unit]
 Description=Ntfy Notification Service
-Requires=network-online.target
-After=network-online.target
 
 [Service]
 Type=oneshot
 RemainAfterExit=true
-User={{ ansible_user }}
-Group={{ ansible_user }}
 WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/ntfy
 ExecStart=/usr/bin/podman play kube --replace ntfy.yaml
 ExecStop=/usr/bin/podman play kube --down ntfy.yaml
@@ -15,4 +11,4 @@ Restart=on-failure
 RestartSec=10
 
 [Install]
-WantedBy=multi-user.target
+WantedBy=default.target

View File

@@ -12,10 +12,12 @@ Installs and configures Podman for container management with support for Docker Compose
 
 ## Container Logging
 
 **journald (default):**
+
 - Logs sent to systemd journal
 - View: `journalctl CONTAINER_NAME=<name> -f`
 
 **k8s-file:**
+
 - Logs stored as JSON files with automatic rotation
 - Configured via `podman_log_max_size` and `podman_log_max_files`
@@ -53,4 +55,4 @@ podman pod ls
 
 - [Podman Documentation](https://docs.podman.io/)
 - [Podman Logging](https://docs.podman.io/en/latest/markdown/podman-run.1.html#log-driver-driver)
-- [containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)
\ No newline at end of file
+- [containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)

View File

@@ -14,6 +14,7 @@ Installs and configures PostgreSQL as a shared database service for multiple applications.
 ## Architecture Pattern
 
 **Decentralized database management:**
+
 - PostgreSQL role: Installs and configures the server
 - Service roles: Create their own databases/users (e.g., immich, nextcloud)
 - Isolation: Each service user can only access their own database
@@ -29,9 +30,10 @@ PostgreSQL binds to `127.0.0.1` by default (secure, localhost-only).
 Containers can reach PostgreSQL via Pasta's `--map-host-loopback` feature, which routes container's `127.0.0.1` to the host's `127.0.0.1`.
 
 In docker-compose files, use:
+
 ```yaml
 extra_hosts:
-    - "postgres.local:127.0.0.1"
+  - "postgres.local:127.0.0.1"
 ```
 
 No additional bind addresses or firewall rules needed!
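
A quick way to confirm the mapping from inside a container (a sketch; the container name and the presence of `pg_isready` in the image are assumptions):

```bash
# Resolves postgres.local via extra_hosts, then crosses Pasta's
# --map-host-loopback back to the host's 127.0.0.1.
podman exec -it immich-server pg_isready -h postgres.local -p 5432
```
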
@@ -39,10 +41,12 @@ No additional bind addresses or firewall rules needed!
 
 ## Logging Backends
 
 **journald (default):**
+
 - Logs via stderr → systemd journal
 - View: `journalctl -u postgresql -f`
 
 **file:**
+
 - Logs to data directory or `/var/log/postgresql/`
 - Automatic logrotate configuration
@@ -86,4 +90,4 @@ sudo -u postgres psql -c "SHOW effective_cache_size;"
 - [PostgreSQL Documentation](https://www.postgresql.org/docs/current/)
 - [PostgreSQL Logging](https://www.postgresql.org/docs/current/runtime-config-logging.html)
 - [PostgreSQL Performance Tuning](https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server)
-- [pg_hba.conf Documentation](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html)
\ No newline at end of file
+- [pg_hba.conf Documentation](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html)

View File

@@ -49,6 +49,7 @@ uptime_kuma_nginx_hostname: uptime.nas.local
 ### Adding Monitors
 
 Web UI → Add New Monitor:
+
 - **Monitor Type:** HTTP(s), TCP Port, Ping, DNS, etc.
 - **Friendly Name:** Display name
 - **URL/Host:** Target to monitor
@@ -59,6 +60,7 @@ Web UI → Add New Monitor:
 ### Notification Endpoints
 
 Web UI → Settings → Notifications:
+
 - Email (SMTP)
 - Discord, Slack, Telegram
 - ntfy (recommended for local notifications)

View File

@@ -3,11 +3,15 @@
   ansible.builtin.systemd:
     daemon_reload: true
 
+- name: Reload systemd user
+  ansible.builtin.command: "systemctl --user daemon-reload"
+  become: true
+  become_user: "{{ ansible_user }}"
+
 - name: Restart uptime-kuma
-  ansible.builtin.systemd:
-    name: uptime-kuma
-    state: restarted
-    daemon_reload: true
+  ansible.builtin.command: "systemctl --user restart uptime-kuma.service"
+  become: true
+  become_user: "{{ ansible_user }}"
 
 - name: Reload nginx
   ansible.builtin.systemd:

View File

@@ -24,21 +24,39 @@
     mode: "0644"
   notify: Restart uptime-kuma
 
-- name: Create systemd service for uptime-kuma
+- name: Get home directory for {{ ansible_user }}
+  ansible.builtin.getent:
+    database: passwd
+    key: "{{ ansible_user }}"
+
+- name: Set user home directory fact
+  ansible.builtin.set_fact:
+    user_home_dir: "{{ getent_passwd[ansible_user][4] }}"
+
+- name: Create systemd user directory for uptime-kuma
+  ansible.builtin.file:
+    path: "{{ user_home_dir }}/.config/systemd/user"
+    state: directory
+    owner: "{{ ansible_user }}"
+    group: "{{ ansible_user }}"
+    mode: "0755"
+
+- name: Create systemd service for uptime-kuma (user scope)
   ansible.builtin.template:
     src: uptime-kuma.service.j2
-    dest: /etc/systemd/system/uptime-kuma.service
-    owner: root
-    group: root
+    dest: "{{ user_home_dir }}/.config/systemd/user/uptime-kuma.service"
+    owner: "{{ ansible_user }}"
+    group: "{{ ansible_user }}"
     mode: "0644"
-  notify: Reload systemd
+  notify: Reload systemd user
 
-- name: Enable and start uptime-kuma service
-  ansible.builtin.systemd:
-    name: uptime-kuma
-    enabled: true
-    state: started
-    daemon_reload: true
+- name: Enable lingering for user {{ ansible_user }}
+  ansible.builtin.command: "loginctl enable-linger {{ ansible_user }}"
+  when: ansible_user != 'root'
+
+- name: Enable and start uptime-kuma service (user scope)
+  ansible.builtin.command: "systemctl --user enable --now uptime-kuma.service"
+  become_user: "{{ ansible_user }}"
 
 - name: Deploy nginx vhost configuration for uptime-kuma
   ansible.builtin.template:

View File

@@ -1,13 +1,9 @@
 [Unit]
 Description=Uptime Kuma Monitoring Service
-Requires=network-online.target
-After=network-online.target
 
 [Service]
 Type=oneshot
 RemainAfterExit=true
-User={{ ansible_user }}
-Group={{ ansible_user }}
 WorkingDirectory={{ podman_projects_dir | default('/opt/podman') }}/uptime-kuma
 ExecStart=/usr/bin/podman play kube --replace uptime-kuma.yaml
 ExecStop=/usr/bin/podman play kube --down uptime-kuma.yaml
@@ -15,4 +11,4 @@ Restart=on-failure
 RestartSec=10
 
 [Install]
-WantedBy=multi-user.target
+WantedBy=default.target

View File

@@ -9,6 +9,7 @@ The role also performs required kernel tuning for optimal Valkey performance.
 Valkey is a high-performance key/value datastore and a drop-in replacement for Redis. It was created as a community-driven fork after Redis changed its license from BSD to proprietary licenses (RSALv2 and SSPLv1) in March 2024.
 
 **Key points:**
+
 - Valkey is 100% API-compatible with Redis
 - Backed by the Linux Foundation
 - Uses permissive open-source license (BSD 3-Clause)
@@ -16,6 +17,7 @@ Valkey is a high-performance key/value datastore and a drop-in replacement for Redis.
 - Same commands, same protocol, same performance
 
 **Distribution support:**
+
 - **Arch Linux**: Installs Valkey (redis package replaced in April 2024)
 - **Debian/Ubuntu**: Installs Valkey from official repositories
 
@@ -58,9 +60,10 @@ Valkey binds to `127.0.0.1` by default (secure, localhost-only).
 Containers can reach Valkey via Pasta's `--map-host-loopback` feature, which routes container's `127.0.0.1` to the host's `127.0.0.1`.
 
 In docker-compose files, use:
+
 ```yaml
 extra_hosts:
-    - "host.containers.internal:127.0.0.1"
+  - "host.containers.internal:127.0.0.1"
 ```
 
 No additional bind addresses needed!
@@ -78,8 +81,8 @@ None.
 - hosts: servers
   become: true
   roles:
-  - role: valkey
-  - role: immich # Will connect to system Valkey
+    - role: valkey
+    - role: immich # Will connect to system Valkey
 ```
 
 ### Custom Configuration with ACL Users
@@ -89,25 +92,26 @@ None.
 ```yaml
 - hosts: servers
   become: true
   roles:
-  - role: valkey
-    vars:
-      valkey_admin_password: "{{ vault_valkey_password }}"
-      valkey_maxmemory: 512mb
-      valkey_maxmemory_policy: volatile-lru
-      valkey_acl_users:
-      - username: immich
-        password: "{{ immich_valkey_password }}"
-        keypattern: "immich_bull* immich_channel*"
-        commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha"
-      - username: nextcloud
-        password: "{{ nextcloud_valkey_password }}"
-        keypattern: "nextcloud*"
-        commands: "+@read +@write -@dangerous +auth +ping +info"
+    - role: valkey
+      vars:
+        valkey_admin_password: "{{ vault_valkey_password }}"
+        valkey_maxmemory: 512mb
+        valkey_maxmemory_policy: volatile-lru
+        valkey_acl_users:
+          - username: immich
+            password: "{{ immich_valkey_password }}"
+            keypattern: "immich_bull* immich_channel*"
+            commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha"
+          - username: nextcloud
+            password: "{{ nextcloud_valkey_password }}"
+            keypattern: "nextcloud*"
+            commands: "+@read +@write -@dangerous +auth +ping +info"
 ```
 
 ## How Services Connect
 
 Services running on the same host can connect to Valkey at:
+
 - **Host**: `localhost` or `127.0.0.1`
 - **Port**: `6379` (default)
@@ -116,6 +120,7 @@ Services running on the same host can connect to Valkey at:
 Containers need special handling to reach the host's Valkey:
 
 **Use `host.containers.internal`:**
+
 ```yaml
 REDIS_HOSTNAME: host.containers.internal
 REDIS_PORT: 6379
@@ -135,6 +140,7 @@ This special DNS name resolves to the host machine from inside containers.
 ### ACL-Based Authentication
 
 This role uses Valkey's ACL (Access Control List) system for fine-grained security. Each service gets:
+
 - **Dedicated credentials**: Unique username and password
 - **Key pattern restrictions**: Can only access specific key patterns
 - **Command restrictions**: Limited to required commands only
@@ -149,25 +155,26 @@ Define ACL users in your inventory or host_vars:
+
 ```yaml
 valkey_admin_password: "your-strong-admin-password"
 
 valkey_acl_users:
-- username: immich
-  password: "{{ immich_valkey_password }}"
-  keypattern: "immich_bull* immich_channel*"
-  commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha"
-- username: nextcloud
-  password: "{{ nextcloud_valkey_password }}"
-  keypattern: "nextcloud*"
-  commands: "+@read +@write -@dangerous +auth +ping +info"
-- username: gitea
-  password: "{{ gitea_valkey_password }}"
-  keypattern: "gitea*"
-  commands: "+@read +@write -@dangerous +auth +ping +info +select"
+  - username: immich
+    password: "{{ immich_valkey_password }}"
+    keypattern: "immich_bull* immich_channel*"
+    commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha"
+  - username: nextcloud
+    password: "{{ nextcloud_valkey_password }}"
+    keypattern: "nextcloud*"
+    commands: "+@read +@write -@dangerous +auth +ping +info"
+  - username: gitea
+    password: "{{ gitea_valkey_password }}"
+    keypattern: "gitea*"
+    commands: "+@read +@write -@dangerous +auth +ping +info +select"
 ```
 
 ### ACL Configuration Guide
 
 **Key Pattern (`keypattern`):**
 
 - Single pattern: `"myservice*"` - matches keys starting with `myservice`
 - Multiple patterns: `"pattern1* pattern2*"` - space-separated (automatically converted to `~pattern1* ~pattern2*` in ACL file)
 - All keys: `"*"` - not recommended for security
@@ -179,17 +186,21 @@ valkey_acl_users:
 The role automatically configures kernel parameters required by Valkey (see `tasks/kernel-tuning.yml`):
 
 **1. Memory Overcommit:**
+
 ```
 vm.overcommit_memory = 1
 ```
+
 - Required for background saves and replication
 - Configured via `/etc/sysctl.conf`
 - Applied immediately and persists across reboots
 
 **2. Transparent Huge Pages (THP):**
+
 ```
 transparent_hugepage=madvise
 ```
+
 - Reduces latency and memory usage issues
 - Safely appended to existing GRUB kernel parameters (does not overwrite)
 - Only adds parameter if `transparent_hugepage=` is not already present
@@ -202,6 +213,7 @@ These settings are required to eliminate Valkey startup warnings and ensure optimal performance.
 **Note:** The role preserves existing GRUB parameters. If you have `GRUB_CMDLINE_LINUX_DEFAULT="loglevel=3 quiet"`, it will become `GRUB_CMDLINE_LINUX_DEFAULT="loglevel=3 quiet transparent_hugepage=madvise"`.
 
 **Commands (`commands`):**
+
 - `&*` - Allow all pub/sub channels (required for job queues like BullMQ)
 - `+allchannels` - Alternative to `&*`
 - `+@read` - Allow all read commands (GET, MGET, etc.)
@@ -213,14 +225,15 @@ These settings are required to eliminate Valkey startup warnings and ensure optimal performance.
 **Common Command Sets:**
 
-| Service Type | Recommended Commands |
-|-------------|---------------------|
-| **Simple cache** | `+@read +@write -@dangerous +auth +ping +info` |
-| **Session store** | `+@read +@write -@dangerous +auth +ping +info +select` |
+| Service Type           | Recommended Commands                                                              |
+| ---------------------- | --------------------------------------------------------------------------------- |
+| **Simple cache**       | `+@read +@write -@dangerous +auth +ping +info`                                    |
+| **Session store**      | `+@read +@write -@dangerous +auth +ping +info +select`                            |
 | **Job queue (BullMQ)** | `&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha` |
-| **Pub/sub** | `+@pubsub +@read +@write -@dangerous +auth +ping +info` |
+| **Pub/sub**            | `+@pubsub +@read +@write -@dangerous +auth +ping +info`                           |
 
 **Security Best Practices:**
+
 - Always include `-@dangerous` to prevent accidental data loss
 - Use specific key patterns to isolate services
 - Only grant `+eval` and `+evalsha` if required (job queues)
 
@@ -244,12 +257,12 @@ Add encrypted values to your inventory:
 
 ```yaml
 valkey_admin_password: !vault |
-          $ANSIBLE_VAULT;1.1;AES256
-          ...
+  $ANSIBLE_VAULT;1.1;AES256
+  ...
 
 immich_valkey_password: !vault |
-          $ANSIBLE_VAULT;1.1;AES256
-          ...
+  $ANSIBLE_VAULT;1.1;AES256
+  ...
 ```
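
One way to produce such `!vault` blocks (assuming `ansible-vault` is already configured with your vault password):

```bash
ansible-vault encrypt_string 'your-strong-admin-password' --name 'valkey_admin_password'
ansible-vault encrypt_string 'another-secret' --name 'immich_valkey_password'
```
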
## Service Management
@@ -275,6 +288,7 @@ valkey-cli # Also available on Arch Linux
 ## Persistence
 
 Valkey is configured with RDB persistence:
+
 - Save after 900 seconds if at least 1 key changed
 - Save after 300 seconds if at least 10 keys changed
 - Save after 60 seconds if at least 10000 keys changed
@@ -293,6 +307,7 @@ When `valkey_maxmemory` is reached, Valkey will behave based on `valkey_maxmemory_policy`:
 **Important for Immich and BullMQ:**
 
 Services using BullMQ for job queues (like Immich) require `noeviction` policy. Evicting job queue data can cause:
+
 - Lost background tasks
 - Failed job processing
 - Data corruption
@@ -302,6 +317,7 @@ Only use eviction policies (`allkeys-lru`, etc.) for pure caching use cases where data loss is acceptable.
 ## Monitoring
 
 Check Valkey info (authenticate as admin):
+
 ```bash
 redis-cli
 AUTH default <valkey_admin_password>
@@ -311,6 +327,7 @@ INFO stats
 ```
 
 Check connected clients:
+
 ```bash
 redis-cli
 AUTH default <valkey_admin_password>
@@ -318,6 +335,7 @@ CLIENT LIST
 ```
 
 View ACL configuration:
+
 ```bash
 redis-cli
 AUTH default <valkey_admin_password>
@@ -328,6 +346,7 @@ ACL CAT # List all command categories
 ```
 
 Check generated ACL file:
+
 ```bash
 cat /etc/valkey/users.acl
 # Example output:
@@ -339,12 +358,14 @@ cat /etc/valkey/users.acl
 ## Troubleshooting
 
 ### Check if Valkey is running
+
 ```bash
 systemctl status valkey # Arch Linux
 systemctl status valkey-server # Debian/Ubuntu
 ```
 
 ### Test admin connection
+
 ```bash
 # With authentication (default user)
 redis-cli
@@ -354,6 +375,7 @@ PING
 ```
 
 ### Test service user connection
+
 ```bash
 # Test Immich user
 redis-cli
@@ -368,6 +390,7 @@ FLUSHDB
 ```
 
 ### View ACL configuration
+
 ```bash
 # Check ACL file
 cat /etc/valkey/users.acl
@@ -380,6 +403,7 @@ ACL GETUSER immich
 ```
 
 ### Debug permission issues
+
 ```bash
 # Monitor all commands (useful for debugging)
 redis-cli
@@ -390,6 +414,7 @@ MONITOR
 ```
 
 ### View configuration
+
 ```bash
 redis-cli
 AUTH default <valkey_admin_password>
@@ -397,6 +422,7 @@ CONFIG GET "*"
 ```
 
 ### Check memory usage
+
 ```bash
 redis-cli
 AUTH default <valkey_admin_password>
@@ -406,25 +432,30 @@ INFO memory
 ### Common ACL Errors
 
 **"NOAUTH Authentication required"**
+
 - Client didn't authenticate
 - Service needs to set `REDIS_USERNAME` and `REDIS_PASSWORD`
 
 **"WRONGPASS invalid username-password pair"**
+
 - Incorrect username or password
 - Verify ACL user exists: `ACL GETUSER username`
 - Check password in inventory matches service configuration
 
 **"NOPERM No permissions to run the 'command' command"**
+
 - Command not allowed in ACL
 - Check ACL: `ACL GETUSER username`
 - Add required command to `commands:` in inventory
 
 **"NOPERM No permissions to access a key"**
+
 - Key doesn't match allowed patterns
 - Check key pattern: `ACL GETUSER username`
 - Verify service is using correct key prefix
 
 **"NOPERM No permissions to access a channel"**
+
 - Pub/sub channel not allowed
 - Add `&*` or `+allchannels` to ACL commands
 - Required for BullMQ and other job queues
@@ -434,18 +465,20 @@ INFO memory
 For high-traffic services, consider:
+
 ```yaml
-valkey_maxmemory: 1gb                  # Increase memory limit
-valkey_maxmemory_policy: noeviction    # No eviction (for job queues)
+valkey_maxmemory: 1gb # Increase memory limit
+valkey_maxmemory_policy: noeviction # No eviction (for job queues)
 # Or for pure caching:
 # valkey_maxmemory_policy: allkeys-lru # LRU eviction
 ```
 
 **Kernel Tuning (automatically configured):**
 
 The role automatically sets optimal kernel parameters:
 - Memory overcommit enabled (`vm.overcommit_memory=1`)
 - Transparent Huge Pages set to `madvise`
 
 To verify kernel settings:
+
 ```bash
 # Check memory overcommit
 sysctl vm.overcommit_memory
@@ -469,23 +502,27 @@ Created for managing shared Valkey instances in NAS/homelab environments.
 This role implements **defense-in-depth** with three isolation layers:
 
 ### 1. ACL Users (Primary Isolation)
+
 Each service gets its own user with restricted permissions:
+
 - Unique credentials
 - Key pattern restrictions
 - Command restrictions
 
 ### 2. Database Numbers (Secondary Isolation)
+
 Valkey provides 16 logical databases (0-15) for additional isolation:
 
-| Service | Database | Key Pattern | ACL User |
-|---------|----------|-------------|----------|
-| Immich | 0 | `immich_bull*` `immich_channel*` | `immich` |
-| Nextcloud | 1 | `nextcloud*` | `nextcloud` |
-| Gitea | 2 | `gitea*` | `gitea` |
-| Grafana | 3 | `grafana*` | `grafana` |
-| Custom | 4-15 | Custom | Custom |
+| Service   | Database | Key Pattern                      | ACL User    |
+| --------- | -------- | -------------------------------- | ----------- |
+| Immich    | 0        | `immich_bull*` `immich_channel*` | `immich`    |
+| Nextcloud | 1        | `nextcloud*`                     | `nextcloud` |
+| Gitea     | 2        | `gitea*`                         | `gitea`     |
+| Grafana   | 3        | `grafana*`                       | `grafana`   |
+| Custom    | 4-15     | Custom                           | Custom      |
 
 ### 3. Key Prefixes (Tertiary Isolation)
+
 Services use unique key prefixes enforced by ACL patterns.
 
 ### Testing Isolation
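
A hedged spot check of the database layer, reusing the admin commands shown elsewhere in this README (key names are illustrative):

```bash
redis-cli
AUTH default <valkey_admin_password>
SELECT 1                 # Nextcloud's database
SCAN 0 MATCH nextcloud*  # should return only nextcloud-prefixed keys
```
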
@@ -517,23 +554,23 @@ FLUSHDB
 
 ```yaml
 valkey_admin_password: "{{ vault_valkey_admin_password }}"
 
 valkey_acl_users:
-# Immich - Photo management (needs BullMQ job queue)
-- username: immich
-  password: "{{ vault_immich_valkey_password }}"
-  keypattern: "immich_bull* immich_channel*"
-  commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha"
-# Nextcloud - Simple caching
-- username: nextcloud
-  password: "{{ vault_nextcloud_valkey_password }}"
-  keypattern: "nextcloud*"
-  commands: "+@read +@write -@dangerous +auth +ping +info +select"
-# Gitea - Session store
-- username: gitea
-  password: "{{ vault_gitea_valkey_password }}"
-  keypattern: "gitea*"
-  commands: "+@read +@write -@dangerous +auth +ping +info +select"
+  # Immich - Photo management (needs BullMQ job queue)
+  - username: immich
+    password: "{{ vault_immich_valkey_password }}"
+    keypattern: "immich_bull* immich_channel*"
+    commands: "&* -@dangerous +@read +@write +@pubsub +select +auth +ping +info +eval +evalsha"
+  # Nextcloud - Simple caching
+  - username: nextcloud
+    password: "{{ vault_nextcloud_valkey_password }}"
+    keypattern: "nextcloud*"
+    commands: "+@read +@write -@dangerous +auth +ping +info +select"
+  # Gitea - Session store
+  - username: gitea
+    password: "{{ vault_gitea_valkey_password }}"
+    keypattern: "gitea*"
+    commands: "+@read +@write -@dangerous +auth +ping +info +select"
 
 # Service variables
 immich_valkey_db: 0