feat: new services and fixes

commit 787c171f65
parent d8eb53f096
@@ -222,7 +222,7 @@ immich_nginx_enabled: true
 immich_nginx_hostname: "blog.hello.com"

 # In nginx role configuration (host_vars or group_vars)
-acme_email: "admin@carabosse.cloud"
+acme_email: "admin@blog.com"
 ```

 **What it does:**
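The `acme_email` value above is consumed by the nginx role's Certbot tasks. A minimal host_vars sketch combining it with the Immich vhost settings (variable names are from this commit; the hostname and email values are illustrative):

```yaml
# host_vars sketch - values illustrative, not part of this commit
immich_nginx_enabled: true
immich_nginx_hostname: "blog.hello.com"
acme_email: "admin@blog.com"
```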
@@ -108,7 +108,7 @@
 - name: Deploy nginx vhost configuration for Immich
   ansible.builtin.template:
     src: nginx-vhost.conf.j2
-    dest: /etc/nginx/conf.d/immich.conf
+    dest: "{{ nginx_conf_dir }}/immich.conf"
     owner: root
     group: root
     mode: "0644"
@@ -117,7 +117,7 @@
 
 - name: Remove nginx vhost configuration for Immich
   ansible.builtin.file:
-    path: /etc/nginx/conf.d/immich.conf
+    path: "{{ nginx_conf_dir }}/immich.conf"
     state: absent
   when: not immich_nginx_enabled
   notify: Reload nginx
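`nginx_conf_dir` replaces the hard-coded `/etc/nginx/conf.d` so these tasks work wherever the nginx role puts its config. A plausible definition, assuming the nginx role carries per-distro vars files like the ones touched later in this commit (the variable name is real; its placement and value here are assumptions):

```yaml
# Hypothetical nginx role vars file - only the variable name is from this commit
nginx_conf_dir: /etc/nginx/conf.d
```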
@@ -7,8 +7,8 @@ After=network-online.target
 Type=oneshot
 RemainAfterExit=true
 WorkingDirectory={{ podman_projects_dir }}/immich
-ExecStart=/usr/bin/podman compose up -d
-ExecStop=/usr/bin/podman compose down
+ExecStart=/usr/bin/podman-compose up -d
+ExecStop=/usr/bin/podman-compose down
 Restart=on-failure
 RestartSec=10

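Switching from `podman compose` to `podman-compose` calls the standalone Python tool directly instead of going through podman's external compose-provider shim. A hedged pre-flight task, assuming `podman-compose` is available as a distribution package (package name may vary by distro):

```yaml
# Sketch: make sure the binary the unit file calls actually exists
- name: Install podman-compose
  ansible.builtin.package:
    name: podman-compose
    state: present
```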
@@ -51,11 +51,10 @@
 
 - name: Enable Certbot renewal timer
   ansible.builtin.systemd:
-    name: certbot-renew.timer
+    name: "{{ certbot_timer }}"
     enabled: true
     state: started
   when: acme_email is defined
-  ignore_errors: true
 
 - name: Ensure nginx conf.d directory exists
   ansible.builtin.file:
@@ -3,23 +3,19 @@
 # Transparent TCP proxy (no protocol inspection)
 
 {% if config.http | default(true) %}
-upstream {{ domain | replace('.', '_') | replace('-', '_') }}_http {
-    server {{ config.forward_to }}:80;
-}
-
 server {
     listen 80;
-    proxy_pass {{ domain | replace('.', '_') | replace('-', '_') }}_http;
+    # Using variable forces runtime DNS resolution (avoids startup failures)
+    set $upstream_http {{ config.forward_to }};
+    proxy_pass $upstream_http:80;
 }
 {% endif %}
 
 {% if config.https | default(true) %}
-upstream {{ domain | replace('.', '_') | replace('-', '_') }}_https {
-    server {{ config.forward_to }}:443;
-}
-
 server {
     listen 443;
-    proxy_pass {{ domain | replace('.', '_') | replace('-', '_') }}_https;
+    # Using variable forces runtime DNS resolution (avoids startup failures)
+    set $upstream_https {{ config.forward_to }};
+    proxy_pass $upstream_https:443;
 }
 {% endif %}

@@ -57,6 +57,11 @@ http {
 {% if nginx_forwarder and nginx_forwarder | length > 0 %}
 # Stream block for TCP/UDP proxying
 stream {
+    # DNS resolver for runtime hostname resolution
+    # Using 127.0.0.1 (systemd-resolved) with 30s cache and 5s timeout
+    resolver 127.0.0.1 valid=30s ipv6=off;
+    resolver_timeout 5s;
+
     # Load stream configurations
     include {{ nginx_streams_dir }}/*.conf;
 }

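Together, the two hunks above make nginx resolve `config.forward_to` at request time (via the `resolver` directive plus a variable-based `proxy_pass`) instead of failing at startup when a backend hostname is temporarily unresolvable. A sketch of what an `nginx_forwarder` entry might look like, inferred from the template's use of `domain` and `config` — the exact structure isn't shown in this diff and the values are illustrative:

```yaml
# group_vars sketch - structure inferred from the stream template
nginx_forwarder:
  blog.hello.com:
    forward_to: backend.internal
    http: true
    https: true
```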
@@ -1,2 +1,3 @@
 ---
 nginx_user: http
+certbot_timer: certbot-renew.timer

@@ -1,2 +1,3 @@
 ---
 nginx_user: www-data
+certbot_timer: certbot.timer

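These two hunks read like the nginx role's per-distro vars files: the `nginx_user: http` file matches Arch (where the certbot package's timer is `certbot-renew.timer`) and `nginx_user: www-data` matches Debian (`certbot.timer`), which explains the `{{ certbot_timer }}` change earlier. A common pattern for loading such files — a sketch, since the include task itself isn't part of this diff:

```yaml
# Hypothetical loader; assumes vars files named after the OS family
- name: Load OS-specific variables
  ansible.builtin.include_vars: "{{ ansible_facts['os_family'] }}.yml"
```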
@@ -1,4 +1,3 @@
 ---
 dependencies:
   - role: podman
-  - role: nginx

@@ -91,7 +91,7 @@
 - name: Deploy nginx vhost configuration for ntfy
   ansible.builtin.template:
     src: nginx-vhost.conf.j2
-    dest: /etc/nginx/conf.d/ntfy.conf
+    dest: "{{ nginx_conf_dir }}/ntfy.conf"
     owner: root
     group: root
     mode: "0644"
@@ -100,7 +100,7 @@
 
 - name: Remove nginx vhost configuration for ntfy
   ansible.builtin.file:
-    path: /etc/nginx/conf.d/ntfy.conf
+    path: "{{ nginx_conf_dir }}/ntfy.conf"
     state: absent
   when: not ntfy_nginx_enabled
   notify: Reload nginx
@@ -7,8 +7,8 @@ After=network-online.target
 Type=oneshot
 RemainAfterExit=true
 WorkingDirectory={{ podman_projects_dir }}/ntfy
-ExecStart=/usr/bin/podman compose up -d
-ExecStop=/usr/bin/podman compose down
+ExecStart=/usr/bin/podman-compose up -d
+ExecStop=/usr/bin/podman-compose down
 Restart=on-failure
 RestartSec=10

roles/uptime-kuma/README.md (new file, 99 lines)
@@ -0,0 +1,99 @@
# uptime-kuma - Self-Hosted Monitoring Tool

Deploys [Uptime Kuma](https://uptime.kuma.pet/) - a self-hosted monitoring and status page application.

## Features

- Website monitoring (HTTP/HTTPS)
- TCP port monitoring
- Ping monitoring
- DNS monitoring
- Status pages
- Notifications (Email, Discord, Slack, ntfy, etc.)
- Multi-language support
- Dark mode

## Configuration

### Optional Variables

See [defaults/main.yml](defaults/main.yml) for all configuration options.

Key settings:

```yaml
uptime_kuma_version: "2"
uptime_kuma_port: 3001
uptime_kuma_data_dir: "{{ podman_projects_dir }}/uptime-kuma/data"

# Nginx reverse proxy
uptime_kuma_nginx_enabled: false
uptime_kuma_nginx_hostname: uptime.nas.local
```

## Storage Requirements

**CRITICAL:** Uptime Kuma uses SQLite and requires local storage with POSIX file lock support.

- ✅ **Supported:** Local filesystem, Docker volumes
- ❌ **NOT supported:** NFS and other network filesystems (will cause database corruption)

## First-Time Setup

1. Access the web UI: `https://uptime.nas.local` (if nginx is enabled) or `http://localhost:3001`
2. Create the admin account on first visit
3. There are no default credentials - the account is created during initial setup

## Usage

### Adding Monitors

Web UI → Add New Monitor:

- **Monitor Type:** HTTP(s), TCP Port, Ping, DNS, etc.
- **Friendly Name:** Display name
- **URL/Host:** Target to monitor
- **Heartbeat Interval:** Check frequency (seconds)
- **Retries:** Failed checks before marking as down
- **Notifications:** Select notification endpoints

### Notification Endpoints

Web UI → Settings → Notifications:

- Email (SMTP)
- Discord, Slack, Telegram
- ntfy (recommended for local notifications)
- Webhooks
- 50+ integrations available

### Status Pages

Create public or password-protected status pages showing monitor health.

Web UI → Status Pages → New Status Page

## Integration with ntfy

If you deployed the `ntfy` role:

1. Settings → Notifications → Add
2. Type: ntfy
3. ntfy Server URL: `https://ntfy.jokester.fr`
4. Topic: `uptime-alerts`
5. Username: `admin`
6. Password: Your ntfy admin password
7. Send a test notification

## File Locations

- Data directory: `{{ uptime_kuma_data_dir }}`
- SQLite database: `{{ uptime_kuma_data_dir }}/kuma.db`

## Dependencies

- podman
- nginx (if `uptime_kuma_nginx_enabled: true`)

## Sources

- [Install Uptime Kuma using Docker](https://uptimekuma.org/install-uptime-kuma-docker/)
- [Uptime Kuma GitHub Wiki](https://github.com/louislam/uptime-kuma/wiki)
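The README's nginx integration is off by default. A minimal host_vars sketch to put the UI behind TLS, using only variables defined by this role (the hostname is illustrative):

```yaml
uptime_kuma_nginx_enabled: true
uptime_kuma_nginx_hostname: "uptime.example.com"
```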
roles/uptime-kuma/defaults/main.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
---
# Uptime Kuma version to deploy
uptime_kuma_version: "2"

# Storage location (CRITICAL: Must be local storage, NOT NFS)
# NFS is NOT supported - will cause SQLite corruption
uptime_kuma_data_dir: "{{ podman_projects_dir }}/uptime-kuma/data"

# Network configuration
uptime_kuma_port: 3001

# Container image
uptime_kuma_image: louislam/uptime-kuma

# Timezone
uptime_kuma_timezone: UTC

# Nginx reverse proxy configuration
uptime_kuma_nginx_enabled: false
uptime_kuma_nginx_hostname: uptime.nas.local
roles/uptime-kuma/handlers/main.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
---
- name: Reload systemd
  ansible.builtin.systemd:
    daemon_reload: true

- name: Restart uptime-kuma
  ansible.builtin.systemd:
    name: uptime-kuma
    state: restarted
    daemon_reload: true

- name: Reload nginx
  ansible.builtin.systemd:
    name: nginx
    state: reloaded
roles/uptime-kuma/meta/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
dependencies:
  - role: podman
roles/uptime-kuma/tasks/main.yml (new file, 58 lines)
@@ -0,0 +1,58 @@
---
- name: Create uptime-kuma project directory
  ansible.builtin.file:
    path: "{{ podman_projects_dir }}/uptime-kuma"
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0755"

- name: Create uptime-kuma data directory
  ansible.builtin.file:
    path: "{{ uptime_kuma_data_dir }}"
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0755"

- name: Deploy docker-compose.yml for uptime-kuma
  ansible.builtin.template:
    src: docker-compose.yml.j2
    dest: "{{ podman_projects_dir }}/uptime-kuma/docker-compose.yml"
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: "0644"
  notify: Restart uptime-kuma

- name: Create systemd service for uptime-kuma
  ansible.builtin.template:
    src: uptime-kuma.service.j2
    dest: /etc/systemd/system/uptime-kuma.service
    owner: root
    group: root
    mode: "0644"
  notify: Reload systemd

- name: Enable and start uptime-kuma service
  ansible.builtin.systemd:
    name: uptime-kuma
    enabled: true
    state: started
    daemon_reload: true

- name: Deploy nginx vhost configuration for uptime-kuma
  ansible.builtin.template:
    src: nginx-vhost.conf.j2
    dest: "{{ nginx_conf_dir }}/uptime-kuma.conf"
    owner: root
    group: root
    mode: "0644"
  when: uptime_kuma_nginx_enabled
  notify: Reload nginx

- name: Remove nginx vhost configuration for uptime-kuma
  ansible.builtin.file:
    path: "{{ nginx_conf_dir }}/uptime-kuma.conf"
    state: absent
  when: not uptime_kuma_nginx_enabled
  notify: Reload nginx
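A minimal playbook applying the role — a sketch, since the repo's actual playbooks aren't part of this commit (the host group and privilege escalation are assumptions):

```yaml
- hosts: nas
  become: true
  roles:
    - uptime-kuma
```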
roles/uptime-kuma/templates/docker-compose.yml.j2 (new file, 13 lines)
@@ -0,0 +1,13 @@
---
services:
  uptime-kuma:
    container_name: uptime-kuma
    image: {{ uptime_kuma_image }}:{{ uptime_kuma_version }}
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - {{ uptime_kuma_data_dir }}:/app/data:rw,Z
    ports:
      - "{{ uptime_kuma_port }}:3001"
    restart: unless-stopped
    environment:
      TZ: {{ uptime_kuma_timezone }}
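For reference, with the role defaults and a hypothetical `podman_projects_dir` of `/opt/podman`, the template above would render roughly to:

```yaml
---
services:
  uptime-kuma:
    container_name: uptime-kuma
    image: louislam/uptime-kuma:2
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /opt/podman/uptime-kuma/data:/app/data:rw,Z  # data dir path is illustrative
    ports:
      - "3001:3001"
    restart: unless-stopped
    environment:
      TZ: UTC
```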
roles/uptime-kuma/templates/nginx-vhost.conf.j2 (new file, 54 lines)
@@ -0,0 +1,54 @@
# Uptime Kuma vhost with Let's Encrypt (Certbot)
# Managed by Ansible - DO NOT EDIT MANUALLY

server {
    listen 80;
    server_name {{ uptime_kuma_nginx_hostname }};

    # Certbot webroot for ACME challenges
    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    # Redirect to HTTPS
    location / {
        return 301 https://$server_name$request_uri;
    }
}

server {
    listen 443 ssl;
    server_name {{ uptime_kuma_nginx_hostname }};

    # Let's Encrypt certificates (managed by Certbot)
    ssl_certificate /etc/letsencrypt/live/{{ uptime_kuma_nginx_hostname }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ uptime_kuma_nginx_hostname }}/privkey.pem;

    # SSL configuration
    ssl_protocols {{ nginx_ssl_protocols }};
    ssl_prefer_server_ciphers {{ 'on' if nginx_ssl_prefer_server_ciphers else 'off' }};

{% if nginx_log_backend == 'journald' %}
    access_log syslog:server=unix:/dev/log,nohostname,tag=nginx_uptime_kuma;
    error_log syslog:server=unix:/dev/log,nohostname,tag=nginx_uptime_kuma;
{% else %}
    access_log /var/log/nginx/{{ uptime_kuma_nginx_hostname }}_access.log main;
    error_log /var/log/nginx/{{ uptime_kuma_nginx_hostname }}_error.log;
{% endif %}

    location / {
        proxy_pass http://127.0.0.1:{{ uptime_kuma_port }};
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # WebSocket support for real-time updates
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        # Disable buffering for real-time updates
        proxy_buffering off;
    }
}
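This template leans on three nginx-role variables that are not defined anywhere in this commit: `nginx_ssl_protocols`, `nginx_ssl_prefer_server_ciphers`, and `nginx_log_backend`. A sketch of plausible values (the names are real, taken from the template above; the values are illustrative):

```yaml
# Assumed nginx role variables - values illustrative
nginx_ssl_protocols: "TLSv1.2 TLSv1.3"
nginx_ssl_prefer_server_ciphers: false
nginx_log_backend: journald  # anything else falls back to file-based logs
```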
roles/uptime-kuma/templates/uptime-kuma.service.j2 (new file, 16 lines)
@@ -0,0 +1,16 @@
[Unit]
Description=Uptime Kuma Monitoring Service
Requires=network-online.target
After=network-online.target

[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory={{ podman_projects_dir }}/uptime-kuma
ExecStart=/usr/bin/podman-compose up -d
ExecStop=/usr/bin/podman-compose down
Restart=on-failure
RestartSec=10

[Install]
WantedBy=multi-user.target