fix: leaner readmes

Clément Désiles 2025-11-15 00:17:22 +01:00
parent 9c10116dcb
commit 1d3af8dc45
4 changed files with 120 additions and 722 deletions

View File

@@ -1,172 +0,0 @@
#!/bin/bash
# Immich Networking Debug Script
# This script helps diagnose container communication issues between
# immich-server and immich-machine-learning containers
set -e
COMPOSE_DIR="/opt/podman/immich"
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
echo "========================================"
echo "Immich Networking Debug Script"
echo "========================================"
echo ""
# Check if compose directory exists
if [ ! -d "$COMPOSE_DIR" ]; then
    echo -e "${RED}ERROR: Compose directory not found: $COMPOSE_DIR${NC}"
    exit 1
fi
cd "$COMPOSE_DIR"
# 1. Check container status
echo -e "${YELLOW}1. Container Status${NC}"
echo "---"
podman compose ps
echo ""
# 2. Check if containers are running
echo -e "${YELLOW}2. Verifying Containers are Running${NC}"
echo "---"
if podman ps | grep -q "immich_server"; then
    echo -e "${GREEN}✓ immich_server is running${NC}"
else
    echo -e "${RED}✗ immich_server is NOT running${NC}"
fi
if podman ps | grep -q "immich_machine_learning"; then
    echo -e "${GREEN}✓ immich_machine_learning is running${NC}"
else
    echo -e "${RED}✗ immich_machine_learning is NOT running${NC}"
fi
echo ""
# 3. Check network configuration
echo -e "${YELLOW}3. Network Configuration${NC}"
echo "---"
echo "Immich networks:"
podman network ls | grep -i immich || echo "No Immich networks found"
echo ""
# 4. Check which networks containers are on
echo -e "${YELLOW}4. Container Network Membership${NC}"
echo "---"
echo "immich_server networks:"
podman inspect immich_server 2>/dev/null | grep -A 5 '"Networks"' || echo "Container not found"
echo ""
echo "immich_machine_learning networks:"
podman inspect immich_machine_learning 2>/dev/null | grep -A 5 '"Networks"' || echo "Container not found"
echo ""
# 5. Check environment variables
echo -e "${YELLOW}5. Machine Learning URL Configuration${NC}"
echo "---"
# head exits 0 even on empty input, so default the value here instead of relying on a pipeline fallback
ML_URL=$(podman inspect immich_server 2>/dev/null | grep IMMICH_MACHINE_LEARNING_URL | head -1)
echo "IMMICH_MACHINE_LEARNING_URL: ${ML_URL:-Not set}"
echo ""
# 6. Check if ML container is listening
echo -e "${YELLOW}6. ML Container Listening Status${NC}"
echo "---"
if podman exec immich_machine_learning sh -c 'command -v netstat' >/dev/null 2>&1; then
    podman exec immich_machine_learning netstat -ln | grep 3003 || echo "Port 3003 not listening (or netstat not available)"
else
    echo "netstat not available in container, checking logs instead:"
    LISTEN_MSGS=$(podman logs immich_machine_learning 2>&1 | grep -i "listening\|started" | tail -5 || true)
    echo "${LISTEN_MSGS:-No listening messages found in logs}"
fi
echo ""
# 7. Test connectivity from server to ML
echo -e "${YELLOW}7. Testing Server → ML Connectivity${NC}"
echo "---"
if podman exec immich_server sh -c 'command -v curl' >/dev/null 2>&1; then
    echo "Testing HTTP connection to ML service..."
    if podman exec immich_server curl -sf http://immich-machine-learning:3003/ping >/dev/null 2>&1; then
        echo -e "${GREEN}✓ Successfully connected to ML service via service name${NC}"
    else
        echo -e "${RED}✗ Failed to connect to ML service${NC}"
        echo "Attempting to diagnose..."
        # Try to resolve DNS
        if podman exec immich_server sh -c 'command -v nslookup' >/dev/null 2>&1; then
            echo "DNS resolution test:"
            podman exec immich_server nslookup immich-machine-learning || echo "DNS resolution failed"
        fi
        # Try with verbose curl
        echo "Verbose curl output:"
        podman exec immich_server curl -v http://immich-machine-learning:3003/ping 2>&1 || true
    fi
else
    echo "curl not available in server container, skipping connectivity test"
fi
echo ""
# 8. Check health status
echo -e "${YELLOW}8. Container Health Status${NC}"
echo "---"
echo "immich_server health:"
podman inspect immich_server 2>/dev/null | grep -A 10 '"Health"' | head -15 || echo "No health data available"
echo ""
echo "immich_machine_learning health:"
podman inspect immich_machine_learning 2>/dev/null | grep -A 10 '"Health"' | head -15 || echo "No health data available"
echo ""
# 9. Check recent logs for errors
echo -e "${YELLOW}9. Recent Error Logs${NC}"
echo "---"
echo "Server errors (last 10):"
podman logs immich_server 2>&1 | grep -i "error\|fail\|unhealthy" | tail -10 || echo "No errors found"
echo ""
echo "ML errors (last 10):"
podman logs immich_machine_learning 2>&1 | grep -i "error\|fail" | tail -10 || echo "No errors found"
echo ""
# 10. Check compose file configuration
echo -e "${YELLOW}10. Docker Compose Configuration${NC}"
echo "---"
echo "Network configuration in docker-compose.yml:"
grep -A 3 "^networks:" docker-compose.yml 2>/dev/null || echo "No networks section found in compose file"
echo ""
echo "Server network config:"
grep -A 2 "immich-server:" docker-compose.yml | grep -A 2 "networks:" || echo "No network config for server"
echo ""
echo "ML network config:"
grep -A 2 "immich-machine-learning:" docker-compose.yml | grep -A 2 "networks:" || echo "No network config for ML"
echo ""
# Summary
echo "========================================"
echo -e "${YELLOW}Summary${NC}"
echo "========================================"
echo ""
# Check if both containers are healthy
# grep -c prints a count (including 0) by itself; '|| true' only guards the nonzero exit
# so the count is never followed by a second "0"
SERVER_HEALTHY=$(podman inspect immich_server 2>/dev/null | grep -c '"Status": "healthy"' || true)
ML_HEALTHY=$(podman inspect immich_machine_learning 2>/dev/null | grep -c '"Status": "healthy"' || true)
if [ "$SERVER_HEALTHY" -gt 0 ] && [ "$ML_HEALTHY" -gt 0 ]; then
echo -e "${GREEN}✓ Both containers appear healthy${NC}"
elif [ "$SERVER_HEALTHY" -gt 0 ]; then
echo -e "${YELLOW}⚠ Server healthy, but ML container may have issues${NC}"
elif [ "$ML_HEALTHY" -gt 0 ]; then
echo -e "${YELLOW}⚠ ML healthy, but server container may have issues${NC}"
else
echo -e "${RED}✗ One or both containers are unhealthy${NC}"
fi
echo ""
echo "For more detailed logs, run:"
echo " cd $COMPOSE_DIR && podman compose logs -f"
echo ""
echo "To restart containers:"
echo " cd $COMPOSE_DIR && podman compose restart"
echo ""
echo "To recreate containers with updated config:"
echo " cd $COMPOSE_DIR && podman compose down && podman compose up -d"
echo ""

View File

@@ -1,114 +1,22 @@
# Nginx Role
Installs and configures Nginx as a reverse proxy for web applications with modular vhost management.
## Features
- Installs Nginx
- Configurable worker processes and connections
- Gzip compression support
- Modular vhost configuration via `/etc/nginx/conf.d/`
- Zero-downtime reloads
- Configurable logging backend (journald or traditional files)
- Automatic logrotate for file-based logging
- SSL/TLS configuration
## Requirements
- Systemd-based Linux distribution
- Root/sudo access
## Role Variables
See `defaults/main.yml` for all available variables and their default values. The role ships sensible defaults for worker processes, connection limits, upload sizes, compression, and SSL/TLS settings; override them in your inventory as needed.
### Logging Configuration
```yaml
# Logging backend: 'journald' (systemd journal) or 'file' (traditional logs)
nginx_log_backend: journald # Default: journald
# Logrotate settings (only used when nginx_log_backend: file)
nginx_logrotate_rotate: 14 # Keep 14 days of logs
nginx_logrotate_frequency: daily # daily|weekly|monthly
nginx_logrotate_compress: true # Compress rotated logs
```
**journald backend (default):**
- Logs sent to systemd journal via syslog
- Centralized with other system logs
- Managed by systemd-journald (size limits, retention, compression)
- View with: `journalctl -u nginx`
**file backend:**
- Traditional `/var/log/nginx/*.log` files
- Automatic logrotate configuration deployed
- Useful for external log aggregation tools
## Dependencies
None.
## Example Playbook
### Basic Installation
```yaml
---
- hosts: servers
  become: true
  roles:
    - role: nginx
```
### Custom Configuration
```yaml
---
- hosts: servers
  become: true
  roles:
    - role: nginx
      vars:
        nginx_worker_processes: 4
        nginx_worker_connections: 2048
        nginx_client_max_body_size: 500M
```
## Service Management
The role creates handlers for managing nginx:
```yaml
notify: Reload nginx # Graceful reload (zero downtime)
notify: Restart nginx # Full restart
```
## Vhost Configuration Pattern
This role is designed to work with service-specific vhost configurations. Each service role should:
1. Deploy its vhost config to `/etc/nginx/conf.d/<service>.conf`
2. Notify the nginx reload handler
3. Use a variable to enable/disable nginx integration
### Example Service Integration
In your service role (e.g., `immich`):
**defaults/main.yml:**
```yaml
immich_nginx_enabled: false
immich_nginx_hostname: immich.example.com
```
**tasks/main.yml:**
```yaml
- name: Deploy nginx vhost
  ansible.builtin.template:
    src: nginx-vhost.conf.j2
    dest: /etc/nginx/conf.d/myservice.conf
  notify: Reload nginx
```
**templates/nginx-vhost.conf.j2:**
```nginx
server {
    listen 80;
    server_name {{ myservice_nginx_hostname }};

    location / {
        proxy_pass http://127.0.0.1:{{ myservice_port }};
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
```
**handlers/main.yml:**
```yaml
- name: Reload nginx
  ansible.builtin.systemd:
    name: nginx
    state: reloaded
```
## Independent Deployments
This pattern allows for independent service deployments:
1. **Deploy service A** → Only touches `/etc/nginx/conf.d/serviceA.conf` → Reload nginx
2. **Deploy service B** → Only touches `/etc/nginx/conf.d/serviceB.conf` → Reload nginx
3. **No downtime** for other services during deployment (see the sketch after this list)
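For example, redeploying a single service's vhost could look like this (a sketch; the playbook, host, and tag names are hypothetical and depend on your inventory):
```bash
# Redeploy only immich's vhost; other services keep serving traffic
ansible-playbook site.yml --limit nas --tags immich
```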
## Log Management
### Journald Backend (Default)
When `nginx_log_backend: journald`, logs are sent to systemd journal:
```bash
# View all nginx logs
journalctl -u nginx -f
# Last 100 lines
journalctl -u nginx -n 100
# Filter by priority (error, warning, info)
journalctl -u nginx -p err
# Time range
journalctl -u nginx --since "1 hour ago"
# Export to file
journalctl -u nginx > nginx-logs.txt
```
**Benefits:**
- Centralized with all system logs
- Automatic rotation/compression via systemd-journald
- Structured metadata (timestamps, priorities)
- No separate logrotate configuration needed
### File Backend
When `nginx_log_backend: file`, logs are written to:
- `/var/log/nginx/access.log` - Access logs
- `/var/log/nginx/error.log` - Error logs
```bash
# View traditional log files
tail -f /var/log/nginx/access.log
tail -f /var/log/nginx/error.log
```
Logrotate is automatically configured to:
- Rotate daily (configurable)
- Keep 14 days (configurable)
- Compress old logs
- Reload nginx gracefully after rotation
### Switching Backends
To switch from journald to file logging:
```yaml
- hosts: servers
  roles:
    - role: nginx
      vars:
        nginx_log_backend: file
        nginx_logrotate_rotate: 30 # Keep 30 days
```
To switch back to journald:
```yaml
- hosts: servers
  roles:
    - role: nginx
      vars:
        nginx_log_backend: journald
```
The role automatically removes logrotate config when using journald.
## Configuration Validation
The role automatically validates nginx configuration before applying changes using `nginx -t`.
Manual validation:
```bash
nginx -t # Test configuration
nginx -t -c /path/to/conf # Test specific config file
```
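If you want the same guarantee from Ansible itself, the template module's standard `validate` parameter can run `nginx -t` against the rendered file before installing it. A minimal sketch (task name and paths are illustrative, not taken from this role):
```yaml
- name: Deploy nginx.conf, refusing to install a broken config
  ansible.builtin.template:
    src: nginx.conf.j2
    dest: /etc/nginx/nginx.conf
    validate: nginx -t -c %s
  notify: Reload nginx
```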
## Troubleshooting
### Check nginx status
```bash
systemctl status nginx
```
### Test configuration
```bash
nginx -t
```
### Reload configuration
```bash
systemctl reload nginx
```
### View error logs
```bash
journalctl -u nginx -n 100
# or
tail -f /var/log/nginx/error.log
```
### List loaded vhost configs
```bash
ls -la /etc/nginx/conf.d/
```
## SSL/TLS Support
For SSL support, you can:
1. **Manual certificates:** Place certs in `/etc/ssl/` and reference in vhost configs
2. **Let's Encrypt:** Use certbot or similar tools (can be added to playbook)
3. **Self-signed:** Generate with `openssl` for testing
The base nginx.conf includes SSL protocol configuration that applies to all vhosts.
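For the manual-certificate option, a vhost template might add an HTTPS server block along these lines (a sketch; the variable names reuse the integration example above and the certificate paths are assumptions):
```nginx
server {
    listen 443 ssl;
    server_name {{ myservice_nginx_hostname }};

    ssl_certificate     /etc/ssl/{{ myservice_nginx_hostname }}.crt;
    ssl_certificate_key /etc/ssl/{{ myservice_nginx_hostname }}.key;

    location / {
        proxy_pass http://127.0.0.1:{{ myservice_port }};
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
```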
## Performance Tuning
Adjust these variables based on your workload:
- `nginx_worker_processes`: Set to number of CPU cores
- `nginx_worker_connections`: Increase for high traffic (check `ulimit -n`)
- `nginx_client_max_body_size`: Increase for large file uploads
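For example, host_vars for an 8-core proxy handling large uploads might look like this (illustrative values, not benchmarked recommendations):
```yaml
nginx_worker_processes: 8        # one per CPU core
nginx_worker_connections: 4096   # keep below `ulimit -n`
nginx_client_max_body_size: 1G   # large uploads (e.g. photo libraries)
```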
## License
MIT
## Author Information
Created for managing reverse proxy configurations in NAS/homelab environments.
## References
- [Nginx Documentation](https://nginx.org/en/docs/)
- [Nginx Logging](https://nginx.org/en/docs/syslog.html)
- [Nginx SSL/TLS](https://nginx.org/en/docs/http/configuring_https_servers.html)

View File

@@ -1,114 +1,64 @@
# Podman Role
Installs and configures Podman for container management on NAS/homelab systems, with Docker Compose compatibility via podman-compose.
## Features
- Installs Podman, podman-compose, and crun (OCI runtime)
- Configurable logging backend (journald or k8s-file)
- Container registry search configuration with short-name resolution (e.g., `redis:alpine` → `docker.io/library/redis:alpine`)
- External network creation for service isolation (e.g., a dedicated Immich network)
- Shared projects directory for compose files
## Requirements
- Systemd-based Linux distribution
- Root/sudo access
## Container Logging
**journald (default):**
- Logs sent to systemd journal
- View: `journalctl CONTAINER_NAME=<name> -f`
**k8s-file:**
- Logs stored as JSON files with automatic rotation
- Configured via `podman_log_max_size` and `podman_log_max_files`
Switch via the `podman_log_driver` variable.
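In inventory terms, switching a host to rotated JSON-file logs might look like the sketch below (values are illustrative; the exact option format is defined in `defaults/main.yml`):
```yaml
podman_log_driver: k8s-file
podman_log_max_size: 10mb   # rotate each container log around 10 MB
podman_log_max_files: 5     # keep at most five rotated files per container
```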
## Role Variables
See `defaults/main.yml` for all available variables and their default values.
### Unqualified Search Registries
When you use short image names (without a registry prefix), Podman searches the configured registries in order (e.g., `redis:alpine` → `docker.io/library/redis:alpine`). Customize via the `podman_unqualified_search_registries` variable.
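An override is a short inventory list; for instance, to search Docker Hub first and then Quay (a sketch; the actual default lives in `defaults/main.yml`):
```yaml
podman_unqualified_search_registries:
  - docker.io
  - quay.io
```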
### External Networks
The role can create external Podman networks for services that need dedicated network isolation. Define the `podman_external_networks` list in your inventory, as in the example playbook below.
## Dependencies
- `containers.podman` collection (installed via `requirements.yml`)
## Example Playbook
Define networks in inventory for persistent, isolated container networks:
```yaml
---
- hosts: servers
  become: true
  roles:
    - role: podman
      podman_external_networks:
        - name: immich
          subnet: 172.20.0.0/16
          gateway: 172.20.0.1
```
Networks persist across container restarts and compose rebuilds.
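On the compose side, a stack then declares the pre-created network as `external` instead of letting compose manage it. A sketch reusing the `immich` network from the example above:
```yaml
# docker-compose.yml
services:
  immich-server:
    image: ghcr.io/immich-app/immich-server:release
    networks:
      - immich
networks:
  immich:
    external: true
```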
## Files Deployed
- `/etc/containers/registries.conf` - Registry configuration
- `{{ podman_projects_dir }}` - Projects directory (default: `/opt/podman`)
## Hands-on Commands
```bash
# Using short names (works after role deployment)
podman run -d redis:alpine
# Using fully qualified names (always works)
podman run -d docker.io/library/redis:alpine
# View container logs (journald)
journalctl CONTAINER_NAME=immich_server -f
# View container logs (k8s-file)
podman logs -f immich_server
# Check log driver
podman info --format '{{.Host.LogDriver}}'
# Inspect container log config
podman inspect <container> | jq '.[0].HostConfig.LogConfig'
# Smoke-test that containers run
podman run --rm alpine echo "OK"
# List networks
podman network ls
```
### Docker Compose
Services using `podman-compose` should store their compose files in subdirectories:
```
/opt/podman/
├── immich/
│   └── docker-compose.yml
├── nextcloud/
│   └── docker-compose.yml
└── gitea/
    └── docker-compose.yml
```
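Each stack is then managed from its own directory:
```bash
cd /opt/podman/immich
podman-compose up -d   # start or update the stack
podman-compose down    # stop and remove its containers
```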
## Troubleshooting
### Short names not resolving
Check the registries configuration:
```bash
cat /etc/containers/registries.conf
```
Test search order:
```bash
podman search redis --limit 3
```
### Permission denied
Ensure the user is in the appropriate groups (handled by Podman package):
```bash
# Check groups
groups $USER
# May need to log out and back in after installation
```
## License
MIT
## Author Information
Created for managing containerized services in NAS/homelab environments.
## References
- [Podman Documentation](https://docs.podman.io/)
- [Podman Logging](https://docs.podman.io/en/latest/markdown/podman-run.1.html#log-driver-driver)
- [containers.conf(5)](https://github.com/containers/common/blob/main/docs/containers.conf.5.md)

View File

@@ -1,245 +1,86 @@
# PostgreSQL Role
Installs and configures PostgreSQL as a shared, local-only database service that multiple applications can use with isolated databases and users.
## Features
- Shared PostgreSQL instance (system service)
- Local-only access by default (localhost)
- Per-service database isolation
- Per-service user privileges (minimal permissions)
- Container access support (via Podman gateway)
- Configurable logging backend (journald or files)
- Performance tuning presets
## Requirements
- Systemd-based Linux distribution
- Root/sudo access
- Python `psycopg2` package (for database operations from service roles)
## Architecture Pattern
**Decentralized database management:**
- PostgreSQL role: installs and configures the server
- Service roles: create their own databases/users (e.g., immich, nextcloud); a sketch follows this list
- Isolation: each service user can only access its own database
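As an illustration, a service role's database tasks might look like the sketch below, assuming the `community.postgresql` collection is installed (the module names are real; the service name and password variable are placeholders):
```yaml
- name: Create service database user
  community.postgresql.postgresql_user:
    name: immich
    password: "{{ immich_db_password }}"
  become: true
  become_user: postgres

- name: Create service database owned by that user
  community.postgresql.postgresql_db:
    name: immich
    owner: immich
  become: true
  become_user: postgres
```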
See `CLAUDE.md` for detailed architecture documentation.
## Role Variables
See `defaults/main.yml` for all available variables and their default values.
### Required Password
The `postgres_admin_password` variable must be set in your inventory (minimum 12 characters); the role fails if it is not set.
## Container Access
For containers to access PostgreSQL, set `postgres_bind` to include the Podman gateway:
```yaml
postgres_bind: "127.0.0.1,{{ podman_subnet_gateway }}"
```
## Dependencies
None.
## Example Playbook
```yaml
---
- hosts: servers
  become: true
  roles:
    - role: postgres
    - role: immich # Will create its own database
    - role: nextcloud # Will create its own database
```
## Database Isolation Strategy
This role follows a **decentralized database management** pattern:
### 1. PostgreSQL Role Responsibility
- Install and configure PostgreSQL
- Manage global performance settings
- Ensure the service is running
### 2. Service Role Responsibility
Each service role (immich, nextcloud, etc.) manages its own:
- Database creation
- User creation
- Password management
- Schema migrations
### 3. Security & Isolation
**Database Isolation:**
- Each service gets its own database
- Example: `immich`, `nextcloud`, `gitea`
**User Isolation:**
- Each service gets its own PostgreSQL user
- Users can only access their own database
- Example: `immich` user → `immich` database only
**Authentication:**
- Each user has a unique password
- Passwords stored in service role variables (use Ansible Vault for production)
## Connection Methods
### From Containers
If your service runs in a container (Docker/Podman), you need to configure PostgreSQL to listen on the Podman bridge gateway:
**Step 1: Configure PostgreSQL in inventory**
```yaml
# inventory/host_vars/yourserver.yml
postgres_bind: "127.0.0.1,{{ podman_subnet_gateway }}"
postgres_firewall_allowed_sources:
  - 127.0.0.0/8
  - "{{ podman_subnet }}"
```
**Step 2: Use host.containers.internal in containers**
```yaml
# docker-compose.yml
services:
  myservice:
    extra_hosts:
      - "host.containers.internal:host-gateway"
    environment:
      DB_HOSTNAME: host.containers.internal
      DB_PORT: 5432
```
**What this does:**
- PostgreSQL listens on `127.0.0.1` (localhost) and `10.88.0.1` (Podman gateway)
- UFW firewall allows connections from localhost and Podman subnet
- `pg_hba.conf` automatically configured to allow Podman subnet
- `host.containers.internal` resolves to the gateway IP inside containers
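A quick way to verify the wiring from inside a container (a sketch; it assumes the image ships `getent` and the PostgreSQL client tools, which slim images often do not):
```bash
# Does the magic hostname resolve to the gateway IP?
podman exec myservice getent hosts host.containers.internal
# Is PostgreSQL reachable through it?
podman exec myservice pg_isready -h host.containers.internal -p 5432
```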
### From System Services
Services running directly on the host can connect to `localhost:5432` without any special configuration.
## Logging Backends
**journald (default):**
- Logs via stderr → systemd journal
- View: `journalctl -u postgresql -f`
**file:**
- Logs to data directory or `/var/log/postgresql/`
- Automatic logrotate configuration
Switch via the `postgres_log_backend` variable.
## Security Best Practices
### 1. Use Ansible Vault for Passwords
```bash
# Create encrypted variables
ansible-vault encrypt_string 'my_secure_password' --name 'immich_db_password'
```
Add to your inventory or vars:
```yaml
immich_db_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  ...encrypted...
```
### 2. Unique Passwords per Service
Never reuse passwords between services:
```yaml
immich_db_password: unique_password_1
nextcloud_db_password: unique_password_2
gitea_db_password: unique_password_3
```
### 3. Minimal Privileges
The pattern above ensures users have:
- ✅ Access to their database only
- ❌ No superuser privileges
- ❌ Cannot create databases
- ❌ Cannot create roles
- ❌ Cannot access other databases
### 4. Controlled Access
PostgreSQL default configuration:
- Listens on `localhost` only by default
- To allow container access, set `postgres_bind` to include Podman gateway
- UFW firewall rules automatically configured for allowed sources
- `pg_hba.conf` automatically configured for Podman subnet when enabled
- No remote network access by default
## Troubleshooting
### Check PostgreSQL status
```bash
systemctl status postgresql
```
### Connect to PostgreSQL
```bash
# Connect to PostgreSQL
sudo -u postgres psql
# List databases
sudo -u postgres psql -c '\l'
# List users and permissions
sudo -u postgres psql -c '\du'
```
### Test connection from a service
```bash
# From localhost
psql -h localhost -U immich -d immich
# From the Podman gateway (if configured)
psql -h 10.88.0.1 -U immich -d immich
```
### Inspect configuration
```bash
# Check listen addresses
sudo -u postgres psql -c "SHOW listen_addresses;"
# Check firewall rules
sudo ufw status | grep 5432
# Check pg_hba.conf (Arch data directory shown)
sudo grep -v "^#" /var/lib/postgres/data/pg_hba.conf | grep -v "^$"
# Performance settings
sudo -u postgres psql -c "SHOW shared_buffers;"
sudo -u postgres psql -c "SHOW effective_cache_size;"
```
### View logs
```bash
# journald backend
journalctl -u postgresql -f
journalctl -u postgresql -p err
# file backend (Arch)
tail -f /var/lib/postgres/data/log/postgresql-*.log
# file backend (Debian)
tail -f /var/log/postgresql/postgresql-*.log
```
## Performance Tuning
Adjust variables based on your hardware:
**For systems with 4GB RAM:**
```yaml
postgres_shared_buffers: 1GB
postgres_effective_cache_size: 3GB
```
**For systems with 16GB RAM:**
```yaml
postgres_shared_buffers: 4GB
postgres_effective_cache_size: 12GB
```
**Rule of thumb:**
- `shared_buffers`: 25% of total RAM
- `effective_cache_size`: 50-75% of total RAM
## Backup Recommendations
Consider implementing:
1. **pg_dump** for logical backups
2. **WAL archiving** for point-in-time recovery
3. **Automated backup scripts** via cron
Example backup script for a service:
```bash
pg_dump -h localhost -U immich immich > /backup/immich_$(date +%Y%m%d).sql
```
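To automate the dump, the same command can be scheduled from cron; an illustrative `/etc/cron.d` entry (the path, schedule, and the `~/.pgpass` entry it relies on for non-interactive auth are all assumptions):
```bash
# /etc/cron.d/immich-backup: nightly logical dump at 03:00
0 3 * * * postgres pg_dump -h localhost -U immich immich > /backup/immich_$(date +\%Y\%m\%d).sql
```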
## License
MIT
## Author Information
Created for managing shared PostgreSQL instances in NAS/homelab environments.
## References
- [PostgreSQL Documentation](https://www.postgresql.org/docs/current/)
- [PostgreSQL Logging](https://www.postgresql.org/docs/current/runtime-config-logging.html)
- [PostgreSQL Performance Tuning](https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server)
- [pg_hba.conf Documentation](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html)