After many attemps

This commit is contained in:
Mike Phares 2024-07-26 22:58:47 -07:00
parent 60d7407abb
commit 8945386a8d
57 changed files with 1287 additions and 1108 deletions

6
.gitignore vendored
View File

@ -5,6 +5,7 @@
!*/
!etc/*.md
!etc/bash_history*
!etc/group*
!etc/gshadow*
@ -35,6 +36,7 @@
!etc/unbound/unbound.conf.d/pi-hole.conf
!etc/cups/*
!etc/containers/systemd/**/*
!etc/dnsmasq.d/*
!etc/fstab/*
!etc/netplan/*
@ -48,6 +50,10 @@
!opt/copy/**/*
!root/**/*container
!home/podman/**/*volume
!home/podman/**/*container
# !home/gogs/gogs/custom/conf/app.ini
# !home/syncthing/.config/syncthing/config.xml
# !usr/local/etc/gogs/conf/app.ini

View File

@ -0,0 +1,64 @@
cat /root/.ssh/authorized_keys
nano /root/.ssh/authorized_keys
exit
apt-get install podman -y
apt-cache rdepends podman-compose
apt-get install podman-compose -y
adduser podman sudo
sudo -iu podman
exit
timedatectl set-timezone America/Phoenix
date
apt-get update
apt upgrade -y
adduser pihole
adduser podman
adduser lphares
mkdir /home/lphares/.ssh
cp /root/.ssh/authorized_keys /home/lphares/.ssh/authorized_keys
chown lphares:lphares -R /home/lphares
#
adduser bmiller
mkdir /home/bmiller/.ssh
cp /root/.ssh/authorized_keys /home/bmiller/.ssh/authorized_keys
chown bmiller:bmiller -R /home/bmiller
adduser bmiller lphares
sudo usermod -a -G lphares bmiller
newgrp lphares
systemctl restart ssh
exit
reboot
sudo -iu podman
cd /run/user/0/
ls -la
cd ..
ls -la
cp -R 0/ 1002/
chown -R podman:podman /run/user/1002/
sudo -iu podman
rm -R /run/user/1002/
mkdir -p /run/user/1002/
sudo -iu podman
chown -R podman:podman /run/user/1002/
sudo -iu podman
exit
reboot
adduser pihole
cd /tmp
curl -sSL https://install.pi-hole.net | bash
pihole -a -p LhhI1S73VZhtP#WIG8Tk
exit
reboot
apt install unbound -y
nano /etc/unbound/unbound.conf.d/pi-hole.conf
service unbound restart
nano /etc/lighttpd/lighttpd.conf
pihole -up
service lighttpd restart
dig fail01.dnssec.works @127.0.0.1 -p 5335
dig dnssec.works @127.0.0.1 -p 5335
exit
reboot
ln -s /var/log/nginx /var/www/html/log-nginx
ls -la /var/www/html
exit

View File

@ -0,0 +1,167 @@
cat /root/.ssh/authorized_keys
nano /root/.ssh/authorized_keys
exit
apt-get install podman -y
apt-cache rdepends podman-compose
apt-get install podman-compose -y
adduser podman sudo
sudo -iu podman
exit
timedatectl set-timezone America/Phoenix
date
apt-get update
apt upgrade -y
adduser pihole
adduser podman
adduser lphares
mkdir /home/lphares/.ssh
cp /root/.ssh/authorized_keys /home/lphares/.ssh/authorized_keys
chown lphares:lphares -R /home/lphares
#
adduser bmiller
mkdir /home/bmiller/.ssh
cp /root/.ssh/authorized_keys /home/bmiller/.ssh/authorized_keys
chown bmiller:bmiller -R /home/bmiller
adduser bmiller lphares
sudo usermod -a -G lphares bmiller
newgrp lphares
systemctl restart ssh
exit
reboot
sudo -iu podman
cd /run/user/0/
ls -la
cd ..
ls -la
cp -R 0/ 1002/
chown -R podman:podman /run/user/1002/
sudo -iu podman
rm -R /run/user/1002/
mkdir -p /run/user/1002/
sudo -iu podman
chown -R podman:podman /run/user/1002/
sudo -iu podman
exit
reboot
adduser pihole
cd /tmp
curl -sSL https://install.pi-hole.net | bash
pihole -a -p LhhI1S73VZhtP#WIG8Tk
exit
reboot
apt install unbound -y
nano /etc/unbound/unbound.conf.d/pi-hole.conf
service unbound restart
nano /etc/lighttpd/lighttpd.conf
pihole -up
service lighttpd restart
dig fail01.dnssec.works @127.0.0.1 -p 5335
dig dnssec.works @127.0.0.1 -p 5335
exit
reboot
ln -s /var/log/nginx /var/www/html/log-nginx
ls -la /var/www/html
exit
cp ~/.bash_history /etc/bash_history_2024-07-25.txt
cat /etc/bash_history_2024-07-25.txt
exit
apt install smartmontools
smartctl -i /dev/sda
exit
reboot
apt install snapd -y
apt install sudo links unzip nginx git nano ncdu -y
snap install dashkiosk
apt install sudo links unzip nginx git nano ncdu -y
snap install ubuntu-frame wpe-webkit-mir-kiosk
snap set wpe-webkit-mir-kiosk daemon=true
snap set wpe-webkit-mir-kiosk url=http://localhost:9400/receiver
exit
wpe-webkit-mir-kiosk.cog http://localhost:9400/receiver
/snap/wpe-webkit-mir-kiosk/current/bin/setup.sh
wpe-webkit-mir-kiosk.cog http://localhost:9400/receiver
snap set ubuntu-frame daemon=true
exit
reboot
sudo -iu podman
chmod -R podman:podman /home/podman/.config/containers/systemd/
chown -R podman:podman /home/podman/.config/containers/systemd/
rm -R .config/containers/systemd/*.ignore
rm -R .config/containers/systemd/*.volume
rm -R .config/containers/systemd/*.container
rm -R /home/podman/.config/containers/systemd/*.ignore
rm -R /home/podman/.config/containers/systemd/*.volume
rm -R /home/podman/.config/containers/systemd/*.container
mkdir /home/podman/.ssh
cp /root/.ssh/authorized_keys /home/podman/.ssh/authorized_keys
chown podman:podman -R /home/podman
systemctl --user daemon-reload
sudo -iu podman
cd ~/
mkdir -p ~/.bashrc.d
echo "export XDG_RUNTIME_DIR=/run/user/$(id -u)" > ~/.bashrc.d/systemd
source ~/.bashrc.d/systemd
loginctl enable-linger 1002
podman ps -a
systemctl --user daemon-reload
podman image prune
podman volume prune
podman container prune
exit
shutdown now
ip a
lsof -i -P -n | grep LISTEN
apt-cache rdepends cockpit
apt install cockpit -y
passwd phares
apt-get install cockpit cockpit-podman -y
systemctl enable --now cockpit.socket
journalctl -fu unbound-resolvconf.service
ip a
service unbound restart
journalctl -fu unbound-resolvconf.service
nano /etc/sysctl.conf
sysctl -p
cat /proc/sys/net/ipv6/conf/all/disable_ipv6
exit
reboot
ip a
nano /etc/postfix/main.cf
nano /etc/default/grub
grub2-mkconfig
grub-mkconfig
exit
reboot
ip a
nmcli
dig pi-hole.net @127.0.0.1 -p 5335
dig fail01.dnssec.works @127.0.0.1 -p 5335
dig dnssec.works @127.0.0.1 -p 5335
systemctl is-active unbound-resolvconf.service
systemctl disable --now unbound-resolvconf.service
sed -Ei 's/^unbound_conf=/#unbound_conf=/' /etc/resolvconf.conf
service unbound restart
rm /etc/unbound/unbound.conf.d/resolvconf_resolvers.conf
systemctl disable --now unbound-resolvconf.service
exit
reboot\
reboot
sudo -iu podman
exit
reboot
sudo -iu podman
exit
reboot
sudo -iu podman
exit
shutdown now
lvs -a
lsblk -I 8 -o NAME,SIZE,TYPE,FSUSED,FSUSE%
smartctl -i /dev/sda
lsblk -I 8 -o NAME,SIZE,TYPE,FSUSED,FSUSE%
mkdir /mnt/usb
mount /dev/sdb2 /mnt/usb
exit
nginx -t
nginx -s reload
exit

547
etc/beelink.md Normal file
View File

@ -0,0 +1,547 @@
# Beelink
## Ubuntu and Docker End of July 2024
### Dashkiosk
### authorized_keys
```bash Thu Jul 25 2024 16:02:13 GMT-0700 (Mountain Standard Time)
sudo -i
cat /root/.ssh/authorized_keys
nano /root/.ssh/authorized_keys
```
```conf Thu Jul 25 2024 16:02:15 GMT-0700 (Mountain Standard Time)
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK7khmvc9lTWHALZ/IOMcJNz/1SuvrU53fI43v4PxIJN # 92532396 2023-12-27 mikep@DESKTOP-VP94SPB
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGq7b8qtm4fgNxW2lxm+qoxm8Wz7+1uhLQCg7xSlKYL1 # 92683771 2024-01-01 mikep@DESKTOP-BMV4V66
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJqLFW9kYMp9lOTXXYafxLiUADFFKeM5KDzt02E3XeRP # 92691378 2024-01-01 cphar@DESKTOP-KA0LMMJ
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBkS+uvWevmhYNos6MJKVi2auAx6x+xlyOnnvbdEstsz # 92797978 2024-01-03 mikep@DESKTOP-GNECR7R
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKf5fo63+vyYa8E06+HOPg24e9VMsvv/kBsHKGjBBUyz # 92895319 2024-01-05 mike@mike-B365M-DS3H
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGM6ANLH/4dS0YqlsCyoXkznzkgIsDqCTk3YX01XVWd8 # 92909911 2024-01-06 mikep@DESKTOP-H6JG91B
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOwAv1vrRA29GYL0DFSE8nmD5q3gL+vVtFSwhrXObDo4 # 93288214 2024-01-13 infineon\phares@ISCN5CG3256CPS
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH1xPVOisKwW6Xwo/Yh1kx2smt2HJU2/pRjTJf4KdGXo # 97267150 2024-03-28 mike@mike-Default-string
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEDz5WqCKsoaxV+LG/kufLNuiE9K3lqp+B3AqeKXGusX # 97442032 2024-04-01 lphar@DESKTOP-1238PEQ
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID09PhrjIpeNBmUeycnT3xKF8F5TcGL3ZKAvU9YdLj7H # 98747144 2024-04-25 mike@mike-Default-string
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIaZyTLpafZta8W8Uv6uWpgbbj4QL5dde9Rlf06PfdYc # 98824914 2024-04-26 k0308@DESKTOP-0UJ8DD5
# ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEFpucyp3FAHn8/9cXvfRegi09DkR6XLugoniEyQ9w0T # 98824924 2024-04-26 mille@DESKTOP-QL2HGEH
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOn/Ptg/k3rOJn32GxrL2J3Bazt/3M9fGWjzP+CW1qXK # 100900455 2024-06-03 lphares0907@penguin
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPMIxGucViHMHFXoM2VBPlvLvMgqNi2BUtTE/D4n778M # 101638217 2024-06-14 mikep@DESKTOP-VVO6KCM
```
```bash Thu Jul 25 2024 16:02:34 GMT-0700 (Mountain Standard Time)
exit
```
### Timezone
```bash Thu Jul 25 2024 16:10:06 GMT-0700 (Mountain Standard Time)
timedatectl set-timezone America/Phoenix
date
# Thu Jul 25 04:03:08 PM MST 2024
```
```bash Thu Jul 25 2024 16:10:08 GMT-0700 (Mountain Standard Time)
apt-get update
apt upgrade -y
```
### Create Users
```bash Thu Jul 25 2024 16:10:13 GMT-0700 (Mountain Standard Time)
adduser pihole
adduser podman
mkdir /home/podman/.ssh
cp /root/.ssh/authorized_keys /home/podman/.ssh/authorized_keys
chown podman:podman -R /home/podman
#
adduser lphares
#
mkdir /home/lphares/.ssh
cp /root/.ssh/authorized_keys /home/lphares/.ssh/authorized_keys
chown lphares:lphares -R /home/lphares
#
adduser bmiller
#
mkdir /home/bmiller/.ssh
cp /root/.ssh/authorized_keys /home/bmiller/.ssh/authorized_keys
chown bmiller:bmiller -R /home/bmiller
```
### Directory Share
```bash Mon Thu Jul 25 2024 16:10:17 GMT-0700 (Mountain Standard Time)
adduser bmiller lphares
sudo usermod -a -G lphares bmiller
newgrp lphares
systemctl restart ssh
```
### Podman Quadlet Rootless Example
```bash Thu Jul 25 2024 16:30:13 GMT-0700 (Mountain Standard Time)
apt-get install podman -y
apt-cache rdepends podman-compose
apt-get install podman-compose -y
# apt-get remove podman-compose -y
# apt-get purge podman-compose -y
adduser podman sudo
sudo -iu podman
loginctl enable-linger
# 2.
podman --version
# podman version 4.9.3
# https://www.redhat.com/sysadmin/quadlet-podman
cd ~/
mkdir -p .config/containers/systemd/
nano .config/containers/systemd/mysleep.container
```
```conf Thu Jul 25 2024 16:31:21 GMT-0700 (Mountain Standard Time)
[Unit]
Description=The sleep container
After=local-fs.target
[Container]
Image=registry.access.redhat.com/ubi9-minimal:latest
Exec=sleep 1000
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
```
```bash Thu Jul 25 2024 16:31:24 GMT-0700 (Mountain Standard Time)
exit
exit
reboot
```
```bash Thu Jul 25 2024 16:31:27 GMT-0700 (Mountain Standard Time)
# cp -R /run/user/0/ /run/user/1002/
mkdir -p /run/user/1002/
chown -R podman:podman /run/user/1002/
sudo -iu podman
cd ~/
mkdir -p ~/.bashrc.d
echo "export XDG_RUNTIME_DIR=/run/user/$(id -u)" > ~/.bashrc.d/systemd
source ~/.bashrc.d/systemd
loginctl enable-linger 1002
podman ps -a
systemctl --user daemon-reload
systemctl --user enable --now podman.socket
systemctl --user status mysleep.service
systemctl --user start mysleep.service
exit
reboot
```
### Pi-hole
```bash Thu Jul 25 2024 16:31:30 GMT-0700 (Mountain Standard Time)
# https://canyoublockit.com/
adduser pihole
cd /tmp
curl -sSL https://install.pi-hole.net | bash
pihole -a -p 484C889D0ED1EA4AB930AA518FE3B15E
exit
reboot
```
```bash Thu Jul 25 2024 16:35:27 GMT-0700 (Mountain Standard Time)
apt install unbound -y
# https://docs.pi-hole.net/guides/dns/unbound/
nano /etc/unbound/unbound.conf.d/pi-hole.conf
```
```conf Thu Jul 25 2024 16:35:36 GMT-0700 (Mountain Standard Time)
server:
# If no logfile is specified, syslog is used
# logfile: "/var/log/unbound/unbound.log"
verbosity: 0
interface: 127.0.0.1
port: 5335
do-ip4: yes
do-udp: yes
do-tcp: yes
# May be set to yes if you have IPv6 connectivity
do-ip6: no
# You want to leave this to no unless you have *native* IPv6. With 6to4 and
# Terredo tunnels your web browser should favor IPv4 for the same reasons
prefer-ip6: no
# Use this only when you downloaded the list of primary root servers!
# If you use the default dns-root-data package, unbound will find it automatically
#root-hints: "/var/lib/unbound/root.hints"
# Trust glue only if it is within the server's authority
harden-glue: yes
# Require DNSSEC data for trust-anchored zones, if such data is absent, the zone becomes BOGUS
harden-dnssec-stripped: yes
# Don't use Capitalization randomization as it known to cause DNSSEC issues sometimes
# see https://discourse.pi-hole.net/t/unbound-stubby-or-dnscrypt-proxy/9378 for further details
use-caps-for-id: no
# Reduce EDNS reassembly buffer size.
# IP fragmentation is unreliable on the Internet today, and can cause
# transmission failures when large DNS messages are sent via UDP. Even
# when fragmentation does work, it may not be secure; it is theoretically
# possible to spoof parts of a fragmented DNS message, without easy
# detection at the receiving end. Recently, there was an excellent study
# >>> Defragmenting DNS - Determining the optimal maximum UDP response size for DNS <<<
# by Axel Koolhaas, and Tjeerd Slokker (https://indico.dns-oarc.net/event/36/contributions/776/)
# in collaboration with NLnet Labs explored DNS using real world data from the
# the RIPE Atlas probes and the researchers suggested different values for
# IPv4 and IPv6 and in different scenarios. They advise that servers should
# be configured to limit DNS messages sent over UDP to a size that will not
# trigger fragmentation on typical network links. DNS servers can switch
# from UDP to TCP when a DNS response is too big to fit in this limited
# buffer size. This value has also been suggested in DNS Flag Day 2020.
edns-buffer-size: 1232
# Perform prefetching of close to expired message cache entries
# This only applies to domains that have been frequently queried
prefetch: yes
# One thread should be sufficient, can be increased on beefy machines. In reality for most users running on small networks or on a single machine, it should be unnecessary to seek performance enhancement by increasing num-threads above 1.
num-threads: 1
# Ensure kernel buffer is large enough to not lose messages in traffic spikes
so-rcvbuf: 1m
# Ensure privacy of local IP ranges
private-address: 192.168.0.0/16
private-address: 169.254.0.0/16
private-address: 172.16.0.0/12
private-address: 10.0.0.0/8
private-address: fd00::/8
private-address: fe80::/10
```
```bash Thu Jul 25 2024 16:37:29 GMT-0700 (Mountain Standard Time)
service unbound restart
nano /etc/lighttpd/lighttpd.conf
```
```conf Thu Jul 25 2024 16:37:31 GMT-0700 (Mountain Standard Time)
...
server.port = 8005
...
```
```bash Thu Jul 25 2024 16:37:32 GMT-0700 (Mountain Standard Time)
pihole -up
service lighttpd restart
dig fail01.dnssec.works @127.0.0.1 -p 5335
dig dnssec.works @127.0.0.1 -p 5335
exit
reboot
```
### Log
```bash Thu Jul 25 2024 16:44:00 GMT-0700 (Mountain Standard Time)
ln -s /var/log/nginx /var/www/html/log-nginx
ls -la /var/www/html
exit
```
```bash Thu Jul 25 2024 16:44:01 GMT-0700 (Mountain Standard Time)
cp ~/.bash_history /etc/bash_history_2024-07-25.txt
cat /etc/bash_history_2024-07-25.txt
exit
```
```bash Thu Jul 25 2024 16:44:03 GMT-0700 (Mountain Standard Time)
apt install smartmontools
smartctl -i /dev/sda
```
```yaml Thu Jul 25 2024 16:44:05 GMT-0700 (Mountain Standard Time)
Device Model: minisforum
Serial Number: L9MLCHC11280472
LU WWN Device Id: 0 000000 000000000
Firmware Version: SBFMJ1.3
User Capacity: 256,060,514,304 bytes [256 GB]
Sector Size: 512 bytes logical/physical
Rotation Rate: Solid State Device
Form Factor: M.2
TRIM Command: Available
Device is: Not in smartctl database 7.3/5528
ATA Version is: ACS-4 (minor revision not indicated)
SATA Version is: SATA 3.2, 6.0 Gb/s (current: 6.0 Gb/s)
Local Time is: Thu Jul 25 16:38:33 2024 MST
SMART support is: Available - device has SMART capability.
SMART support is: Enabled
```
### Dashkiosk
```bash Thu Jul 25 2024 16:44:07 GMT-0700 (Mountain Standard Time)
apt install snapd -y
apt install sudo links unzip nginx git nano ncdu -y
snap install dashkiosk
snap install ubuntu-frame wpe-webkit-mir-kiosk
snap set wpe-webkit-mir-kiosk daemon=true
snap set wpe-webkit-mir-kiosk url=http://localhost:9400/receiver
exit
```
```bash Thu Jul 25 2024 16:44:09 GMT-0700 (Mountain Standard Time)
wpe-webkit-mir-kiosk.cog http://localhost:9400/receiver
# ^C
/snap/wpe-webkit-mir-kiosk/current/bin/setup.sh
wpe-webkit-mir-kiosk.cog http://localhost:9400/receiver
# ^C
snap set ubuntu-frame daemon=true
exit
reboot
```
### Podman Verify
```bash Thu Jul 25 2024 16:58:14 GMT-0700 (Mountain Standard Time)
# apt install systemd-container -y
# machinectl --help
sudo -iu podman
cd ~/
mkdir -p ~/.bashrc.d
echo "export XDG_RUNTIME_DIR=/run/user/$(id -u)" > ~/.bashrc.d/systemd
source ~/.bashrc.d/systemd
loginctl enable-linger 1002
podman ps -a
systemctl --user daemon-reload
podman image prune
podman volume prune
podman container prune
chown -R podman:podman /home/podman/.config/containers/systemd/
rm -R /home/podman/.config/containers/systemd/*.ignore
rm -R /home/podman/.config/containers/systemd/*.volume
rm -R /home/podman/.config/containers/systemd/*.container
```
### Free File Sync - * Select - A
```bash Thu Jul 25 2024 16:49:21 GMT-0700 (Mountain Standard Time)
# sftp://root@free.file.sync.root/etc|chan=10|keyfile=C:\Users\phares\.ssh\id_ed25519
# L:\Git\Linux-Ubuntu-Server\etc
```
### Backup
- [x] Rescuezilla Fri Jul 26 2024 10:45:27 GMT-0700 (Mountain Standard Time)
### Restore
- [x] Rescuezilla Fri Jul 26 2024 10:45:27 GMT-0700 (Mountain Standard Time)
### Disable IPv6 (Move up next time!!!)
```bash Fri Jul 26 2024 10:45:27 GMT-0700 (Mountain Standard Time)
# https://intercom.help/privatevpn/en/articles/6440374-how-to-disable-ipv6-on-ubuntu-and-fedora-linux
nano /etc/sysctl.conf
```
```conf Fri Jul 26 2024 10:45:30 GMT-0700 (Mountain Standard Time)
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
```
```bash Fri Jul 26 2024 10:45:34 GMT-0700 (Mountain Standard Time)
sysctl -p
cat /proc/sys/net/ipv6/conf/all/disable_ipv6
exit
reboot
```
```bash Fri Jul 26 2024 10:45:38 GMT-0700 (Mountain Standard Time)
nano /etc/default/grub
```
### Fix Unbond (Move up next time!!!)
```conf Fri Jul 26 2024 10:45:41 GMT-0700 (Mountain Standard Time)
service unbound restart
# http://beelink:8005/admin/settings.php?tab=dns
# https://docs.pi-hole.net/guides/dns/unbound/#configure-pi-hole
127.0.0.1#5335
```
```bash Fri Jul 26 2024 10:45:44 GMT-0700 (Mountain Standard Time)
journalctl -fu unbound-resolvconf.service
systemctl disable --now unbound-resolvconf.service
service unbound restart
```
```conf Fri Jul 26 2024 10:45:47 GMT-0700 (Mountain Standard Time)
# https://opensource.com/article/22/8/disable-ipv6
# GRUB_CMDLINE_LINUX_DEFAULT=""
GRUB_CMDLINE_LINUX_DEFAULT="ipv6.disable=1 quiet splash"
# GRUB_CMDLINE_LINUX=""
GRUB_CMDLINE_LINUX="ipv6.disable=1"
```
```bash Fri Jul 26 2024 10:45:51 GMT-0700 (Mountain Standard Time)
grub-mkconfig
exit
reboot
```
### Cockpit (Move up next time!!!)
- [cockpit](https://cockpit-project.org/)
```bash Fri Jul 26 2024 10:45:55 GMT-0700 (Mountain Standard Time)
# apt install chkservice -y
apt-cache rdepends cockpit
apt install cockpit -y
passwd phares
# https://192.168.11.2:9090
# https://blog.while-true-do.io/podman-web-ui-via-cockpit/
apt-get install cockpit cockpit-podman -y
systemctl enable --now cockpit.socket
```
### Podman Create
```bash Fri Jul 26 2024 10:46:01 GMT-0700 (Mountain Standard Time)
# apt install systemd-container -y
# machinectl --help
sudo -iu podman
cd ~/
mkdir -p ~/.bashrc.d
echo "export XDG_RUNTIME_DIR=/run/user/$(id -u)" > ~/.bashrc.d/systemd
source ~/.bashrc.d/systemd
loginctl enable-linger 1002
podman ps -a
systemctl --user daemon-reload
podman pull docker.io/dpage/pgadmin4:latest
podman pull docker.io/gitea/act_runner:nightly
podman pull docker.io/gitea/gitea:latest
podman pull docker.io/library/postgres:13
podman pull docker.io/library/postgres:14
podman pull docker.io/library/postgres:16
podman pull docker.io/library/postgres:16
podman pull docker.io/library/redis:6.2-alpine@sha256:d6c2911ac51b289db208767581a5d154544f2b2fe4914ea5056443f62dc6e900
podman pull docker.io/odoo:15.0
podman pull docker.io/odoo:16.0
podman pull docker.io/vaultwarden/server:latest
podman pull ghcr.io/immich-app/immich-machine-learning:release
podman pull ghcr.io/immich-app/immich-server:release
```
```conf Fri Jul 26 2024 10:57:44 GMT-0700 (Mountain Standard Time)
# sftp://root@free.file.sync.root/home/podman|chan=10|keyfile=C:\Users\phares\.ssh\id_ed25519
# L:\Git\Linux-Ubuntu-Server\home\podman
```
```bash Fri Jul 26 2024 11:01:22 GMT-0700 (Mountain Standard Time)
# ... .service is transient or generated.
# WantedBy=multi-user.target
systemctl --user daemon-reload
systemctl --user start gitea-db
# systemctl --user start gitea-runner
systemctl --user start gitea-server
systemctl --user start immich-db
systemctl --user start immich-learning
systemctl --user start immich-redis
# systemctl --user start immich-server
systemctl --user start odoo-db
systemctl --user start odoo-server
systemctl --user start pgadmin
systemctl --user start vaultwarden-server
podman ps -a --sort names
smartctl -i /dev/sda
```
```yaml Fri Jul 26 2024 11:55:37 GMT-0700 (Mountain Standard Time)
Device Model: 256GB SSD
Serial Number: CM42ABH1005410
LU WWN Device Id: 5 3a5a27 0100c22ca
Firmware Version: SN11842
User Capacity: 256,060,514,304 bytes [256 GB]
Sector Size: 512 bytes logical/physical
Rotation Rate: Solid State Device
Form Factor: M.2
TRIM Command: Available, deterministic, zeroed
Device is: Not in smartctl database 7.3/5528
ATA Version is: ACS-4 (minor revision not indicated)
SATA Version is: SATA 3.2, 6.0 Gb/s (current: 6.0 Gb/s)
Local Time is: Fri Jul 26 11:55:12 2024 MST
SMART support is: Available - device has SMART capability.
SMART support is: Enabled
```
### Copy Slideshow Photos
```bash Fri Jul 26 2024 10:57:44 GMT-0700 (Mountain Standard Time)
lvs -a
lsblk -I 8 -o NAME,SIZE,TYPE,FSUSED,FSUSE%
mkdir /mnt/usb
mount /dev/sdb2 /mnt/usb
```
```conf Fri Jul 26 2024 10:57:44 GMT-0700 (Mountain Standard Time)
# cp -R ... ...
# sftp://root@free.file.sync.root/mnt/usb/var/www/html/Images-c9dbce3b-Results/thumbs|chan=10|keyfile=C:\Users\phares\.ssh\id_ed25519
# sftp://root@free.file.sync.root/var/www/html/Images-c9dbce3b-Results/thumbs|chan=10|keyfile=C:\Users\phares\.ssh\id_ed25519
```
```bash Fri Jul 26 2024 13:34:48 GMT-0700 (Mountain Standard Time)
OS-var-thumbs-server 7/26/2024 11:59:57 AM
Completed successfully
Warnings: 2
Items processed: 156,388 (35.7 GB)
Total time: 01:31:53
Errors and warnings:
11:59:58 AM Warning: The following folders do not yet exist:
sftp://root@free.file.sync.root/var/www/html/Images-c9dbce3b-Results/thumbs
The folders are created automatically when needed.
12:00:30 PM Warning: Database file is not av
```
```bash Fri Jul 26 2024 13:34:52 GMT-0700 (Mountain Standard Time)
umount /mnt/usb
# http://192.168.11.2:9400/admin
# Slideshow
# http://127.0.0.1:8080/slideshow/index.html?nocache=2024-07-01-11-36
# Local
# http://192.168.11.2:8080/slideshow/index.html?nocache=2024-07-01-11-36
```
### Free File Sync - * Select - B
```conf Fri Jul 26 2024 13:34:57 GMT-0700 (Mountain Standard Time)
# sftp://root@free.file.sync.root/etc|chan=10|keyfile=C:\Users\phares\.ssh\id_ed25519
# L:\Git\Linux-Ubuntu-Server\etc
```
```bash Fri Jul 26 2024 13:35:01 GMT-0700 (Mountain Standard Time)
cp ~/.bash_history /etc/bash_history_2024-07-26.txt
exit
reboot
```
### Backup
- [x] Rescuezilla Fri Jul 26 2024 13:35:04 GMT-0700 (Mountain Standard Time)
## Common
```bash
nginx -t
nginx -s reload
cat /etc/crontab
systemctl restart nginx
cat /etc/ssh/sshd_config
lsof -i -P -n | grep LISTEN
netstat -tulpn | grep LISTEN
lsblk -I 8 -o NAME,SIZE,TYPE,FSUSED,FSUSE%
(echo >/dev/tcp/localhost/5433) &>/dev/null && echo "Open 5433" || echo "Close 5433"
```

View File

@ -18,7 +18,7 @@ voice:x:22:
cdrom:x:24:phares
floppy:x:25:
tape:x:26:
sudo:x:27:phares
sudo:x:27:phares,podman
audio:x:29:
dip:x:30:phares
www-data:x:33:
@ -34,7 +34,7 @@ sasl:x:45:
plugdev:x:46:phares
staff:x:50:
games:x:60:
users:x:100:lphares,bmiller
users:x:100:pihole,podman,lphares,bmiller
nogroup:x:65534:
systemd-journal:x:999:
systemd-network:x:998:
@ -58,8 +58,10 @@ landscape:x:109:
fwupd-refresh:x:989:
netdev:x:110:
phares:x:1000:
lphares:x:1001:bmiller
bmiller:x:1002:
pihole:x:1003:www-data
pihole:x:1001:www-data
podman:x:1002:
lphares:x:1003:bmiller
bmiller:x:1004:
unbound:x:111:
docker:x:988:
cockpit-ws:x:112:
cockpit-wsinstance:x:113:

View File

@ -18,7 +18,7 @@ voice:x:22:
cdrom:x:24:phares
floppy:x:25:
tape:x:26:
sudo:x:27:phares
sudo:x:27:phares,podman
audio:x:29:
dip:x:30:phares
www-data:x:33:
@ -34,7 +34,7 @@ sasl:x:45:
plugdev:x:46:phares
staff:x:50:
games:x:60:
users:x:100:lphares,bmiller
users:x:100:pihole,podman,lphares,bmiller
nogroup:x:65534:
systemd-journal:x:999:
systemd-network:x:998:
@ -58,7 +58,9 @@ landscape:x:109:
fwupd-refresh:x:989:
netdev:x:110:
phares:x:1000:
lphares:x:1001:bmiller
bmiller:x:1002:
pihole:x:1003:www-data
pihole:x:1001:www-data
podman:x:1002:
lphares:x:1003:bmiller
bmiller:x:1004:
unbound:x:111:
cockpit-ws:x:112:

View File

@ -18,7 +18,7 @@ voice:*::
cdrom:*::phares
floppy:*::
tape:*::
sudo:*::phares
sudo:*::phares,podman
audio:*::
dip:*::phares
www-data:*::
@ -34,7 +34,7 @@ sasl:*::
plugdev:*::phares
staff:*::
games:*::
users:*::lphares,bmiller
users:*::pihole,podman,lphares,bmiller
nogroup:*::
systemd-journal:!*::
systemd-network:!*::
@ -58,8 +58,10 @@ landscape:!::
fwupd-refresh:!*::
netdev:!::
phares:!::
pihole:!::www-data
podman:!::
lphares:!::bmiller
bmiller:!::
pihole:!::www-data
unbound:!::
docker:!::
cockpit-ws:!::
cockpit-wsinstance:!::

View File

@ -18,7 +18,7 @@ voice:*::
cdrom:*::phares
floppy:*::
tape:*::
sudo:*::phares
sudo:*::phares,podman
audio:*::
dip:*::phares
www-data:*::
@ -34,7 +34,7 @@ sasl:*::
plugdev:*::phares
staff:*::
games:*::
users:*::lphares,bmiller
users:*::pihole,podman,lphares,bmiller
nogroup:*::
systemd-journal:!*::
systemd-network:!*::
@ -58,7 +58,9 @@ landscape:!::
fwupd-refresh:!*::
netdev:!::
phares:!::
pihole:!::www-data
podman:!::
lphares:!::bmiller
bmiller:!::
pihole:!::www-data
unbound:!::
cockpit-ws:!::

View File

@ -0,0 +1 @@
{"creation_dt": "2024-07-26T23:59:42Z", "creation_host": "beelink"}

View File

@ -0,0 +1 @@
{"n": "kg7W5UNvS0rGXFBdlzyOJwwsbJMDICMbV5uZNmBg57SbPycsdhY7q6E5i0Uwgnm9BZCym_x8xHSJm8h6YBOqaWfgtI3akPUUREaW4lfUK73cQqrSkcuytVKzaz1fPnzNX0uoxa5Z3llc7BuFbUWknWPB1TFBJH7puneKIAsq6RkNJ_ykT49mzK6QB_mIjnDDJfv066Hs5-y2k7Mve4ntBgxOCO2sGBuiYOad2CX8pn0bRBLg3_JoAMc2eozDeXPbSqTfta5iCiV_39942RhWctDsk2May-EhQDWBRsjTKDnQ--OkYNA23unqq8Hwjb7ROSP1Mo4ryD7epS0lkAuyLw", "e": "AQAB", "d": "RR9HvT2CRMAxCwwvlrzeS4V72rBieMmUjYwCYNUQ12dGV4drB8Gy4KFO8eW_NOh3N9SIasD9rR236CVjIOf9rJ-wHNLLXvHVcxBzJNBLBZ53SIe-WqhrvVtTtQtY8WIv8YsFZvSB7HQfQVAu_41HgALvfz0tpp93brtW_545YxG0fNnuK3WZEJnIF30a99gWFVK2iSVPCUK-SpJ5Pa2w1rGhB_IdT2WxXJ-1Ni-emxCeFYcaUs7D7luxFEFkbJce_bvsJJ92CdRUIRDnJymVtb1WBaWL5nbkp9KlUNdF1-FJjfS1FN5a1ESQcW0yg4nR4-AN6gENPp6i6hYTFw-c4Q", "p": "wiCyc7wN7fNPgB9ghgP5vd-RO5_F4qKn5K0KaCCOitev1nhVNzbBWmB_heaA7FEOi_B0Roa2PFaDnuQanL21wGLL3FrzpNDLj7y-jAhddrpcWJZOL28dkH1bRWVc3SyyVc7muMG3UcpjuA5wROSaUo07k2Bx2yga0zqh_Tt6ceE", "q": "wJwGxGz-qudJ6wgpyufXXYO4dvXpncjNIAFjMglKB_EWntDY3CQdVx21LmddJAWfaef9o9ZTrQHlI4NTbbzuSFx9Kx4MvZ4pcT8FxI5lLr21aR0ZIfQtBVq_SJ3SPO6fhGS0OkfnNneUodHlPCTZjcQv6ZfpkVOl_orWUx8rxg8", "dp": "ja7_j6i5IeprI7_C2w0bwZDilvnNm9PLZgNFjSn13y_8jjPVPi8gf63eZQQPJGskis48jxzS9MbP2a_yHGP5uyhgUbuwB6K8b3P6Pon8bSMnMyDAf65Bjatwaf3JAUQWBLq1ejMocPGeRj-DgUS3vXiG-tFxR9rFeVVZ1VEL1QE", "dq": "nXWwjppwY7Url8oXqHp6dUe-4PnO36AOGZEoz_oKHI36qSEheG7J0bNGsEXuPgmetkzi7TLFO1WmRq8Q4FCpGF0MMtihH88SIDOeen7StzB-Aw23wytENvFmGW50Nj36CJjvQgoQOlXuG5BlcBLMC_qMFNKNLrKMrJhB-Lgt5_8", "qi": "WlkLosxwdKVCthqALtlpQnU8sc6uh1AiRFhKw-YBZLNP0HxMJDS0llTk517Gx8gS2bbxZ0XwqIASxFiskSGR6JsmO2kAeuCvaRvfXuTZZ_2b0-wJEiqU11uphr5Sd_USPyeuoyeaYexoip7s0Gxv1gn4Z7Xo-E1y46obtOAHRyI", "kty": "RSA"}

View File

@ -0,0 +1 @@
{"body": {}, "uri": "https://acme-staging-v02.api.letsencrypt.org/acme/acct/157190503"}

View File

@ -0,0 +1 @@
{"creation_dt": "2024-07-26T23:57:42Z", "creation_host": "beelink"}

View File

@ -0,0 +1 @@
{"n": "vVHUQWRpoK-vQwGXGnZM2ArwzgHYVRege39FvhwWeMZS1MaQIrGYfm8I9IIRPHCPTysK2CccuQLWDZBDeDSrljq68VLVd7Q5Mx7C0TD6bzRHe70Ur8FKDtGh-d2aAjdVevOUgcykJ0pmKWYBmDbAqeKPt7bFoqeXi-oMvjz6NHF_2weiDyC-Exv8galKoK7uJbcAL1KJt39gAUzkOuDAMhb_e2s8QCnBHmP-yhyAWz5Q_A2dKuI4Focg5PmKBPh_XerYnXCeoMaNSySiaS4NorIYpLBU65P9qebfXj4SMfIS1QtU6e8pnUqTWs57ApMSWeYxvNry74YE0fiHFq4mHw", "e": "AQAB", "d": "BeO4TFOhALvGa4K3zAcE1Im8Tr_DhC4oxSNVY5jcPK0vWFcPBGi2ZIkZCJPx2cCQGnebQEIMX-npdYWPm8RajnNfInDpLUoTOb_TBGpFAVWgO2HdrD0vMIqvZzi-HW0bHE1u21lwDr0jj8Nr-NlU6lQMyzuFAgVyQPiqydGyzhPM9Qa0_ikHJz_xsB9xPJz4dnP2PLOy5wu_oyPzS49n9tifa8vHahX-fDoS_1GGuAO2QJL6iqgbgAozmCBNs-3lFEZrN0sw3ko4MR4PEsJLoap8g6z3DsY5-jq1aj029v4AOx6VWD462lSRhtoUDtHPduWEUEDsvBzrhs9jpEKYBQ", "p": "7JPbQoYU97li3KH4AVOF2A_auQzP-A0CH6UWuem_Mb7pTA3HAGxVwt3K6Jw_RZlWoiKyumUsEOaZjDsOj8leK5LuWDmLjX3hcuNHAI3EosdylPR-cDEXGA_kewSGDhy819tIs_96vMXus0F7BRcuIen4Dml-fs94LBzDKKnPRMU", "q": "zNzBHtLCTEHs4g0z0drzenvx_Fce7dhBs5_zqzeLXBpCNxNMonwbBu70k8-25EPWaUbtIKZdRzuNqL-1D1ECJDx9Amdp0EaEdxxuubpzFOvKORv304XeDXJ1vbt7UxUBhf6ZksaUJSm6-1tFMYRylMaXHzza6G3ym7nkUFNHlZM", "dp": "WptWQmgv56Cs6GtwHLHsdQ-9aVoq3Im-4nfL-HA5tce0elhceyECx5tCd0xAP1u2_mnjodfVxLNM45RKw28QDgg8GLYRaKNpkSLSsreZM-7HR51MP5PNkH2luCfHX-hJuzJU6ftS-rGj9iuB_jUeigad5oco11CGaf6qEAzZQTE", "dq": "c5ImMKldi3-1j2E7Vqxap0K9JUpQ3IUfq6IU6QaQzAjUShXNKe0xohu0Rp7r4qKSlxm4NK8l-EWUiRp9noJo3uBw9aY5f5IIsNmNbaHNwoOytb0ddmsS9isYWU0nKZu0HE-he7gjY0PDhIHc6y55JY1TPAAszusX7tpT_yy8OZk", "qi": "QjLmAyTADP4e2MlXLLfJDejLnIfHutxHUfNBt3WYvC2jvX0AH8nJcgccCVOchDFjAK0m5wzBQ2SJH4CAIzgNqkyKc42vLNGEF6wK6k8tdw0BVDveWl5O2-0D8NEgb8WrLhWZBFAOPWASvrsVfl9VetbThYqfmNCaPTW9w3tP4zk", "kty": "RSA"}

View File

@ -1 +1 @@
{"body": {}, "uri": "https://acme-v02.api.letsencrypt.org/acme/acct/1851917407"}
{"body": {}, "uri": "https://acme-v02.api.letsencrypt.org/acme/acct/1859007847"}

View File

@ -1 +0,0 @@
{"creation_dt": "2024-07-23T03:12:26Z", "creation_host": "beelink"}

View File

@ -1 +0,0 @@
{"n": "tAPgSFbPGk0aKdR6Pk3_0WtZ_YpjzRewuttzTYHV8m8adMKO771G-NMB_zSoYoWmia9s1tj1KCF3P6bnVQzDE3ZbBLfeIFT9GaGCczgDpMEWs6rkWfLWGW93IQoqEJi0f2ati7UpaO22NcnhFAkDmIAyLmaYxv_cTAgeGurv690379v_I38b0rxfw8woGlpfYzvyY_UiEbeK4sMT_TnZdq6ZYgcu770d_ZgYlhstK_jKnumH9G2zaxs3kgRKSoyd8LT4VmLHTIoVT8eEFT1abz0bFo2uPWOaxHtQjI2S8qLEQI9Tg15oy86SPDYDGpY6HfgshasA3Pm9-IUV0tgTUQ", "e": "AQAB", "d": "BzTTX9aNPMMFHvfaJF3OI3kPIQyF7fxaqGcsT3y4-ATcCGn9e7oD58HXSnsj4xgVvjhxc2fODBHwnN-SCcTdvA0B5TUsRWSBmVilbYjM8kEUlNxso4JTldVe7aU-qiKQjPLPwb3euF56oroJMn8hT5O1pWviT0GsUG5P51usEbDu81t_ZZcn-I21Xe9jQ0mXgNsS1c18cX6AlDMhAlm7aw6gUJdrnScbsWp0sDXQiOzLEvwm1ahjUioEtiQiHH1Jsa2mYcuGhrWyrx-Xlt0TzfslocDbHbgCFKPG6XLVBoBbWb4Hm3LBA7naiuEY_Dmi-wQUtHiJELwSu4opkNRxTQ", "p": "23HhNDzEaiDI2Gg2-qxkOBEwCDT9b1If_vGSwZRzPLJBijy5ypXDdtQifCgLEeNJm0CJSa9SZcZjyDZH7_JLFO8bWl1pjf095SwJBK3Sc9uavr8jl-OzAbX_fHOL1ZHhvI4vsBQhc6dsushPFCFEcvaokTuxf8QZUEP0DLsNWh8", "q": "0gCJICISX_tcQ7AmbBsJDFTnU2IqAo3nG0UXcoHSlMXJqkkjjaPo7TZ-CMlt8dbSZX1ekvw8nvRq7tHuNDqflNgfyxt0gHnjkzYowlM83OIMiMbMtV9ddDwYCYGJYZTPEVVRW6s6A1Pb97-2b-IiDoQkdohC0NxhIPM4ntw9xI8", "dp": "ovJ0320gDA48FTWNXaYU5O0eVXnDlp3M1GpdJxEsYK2crSFadGuwsNPkp3y0e5viVD4fs29UJ-6guVJVKH8p8Wl7TiZ-8shQ5ZNFPwwzcYRn66vSqj6R9XtHMwo5k2S30mNiVcUc8dwoiKmkzrXFNfiiQvWoX88lXMyYs7tRttE", "dq": "iNJWhTwNssFnZKaA8hBPPdyXdulPK6jeuRKz6yQQWmPfN3Y9lswDN4I4bWsnmdh37i3Xj3aN3JaskPrv8tF7JZQGauNxbT98-W3g8nMfBV62AetcdpMypUd99buTEaPNoaJvxoTx4Dcj6u_aYlz4CXMo-p1ewwvCsKAm9MHT8Js", "qi": "2VGQ-m3oki_uXi85Ybs2u-4tDFVL7zsFaoYtV8-KMOaYyd6N65kk2EjFWSXRa6BnAqoELEXr0cV9ZJA2YF4WfLQwCLVntyDN6Dc0_9du81qIc_efqBcY_fNMeEfDKAOZkslKIKkWJaP9CAXyI4_ctHdiAhFowdSCcBFTZ2O9xa4", "kty": "RSA"}

View File

@ -1,25 +1,25 @@
-----BEGIN CERTIFICATE-----
MIIEJzCCA6ygAwIBAgISBLIxc7whwwFJZCApth1UIMcPMAoGCCqGSM49BAMDMDIx
MIIEODCCA7+gAwIBAgISBGZxfACT5sYQIkJkWF9VAy3IMAoGCCqGSM49BAMDMDIx
CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJF
NjAeFw0yNDA3MjMwMjEzNDhaFw0yNDEwMjEwMjEzNDdaMB4xHDAaBgNVBAMTE3Bo
YXJlczM3NTcuZGRucy5uZXQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQdWJOW
xYvavXWc5dOMmGpaToaiTDrDRZ+oKsywjM/KChalQiNYLUSJqp3Dh5eoRfQ09K50
X6yJOJaQxb0FFzC1o4ICtDCCArAwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQWMBQG
CCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBScvr8E
AeV+Re+aj/j90JHjpLTkuzAfBgNVHSMEGDAWgBSTJ0aYA6lRaI6Y1sRCSNsjv1iU
NjAeFw0yNDA3MjYyMjU4NTlaFw0yNDEwMjQyMjU4NThaMB4xHDAaBgNVBAMTE3Bo
YXJlczM3NTcuZGRucy5uZXQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATHcFj7
f+XaIh7OYwyOptBrsxy3WB8KGoc9O2XSSnzeXioX1ddrGnDomfJgM0OZFxsTZ+8O
Qr8NnFfTcSO4xbb+o4ICxzCCAsMwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQWMBQG
CCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBR5wkKi
B1hhrOLCQDYCn6MshPzG/TAfBgNVHSMEGDAWgBSTJ0aYA6lRaI6Y1sRCSNsjv1iU
0jBVBggrBgEFBQcBAQRJMEcwIQYIKwYBBQUHMAGGFWh0dHA6Ly9lNi5vLmxlbmNy
Lm9yZzAiBggrBgEFBQcwAoYWaHR0cDovL2U2LmkubGVuY3Iub3JnLzCBuwYDVR0R
BIGzMIGwgg9hZmZpcm0uZGRucy5uZXSCGGdpdGVhLnBoYXJlcy5kdWNrZG5zLm9y
Lm9yZzAiBggrBgEFBQcwAoYWaHR0cDovL2U2LmkubGVuY3Iub3JnLzCBzwYDVR0R
BIHHMIHEgg9hZmZpcm0uZGRucy5uZXSCGGdpdGVhLnBoYXJlcy5kdWNrZG5zLm9y
Z4IZaW1taWNoLnBoYXJlcy5kdWNrZG5zLm9yZ4IYbXVzaWMucGhhcmVzLmR1Y2tk
bnMub3JnghNwaGFyZXMzNzU3LmRkbnMubmV0ghlxdWFydHoucGhhcmVzLmR1Y2tk
bnMub3Jngh52YXVsdHdhcmRlbi5waGFyZXMuZHVja2Rucy5vcmcwEwYDVR0gBAww
CjAIBgZngQwBAgEwggEFBgorBgEEAdZ5AgQCBIH2BIHzAPEAdwBIsONr2qZHNA/l
agL6nTDrHFIBy1bdLIHZu7+rOdiEcwAAAZDdlLOuAAAEAwBIMEYCIQCpfmsaNEgL
DrwdkCY+7NbJayn43Gv4sUjD4arZVJntKgIhAOyuzAjCF9JGPMSqNlSXd83zX89g
eKGUwLWltfIe+zVbAHYAPxdLT9ciR1iUHWUchL4NEu2QN38fhWrrwb8ohez4ZG4A
AAGQ3ZSztwAABAMARzBFAiEA4JYbBt/ZdGzUZk4evX3alv6QobD5D0An4NG0vF5G
JkQCICph30m0Ev4uFpVvLUx1CJlTR/gJMJ+0U8RbE95c9xyYMAoGCCqGSM49BAMD
A2kAMGYCMQCy5ix9dZALkOcFIWoAI0t2UzXBho7+eRGsXfIiLrBfYw8eDALPAglI
glGBo2OmRs8CMQCXQwaoJRZG2IPnVS+0JMDZq2PzhyyV9Tycj77wRrYHwOHWwyA6
9UaLhFqUL/sVdwo=
bnMub3JnghJwaGFyZXMuZHVja2Rucy5vcmeCE3BoYXJlczM3NTcuZGRucy5uZXSC
GXF1YXJ0ei5waGFyZXMuZHVja2Rucy5vcmeCHnZhdWx0d2FyZGVuLnBoYXJlcy5k
dWNrZG5zLm9yZzATBgNVHSAEDDAKMAgGBmeBDAECATCCAQQGCisGAQQB1nkCBAIE
gfUEgfIA8AB2AN/hVuuqBa+1nA+GcY2owDJOrlbZbqf1pWoB0cE7vlJcAAABkPF7
y3UAAAQDAEcwRQIgfqj7WK/yXqpQHgKdhsKjFbhWA/6wFy4iMd6m1WeA7XsCIQDX
ASLBpFnKw3Mg37Jqo4Y5teLOB1pSyFPwR+k9lwrAIgB2AEiw42vapkc0D+VqAvqd
MOscUgHLVt0sgdm7v6s52IRzAAABkPF7yqEAAAQDAEcwRQIhAMswhuEwLAuW4652
GSH14dozTPJhZqrZ/FPbuv9zO/j2AiAbisEfkTkP8D/gnfhzDOTMoHKNtQ7jlmV5
Vs7GWzeqZzAKBggqhkjOPQQDAwNnADBkAjAv5mUnVLx+WJKepoOeQi+qQLLPvkdx
spMeWv6NF+guT5vqRpeMJvMrxnMAl7SrUE4CMFMVjNtzVRmBTRI1QhVJ/RJBGgdU
Qslpc60kIftFxNqqw3n/OUle+OBKX71YyGFC/g==
-----END CERTIFICATE-----

View File

@ -1,27 +1,27 @@
-----BEGIN CERTIFICATE-----
MIIEJzCCA6ygAwIBAgISBLIxc7whwwFJZCApth1UIMcPMAoGCCqGSM49BAMDMDIx
MIIEODCCA7+gAwIBAgISBGZxfACT5sYQIkJkWF9VAy3IMAoGCCqGSM49BAMDMDIx
CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJF
NjAeFw0yNDA3MjMwMjEzNDhaFw0yNDEwMjEwMjEzNDdaMB4xHDAaBgNVBAMTE3Bo
YXJlczM3NTcuZGRucy5uZXQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQdWJOW
xYvavXWc5dOMmGpaToaiTDrDRZ+oKsywjM/KChalQiNYLUSJqp3Dh5eoRfQ09K50
X6yJOJaQxb0FFzC1o4ICtDCCArAwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQWMBQG
CCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBScvr8E
AeV+Re+aj/j90JHjpLTkuzAfBgNVHSMEGDAWgBSTJ0aYA6lRaI6Y1sRCSNsjv1iU
NjAeFw0yNDA3MjYyMjU4NTlaFw0yNDEwMjQyMjU4NThaMB4xHDAaBgNVBAMTE3Bo
YXJlczM3NTcuZGRucy5uZXQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATHcFj7
f+XaIh7OYwyOptBrsxy3WB8KGoc9O2XSSnzeXioX1ddrGnDomfJgM0OZFxsTZ+8O
Qr8NnFfTcSO4xbb+o4ICxzCCAsMwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQWMBQG
CCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBR5wkKi
B1hhrOLCQDYCn6MshPzG/TAfBgNVHSMEGDAWgBSTJ0aYA6lRaI6Y1sRCSNsjv1iU
0jBVBggrBgEFBQcBAQRJMEcwIQYIKwYBBQUHMAGGFWh0dHA6Ly9lNi5vLmxlbmNy
Lm9yZzAiBggrBgEFBQcwAoYWaHR0cDovL2U2LmkubGVuY3Iub3JnLzCBuwYDVR0R
BIGzMIGwgg9hZmZpcm0uZGRucy5uZXSCGGdpdGVhLnBoYXJlcy5kdWNrZG5zLm9y
Lm9yZzAiBggrBgEFBQcwAoYWaHR0cDovL2U2LmkubGVuY3Iub3JnLzCBzwYDVR0R
BIHHMIHEgg9hZmZpcm0uZGRucy5uZXSCGGdpdGVhLnBoYXJlcy5kdWNrZG5zLm9y
Z4IZaW1taWNoLnBoYXJlcy5kdWNrZG5zLm9yZ4IYbXVzaWMucGhhcmVzLmR1Y2tk
bnMub3JnghNwaGFyZXMzNzU3LmRkbnMubmV0ghlxdWFydHoucGhhcmVzLmR1Y2tk
bnMub3Jngh52YXVsdHdhcmRlbi5waGFyZXMuZHVja2Rucy5vcmcwEwYDVR0gBAww
CjAIBgZngQwBAgEwggEFBgorBgEEAdZ5AgQCBIH2BIHzAPEAdwBIsONr2qZHNA/l
agL6nTDrHFIBy1bdLIHZu7+rOdiEcwAAAZDdlLOuAAAEAwBIMEYCIQCpfmsaNEgL
DrwdkCY+7NbJayn43Gv4sUjD4arZVJntKgIhAOyuzAjCF9JGPMSqNlSXd83zX89g
eKGUwLWltfIe+zVbAHYAPxdLT9ciR1iUHWUchL4NEu2QN38fhWrrwb8ohez4ZG4A
AAGQ3ZSztwAABAMARzBFAiEA4JYbBt/ZdGzUZk4evX3alv6QobD5D0An4NG0vF5G
JkQCICph30m0Ev4uFpVvLUx1CJlTR/gJMJ+0U8RbE95c9xyYMAoGCCqGSM49BAMD
A2kAMGYCMQCy5ix9dZALkOcFIWoAI0t2UzXBho7+eRGsXfIiLrBfYw8eDALPAglI
glGBo2OmRs8CMQCXQwaoJRZG2IPnVS+0JMDZq2PzhyyV9Tycj77wRrYHwOHWwyA6
9UaLhFqUL/sVdwo=
bnMub3JnghJwaGFyZXMuZHVja2Rucy5vcmeCE3BoYXJlczM3NTcuZGRucy5uZXSC
GXF1YXJ0ei5waGFyZXMuZHVja2Rucy5vcmeCHnZhdWx0d2FyZGVuLnBoYXJlcy5k
dWNrZG5zLm9yZzATBgNVHSAEDDAKMAgGBmeBDAECATCCAQQGCisGAQQB1nkCBAIE
gfUEgfIA8AB2AN/hVuuqBa+1nA+GcY2owDJOrlbZbqf1pWoB0cE7vlJcAAABkPF7
y3UAAAQDAEcwRQIgfqj7WK/yXqpQHgKdhsKjFbhWA/6wFy4iMd6m1WeA7XsCIQDX
ASLBpFnKw3Mg37Jqo4Y5teLOB1pSyFPwR+k9lwrAIgB2AEiw42vapkc0D+VqAvqd
MOscUgHLVt0sgdm7v6s52IRzAAABkPF7yqEAAAQDAEcwRQIhAMswhuEwLAuW4652
GSH14dozTPJhZqrZ/FPbuv9zO/j2AiAbisEfkTkP8D/gnfhzDOTMoHKNtQ7jlmV5
Vs7GWzeqZzAKBggqhkjOPQQDAwNnADBkAjAv5mUnVLx+WJKepoOeQi+qQLLPvkdx
spMeWv6NF+guT5vqRpeMJvMrxnMAl7SrUE4CMFMVjNtzVRmBTRI1QhVJ/RJBGgdU
Qslpc60kIftFxNqqw3n/OUle+OBKX71YyGFC/g==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEVzCCAj+gAwIBAgIRALBXPpFzlydw27SHyzpFKzgwDQYJKoZIhvcNAQELBQAw

View File

@ -1,5 +1,5 @@
-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgWJ4x7Qxn+VXJXETc
HorPVTm5TMqwASdTvWnGchZodiWhRANCAAQdWJOWxYvavXWc5dOMmGpaToaiTDrD
RZ+oKsywjM/KChalQiNYLUSJqp3Dh5eoRfQ09K50X6yJOJaQxb0FFzC1
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgl3/wvigSQRZX75ZL
c/kcKHbCnM4Jees44nHIhikPm26hRANCAATHcFj7f+XaIh7OYwyOptBrsxy3WB8K
Goc9O2XSSnzeXioX1ddrGnDomfJgM0OZFxsTZ+8OQr8NnFfTcSO4xbb+
-----END PRIVATE KEY-----

5
etc/letsencrypt/cli.ini Normal file
View File

@ -0,0 +1,5 @@
# Because we are using logrotate for greater flexibility, disable the
# internal certbot logrotation.
max-log-backups = 0
# Adjust interactive output regarding automated renewal
preconfigured-renewal = True

View File

@ -8,7 +8,7 @@ fullchain = /etc/letsencrypt/live/phares3757.ddns.net/fullchain.pem
# Options used in the renewal process
[renewalparams]
account = 805d2f39f581c9a2a417db3d91bfc764
account = 0dc29e2da338706a1a356c4f2ef0c15b
authenticator = nginx
installer = nginx
server = https://acme-v02.api.letsencrypt.org/directory

View File

@ -13,7 +13,7 @@ include /etc/nginx/include/affirm.conf;
# include /etc/nginx/include/casa.conf;
include /etc/nginx/include/codeserver.conf;
include /etc/nginx/include/dashkiosk.conf;
include /etc/nginx/include/dockge.conf;
# include /etc/nginx/include/dockge.conf;
# include /etc/nginx/include/docmost.conf;
# include /etc/nginx/include/emby.conf;
# include /etc/nginx/include/filebrowser.conf;
@ -28,6 +28,7 @@ include /etc/nginx/include/music.conf;
# include /etc/nginx/include/nextcloud.conf;
# include /etc/nginx/include/owncast.conf;
include /etc/nginx/include/phares.conf;
include /etc/nginx/include/pgadmin.conf;
# include /etc/nginx/include/photoprism.conf;
# include /etc/nginx/include/pihole.conf;
# include /etc/nginx/include/proxmox.conf;

View File

@ -32,7 +32,11 @@ fwupd-refresh:x:989:989:Firmware update daemon:/var/lib/fwupd:/usr/sbin/nologin
usbmux:x:108:46:usbmux daemon,,,:/var/lib/usbmux:/usr/sbin/nologin
sshd:x:109:65534::/run/sshd:/usr/sbin/nologin
phares:x:1000:1000:Mike Phares:/home/phares:/bin/bash
lphares:x:1001:1001:Logan Phares,,,:/home/lphares:/bin/bash
bmiller:x:1002:1002:Bill Miller,,,:/home/bmiller:/bin/bash
pihole:x:999:1003::/home/pihole:/usr/sbin/nologin
pihole:x:1001:1001:Pi-hole,,,:/home/pihole:/bin/bash
podman:x:1002:1002:Podman,,,:/home/podman:/bin/bash
lphares:x:1003:1003:Logan Phares,,,:/home/lphares:/bin/bash
bmiller:x:1004:1004:Bill Miller,,,:/home/bmiller:/bin/bash
unbound:x:110:111::/var/lib/unbound:/usr/sbin/nologin
dnsmasq:x:999:65534:dnsmasq:/var/lib/misc:/usr/sbin/nologin
cockpit-ws:x:111:112::/nonexistent:/usr/sbin/nologin
cockpit-wsinstance:x:112:113::/nonexistent:/usr/sbin/nologin

View File

@ -32,6 +32,10 @@ fwupd-refresh:x:989:989:Firmware update daemon:/var/lib/fwupd:/usr/sbin/nologin
usbmux:x:108:46:usbmux daemon,,,:/var/lib/usbmux:/usr/sbin/nologin
sshd:x:109:65534::/run/sshd:/usr/sbin/nologin
phares:x:1000:1000:Mike Phares:/home/phares:/bin/bash
lphares:x:1001:1001:Logan Phares,,,:/home/lphares:/bin/bash
bmiller:x:1002:1002:Bill Miller,,,:/home/bmiller:/bin/bash
pihole:x:999:1003::/home/pihole:/usr/sbin/nologin
pihole:x:1001:1001:Pi-hole,,,:/home/pihole:/bin/bash
podman:x:1002:1002:Podman,,,:/home/podman:/bin/bash
lphares:x:1003:1003:Logan Phares,,,:/home/lphares:/bin/bash
bmiller:x:1004:1004:Bill Miller,,,:/home/bmiller:/bin/bash
unbound:x:110:111::/var/lib/unbound:/usr/sbin/nologin
dnsmasq:x:999:65534:dnsmasq:/var/lib/misc:/usr/sbin/nologin
cockpit-ws:x:111:112::/nonexistent:/usr/sbin/nologin

View File

@ -29,10 +29,14 @@ tcpdump:!:19836::::::
tss:!:19836::::::
landscape:!:19836::::::
fwupd-refresh:!*:19836::::::
usbmux:!:19927::::::
sshd:!:19927::::::
phares:$6$LmgqfVuKR4/5T6by$yxaJ71xy0Exf7laLI6OMkgqabo5r8bzlFPZekwuGRYO8JJMH2tKeTD2W1JOVD0X2pgL5Ob73xB45Vl/lIGYsO.:19927:0:99999:7:::
lphares:$y$j9T$bai9Rz4yLf4MoGt4s6iJB/$rzofcXaHGl0hmnnx1gZwsF4/IoTkcJRA2MX.Tc3E6l6:19927:0:99999:7:::
bmiller:$y$j9T$xqZANIbaE1MCQCukITvTr0$d55.kptZwaAVL4uPmeYm2cygatWo5NG5LW8V833OST1:19927:0:99999:7:::
pihole:!:19927::::::
unbound:!:19927::::::
usbmux:!:19929::::::
sshd:!:19929::::::
phares:$y$j9T$mk3Fb5hENQkN//RvJPyB6.$xdsox1L6gnbZibmeEsveAMNjZ22J7sIEz.W957Osj1A:19930:0:99999:7:::
pihole:$y$j9T$k223Uf777oEQZtuag6kXO1$vfa4e7EdalU7A9ECEoPJ7QHnN9Bkylct7kNIHZYXGP8:19929:0:99999:7:::
podman:$y$j9T$kuuH4dAlA8LAbBASzBA6y/$9xVT4/nstOeIVTVoil/WSUKMIyePo8dKBXDByMm.qG5:19929:0:99999:7:::
lphares:$y$j9T$m33.tZHwrEl7X.ovXN.a7/$z2We2A72fQMDkSQIYetbXuNNTk8YHNEvQeisSwtmo6C:19929:0:99999:7:::
bmiller:$y$j9T$sYFlvEEV1yntCl3CeN8M70$CpuMQrO3K9NFF122NsJWvM5nxnQK8EXvmD3C41.JZm8:19929:0:99999:7:::
unbound:!:19929::::::
dnsmasq:!:19930::::::
cockpit-ws:!:19930::::::
cockpit-wsinstance:!:19930::::::

View File

@ -29,9 +29,13 @@ tcpdump:!:19836::::::
tss:!:19836::::::
landscape:!:19836::::::
fwupd-refresh:!*:19836::::::
usbmux:!:19927::::::
sshd:!:19927::::::
phares:$6$LmgqfVuKR4/5T6by$yxaJ71xy0Exf7laLI6OMkgqabo5r8bzlFPZekwuGRYO8JJMH2tKeTD2W1JOVD0X2pgL5Ob73xB45Vl/lIGYsO.:19927:0:99999:7:::
lphares:$y$j9T$bai9Rz4yLf4MoGt4s6iJB/$rzofcXaHGl0hmnnx1gZwsF4/IoTkcJRA2MX.Tc3E6l6:19927:0:99999:7:::
bmiller:$y$j9T$xqZANIbaE1MCQCukITvTr0$d55.kptZwaAVL4uPmeYm2cygatWo5NG5LW8V833OST1:19927:0:99999:7:::
pihole:!:19927::::::
usbmux:!:19929::::::
sshd:!:19929::::::
phares:$6$X.bTmW8z9/2WwB08$pivFW7YtPuGBou4Ut7eB1Y1ELwOVumy5tJYMf/RTQgkdUWzkKs9jndwfuVzTRlknbyGzA4A1lPImVtVHOCyBs/:19929:0:99999:7:::
pihole:$y$j9T$k223Uf777oEQZtuag6kXO1$vfa4e7EdalU7A9ECEoPJ7QHnN9Bkylct7kNIHZYXGP8:19929:0:99999:7:::
podman:$y$j9T$kuuH4dAlA8LAbBASzBA6y/$9xVT4/nstOeIVTVoil/WSUKMIyePo8dKBXDByMm.qG5:19929:0:99999:7:::
lphares:$y$j9T$m33.tZHwrEl7X.ovXN.a7/$z2We2A72fQMDkSQIYetbXuNNTk8YHNEvQeisSwtmo6C:19929:0:99999:7:::
bmiller:$y$j9T$sYFlvEEV1yntCl3CeN8M70$CpuMQrO3K9NFF122NsJWvM5nxnQK8EXvmD3C41.JZm8:19929:0:99999:7:::
unbound:!:19929::::::
dnsmasq:!:19930::::::
cockpit-ws:!:19930::::::

View File

@ -62,3 +62,7 @@
# for what other values do
#kernel.sysrq=438
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1

View File

@ -7,8 +7,8 @@ X-Snappy=yes
[Timer]
Unit=snap.certbot.renew.service
OnCalendar=*-*-* 03:24
OnCalendar=*-*-* 20:15
OnCalendar=*-*-* 05:54
OnCalendar=*-*-* 14:00
[Install]
WantedBy=timers.target

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1,28 @@
[Container]
AutoUpdate=registry
ContainerName=gitea-db
Environment="POSTGRES_DB=gitea"
Environment="POSTGRES_PASSWORD=gitea"
Environment="POSTGRES_USER=gitea"
Image=docker.io/library/postgres:14
# Network=gitea.network
# Pod=gitea.pod
PublishPort=5433:5432
Volume=gitea-db-data.volume:/var/lib/postgresql/data:Z
[Service]
Restart=no
[Install]
WantedBy=multi-user.target default.target
# podman pull docker.io/library/postgres:14
# systemctl --user daemon-reload
# systemctl --user start gitea-db
# systemctl --user status gitea-db
# journalctl -fu gitea-db.service
# podman logs gitea-db
# systemctl --user stop gitea-db
# systemctl --user disable gitea-db
# podman exec -ti gitea-db /bin/sh
# podman exec -ti gitea-db /bin/bash

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1,50 @@
[Container]
# AutoUpdate=registry
ContainerName=gitea-server
Environment="GITEA__database__DB_TYPE=postgres"
Environment="GITEA__database__HOST=192.168.11.2:5433"
Environment="GITEA__database__NAME=gitea"
Environment="GITEA__database__PASSWD=gitea"
Environment="GITEA__database__USER=gitea"
Environment="GITEA__oauth2__JWT_SECRET=KUT-1Y6jrgw0hAHeCQ6XwyYzv_IfLG6zzRgi56kHNR4"
Environment="GITEA__repository__DEFAULT_BRANCH=master"
Environment="GITEA__repository__DEFAULT_PUSH_CREATE_PRIVATE=false"
Environment="GITEA__repository__ENABLE_PUSH_CREATE_ORG=true"
Environment="GITEA__repository__ENABLE_PUSH_CREATE_USER=true"
Environment="GITEA__security__INSTALL_LOCK=true"
Environment="GITEA__security__INTERNAL_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE3MjIwNTg2NzJ9.A8qHgZk3QALse398h7YtIOtHhm-Uefi50QApfKR5sic"
Environment="GITEA__security__PASSWORD_HASH_ALGO=pbkdf2"
Environment="GITEA__security__SECRET_KEY="
Environment="GITEA__server__DOMAIN=gitea.phares.duckdns.org"
Environment="GITEA__server__LFS_JWT_SECRET=WgTjm7nPHRtxHWrWi9EInaNnQGENsECgCqi2e9H37W0"
Environment="GITEA__server__ROOT_URL=https://gitea.phares.duckdns.org/"
Environment="GITEA__server__SSH_DOMAIN=gitea.phares.duckdns.org"
Image=docker.io/gitea/gitea:1.22.1-rootless
# Network=gitea.network
# Pod=gitea.pod
PublishPort=3000:3000
Volume=/etc/localtime:/etc/localtime:ro
Volume=/etc/timezone:/etc/timezone:ro
Volume=gitea-server-config.volume:/etc/gitea
Volume=gitea-server-data.volume:/var/lib/gitea:Z
[Unit]
Requires=gitea-db.service
After=gitea-db.service
[Service]
Restart=no
[Install]
WantedBy=multi-user.target default.target
# podman pull docker.io/gitea/gitea:1.22.1-rootless
# systemctl --user daemon-reload
# systemctl --user start gitea-server
# systemctl --user status gitea-server
# journalctl -fu gitea-server.service
# podman logs gitea-server
# systemctl --user stop gitea-server
# systemctl --user disable gitea-server
# podman exec -ti gitea-server /bin/sh
# podman exec -ti gitea-server /bin/bash

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1,53 @@
[Container]
AutoUpdate=registry
ContainerName=immich-db
Environment="POSTGRES_DB=immich"
Environment="POSTGRES_INITDB_ARGS=--data-checksums"
Environment="POSTGRES_PASSWORD=postgres"
Environment="POSTGRES_USER=postgres"
Image=docker.io/library/postgres:16
# Network=immich.network
# Pod=immich.pod
PublishPort=5432:5432
Volume=immich-db-data.volume:/var/lib/postgresql/data:Z
[Service]
Restart=no
[Install]
WantedBy=multi-user.target default.target
# healthcheck:
# test: pg_isready --dbname='${DB_DATABASE_NAME}' || exit 1; Chksum="$$(psql
# --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only
# --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM
# pg_stat_database')"; echo "checksum failure count is $$Chksum"; [
# "$$Chksum" = '0' ] || exit 1
# interval: 5m
# start_interval: 30s
# start_period: 5m
# command:
# - postgres
# - -c
# - shared_preload_libraries=vectors.so
# - -c
# - search_path="$$user", public, vectors
# - -c
# - logging_collector=on
# - -c
# - max_wal_size=2GB
# - -c
# - shared_buffers=512MB
# - -c
# - wal_compression=on
# podman pull docker.io/library/postgres:16
# systemctl --user daemon-reload
# systemctl --user start immich-db
# systemctl --user status immich-db
# journalctl -fu immich-db.service
# podman logs immich-db
# systemctl --user stop immich-db
# systemctl --user disable immich-db
# podman exec -ti immich-db /bin/sh
# podman exec -ti immich-db /bin/bash

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1,29 @@
[Container]
AutoUpdate=registry
ContainerName=immich-learning
Image=ghcr.io/immich-app/immich-machine-learning:release
# Network=immich.network
# Pod=immich.pod
PublishPort=3003:3003
Volume=immich-learning-cache.volume:/cache:Z
[Service]
Restart=no
[Unit]
Requires=immich-redis.service
After=immich-redis.service
[Install]
WantedBy=multi-user.target default.target
# podman pull ghcr.io/immich-app/immich-machine-learning:release
# systemctl --user daemon-reload
# systemctl --user start immich-learning
# systemctl --user status immich-learning
# journalctl -fu immich-learning.service
# podman logs immich-learning
# systemctl --user stop immich-learning
# systemctl --user disable immich-learning
# podman exec -ti immich-learning /bin/sh
# podman exec -ti immich-learning /bin/bash

View File

@ -0,0 +1,31 @@
[Container]
AutoUpdate=registry
ContainerName=immich-redis
Image=docker.io/library/redis:6.2-alpine@sha256:d6c2911ac51b289db208767581a5d154544f2b2fe4914ea5056443f62dc6e900
# Network=immich.network
# Pod=immich.pod
PublishPort=6379:6379
[Service]
Restart=no
[Unit]
Requires=immich-db.service
After=immich-db.service
[Install]
WantedBy=multi-user.target default.target
# healthcheck:
# test: redis-cli ping || exit 1
# podman pull docker.io/library/redis:6.2-alpine@sha256:d6c2911ac51b289db208767581a5d154544f2b2fe4914ea5056443f62dc6e900
# systemctl --user daemon-reload
# systemctl --user start immich-redis
# systemctl --user status immich-redis
# journalctl -fu immich-redis.service
# podman logs immich-redis
# systemctl --user stop immich-redis
# systemctl --user disable immich-redis
# podman exec -ti immich-redis /bin/sh
# podman exec -ti immich-redis /bin/bash

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1,37 @@
[Container]
AutoUpdate=registry
ContainerName=immich-server
Environment="DB_DATABASE_NAME=immich"
Environment="DB_HOST=192.168.11.2"
Environment="DB_PASSWORD=postgres"
Environment="DB_USERNAME=postgres"
Image=ghcr.io/immich-app/immich-server:release
# Network=immich.network
# Pod=immich.pod
PublishPort=3001:3001
Volume=/etc/localtime:/etc/localtime:ro
Volume=immich-server-upload.volume:/usr/src/app/upload:Z
Volume=immich-server-external.volume:/usr/src/app/external:Z
[Service]
Restart=no
[Unit]
Requires=immich-learning.service
After=immich-learning.service
[Install]
WantedBy=multi-user.target default.target
# https://immich.app/docs/install/environment-variables
# podman pull ghcr.io/immich-app/immich-server:release
# systemctl --user daemon-reload
# systemctl --user start immich-server
# systemctl --user status immich-server
# journalctl -fu immich-server.service
# podman logs immich-server
# systemctl --user stop immich-server
# systemctl --user disable immich-server
# podman exec -ti immich-server /bin/sh
# podman exec -ti immich-server /bin/bash

View File

@ -0,0 +1,11 @@
[Unit]
Description=The sleep container
After=local-fs.target
[Container]
Image=registry.access.redhat.com/ubi9-minimal:latest
Exec=sleep 1000
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target multi-user.target

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1,31 @@
[Container]
AutoUpdate=registry
ContainerName=odoo-db
Environment="PGDATA=/var/lib/postgresql/data/pgdata"
Environment="POSTGRES_DB=postgres"
Environment="POSTGRES_PASSWORD=9UvN6k0m#c6cKtONLm59"
Environment="POSTGRES_USER=odoo"
# Image=docker.io/library/postgres:13
Image=docker.io/library/postgres:16
# Network=odoo.network
# Pod=odoo.pod
PublishPort=5434:5432
Volume=odoo-db-data.volume:/var/lib/postgresql/data/pgdata:Z
[Service]
Restart=no
[Install]
WantedBy=multi-user.target default.target
# podman pull docker.io/library/postgres:13
# podman pull docker.io/library/postgres:16
# systemctl --user daemon-reload
# systemctl --user start odoo-db
# systemctl --user status odoo-db
# journalctl -fu odoo-db.service
# podman logs odoo-db
# systemctl --user stop odoo-db
# systemctl --user disable odoo-db
# podman exec -ti odoo-db /bin/sh
# podman exec -ti odoo-db /bin/bash

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1,36 @@
[Container]
AutoUpdate=registry
ContainerName=odoo-server
# Image=docker.io/odoo:15.0
Image=docker.io/odoo:16.0
# Network=odoo.network
# Pod=odoo.pod
PublishPort=8069:8069
Volume=odoo-server-data.volume:/var/lib/odoo:Z
Environment="ADMIN_PASSWD=jEKJJHBQ3ByX#JJcjauy"
Environment="HOST=192.168.11.2"
Environment="PASSWORD=9UvN6k0m#c6cKtONLm59"
Environment="PORT=5434"
Environment="USER=odoo"
[Service]
Restart=no
[Unit]
Requires=odoo-db.service
After=odoo-db.service
[Install]
WantedBy=multi-user.target default.target
# podman pull docker.io/odoo:15.0
# podman pull docker.io/odoo:16.0
# systemctl --user daemon-reload
# systemctl --user start odoo-server
# systemctl --user status odoo-server
# journalctl -fu odoo-server.service
# podman logs odoo-server
# systemctl --user stop odoo-server
# systemctl --user disable odoo-server
# podman exec -ti odoo-server /bin/sh
# podman exec -ti odoo-server /bin/bash

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1,27 @@
[Container]
AutoUpdate=registry
ContainerName=pgadmin
Environment="PGADMIN_DEFAULT_EMAIL=mikepharesjr@msn.com"
Environment="PGADMIN_DEFAULT_PASSWORD=Vm1jZ4mzdaF1q#pn4v1b"
Image=docker.io/dpage/pgadmin4:latest
# Network=gitea.network
# Pod=gitea.pod
PublishPort=5007:80
Volume=pgadmin-data.volume:/var/lib/pgadmin/:Z
[Service]
Restart=no
[Install]
WantedBy=multi-user.target default.target
# podman pull docker.io/library/postgres:14
# systemctl --user daemon-reload
# systemctl --user start pgadmin
# systemctl --user status pgadmin
# journalctl -fu pgadmin.service
# podman logs pgadmin
# systemctl --user stop pgadmin
# systemctl --user disable pgadmin
# podman exec -ti pgadmin /bin/sh
# podman exec -ti pgadmin /bin/bash

View File

@ -0,0 +1 @@
[Volume]

View File

@ -0,0 +1,35 @@
[Container]
AutoUpdate=registry
ContainerName=vaultwarden-server
Image=docker.io/vaultwarden/server:latest
# Network=vaultwarden.network
# Pod=vaultwarden.pod
PublishPort=5008:80
Volume=vaultwarden-server-data.volume:/data:rw
Environment="ADMIN_TOKEN=7jrceE25+m5vPMK9jmVT8VsMM/0Svoiz4YEpLYHHT2hSaJPIlXcP8lOXwR5GpdaM"
Environment="DOMAIN=https://vaultwarden.phares.duckdns.org"
Environment="SIGNUPS_ALLOWED=true"
Environment="SMTP_FROM=user@example.com"
Environment="SMTP_HOST=smtp-relay.sendinblue.com"
Environment="SMTP_PASSWORD=sendinblue password"
Environment="SMTP_PORT=587"
Environment="SMTP_SSL=true"
Environment="SMTP_USERNAME=user@example.com"
Environment="WEBSOCKET_ENABLED=true"
[Service]
Restart=no
[Install]
WantedBy=multi-user.target default.target
# podman pull docker.io/vaultwarden/server:latest
# systemctl --user daemon-reload
# systemctl --user start vaultwarden-server
# systemctl --user status vaultwarden-server
# journalctl -fu vaultwarden-server.service
# podman logs vaultwarden-server
# systemctl --user stop vaultwarden-server
# systemctl --user disable vaultwarden-server
# podman exec -ti vaultwarden-server /bin/sh
# podman exec -ti vaultwarden-server /bin/bash

View File

@ -1,9 +0,0 @@
GITEA__database__DB_TYPE=postgres
GITEA__database__HOST=postgres:5432
GITEA__database__NAME=gitea
GITEA__database__USER=gitea
GITEA__database__PASSWD=gitea
POSTGRES_USER=gitea
POSTGRES_PASSWORD=gitea
POSTGRES_DB=gitea

View File

@ -1,37 +0,0 @@
services:
postgres:
environment:
- POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB}
image: postgres:14
restart: always
volumes:
- gitea-postgres:/var/lib/postgresql/data
server:
depends_on:
postgres:
condition: service_started
environment:
- GITEA__database__DB_TYPE=${GITEA__database__DB_TYPE}
- GITEA__database__HOST=${GITEA__database__HOST}
- GITEA__database__NAME=${GITEA__database__NAME}
- GITEA__database__USER=${GITEA__database__USER}
- GITEA__database__PASSWD=${GITEA__database__PASSWD}
image: gitea/gitea:latest-rootless
ports:
- 3000:3000
- 2222:2222
restart: always
volumes:
- gitea-data:/var/lib/gitea
- gitea-config:/etc/gitea
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
volumes:
gitea-config:
driver: local
gitea-data:
driver: local
gitea-postgres:
driver: local

View File

@ -1,22 +0,0 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# # The location where your uploaded files are stored
# UPLOAD_LOCATION=/opt/stacks/immich/upload
# # The location where your database files are stored
# DB_DATA_LOCATION=/opt/stacks/immich/database
# The location where your external files are stored
EXTERNAL_LOCATION=/home/phares/immich/photos
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# TZ=Etc/UTC
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
DB_PASSWORD=postgres
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich

View File

@ -1,93 +0,0 @@
#
# WARNING: Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#
name: immich
networks: {}
services:
database:
command:
- postgres
- -c
- shared_preload_libraries=vectors.so
- -c
- search_path="$$user", public, vectors
- -c
- logging_collector=on
- -c
- max_wal_size=2GB
- -c
- shared_buffers=512MB
- -c
- wal_compression=on
container_name: immich_postgres
environment:
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: --data-checksums
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
healthcheck:
interval: 5m
start_interval: 30s
start_period: 5m
test: pg_isready --dbname='${DB_DATABASE_NAME}' || exit 1; Chksum="$$(psql
--dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only
--no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM
pg_stat_database')"; echo "checksum failure count is $$Chksum"; [
"$$Chksum" = '0' ] || exit 1
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
restart: always
volumes:
- postgres-data:/var/lib/postgresql/data
immich-machine-learning:
container_name: immich_machine_learning
env_file:
- .env
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
restart: always
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
immich-server:
container_name: immich_server
depends_on:
- redis
- database
env_file:
- .env
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
ports:
- 2283:3001
restart: always
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
- upload-data:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
# - external-data:/usr/src/app/external
- ${EXTERNAL_LOCATION}:/usr/src/app/external:ro
# profiles:
# - donotstart
redis:
container_name: immich_redis
healthcheck:
test: redis-cli ping || exit 1
image: docker.io/redis:6.2-alpine@sha256:d6c2911ac51b289db208767581a5d154544f2b2fe4914ea5056443f62dc6e900
restart: always
volumes:
model-cache: null
postgres-data:
driver: local
upload-data:
driver: local
# external-data:
# driver: local

View File

@ -1,65 +0,0 @@
services:
kestra:
command: server standalone --worker-thread=128
depends_on:
postgres:
condition: service_started
environment:
KESTRA_CONFIGURATION: |
datasources:
postgres:
url: jdbc:postgresql://postgres:5432/kestra
driverClassName: org.postgresql.Driver
username: kestra
password: k3str4
kestra:
server:
basic-auth:
enabled: false
username: "admin@kestra.io" # it must be a valid email address
password: kestra
repository:
type: postgres
storage:
type: local
local:
base-path: "/app/storage"
queue:
type: postgres
tasks:
tmp-dir:
path: /tmp/kestra-wd/tmp
url: http://localhost:5002/
image: kestra/kestra:latest-full
ports:
- "5002:8080"
# - "8081:8081"
pull_policy: always
# Note that this is meant for development only. Refer to the documentation for production deployments of Kestra which runs without a root user.
user: "root"
volumes:
- kestra-data:/app/storage
- /var/run/docker.sock:/var/run/docker.sock
- /tmp/kestra-wd:/tmp/kestra-wd
postgres:
environment:
POSTGRES_DB: kestra
POSTGRES_PASSWORD: k3str4
POSTGRES_USER: kestra
healthcheck:
interval: 30s
retries: 10
test:
[
"CMD-SHELL",
"pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"
]
timeout: 10s
image: postgres
volumes:
- postgres-data:/var/lib/postgresql/data
volumes:
kestra-data:
driver: local
postgres-data:
driver: local

View File

@ -1,15 +0,0 @@
networks: {}
services:
lxconsole:
image: penninglabs/lxconsole:latest
ports:
- 5004:5000
restart: unless-stopped
volumes:
- certs:/opt/lxconsole/certs
- server:/opt/lxconsole/instance
volumes:
certs:
driver: local
server:
driver: local

View File

@ -1,780 +0,0 @@
# Note: Do not add single quotes '' to variables. Having spaces still works without quotes where required.
#---------------------------------------------------------------------------------------------------------
# ==== CREATING USERS AND LOGGING IN TO WEKAN ====
# https://github.com/wekan/wekan/wiki/Adding-users
#---------------------------------------------------------------------------------------------------------
# ==== FORGOT PASSWORD ====
# https://github.com/wekan/wekan/wiki/Forgot-Password
#---------------------------------------------------------------------------------------------------------
# ==== Upgrading Wekan to new version =====
# NOTE: MongoDB has changed from 3.x to 4.x, in that case you need backup/restore with --noIndexRestore
# see https://github.com/wekan/wekan/wiki/Backup
# 1) Stop Wekan:
# docker-compose stop
# 2) Remove old Wekan app (wekan-app only, not that wekan-db container that has all your data)
# docker rm wekan-app
# 3) Get newest docker-compose.yml from https://github.com/wekan/wekan to have correct image,
# for example: "image: quay.io/wekan/wekan" or version tag "image: quay.io/wekan/wekan:v4.52"
# 4) Start Wekan:
# docker-compose up -d
#----------------------------------------------------------------------------------
# ==== OPTIONAL: DEDICATED DOCKER USER ====
# 1) Optionally create a dedicated user for Wekan, for example:
# sudo useradd -d /home/wekan -m -s /bin/bash wekan
# 2) Add this user to the docker group, then logout+login or reboot:
# sudo usermod -aG docker wekan
# 3) Then login as user wekan.
# 4) Create this file /home/wekan/docker-compose.yml with your modifications.
#----------------------------------------------------------------------------------
# ==== RUN DOCKER AS SERVICE ====
# 1a) Running Docker as service, on Systemd like Debian 9, Ubuntu 16.04, CentOS 7:
# sudo systemctl enable docker
# sudo systemctl start docker
# 1b) Running Docker as service, on init.d like Debian 8, Ubuntu 14.04, CentOS 6:
# sudo update-rc.d docker defaults
# sudo service docker start
# ----------------------------------------------------------------------------------
# ==== USAGE OF THIS docker-compose.yml ====
# 1) To see whether Wekan works, try this and check with your web browser:
# docker-compose up
# 2) Stop Wekan and start Wekan in background:
# docker-compose stop
# docker-compose up -d
# 3) See running Docker containers:
# docker ps
# 4) Stop Docker containers:
# docker-compose stop
# ----------------------------------------------------------------------------------
# ===== INSIDE DOCKER CONTAINERS, AND BACKUP/RESTORE ====
# https://github.com/wekan/wekan/wiki/Backup
# If really necessary, repair MongoDB: https://github.com/wekan/wekan-mongodb/issues/6#issuecomment-424004116
# 1) Going inside containers:
# a) Wekan app, does not contain data
# docker exec -it wekan-app bash
# b) MongoDB, contains all data
# docker exec -it wekan-db bash
# 2) Copying database to outside of container:
# docker exec -it wekan-db bash
# cd /data
# mongodump
# exit
# docker cp wekan-db:/data/dump .
# 3) Restoring database
# # 1) Stop wekan
# docker stop wekan-app
# # 2) Go inside database container
# docker exec -it wekan-db bash
# # 3) and data directory
# cd /data
# # 4) Remove previous dump
# rm -rf dump
# # 5) Exit db container
# exit
# # 6) Copy dump to inside docker container
# docker cp dump wekan-db:/data/
# # 7) Go inside database container
# docker exec -it wekan-db bash
# # 8) and data directory
# cd /data
# # 9) Restore
# mongorestore --drop
# # 10) Exit db container
# exit
# # 11) Start wekan
# docker start wekan-app
#-------------------------------------------------------------------------
services:
  # MongoDB container holding all Wekan data; only reachable on the wekan-tier network.
  wekandb:
    #-------------------------------------------------------------------------------------
    # ==== MONGODB FROM DOCKER HUB ====
    image: mongo:6
    #-------------------------------------------------------------------------------------
    container_name: wekan-db
    restart: always
    # command: mongod --oplogSize 128
    # Syslog: mongod --syslog --oplogSize 128 --quiet
    # Disable MongoDB logs:
    command: mongod --logpath /dev/null --oplogSize 128 --quiet
    networks:
      - wekan-tier
    expose:
      # Exposed to linked services only, not published on the host.
      - 27017
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - wekan-db:/data/db
      - wekan-db-dump:/dump
    #- /etc/timezone:/etc/timezone:ro # Do not use https://github.com/wekan/wekan/issues/5123
  # Wekan application container; connects to wekan-db over the wekan-tier network.
  wekan:
    #-------------------------------------------------------------------------------------
    # ==== WEKAN FROM GITHUB/QUAY/DOCKER HUB ====
    # All of GitHub, Quay and Docker Hub have latest, but because
    # latest tag changes when is newest release,
    # when upgrading would be better to use version tag.
    # a) Using specific version tag is better:
    # image: ghcr.io/wekan/wekan:v6.89
    # image: quay.io/wekan/wekan:v6.89
    # image: wekanteam/wekan:v6.89
    # b) GitHub Container registry.
    # b1) Latest release tag:
    image: ghcr.io/wekan/wekan:latest
    # b2) Newest git commit automatic build:
    #image: ghcr.io/wekan/wekan:main
    # c) Quay:
    #image: quay.io/wekan/wekan:latest
    # d) Docker Hub:
    #image: wekanteam/wekan:latest
    #-------------------------------------------------------------------------------------
    container_name: wekan-app
    # On CentOS 7 there is seccomp issue with glibc 6,
    # so CentOS 7 users should use these security_opt seccomp:unconfined
    # settings to get WeKan working. See:
    # - https://github.com/wekan/wekan/issues/4585
    # - https://github.com/wekan/wekan/issues/4587
    #security_opt:
    # - seccomp:unconfined
    restart: always
    networks:
      - wekan-tier
    #-------------------------------------------------------------------------------------
    # ==== BUILD wekan-app DOCKER CONTAINER FROM SOURCE, if you uncomment these ====
    # ==== and use commands: docker-compose up -d --build
    #build:
    # context: .
    # dockerfile: Dockerfile
    #-------------------------------------------------------------------------------------
    ports:
      # Docker outsideport:insideport. Do not add anything extra here.
      # For example, if you want to have wekan on port 3001,
      # use 3001:8080 . Do not add any extra address etc here, that way it does not work.
      # remove port mapping if you use nginx reverse proxy, port 8080 is already exposed to wekan-tier network
      - 5003:8080
    environment:
      #-----------------------------------------------------------------
      # ==== WRITEABLE PATH FOR FILE UPLOADS ====
      # Uploads land in /data inside the container (backed by wekan-files volume below).
      - WRITABLE_PATH=/data
      #-----------------------------------------------------------------
      # ==== AWS S3 FOR FILES ====
      # Any region. For example:
      # us-standard,us-west-1,us-west-2,
      # eu-west-1,eu-central-1,
      # ap-southeast-1,ap-northeast-1,sa-east-1
      #
      #- S3='{"s3":{"key": "xxx", "secret": "xxx", "bucket": "xxx", "region": "xxx"}}'
      #-----------------------------------------------------------------
      # ==== MONGO_URL ====
      # Points at the wekandb service name resolved on the wekan-tier network.
      - MONGO_URL=mongodb://wekandb:27017/wekan
      #---------------------------------------------------------------
      # ==== ROOT_URL SETTING ====
      # Change ROOT_URL to your real Wekan URL, for example:
      # If you have Caddy/Nginx/Apache providing SSL
      # - https://example.com
      # - https://boards.example.com
      # This can be problematic with avatars https://github.com/wekan/wekan/issues/1776
      # - https://example.com/wekan
      # If without https, can be only wekan node, no need for Caddy/Nginx/Apache if you don't need them
      # - http://example.com
      # - http://boards.example.com
      # - http://192.168.1.100 <=== using at local LAN
      - ROOT_URL=http://localhost # <=== using only at same laptop/desktop where Wekan is installed
      #---------------------------------------------------------------
      # ==== EMAIL SETTINGS ====
      # Email settings are only at MAIL_URL and MAIL_FROM.
      # Admin Panel has test button, but it's not used for settings.
      # see https://github.com/wekan/wekan/wiki/Troubleshooting-Mail
      # For SSL in email, change smtp:// to smtps://
      # NOTE: Special characters need to be url-encoded in MAIL_URL.
      # You can encode those characters for example at: https://www.urlencoder.org
      #- MAIL_URL=smtp://user:pass@mailserver.example.com:25/
      # NOTE(review): <mail_url> below is a placeholder — replace with the real SMTP host.
      - MAIL_URL=smtp://<mail_url>:25/?ignoreTLS=true&tls={rejectUnauthorized:false}
      - MAIL_FROM=Wekan Notifications <noreply.wekan@mydomain.com>
# Currently MAIL_SERVICE is not in use.
#- MAIL_SERVICE=Outlook365
#- MAIL_SERVICE_USER=firstname.lastname@hotmail.com
#- MAIL_SERVICE_PASSWORD=SecretPassword
#---------------------------------------------------------------
# https://github.com/wekan/wekan/issues/3585#issuecomment-1021522132
# Add more Node heap, this is done by default at Dockerfile:
# - NODE_OPTIONS="--max_old_space_size=4096"
# Add more stack, this is done at Dockerfile:
# bash -c "ulimit -s 65500; exec node --stack-size=65500 main.js"
#---------------------------------------------------------------
# ==== OPTIONAL: MONGO OPLOG SETTINGS =====
# https://github.com/wekan/wekan-mongodb/issues/2#issuecomment-378343587
# We've fixed our CPU usage problem today with an environment
# change around Wekan. I wasn't aware during implementation
# that if you're using more than 1 instance of Wekan
# (or any MeteorJS based tool) you're supposed to set
# MONGO_OPLOG_URL as an environment variable.
# Without setting it, Meteor will perform a poll-and-diff
      # update of its dataset. With it, Meteor will update from
# the OPLOG. See here
# https://blog.meteor.com/tuning-meteor-mongo-livedata-for-scalability-13fe9deb8908
# After setting
# MONGO_OPLOG_URL=mongodb://<username>:<password>@<mongoDbURL>/local?authSource=admin&replicaSet=rsWekan
# the CPU usage for all Wekan instances dropped to an average
# of less than 10% with only occasional spikes to high usage
# (I guess when someone is doing a lot of work)
# - MONGO_OPLOG_URL=mongodb://<username>:<password>@<mongoDbURL>/local?authSource=admin&replicaSet=rsWekan
#---------------------------------------------------------------
# ==== OPTIONAL: KADIRA PERFORMANCE MONITORING FOR METEOR ====
# https://github.com/edemaine/kadira-compose
# https://github.com/meteor/meteor-apm-agent
# https://blog.meteor.com/kadira-apm-is-now-open-source-490469ffc85f
#- APM_OPTIONS_ENDPOINT=http://<kadira-ip>:11011
#- APM_APP_ID=
#- APM_APP_SECRET=
#---------------------------------------------------------------
# ==== OPTIONAL: LOGS AND STATS ====
# https://github.com/wekan/wekan/wiki/Logs
#
# Daily export of Wekan changes as JSON to Logstash and ElasticSearch / Kibana (ELK)
# https://github.com/wekan/wekan-logstash
#
# Statistics Python script for Wekan Dashboard
# https://github.com/wekan/wekan-stats
#
# Console, file, and zulip logger on database changes https://github.com/wekan/wekan/pull/1010
# with fix to replace console.log by winston logger https://github.com/wekan/wekan/pull/1033
# but there could be bug https://github.com/wekan/wekan/issues/1094
#
# There is Feature Request: Logging date and time of all activity with summary reports,
# and requesting reason for changing card to other column https://github.com/wekan/wekan/issues/1598
#---------------------------------------------------------------
# ==== NUMBER OF SEARCH RESULTS PER PAGE BY DEFAULT ====
#- RESULTS_PER_PAGE=20
#---------------------------------------------------------------
# ==== AFTER OIDC LOGIN, ADD USERS AUTOMATICALLY TO THIS BOARD ID ====
# https://github.com/wekan/wekan/pull/5098
#- DEFAULT_BOARD_ID=abcd1234
#---------------------------------------------------------------
# ==== WEKAN API AND EXPORT BOARD ====
# Wekan Export Board works when WITH_API=true.
# https://github.com/wekan/wekan/wiki/REST-API
# https://github.com/wekan/wekan-gogs
# If you disable Wekan API with false, Export Board does not work.
- WITH_API=true
#---------------------------------------------------------------
# ==== PASSWORD BRUTE FORCE PROTECTION ====
#https://atmospherejs.com/lucasantoniassi/accounts-lockout
#Defaults below. Uncomment to change. wekan/server/accounts-lockout.js
#- ACCOUNTS_LOCKOUT_KNOWN_USERS_FAILURES_BEFORE=3
#- ACCOUNTS_LOCKOUT_KNOWN_USERS_PERIOD=60
#- ACCOUNTS_LOCKOUT_KNOWN_USERS_FAILURE_WINDOW=15
#- ACCOUNTS_LOCKOUT_UNKNOWN_USERS_FAILURES_BERORE=3
#- ACCOUNTS_LOCKOUT_UNKNOWN_USERS_LOCKOUT_PERIOD=60
#- ACCOUNTS_LOCKOUT_UNKNOWN_USERS_FAILURE_WINDOW=15
#---------------------------------------------------------------
# ==== ACCOUNT OPTIONS ====
# https://docs.meteor.com/api/accounts-multi.html#AccountsCommon-config
# Defaults below. Uncomment to change. wekan/server/accounts-common.js
# - ACCOUNTS_COMMON_LOGIN_EXPIRATION_IN_DAYS=90
#---------------------------------------------------------------
# ==== RICH TEXT EDITOR IN CARD COMMENTS ====
# https://github.com/wekan/wekan/pull/2560
- RICHER_CARD_COMMENT_EDITOR=false
#---------------------------------------------------------------
# ==== CARD OPENED, SEND WEBHOOK MESSAGE ====
# https://github.com/wekan/wekan/issues/2518
- CARD_OPENED_WEBHOOK_ENABLED=false
#---------------------------------------------------------------
# ==== Allow configuration to validate uploaded attachments ====
#-ATTACHMENTS_UPLOAD_EXTERNAL_PROGRAM=/usr/local/bin/avscan {file}
#-ATTACHMENTS_UPLOAD_MIME_TYPES=image/*,text/*
#-ATTACHMENTS_UPLOAD_MAX_SIZE=5000000
#---------------------------------------------------------------
# ==== Allow configuration to validate uploaded avatars ====
#-AVATARS_UPLOAD_EXTERNAL_PROGRAM=/usr/local/bin/avscan {file}
#-AVATARS_UPLOAD_MIME_TYPES=image/*
#-AVATARS_UPLOAD_MAX_SIZE=500000
#---------------------------------------------------------------
# ==== Allow to shrink attached/pasted image ====
# https://github.com/wekan/wekan/pull/2544
#- MAX_IMAGE_PIXEL=1024
#- IMAGE_COMPRESS_RATIO=80
#---------------------------------------------------------------
# ==== NOTIFICATION TRAY AFTER READ DAYS BEFORE REMOVE =====
# Number of days after a notification is read before we remove it.
# Default: 2
#- NOTIFICATION_TRAY_AFTER_READ_DAYS_BEFORE_REMOVE=2
#---------------------------------------------------------------
# ==== BIGEVENTS DUE ETC NOTIFICATIONS =====
# https://github.com/wekan/wekan/pull/2541
      # Introduced a system env var BIGEVENTS_PATTERN (default "NONE"); when
      # any activityType matches the pattern, the system will send out
      # notifications to all board members no matter whether they are
      # watching or tracking the board or not. Owner of the wekan server can
# disable the feature by setting this variable to "NONE" or
# change the pattern to any valid regex. i.e. '|' delimited
# activityType names.
# a) Example
#- BIGEVENTS_PATTERN=due
# b) All
#- BIGEVENTS_PATTERN=received|start|due|end
# c) Disabled
- BIGEVENTS_PATTERN=NONE
#---------------------------------------------------------------
# ==== EMAIL DUE DATE NOTIFICATION =====
# https://github.com/wekan/wekan/pull/2536
# System timelines will be showing any user modification for
# dueat startat endat receivedat, also notification to
# the watchers and if any card is due, about due or past due.
#
# Notify due days, default is None, 2 days before and on the event day
#- NOTIFY_DUE_DAYS_BEFORE_AND_AFTER=2,0
#
# Notify due at hour of day. Default every morning at 8am. Can be 0-23.
# If env variable has parsing error, use default. Notification sent to watchers.
#- NOTIFY_DUE_AT_HOUR_OF_DAY=8
#-----------------------------------------------------------------
# ==== EMAIL NOTIFICATION TIMEOUT, ms =====
# Default: 30000 ms = 30s
#- EMAIL_NOTIFICATION_TIMEOUT=30000
#-----------------------------------------------------------------
# ==== CORS =====
# CORS: Set Access-Control-Allow-Origin header.
#- CORS=*
# CORS_ALLOW_HEADERS: Set Access-Control-Allow-Headers header. "Authorization,Content-Type" is required for cross-origin use of the API.
#- CORS_ALLOW_HEADERS=Authorization,Content-Type
# CORS_EXPOSE_HEADERS: Set Access-Control-Expose-Headers header. This is not needed for typical CORS situations
#- CORS_EXPOSE_HEADERS=*
#-----------------------------------------------------------------
# ==== MATOMO INTEGRATION ====
# Optional: Integration with Matomo https://matomo.org that is installed to your server
# The address of the server where Matomo is hosted.
#- MATOMO_ADDRESS=https://example.com/matomo
# The value of the site ID given in Matomo server for Wekan
#- MATOMO_SITE_ID=1
# The option do not track which enables users to not be tracked by matomo
#- MATOMO_DO_NOT_TRACK=true
# The option that allows matomo to retrieve the username:
#- MATOMO_WITH_USERNAME=true
#-----------------------------------------------------------------
# ==== BROWSER POLICY AND TRUSTED IFRAME URL ====
# Enable browser policy and allow one trusted URL that can have iframe that has Wekan embedded inside.
# Setting this to false is not recommended, it also disables all other browser policy protections
# and allows all iframing etc. See wekan/server/policy.js
- BROWSER_POLICY_ENABLED=true
# When browser policy is enabled, HTML code at this Trusted URL can have iframe that embeds Wekan inside.
#- TRUSTED_URL=https://intra.example.com
#-----------------------------------------------------------------
# ==== METRICS ALLOWED IP ADDRESSES ====
# https://github.com/wekan/wekan/wiki/Metrics
#- METRICS_ALLOWED_IP_ADDRESSES=192.168.0.100,192.168.0.200
#-----------------------------------------------------------------
# ==== OUTGOING WEBHOOKS ====
# What to send to Outgoing Webhook, or leave out. If commented out the default values will be: cardId,listId,oldListId,boardId,comment,user,card,commentId,swimlaneId,customerField,customFieldValue
#- WEBHOOKS_ATTRIBUTES=cardId,listId,oldListId,boardId,comment,user,card,board,list,swimlane,commentId
#-----------------------------------------------------------------
# ==== Debug OIDC OAuth2 etc ====
#- DEBUG=true
#---------------------------------------------
# ==== AUTOLOGIN WITH OIDC/OAUTH2 ====
# https://github.com/wekan/wekan/wiki/autologin
#- OIDC_REDIRECTION_ENABLED=true
#-----------------------------------------------------------------
# ==== OAUTH2 ORACLE on premise identity manager OIM ====
#- ORACLE_OIM_ENABLED=true
#-----------------------------------------------------------------
# ==== OAUTH2 AZURE ====
# https://github.com/wekan/wekan/wiki/Azure
# 1) Register the application with Azure. Make sure you capture
# the application ID as well as generate a secret key.
# 2) Configure the environment variables. This differs slightly
# by installation type, but make sure you have the following:
#- OAUTH2_ENABLED=true
# Optional OAuth2 CA Cert, see https://github.com/wekan/wekan/issues/3299
#- OAUTH2_CA_CERT=ABCD1234
# Use OAuth2 ADFS additional changes. Also needs OAUTH2_ENABLED=true setting.
#- OAUTH2_ADFS_ENABLED=false
# Azure AD B2C. https://github.com/wekan/wekan/issues/5242
#- OAUTH2_B2C_ENABLED=false
# OAuth2 login style: popup or redirect.
#- OAUTH2_LOGIN_STYLE=redirect
# Application GUID captured during app registration:
#- OAUTH2_CLIENT_ID=xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx
# Secret key generated during app registration:
#- OAUTH2_SECRET=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#- OAUTH2_SERVER_URL=https://login.microsoftonline.com/
#- OAUTH2_AUTH_ENDPOINT=/oauth2/v2.0/authorize
#- OAUTH2_USERINFO_ENDPOINT=https://graph.microsoft.com/oidc/userinfo
#- OAUTH2_TOKEN_ENDPOINT=/oauth2/v2.0/token
# The claim name you want to map to the unique ID field:
#- OAUTH2_ID_MAP=email
# The claim name you want to map to the username field:
#- OAUTH2_USERNAME_MAP=email
# The claim name you want to map to the full name field:
#- OAUTH2_FULLNAME_MAP=name
# The claim name you want to map to the email field:
#- OAUTH2_EMAIL_MAP=email
#-----------------------------------------------------------------
# ==== OAUTH2 Nextcloud ====
# 1) Register the application with Nextcloud: https://your.nextcloud/index.php/settings/admin/security
# Make sure you capture the application ID as well as generate a secret key.
# Use https://your.wekan/_oauth/oidc for the redirect URI.
# 2) Configure the environment variables. This differs slightly
# by installation type, but make sure you have the following:
#- OAUTH2_ENABLED=true
# OAuth2 login style: popup or redirect.
#- OAUTH2_LOGIN_STYLE=redirect
# Application GUID captured during app registration:
#- OAUTH2_CLIENT_ID=xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx
# Secret key generated during app registration:
#- OAUTH2_SECRET=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#- OAUTH2_SERVER_URL=https://your-nextcloud.tld
#- OAUTH2_AUTH_ENDPOINT=/index.php/apps/oauth2/authorize
#- OAUTH2_USERINFO_ENDPOINT=/ocs/v2.php/cloud/user?format=json
#- OAUTH2_TOKEN_ENDPOINT=/index.php/apps/oauth2/api/v1/token
# The claim name you want to map to the unique ID field:
#- OAUTH2_ID_MAP=id
# The claim name you want to map to the username field:
#- OAUTH2_USERNAME_MAP=id
# The claim name you want to map to the full name field:
#- OAUTH2_FULLNAME_MAP=display-name
# The claim name you want to map to the email field:
#- OAUTH2_EMAIL_MAP=email
#-----------------------------------------------------------------
# ==== OAUTH2 KEYCLOAK ====
# https://github.com/wekan/wekan/wiki/Keycloak <== MAPPING INFO, REQUIRED
#- OAUTH2_ENABLED=true
# OAuth2 login style: popup or redirect.
#- OAUTH2_LOGIN_STYLE=redirect
#- OAUTH2_CLIENT_ID=<Keycloak create Client ID>
#- OAUTH2_SERVER_URL=<Keycloak server name>/auth
#- OAUTH2_AUTH_ENDPOINT=/realms/<keycloak realm>/protocol/openid-connect/auth
#- OAUTH2_USERINFO_ENDPOINT=/realms/<keycloak realm>/protocol/openid-connect/userinfo
#- OAUTH2_TOKEN_ENDPOINT=/realms/<keycloak realm>/protocol/openid-connect/token
#- OAUTH2_SECRET=<keycloak client secret>
#-----------------------------------------------------------------
# ==== OAUTH2 DOORKEEPER ====
# https://github.com/wekan/wekan/issues/1874
# https://github.com/wekan/wekan/wiki/OAuth2
# Enable the OAuth2 connection
#- OAUTH2_ENABLED=true
# OAuth2 docs: https://github.com/wekan/wekan/wiki/OAuth2
# OAuth2 login style: popup or redirect.
#- OAUTH2_LOGIN_STYLE=redirect
# OAuth2 Client ID.
#- OAUTH2_CLIENT_ID=abcde12345
# OAuth2 Secret.
#- OAUTH2_SECRET=54321abcde
# OAuth2 Server URL.
#- OAUTH2_SERVER_URL=https://chat.example.com
# OAuth2 Authorization Endpoint.
#- OAUTH2_AUTH_ENDPOINT=/oauth/authorize
# OAuth2 Userinfo Endpoint.
#- OAUTH2_USERINFO_ENDPOINT=/oauth/userinfo
# OAuth2 Token Endpoint.
#- OAUTH2_TOKEN_ENDPOINT=/oauth/token
# OAUTH2 ID Token Whitelist Fields.
#- OAUTH2_ID_TOKEN_WHITELIST_FIELDS=""
# OAUTH2 Request Permissions.
#- OAUTH2_REQUEST_PERMISSIONS=openid profile email
# OAuth2 ID Mapping
#- OAUTH2_ID_MAP=
# OAuth2 Username Mapping
#- OAUTH2_USERNAME_MAP=
# OAuth2 Fullname Mapping
#- OAUTH2_FULLNAME_MAP=
# OAuth2 Email Mapping
#- OAUTH2_EMAIL_MAP=
#-----------------------------------------------------------------
# ==== LDAP: UNCOMMENT ALL TO ENABLE LDAP ====
# https://github.com/wekan/wekan/wiki/LDAP
# For Snap settings see https://github.com/wekan/wekan-snap/wiki/Supported-settings-keys
# Most settings work both on Snap and Docker below.
# Note: Do not add single quotes '' to variables. Having spaces still works without quotes where required.
#
# The default authentication method used if a user does not exist to create and authenticate. Can be set as ldap.
# (this is set properly in the Admin Panel, changing this item does not remove Password login option)
#- DEFAULT_AUTHENTICATION_METHOD=ldap
#
# Enable or not the connection by the LDAP
#- LDAP_ENABLE=true
#
# The port of the LDAP server
#- LDAP_PORT=389
#
# The host server for the LDAP server
#- LDAP_HOST=localhost
#
#-----------------------------------------------------------------
# ==== LDAP AD Simple Auth ====
#
# Set to true, if you want to connect with Active Directory by Simple Authentication.
# When using AD Simple Auth, LDAP_BASEDN is not needed.
#
# Example:
#- LDAP_AD_SIMPLE_AUTH=true
#
# === LDAP User Authentication ===
#
# a) Option to login to the LDAP server with the user's own username and password, instead of
# an administrator key. Default: false (use administrator key).
#
# b) When using AD Simple Auth, set to true, when login user is used for binding,
# and LDAP_BASEDN is not needed.
#
# Example:
#- LDAP_USER_AUTHENTICATION=true
#
# Which field is used to find the user for the user authentication. Default: uid.
#- LDAP_USER_AUTHENTICATION_FIELD=uid
#
# === LDAP Default Domain ===
#
# a) In case AD SimpleAuth is configured, the default domain is appended to the given
# loginname for creating the correct username for the bind request to AD.
#
      # b) The default domain of the LDAP; it is used to create the email if the field is not mapped
      #    correctly with the LDAP_SYNC_USER_DATA_FIELDMAP
#
# Example :
#- LDAP_DEFAULT_DOMAIN=mydomain.com
#
#-----------------------------------------------------------------
# ==== LDAP BASEDN Auth ====
#
# The base DN for the LDAP Tree
#- LDAP_BASEDN=ou=user,dc=example,dc=org
#
#-----------------------------------------------------------------
# Fallback on the default authentication method
#- LDAP_LOGIN_FALLBACK=false
#
# Reconnect to the server if the connection is lost
#- LDAP_RECONNECT=true
#
# Overall timeout, in milliseconds
#- LDAP_TIMEOUT=10000
#
# Specifies the timeout for idle LDAP connections in milliseconds
#- LDAP_IDLE_TIMEOUT=10000
#
# Connection timeout, in milliseconds
#- LDAP_CONNECT_TIMEOUT=10000
#
# If the LDAP needs a user account to search
#- LDAP_AUTHENTIFICATION=true
#
# The search user DN - You need quotes when you have spaces in parameters
# 2 examples:
#- LDAP_AUTHENTIFICATION_USERDN=CN=ldap admin,CN=users,DC=domainmatter,DC=lan
#- LDAP_AUTHENTIFICATION_USERDN=CN=wekan_adm,OU=serviceaccounts,OU=admin,OU=prod,DC=mydomain,DC=com
#
# The password for the search user
#- LDAP_AUTHENTIFICATION_PASSWORD=pwd
#
# Enable logs for the module
#- LDAP_LOG_ENABLED=true
#
# If the sync of the users should be done in the background
#- LDAP_BACKGROUND_SYNC=false
#
# At which interval does the background task sync.
# The format must be as specified in:
# https://bunkat.github.io/later/parsers.html#text
#- LDAP_BACKGROUND_SYNC_INTERVAL=every 1 hour
#
#- LDAP_BACKGROUND_SYNC_KEEP_EXISTANT_USERS_UPDATED=false
#
#- LDAP_BACKGROUND_SYNC_IMPORT_NEW_USERS=false
#
# If using LDAPS: LDAP_ENCRYPTION=ssl
#- LDAP_ENCRYPTION=false
#
# The certification for the LDAPS server. Certificate needs to be included in this docker-compose.yml file.
#- LDAP_CA_CERT=-----BEGIN CERTIFICATE-----MIIE+G2FIdAgIC...-----END CERTIFICATE-----
#
# Reject Unauthorized Certificate
#- LDAP_REJECT_UNAUTHORIZED=false
#
# Optional extra LDAP filters. Don't forget the outmost enclosing parentheses if needed
#- LDAP_USER_SEARCH_FILTER=
#
# base (search only in the provided DN), one (search only in the provided DN and one level deep), or sub (search the whole subtree)
#- LDAP_USER_SEARCH_SCOPE=one
#
# Which field is used to find the user, like uid / sAMAccountName
#- LDAP_USER_SEARCH_FIELD=sAMAccountName
#
# Used for pagination (0=unlimited)
#- LDAP_SEARCH_PAGE_SIZE=0
#
# The limit number of entries (0=unlimited)
#- LDAP_SEARCH_SIZE_LIMIT=0
#
# Enable group filtering. Note the authenticated ldap user must be able to query all relevant group data with own login data from ldap.
#- LDAP_GROUP_FILTER_ENABLE=false
#
# The object class for filtering. Example: group
#- LDAP_GROUP_FILTER_OBJECTCLASS=
#
# The attribute of a group identifying it. Example: cn
#- LDAP_GROUP_FILTER_GROUP_ID_ATTRIBUTE=
#
# The attribute inside a group object listing its members. Example: member
#- LDAP_GROUP_FILTER_GROUP_MEMBER_ATTRIBUTE=
#
# The format of the value of LDAP_GROUP_FILTER_GROUP_MEMBER_ATTRIBUTE. Example: 'dn' if the users dn is saved as value into the attribute.
#- LDAP_GROUP_FILTER_GROUP_MEMBER_FORMAT=
#
# The group name (id) that matches all users.
#- LDAP_GROUP_FILTER_GROUP_NAME=
#
# LDAP_UNIQUE_IDENTIFIER_FIELD : This field is sometimes class GUID (Globally Unique Identifier). Example: guid
#- LDAP_UNIQUE_IDENTIFIER_FIELD=
#
# LDAP_UTF8_NAMES_SLUGIFY : Convert the username to utf8
#- LDAP_UTF8_NAMES_SLUGIFY=true
#
# LDAP_USERNAME_FIELD : Which field contains the ldap username. username / sAMAccountName
#- LDAP_USERNAME_FIELD=sAMAccountName
#
# LDAP_FULLNAME_FIELD : Which field contains the ldap fullname. fullname / sAMAccountName
#- LDAP_FULLNAME_FIELD=fullname
#
#- LDAP_MERGE_EXISTING_USERS=false
#
# Allow existing account matching by e-mail address when username does not match
#- LDAP_EMAIL_MATCH_ENABLE=true
#
# LDAP_EMAIL_MATCH_REQUIRE : require existing account matching by e-mail address when username does match
#- LDAP_EMAIL_MATCH_REQUIRE=true
#
# LDAP_EMAIL_MATCH_VERIFIED : require existing account email address to be verified for matching
#- LDAP_EMAIL_MATCH_VERIFIED=true
#
# LDAP_EMAIL_FIELD : which field contains the LDAP e-mail address
#- LDAP_EMAIL_FIELD=mail
#-----------------------------------------------------------------
#- LDAP_SYNC_USER_DATA=false
#
#- LDAP_SYNC_USER_DATA_FIELDMAP={"cn":"name", "mail":"email"}
#
#- LDAP_SYNC_GROUP_ROLES=
#
      # The default domain of the LDAP is used to create the email if the field is not mapped correctly
      # with the LDAP_SYNC_USER_DATA_FIELDMAP; it is defined in setting LDAP_DEFAULT_DOMAIN above.
#
# Enable/Disable syncing of admin status based on ldap groups:
#- LDAP_SYNC_ADMIN_STATUS=true
#
# Comma separated list of admin group names to sync.
#- LDAP_SYNC_ADMIN_GROUPS=group1,group2
#---------------------------------------------------------------------
# Login to LDAP automatically with HTTP header.
# In below example for siteminder, at right side of = is header name.
#- HEADER_LOGIN_ID=HEADERUID
#- HEADER_LOGIN_FIRSTNAME=HEADERFIRSTNAME
#- HEADER_LOGIN_LASTNAME=HEADERLASTNAME
#- HEADER_LOGIN_EMAIL=HEADEREMAILADDRESS
#---------------------------------------------------------------------
# ==== LOGOUT TIMER, probably does not work yet ====
# LOGOUT_WITH_TIMER : Enables or not the option logout with timer
# example : LOGOUT_WITH_TIMER=true
#- LOGOUT_WITH_TIMER=
#
# LOGOUT_IN : The number of days
# example : LOGOUT_IN=1
#- LOGOUT_IN=
#
# LOGOUT_ON_HOURS : The number of hours
# example : LOGOUT_ON_HOURS=9
#- LOGOUT_ON_HOURS=
#
# LOGOUT_ON_MINUTES : The number of minutes
# example : LOGOUT_ON_MINUTES=55
#- LOGOUT_ON_MINUTES=
#-------------------------------------------------------------------
# Hide password login form
# - PASSWORD_LOGIN_ENABLED=true
#-------------------------------------------------------------------
#- CAS_ENABLED=true
#- CAS_BASE_URL=https://cas.example.com/cas
#- CAS_LOGIN_URL=https://cas.example.com/login
#- CAS_VALIDATE_URL=https://cas.example.com/cas/p3/serviceValidate
#---------------------------------------------------------------------
#- SAML_ENABLED=true
#- SAML_PROVIDER=
#- SAML_ENTRYPOINT=
#- SAML_ISSUER=
#- SAML_CERT=
#- SAML_IDPSLO_REDIRECTURL=
#- SAML_PRIVATE_KEYFILE=
#- SAML_PUBLIC_CERTFILE=
#- SAML_IDENTIFIER_FORMAT=
#- SAML_LOCAL_PROFILE_MATCH_ATTRIBUTE=
#- SAML_ATTRIBUTES=
#---------------------------------------------------------------------
# Wait spinner to use
# - WAIT_SPINNER=Bounce
#---------------------------------------------------------------------
    # Start the database container before the app (start ordering only, no health gating).
    depends_on:
      - wekandb
    volumes:
      - /etc/localtime:/etc/localtime:ro
      # File uploads persisted here; matches WRITABLE_PATH=/data above.
      - wekan-files:/data:rw
#---------------------------------------------------------------------------------
# ==== OPTIONAL: SHARE DATABASE TO OFFICE LAN AND REMOTE VPN ====
# When using Wekan both at office LAN and remote VPN:
# 1) Have above Wekan docker container config with LAN IP address
# 2) Copy all of above wekan container config below, look above of this part above and all config below it,
# before above depends_on: part:
#
# wekan:
# #-------------------------------------------------------------------------------------
# # ==== MONGODB AND METEOR VERSION ====
# # a) For Wekan Meteor 1.8.x version at meteor-1.8 branch, .....
#
#
# and change name to different name like wekan2 or wekanvpn, and change ROOT_URL to server VPN IP
# address.
# 3) This way both Wekan containers can use same MongoDB database
# and see the same Wekan boards.
# 4) You could also add 3rd Wekan container for 3rd network etc.
# EXAMPLE:
# wekan2:
# ....COPY CONFIG FROM ABOVE TO HERE...
# environment:
# - ROOT_URL='http://10.10.10.10'
# ...COPY CONFIG FROM ABOVE TO HERE...
#---------------------------------------------------------------------------------
# OPTIONAL NGINX CONFIG FOR REVERSE PROXY
# nginx:
# image: nginx
# container_name: nginx
# restart: always
# networks:
# - wekan-tier
# depends_on:
# - wekan
# ports:
# - 80:80
# - 443:443
# volumes:
# - ./nginx/ssl:/etc/nginx/ssl/:ro
# - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
## Alternative volume config:
## volumes:
## - ./nginx/nginx.conf:/etc/nginx/conf.d/default.conf:ro
## - ./nginx/ssl/ssl.conf:/etc/nginx/conf.d/ssl/ssl.conf:ro
## - ./nginx/ssl/testvm-ehu.crt:/etc/nginx/conf.d/ssl/certs/mycert.crt:ro
## - ./nginx/ssl/testvm-ehu.key:/etc/nginx/conf.d/ssl/certs/mykey.key:ro
## - ./nginx/ssl/pphrase:/etc/nginx/conf.d/ssl/pphrase:ro
# Named volumes: uploaded files, MongoDB data, and mongodump output.
volumes:
  wekan-files:
    driver: local
  wekan-db:
    driver: local
  wekan-db-dump:
    driver: local
# Private bridge network shared by wekan-app and wekan-db.
networks:
  wekan-tier:
    driver: bridge