diff --git a/G000 - Table Of Contents.md b/G000 - Table Of Contents.md
index 3e2ddbe..48f87fd 100644
--- a/G000 - Table Of Contents.md
+++ b/G000 - Table Of Contents.md
@@ -1,10 +1,12 @@
# **G000** - Table Of Contents
-## [**README**](README.md#small-homelab-k8s-cluster-on-proxmox-ve)
+## [**README**](README.md)
-- [_Description of contents_](README.md#description-of-contents)
-- [_Intended audience_](README.md#intended-audience)
-- [_Goals_](README.md#goals)
+- [A complete guide for building a virtualized Kubernetes homelab](README.md#a-complete-guide-for-building-a-virtualized-kubernetes-homelab)
+- [Main concepts](README.md#main-concepts)
+- [Intended audience](README.md#intended-audience)
+- [Goal of this guide](README.md#goal-of-this-guide)
+- [Software used](README.md#software-used)
## [**LICENSE**](LICENSE.md)
@@ -12,671 +14,697 @@
## Guides
-### [**G001** - Hardware setup](G001%20-%20Hardware%20setup.md#g001-hardware-setup)
-
-- [_The reference hardware setup_](G001%20-%20Hardware%20setup.md#the-reference-hardware-setup)
-- [_References_](G001%20-%20Hardware%20setup.md#references)
-
-### [**G002** - Proxmox VE installation](G002%20-%20Proxmox%20VE%20installation.md#g002-proxmox-ve-installation)
-
-- [_System Requirements_](G002%20-%20Proxmox%20VE%20installation.md#system-requirements)
-- [_Installation procedure_](G002%20-%20Proxmox%20VE%20installation.md#installation-procedure)
-- [_After the installation_](G002%20-%20Proxmox%20VE%20installation.md#after-the-installation)
-- [_Connecting remotely_](G002%20-%20Proxmox%20VE%20installation.md#connecting-remotely)
-- [_References_](G002%20-%20Proxmox%20VE%20installation.md#references)
-
-### [**G003** - Host configuration 01 ~ Apt sources, updates and extra tools](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#g003-host-configuration-01-apt-sources-updates-and-extra-tools)
-
-- [_Remember, Proxmox VE 7.0 runs on Debian 11_ bullseye](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#remember-proxmox-ve-70-runs-on-debian-11-bullseye)
-- [_Editing the apt repository sources_](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#editing-the-apt-repository-sources)
-- [_Update your system_](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#update-your-system)
-- [_Installing useful extra tools_](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#installing-useful-extra-tools)
-- [_Relevant system paths_](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#relevant-system-paths)
-- [_References_](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#references)
-
-### [**G004** - Host configuration 02 ~ UPS management with NUT](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#g004-host-configuration-02-ups-management-with-nut)
-
-- [_Connecting your UPS with your pve node using NUT_](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#connecting-your-ups-with-your-pve-node-using-nut)
-- [_Executing instant commands on the UPS unit_](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#executing-instant-commands-on-the-ups-unit)
-- [_Other possibilities with NUT_](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#other-possibilities-with-nut)
-- [_Relevant system paths_](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#relevant-system-paths)
-- [_References_](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#references)
-
-### [**G005** - Host configuration 03 ~ LVM storage](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#g005-host-configuration-03-lvm-storage)
-
-- [_Initial filesystem configuration (**web console**)_](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#initial-filesystem-configuration-web-console)
-- [_Initial filesystem configuration (**shell as root**)_](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#initial-filesystem-configuration-shell-as-root)
-- [_Configuring the unused storage drives_](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#configuring-the-unused-storage-drives)
-- [_LVM rearrangement in the main storage drive_](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#lvm-rearrangement-in-the-main-storage-drive)
-- [_References_](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#references)
-
-### [**G006** - Host configuration 04 ~ Removing subscription warning](G006%20-%20Host%20configuration%2004%20~%20Removing%20subscription%20warning.md#g006-host-configuration-04-removing-subscription-warning)
-
-- [_Reverting the changes_](G006%20-%20Host%20configuration%2004%20~%20Removing%20subscription%20warning.md#reverting-the-changes)
-- [_Change executed in just one command line_](G006%20-%20Host%20configuration%2004%20~%20Removing%20subscription%20warning.md#change-executed-in-just-one-command-line)
-- [_Final note_](G006%20-%20Host%20configuration%2004%20~%20Removing%20subscription%20warning.md#final-note)
-- [_Relevant system paths_](G006%20-%20Host%20configuration%2004%20~%20Removing%20subscription%20warning.md#relevant-system-paths)
-- [_References_](G006%20-%20Host%20configuration%2004%20~%20Removing%20subscription%20warning.md#references)
-
-### [**G007** - Host hardening 01 ~ TFA authentication](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#g007-host-hardening-01-tfa-authentication)
-
-- [_Enabling TFA for SSH access_](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#enabling-tfa-for-ssh-access)
-- [_Enforcing TFA TOTP for accessing the Proxmox VE web console_](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#enforcing-tfa-totp-for-accessing-the-proxmox-ve-web-console)
-- [_Enforcing TFA TOTP as a default requirement for `pam` realm_](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#enforcing-tfa-totp-as-a-default-requirement-for-pam-realm)
-- [_Incompatibility of PVE web console login with TFA enforced local shell access_](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#incompatibility-of-pve-web-console-login-with-tfa-enforced-local-shell-access)
-- [_Relevant system paths_](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#relevant-system-paths)
-- [_References_](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#references)
-
-### [**G008** - Host hardening 02 ~ Alternative administrator user](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#g008-host-hardening-02-alternative-administrator-user)
-
-- [_Understanding the Proxmox VE user management and the realms_](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#understanding-the-proxmox-ve-user-management-and-the-realms)
-- [_Creating a new system administrator user for a Proxmox VE node_](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#creating-a-new-system-administrator-user-for-a-proxmox-ve-node)
-- [_Relevant system paths_](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#relevant-system-paths)
-- [_References_](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#references)
-
-### [**G009** - Host hardening 03 ~ SSH key pairs and sshd service configuration](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#g009-host-hardening-03-ssh-key-pairs-and-sshd-service-configuration)
-
-- [_SSH key pairs_](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#ssh-key-pairs)
-- [_Hardening the `sshd` service_](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#hardening-the-sshd-service)
-- [_Relevant system paths_](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#relevant-system-paths)
-- [_References_](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#references)
-
-### [**G010** - Host hardening 04 ~ Enabling Fail2Ban](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#g010-host-hardening-04-enabling-fail2ban)
-
-- [_Installing Fail2ban_](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#installing-fail2ban)
-- [_Configuring Fail2ban_](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#configuring-fail2ban)
-- [_Considerations regarding Fail2ban_](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#considerations-regarding-fail2ban)
-- [_Relevant system paths_](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#relevant-system-paths)
-- [_References_](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#references)
-
-### [**G011** - Host hardening 05 ~ Proxmox VE services](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#g011-host-hardening-05-proxmox-ve-services)
-
-- [_Checking currently running services_](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#checking-currently-running-services)
-- [_Configuring the `pveproxy` service_](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#configuring-the-pveproxy-service)
-- [_Disabling RPC services_](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#disabling-rpc-services)
-- [_Disabling `zfs` and `ceph`_](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#disabling-zfs-and-ceph)
-- [_Disabling the SPICE proxy_](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#disabling-the-spice-proxy)
-- [_Disabling cluster and high availability related services_](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#disabling-cluster-and-high-availability-related-services)
-- [_Considerations_](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#considerations)
-- [_Relevant system paths_](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#relevant-system-paths)
-- [_References_](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#references)
-
-### [**G012** - Host hardening 06 ~ Network hardening with sysctl](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#g012-host-hardening-06-network-hardening-with-sysctl)
-
-- [_About `sysctl`_](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#about-sysctl)
-- [_TCP/IP stack hardening with `sysctl`_](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#tcpip-stack-hardening-with-sysctl)
-- [_Relevant system paths_](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#relevant-system-paths)
-- [_References_](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#references)
-
-### [**G013** - Host hardening 07 ~ Mitigating CPU vulnerabilities](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#g013-host-hardening-07-mitigating-cpu-vulnerabilities)
-
-- [_Checking out your CPU's vulnerabilities_](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#checking-out-your-cpus-vulnerabilities)
-- [_Applying the correct microcode package_](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#applying-the-correct-microcode-package)
-- [_Relevant system paths_](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#relevant-system-paths)
-- [_References_](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#references)
-
-### [**G014** - Host hardening 08 ~ Firewalling](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#g014-host-hardening-08-firewalling)
-
-- [_Proxmox VE firewall uses `iptables`_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#proxmox-ve-firewall-uses-iptables)
-- [_Zones in the Proxmox VE firewall_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#zones-in-the-proxmox-ve-firewall)
-- [_Situation at this point_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#situation-at-this-point)
-- [_Enabling the firewall at the `Datacenter` tier_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#enabling-the-firewall-at-the-datacenter-tier)
-- [_Firewalling with `ebtables`_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#firewalling-with-ebtables)
-- [_Firewall fine tuning_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#firewall-fine-tuning)
-- [_Firewall logging_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#firewall-logging)
-- [_Connection tracking tool_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#connection-tracking-tool)
-- [_Relevant system paths_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#relevant-system-paths)
-- [_References_](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#references)
-
-### [**G015** - Host optimization 01 ~ Adjustments through sysctl](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#g015-host-optimization-01-adjustments-through-sysctl)
-
-- [_Network optimizations_](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#network-optimizations)
-- [_Memory optimizations_](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#memory-optimizations)
-- [_Kernel optimizations_](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#kernel-optimizations)
-- [_Reboot the system_](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#reboot-the-system)
-- [_Final considerations_](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#final-considerations)
-- [_Relevant system paths_](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#relevant-system-paths)
-- [_References_](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#references)
-
-### [**G016** - Host optimization 02 ~ Disabling transparent hugepages](G016%20-%20Host%20optimization%2002%20~%20Disabling%20transparent%20hugepages.md#g016-host-optimization-02-disabling-transparent-hugepages)
-
-- [_Status of transparent hugepages in your host_](G016%20-%20Host%20optimization%2002%20~%20Disabling%20transparent%20hugepages.md#status-of-transparent-hugepages-in-your-host)
-- [_Disabling the transparent hugepages_](G016%20-%20Host%20optimization%2002%20~%20Disabling%20transparent%20hugepages.md#disabling-the-transparent-hugepages)
-- [_Relevant system paths_](G016%20-%20Host%20optimization%2002%20~%20Disabling%20transparent%20hugepages.md#relevant-system-paths)
-- [_References_](G016%20-%20Host%20optimization%2002%20~%20Disabling%20transparent%20hugepages.md#references)
-
-### [**G017** - Virtual Networking ~ Network configuration](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#g017-virtual-networking-network-configuration)
-
-- [_Current virtual network setup_](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#current-virtual-network-setup)
-- [_Target network scenario_](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#target-network-scenario)
-- [_Creating an isolated Linux bridge_](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#creating-an-isolated-linux-bridge)
-- [_Bridges management_](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#bridges-management)
-- [_Relevant system paths_](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#relevant-system-paths)
-- [_References_](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#references)
-
-### [**G018** - K3s cluster setup 01 ~ Requirements and arrangement](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#g018-k3s-cluster-setup-01-requirements-and-arrangement)
-
-- [_Requirements for the K3s cluster and the services to deploy in it_](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#requirements-for-the-k3s-cluster-and-the-services-to-deploy-in-it)
-- [_Arrangement of VMs and services_](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#arrangement-of-vms-and-services)
-- [_References_](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#references)
-
-### [**G019** - K3s cluster setup 02 ~ Storage setup](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#g019-k3s-cluster-setup-02-storage-setup)
-
-- [_Storage organization model_](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#storage-organization-model)
-- [_Creating the logical volumes (LVs)_](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#creating-the-logical-volumes-lvs)
-- [_Enabling the LVs for Proxmox VE_](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#enabling-the-lvs-for-proxmox-ve)
-- [_Configuration file_](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#configuration-file)
-- [_Relevant system paths_](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#relevant-system-paths)
-- [_References_](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#references)
-
-### [**G020** - K3s cluster setup 03 ~ Debian VM creation](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#g020-k3s-cluster-setup-03-debian-vm-creation)
-
-- [_Preparing the Debian ISO image_](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#preparing-the-debian-iso-image)
-- [_Building a Debian virtual machine_](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#building-a-debian-virtual-machine)
-- [_Note about the VM's `Boot Order` option_](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#note-about-the-vms-boot-order-option)
-- [_Relevant system paths_](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#relevant-system-paths)
-- [_References_](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#references)
-
-### [**G021** - K3s cluster setup 04 ~ Debian VM configuration](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#g021-k3s-cluster-setup-04-debian-vm-configuration)
-
-- [_Suggestion about IP configuration in your network_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#suggestion-about-ip-configuration-in-your-network)
-- [_Adding the `apt` sources for _non-free_ packages_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#adding-the-apt-sources-for-non-free-packages)
-- [_Installing extra packages_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#installing-extra-packages)
-- [_The QEMU guest agent comes enabled in Debian 11_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#the-qemu-guest-agent-comes-enabled-in-debian-11)
-- [_Hardening the VM's access_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#hardening-the-vms-access)
-- [_Hardening the `sshd` service_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#hardening-the-sshd-service)
-- [_Configuring Fail2Ban for SSH connections_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#configuring-fail2ban-for-ssh-connections)
-- [_Disabling the `root` user login_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#disabling-the-root-user-login)
-- [_Configuring the VM with `sysctl`_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#configuring-the-vm-with-sysctl)
-- [_Reboot the VM_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#reboot-the-vm)
-- [_Disabling transparent hugepages on the VM_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#disabling-transparent-hugepages-on-the-vm)
-- [_Regarding the microcode `apt` packages for CPU vulnerabilities_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#regarding-the-microcode-apt-packages-for-cpu-vulnerabilities)
-- [_Relevant system paths_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#relevant-system-paths)
-- [_References_](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#references)
-
-### [**G022** - K3s cluster setup 05 ~ Connecting the VM to the NUT server](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#g022-k3s-cluster-setup-05-connecting-the-vm-to-the-nut-server)
-
-- [_Reconfiguring the NUT `master` server on your **Proxmox VE host**_](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#reconfiguring-the-nut-master-server-on-your-proxmox-ve-host)
-- [_Configuring the NUT `slave` client on your Debian VM_](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#configuring-the-nut-slave-client-on-your-debian-vm)
-- [_Checking the connection between the VM NUT `slave` client and the PVE node NUT `master` server_](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#checking-the-connection-between-the-vm-nut-slave-client-and-the-pve-node-nut-master-server)
-- [_Testing a Forced ShutDown sequence (`FSD`) with NUT_](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#testing-a-forced-shutdown-sequence-fsd-with-nut)
-- [_Relevant system paths_](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#relevant-system-paths)
-- [_References_](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#references)
-
-### [**G023** - K3s cluster setup 06 ~ Debian VM template and backup](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#g023-k3s-cluster-setup-06-debian-vm-template-and-backup)
-
-- [_Turning the Debian VM into a VM template_](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#turning-the-debian-vm-into-a-vm-template)
-- [_VM template's backup_](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#vm-templates-backup)
-- [_Other considerations regarding VM templates_](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#other-considerations-regarding-vm-templates)
-- [_References_](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#references)
-
-### [**G024** - K3s cluster setup 07 ~ K3s node VM template setup](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#g024-k3s-cluster-setup-07-k3s-node-vm-template-setup)
-
-- [_Reasons for a new VM template_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#reasons-for-a-new-vm-template)
-- [_Creating a new VM based on the Debian VM template_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#creating-a-new-vm-based-on-the-debian-vm-template)
-- [_Set an static IP for the main network device (`net0`)_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#set-an-static-ip-for-the-main-network-device-net0)
-- [_Setting a proper hostname string_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#setting-a-proper-hostname-string)
-- [_Disabling the swap volume_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#disabling-the-swap-volume)
-- [_Changing the VG's name_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#changing-the-vgs-name)
-- [_Setting up the second network card_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#setting-up-the-second-network-card)
-- [_Setting up sysctl kernel parameters for K3s nodes_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#setting-up-sysctl-kernel-parameters-for-k3s-nodes)
-- [_Turning the VM into a VM template_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#turning-the-vm-into-a-vm-template)
-- [_Protecting VMs and VM templates in Proxmox VE_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#protecting-vms-and-vm-templates-in-proxmox-ve)
-- [_Relevant system paths_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#relevant-system-paths)
-- [_References_](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#references)
-
-### [**G025** - K3s cluster setup 08 ~ K3s Kubernetes cluster setup](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#g025-k3s-cluster-setup-08-k3s-kubernetes-cluster-setup)
-
-- [_Criteria for the VMs' IPs and hostnames_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#criteria-for-the-vms-ips-and-hostnames)
-- [_Creation of VMs based on the K3s node VM template_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#creation-of-vms-based-on-the-k3s-node-vm-template)
-- [_Preparing the VMs for K3s_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#preparing-the-vms-for-k3s)
-- [_Firewall setup for the K3s cluster_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#firewall-setup-for-the-k3s-cluster)
-- [_Considerations before installing the K3s cluster nodes_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#considerations-before-installing-the-k3s-cluster-nodes)
-- [_K3s Server node setup_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#k3s-server-node-setup)
-- [_K3s Agent nodes setup_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#k3s-agent-nodes-setup)
-- [_Enabling bash autocompletion for `kubectl`_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#enabling-bash-autocompletion-for-kubectl)
-- [_Enabling the `k3s.log` file's rotation_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#enabling-the-k3slog-files-rotation)
-- [_Enabling the `containerd.log` file's rotation_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#enabling-the-containerdlog-files-rotation)
-- [_K3s relevant paths_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#k3s-relevant-paths)
-- [_Starting up and shutting down the K3s cluster nodes_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#starting-up-and-shutting-down-the-k3s-cluster-nodes)
-- [_Relevant system paths_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#relevant-system-paths)
-- [_References_](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#references)
-
-### [**G026** - K3s cluster setup 09 ~ Setting up a kubectl client for remote access](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#g026-k3s-cluster-setup-09-setting-up-a-kubectl-client-for-remote-access)
-
-- [_Scenario_](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#scenario)
-- [_Getting the right version of `kubectl`_](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#getting-the-right-version-of-kubectl)
-- [_Installing `kubectl` on your client system_](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#installing-kubectl-on-your-client-system)
-- [_Getting the configuration for accessing the K3s cluster_](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#getting-the-configuration-for-accessing-the-k3s-cluster)
-- [_Opening the `6443` port in the K3s server node_](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#opening-the-6443-port-in-the-k3s-server-node)
-- [_Enabling bash autocompletion for `kubectl`_](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#enabling-bash-autocompletion-for-kubectl)
-- [_**Kubeval**, tool for validating Kubernetes configuration files_](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#kubeval-tool-for-validating-kubernetes-configuration-files)
-- [_Relevant system paths_](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#relevant-system-paths)
-- [_References_](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#references)
-
-### [**G027** - K3s cluster setup 10 ~ Deploying the MetalLB load balancer](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#g027-k3s-cluster-setup-10-deploying-the-metallb-load-balancer)
-
-- [_Considerations before deploying MetalLB_](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#considerations-before-deploying-metallb)
-- [_Choosing the IP ranges for MetalLB_](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#choosing-the-ip-ranges-for-metallb)
-- [_Deploying MetalLB on your K3s cluster_](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#deploying-metallb-on-your-k3s-cluster)
-- [_MetalLB's Kustomize project attached to this guide series_](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#metallbs-kustomize-project-attached-to-this-guide-series)
-- [_Relevant system paths_](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#relevant-system-paths)
-- [_References_](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#references)
-
-### [**G028** - K3s cluster setup 11 ~ Deploying the metrics-server service](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#g028-k3s-cluster-setup-11-deploying-the-metrics-server-service)
-
-- [_Checking the metrics-server's manifest_](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#checking-the-metrics-servers-manifest)
-- [_Deployment of metrics-server_](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#deployment-of-metrics-server)
-- [_Checking the metrics-server service_](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#checking-the-metrics-server-service)
-- [_Metrics-server's Kustomize project attached to this guide series_](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#metrics-servers-kustomize-project-attached-to-this-guide-series)
-- [_Relevant system paths_](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#relevant-system-paths)
-- [_References_](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#references)
-
-### [**G029** - K3s cluster setup 12 ~ Setting up cert-manager and wildcard certificate](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#g029-k3s-cluster-setup-12-setting-up-cert-manager-and-wildcard-certificate)
-
-- [_Warning about cert-manager performance_](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#warning-about-cert-manager-performance)
-- [_Deploying cert-manager_](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#deploying-cert-manager)
-- [_Reflector, a solution for syncing secrets and configmaps_](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#reflector-a-solution-for-syncing-secrets-and-configmaps)
-- [_Setting up a wildcard certificate for a domain_](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#setting-up-a-wildcard-certificate-for-a-domain)
-- [_Checking your certificate with the `kubectl` cert-manager plugin_](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#checking-your-certificate-with-the-kubectl-cert-manager-plugin)
-- [_Cert-manager and Reflector's Kustomize projects attached to this guide series_](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#cert-manager-and-reflectors-kustomize-projects-attached-to-this-guide-series)
-- [_Relevant system paths_](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#relevant-system-paths)
-- [_References_](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#references)
-
-### [**G030** - K3s cluster setup 13 ~ Deploying the Kubernetes Dashboard](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md#g030-k3s-cluster-setup-13-deploying-the-kubernetes-dashboard)
-
-- [_Deploying Kubernetes Dashboard_](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md#deploying-kubernetes-dashboard)
-- [_Testing Kubernetes Dashboard_](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md#testing-kubernetes-dashboard)
-- [_Kubernetes Dashboard's Kustomize project attached to this guide series_](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md#kubernetes-dashboards-kustomize-project-attached-to-this-guide-series)
-- [_Relevant system paths_](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md#relevant-system-paths)
-- [_References_](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md#references)
-
-### [**G031** - K3s cluster setup 14 ~ Enabling the Traefik dashboard](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#g031-k3s-cluster-setup-14-enabling-the-traefik-dashboard)
-
-- [_Creating an IngressRoute for Traefik dashboard_](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#creating-an-ingressroute-for-traefik-dashboard)
-- [_Getting into the dashboard_](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#getting-into-the-dashboard)
-- [_Traefik dashboard has bad performance_](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#traefik-dashboard-has-bad-performance)
-- [_Traefik dashboard's Kustomize project attached to this guide series_](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#traefik-dashboards-kustomize-project-attached-to-this-guide-series)
-- [_Relevant system paths_](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#relevant-system-paths)
-- [_References_](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#references)
-
-### [**G032** - Deploying services 01 ~ Considerations](G032%20-%20Deploying%20services%2001%20~%20Considerations.md#g032-deploying-services-01-considerations)
-
-- [_Be watchful of your system's resources usage_](G032%20-%20Deploying%20services%2001%20~%20Considerations.md#be-watchful-of-your-systems-resources-usage)
-- [_Don't fill your setup up to the brim_](G032%20-%20Deploying%20services%2001%20~%20Considerations.md#dont-fill-your-setup-up-to-the-brim)
+### [**G001** - Hardware setup](G001%20-%20Hardware%20setup.md)
+
+- [You just need a capable enough computer](G001%20-%20Hardware%20setup.md#you-just-need-a-capable-enough-computer)
+- [The reference hardware setup](G001%20-%20Hardware%20setup.md#the-reference-hardware-setup)
+- [References](G001%20-%20Hardware%20setup.md#references)
+
+### [**G002** - Proxmox VE installation](G002%20-%20Proxmox%20VE%20installation.md)
+
+- [A procedure to install Proxmox VE in limited consumer hardware](G002%20-%20Proxmox%20VE%20installation.md#a-procedure-to-install-proxmox-ve-in-limited-consumer-hardware)
+- [System Requirements](G002%20-%20Proxmox%20VE%20installation.md#system-requirements)
+- [Installation procedure](G002%20-%20Proxmox%20VE%20installation.md#installation-procedure)
+- [After the installation](G002%20-%20Proxmox%20VE%20installation.md#after-the-installation)
+- [Connecting remotely](G002%20-%20Proxmox%20VE%20installation.md#connecting-remotely)
+- [References](G002%20-%20Proxmox%20VE%20installation.md#references)
+
+### [**G003** - Host configuration 01 ~ Apt sources, updates and extra tools](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md)
+
+- [Proxmox VE 9.0 runs on Debian 13 "trixie"](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#proxmox-ve-90-runs-on-debian-13-trixie)
+- [Editing the apt repository sources](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#editing-the-apt-repository-sources)
+- [Update your system](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#update-your-system)
+- [Installing useful extra tools](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#installing-useful-extra-tools)
+- [Relevant system paths](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#relevant-system-paths)
+- [References](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources,%20updates%20and%20extra%20tools.md#references)
+
+### [**G004** - Host configuration 02 ~ UPS management with NUT](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md)
+
+- [Any server must always be connected to a UPS unit](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#any-server-must-be-always-connected-to-an-ups-unit)
+- [Connecting your UPS with your PVE node using NUT](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#connecting-your-ups-with-your-pve-node-using-nut)
+- [Checking the NUT logs](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#checking-the-nut-logs)
+- [Executing instant commands on your UPS unit](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#executing-instant-commands-on-your-ups-unit)
+- [Other possibilities with NUT](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#other-possibilities-with-nut)
+- [Relevant system paths](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#relevant-system-paths)
+- [References](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#references)
+
+### [**G005** - Host configuration 03 ~ LVM storage](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md)
+
+- [Your Proxmox VE server's storage needs to be reorganized](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#your-proxmox-ve-servers-storage-needs-to-be-reorganized)
+- [Initial filesystem configuration (**PVE web console**)](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#initial-filesystem-configuration-pve-web-console)
+- [Initial filesystem configuration (**shell as root**)](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#initial-filesystem-configuration-shell-as-root)
+- [Configuring the unused storage drives](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#configuring-the-unused-storage-drives)
+- [LVM rearrangement in the main storage drive](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#lvm-rearrangement-in-the-main-storage-drive)
+- [References](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md#references)
+
+### [**G006** - Host configuration 04 ~ Removing Proxmox's subscription warning](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md)
+
+- [About the Proxmox subscription warning](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md#about-the-proxmox-subscription-warning)
+- [Removing the subscription warning](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md#removing-the-subscription-warning)
+- [Reverting the changes](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md#reverting-the-changes)
+- [Change executed in just one command line](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md#change-executed-in-just-one-command-line)
+- [Final note](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md#final-note)
+- [Relevant system paths](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md#relevant-system-paths)
+- [References](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md#references)
+
+### [**G007** - Host hardening 01 ~ TFA authentication](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md)
+
+- [Enable Two Factor Authentication in your PVE system](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#enable-two-factor-authentication-in-your-pve-system)
+- [Enabling TFA for SSH access](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#enabling-tfa-for-ssh-access)
+- [Enforcing TFA TOTP for accessing the Proxmox VE web console](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#enforcing-tfa-totp-for-accessing-the-proxmox-ve-web-console)
+- [Enforcing TFA TOTP as a default requirement for `pam` realm](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#enforcing-tfa-totp-as-a-default-requirement-for-pam-realm)
+- [Incompatibility of PVE web console login with TFA enforced local shell access](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#incompatibility-of-pve-web-console-login-with-tfa-enforced-local-shell-access)
+- [Relevant system paths](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#relevant-system-paths)
+- [References](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md#references)
+
+### [**G008** - Host hardening 02 ~ Alternative administrator user](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md)
+
+- [Avoid using the root user](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#avoid-using-the-root-user)
+- [Understanding the Proxmox VE user management and the realms](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#understanding-the-proxmox-ve-user-management-and-the-realms)
+- [Creating a new system administrator user for a Proxmox VE node](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#creating-a-new-system-administrator-user-for-a-proxmox-ve-node)
+- [Relevant system paths](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#relevant-system-paths)
+- [References](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md#references)
+
+### [**G009** - Host hardening 03 ~ SSH key pairs and sshd service configuration](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md)
+
+- [Harden your SSH connections with key pairs](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#harden-your-ssh-connections-with-key-pairs)
+- [Generating SSH key pairs](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#generating-ssh-key-pairs)
+- [Hardening the `sshd` service](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#hardening-the-sshd-service)
+- [Relevant system paths](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#relevant-system-paths)
+- [References](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md#references)
+
+### [**G010** - Host hardening 04 ~ Enabling Fail2Ban](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md)
+
+- [Harden your setup against intrusions with Fail2Ban](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#harden-your-setup-against-intrusions-with-fail2ban)
+- [Installing Fail2Ban](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#installing-fail2ban)
+- [Configuring Fail2Ban](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#configuring-fail2ban)
+- [Considerations regarding Fail2Ban](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#considerations-regarding-fail2ban)
+- [Relevant system paths](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#relevant-system-paths)
+- [References](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#references)
+
+### [**G011** - Host hardening 05 ~ Proxmox VE services](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md)
+
+- [Reduce your Proxmox VE server's exposed surface](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#reduce-your-proxmox-ve-servers-exposed-surface)
+- [Checking currently running services](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#checking-currently-running-services)
+- [Configuring the `pveproxy` service](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#configuring-the-pveproxy-service)
+- [Disabling RPC services](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#disabling-rpc-services)
+- [Disabling `zfs` and `ceph`](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#disabling-zfs-and-ceph)
+- [Disabling the SPICE proxy](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#disabling-the-spice-proxy)
+- [Disabling cluster and high availability related services](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#disabling-cluster-and-high-availability-related-services)
+- [Considerations](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#considerations)
+- [Relevant system paths](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#relevant-system-paths)
+- [References](G011%20-%20Host%20hardening%2005%20~%20Proxmox%20VE%20services.md#references)
+
+### [**G012** - Host hardening 06 ~ Network hardening with sysctl](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md)
+
+- [Harden your PVE's networking with a `sysctl` configuration](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#harden-your-pves-networking-with-a-sysctl-configuration)
+- [About `sysctl`](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#about-sysctl)
+- [TCP/IP stack hardening with `sysctl`](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#tcpip-stack-hardening-with-sysctl)
+- [Relevant system paths](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#relevant-system-paths)
+- [References](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#references)
+
+### [**G013** - Host hardening 07 ~ Mitigating CPU vulnerabilities](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md)
+
+- [CPUs also have security vulnerabilities](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#cpus-also-have-security-vulnerabilities)
+- [Discovering your CPU's vulnerabilities](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#discovering-your-cpus-vulnerabilities)
+- [Your Proxmox VE system will already have the correct microcode package applied](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#your-proxmox-ve-system-will-already-have-the-correct-microcode-package-applied)
+- [Relevant system paths](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#relevant-system-paths)
+- [References](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md#references)
+
+### [**G014** - Host hardening 08 ~ Firewalling](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md)
+
+- [Enabling your PVE's firewall is a must](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#enabling-your-pves-firewall-is-a-must)
+- [Proxmox VE firewall uses iptables](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#proxmox-ve-firewall-uses-iptables)
+- [Zones in the Proxmox VE firewall](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#zones-in-the-proxmox-ve-firewall)
+- [Situation at this point](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#situation-at-this-point)
+- [Enabling the firewall at the Datacenter tier](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#enabling-the-firewall-at-the-datacenter-tier)
+- [Firewalling with ebtables](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#firewalling-with-ebtables)
+- [Firewall fine tuning](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#firewall-fine-tuning)
+- [Firewall logging](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#firewall-logging)
+- [Connection tracking tool](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#connection-tracking-tool)
+- [Relevant system paths](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#relevant-system-paths)
+- [References](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md#references)
+
+### [**G015** - Host optimization 01 ~ Adjustments through sysctl](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md)
+
+- [Tune your Proxmox VE system's `sysctl` files to improve performance](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#tune-your-proxmox-ve-systems-sysctl-files-to-improve-performance)
+- [First go to the `sysctl` directory](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#first-go-the-sysctl-directory)
+- [Network optimizations](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#network-optimizations)
+- [Memory optimizations](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#memory-optimizations)
+- [Kernel optimizations](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#kernel-optimizations)
+- [Reboot the system](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#reboot-the-system)
+- [Final considerations](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#final-considerations)
+- [Relevant system paths](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#relevant-system-paths)
+- [References](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md#references)
+
+### [**G016** - Host optimization 02 ~ Disabling the transparent hugepages](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md)
+
+- [Understanding the transparent hugepages](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md#understanding-the-transparent-hugepages)
+- [Status of the transparent hugepages in your host](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md#status-of-the-transparent-hugepages-in-your-host)
+- [Disabling the transparent hugepages](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md#disabling-the-transparent-hugepages)
+- [Relevant system paths](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md#relevant-system-paths)
+- [References](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md#references)
+
+### [**G017** - Virtual Networking ~ Network configuration](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md)
+
+- [Preparing your virtual network for Kubernetes](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#preparing-your-virtual-network-for-kubernetes)
+- [Current virtual network setup](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#current-virtual-network-setup)
+- [Target network scenario](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#target-network-scenario)
+- [Creating an isolated Linux bridge](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#creating-an-isolated-linux-bridge)
+- [Bridges management](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#bridges-management)
+- [Relevant system paths](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#relevant-system-paths)
+- [References](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md#references)
+
+### [**G018** - K3s cluster setup 01 ~ Requirements and arrangement](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md)
+
+- [Gearing up for your K3s cluster](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#gearing-up-for-your-k3s-cluster)
+- [Requirements for the K3s cluster and the services to deploy in it](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#requirements-for-the-k3s-cluster-and-the-services-to-deploy-in-it)
+- [Arrangement of VMs and services](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#arrangement-of-vms-and-services)
+- [References](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#references)
+
+### [**G019** - K3s cluster setup 02 ~ Storage setup](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md)
+
+- [Identifying your storage needs and current setup](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#identifying-your-storage-needs-and-current-setup)
+- [Storage organization model](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#storage-organization-model)
+- [Creating the logical volumes (LVs)](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#creating-the-logical-volumes-lvs)
+- [Enabling the LVs for Proxmox VE](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#enabling-the-lvs-for-proxmox-ve)
+- [Configuration file](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#configuration-file)
+- [Relevant system paths](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#relevant-system-paths)
+- [References](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#references)
+
+### [**G020** - K3s cluster setup 03 ~ Debian VM creation](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md)
+
+- [You can start creating VMs in your Proxmox VE server](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#you-can-start-creating-vms-in-your-proxmox-ve-server)
+- [Preparing the Debian ISO image](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#preparing-the-debian-iso-image)
+- [Building a Debian virtual machine](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#building-a-debian-virtual-machine)
+- [Note about the VM's `Boot Order` option](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#note-about-the-vms-boot-order-option)
+- [Relevant system paths](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#relevant-system-paths)
+- [References](G020%20-%20K3s%20cluster%20setup%2003%20~%20Debian%20VM%20creation.md#references)
+
+### [**G021** - K3s cluster setup 04 ~ Debian VM configuration](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md)
+
+- [You have to configure your new Debian VM](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#you-have-to-configure-your-new-debian-vm)
+- [Suggestion about the IP organization within your LAN](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#suggestion-about-the-ip-organization-within-your-lan)
+- [Adding the `apt` sources for _non-free_ packages](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#adding-the-apt-sources-for-non-free-packages)
+- [Installing extra packages](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#installing-extra-packages)
+- [The QEMU guest agent comes enabled in Debian](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#the-qemu-guest-agent-comes-enabled-in-debian)
+- [Hardening the VM's access](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#hardening-the-vms-access)
+- [Hardening the `sshd` service](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#hardening-the-sshd-service)
+- [Configuring Fail2Ban for SSH connections](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#configuring-fail2ban-for-ssh-connections)
+- [Disabling the `root` user login](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#disabling-the-root-user-login)
+- [Configuring the VM with `sysctl`](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#configuring-the-vm-with-sysctl)
+- [Reboot the VM](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#reboot-the-vm)
+- [Disabling transparent hugepages on the VM](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#disabling-transparent-hugepages-on-the-vm)
+- [Regarding the microcode `apt` packages for CPU vulnerabilities](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#regarding-the-microcode-apt-packages-for-cpu-vulnerabilities)
+- [Relevant system paths](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#relevant-system-paths)
+- [References](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#references)
+
+### [**G022** - K3s cluster setup 05 ~ Connecting the VM to the NUT server](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md)
+
+- [Make your VMs aware of your UPS unit with NUT](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#make-your-vms-aware-of-your-ups-unit-with-nut)
+- [Reconfiguring the NUT server on your Proxmox VE host](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#reconfiguring-the-nut-server-on-your-proxmox-ve-host)
+- [Configuring the NUT client on your Debian VM](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#configuring-the-nut-client-on-your-debian-vm)
+- [Checking the connection between the VM NUT client and the PVE node NUT server](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#checking-the-connection-between-the-vm-nut-client-and-the-pve-node-nut-server)
+- [Testing a Forced ShutDown sequence (`FSD`) with NUT](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#testing-a-forced-shutdown-sequence-fsd-with-nut)
+- [Relevant system paths](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#relevant-system-paths)
+- [References](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#references)
+
+### [**G023** - K3s cluster setup 06 ~ Debian VM template and backup](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md)
+
+- [Turn your Debian VM into a VM template](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#turn-your-debian-vm-into-a-vm-template)
+- [Steps for transforming your Debian VM into a VM template](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#steps-for-transforming-your-debian-vm-into-a-vm-template)
+- [VM template's backup](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#vm-templates-backup)
+- [Other considerations regarding VM templates](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#other-considerations-regarding-vm-templates)
+- [References](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#references)
+
+### [**G024** - K3s cluster setup 07 ~ K3s node VM template setup](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md)
+
+- [You need a more specialized VM template for building K3s nodes](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#you-need-a-more-specialized-vm-template-for-building-k3s-nodes)
+- [Reasons for a new VM template](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#reasons-for-a-new-vm-template)
+- [Creating a new VM based on the Debian VM template](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#creating-a-new-vm-based-on-the-debian-vm-template)
+- [Setting a static IP for the main network device (`net0`)](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#setting-an-static-ip-for-the-main-network-device-net0)
+- [Setting a proper hostname string](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#setting-a-proper-hostname-string)
+- [Disabling the swap volume](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#disabling-the-swap-volume)
+- [Changing the VG's name](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#changing-the-vgs-name)
+- [Setting up the second network card](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#setting-up-the-second-network-card)
+- [Setting up sysctl kernel parameters for K3s nodes](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#setting-up-sysctl-kernel-parameters-for-k3s-nodes)
+- [Turning the VM into a VM template](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#turning-the-vm-into-a-vm-template)
+- [Relevant system paths](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#relevant-system-paths)
+- [References](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#references)
+
+### [**G025** - K3s cluster setup 08 ~ K3s Kubernetes cluster setup](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md)
+
+- [Build your virtualized K3s cluster](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#build-your-virtualized-k3s-cluster)
+- [Criteria for the VMs' IPs and hostnames](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#criteria-for-the-vms-ips-and-hostnames)
+- [Creation of VMs based on the K3s node VM template](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#creation-of-vms-based-on-the-k3s-node-vm-template)
+- [Preparing the VMs for K3s](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#preparing-the-vms-for-k3s)
+- [Firewall setup for the K3s cluster](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#firewall-setup-for-the-k3s-cluster)
+- [Considerations before installing the K3s software](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#considerations-before-installing-the-k3s-software)
+- [K3s Server node setup](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#k3s-server-node-setup)
+- [K3s Agent nodes setup](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#k3s-agent-nodes-setup)
+- [Enabling bash autocompletion for `kubectl`](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#enabling-bash-autocompletion-for-kubectl)
+- [Regular K3s logs are journaled](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#regular-k3s-logs-are-journaled)
+- [Rotating the `containerd.log` file](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#rotating-the-containerdlog-file)
+- [K3s relevant paths](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#k3s-relevant-paths)
+- [Starting up and shutting down the K3s cluster nodes](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#starting-up-and-shutting-down-the-k3s-cluster-nodes)
+- [Relevant system paths](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#relevant-system-paths)
+- [References](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#references)
+
+### [**G026** - K3s cluster setup 09 ~ Setting up a kubectl client for remote access](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md)
+
+- [Never handle your Kubernetes cluster directly from server nodes](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#never-handle-your-kubernetes-cluster-directly-from-server-nodes)
+- [Description of the `kubectl` client system](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#description-of-the-kubectl-client-system)
+- [Getting the right version of `kubectl`](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#getting-the-right-version-of-kubectl)
+- [Installing `kubectl` on your client system](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#installing-kubectl-on-your-client-system)
+- [Getting the configuration for accessing the K3s cluster](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#getting-the-configuration-for-accessing-the-k3s-cluster)
+- [Opening the `6443` port in the K3s server node](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#opening-the-6443-port-in-the-k3s-server-node)
+- [Enabling bash autocompletion for `kubectl`](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#enabling-bash-autocompletion-for-kubectl)
+- [Validate Kubernetes configuration files with `kubeconform`](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#validate-kubernetes-configuration-files-with-kubeconform)
+- [Relevant system paths](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#relevant-system-paths)
+- [References](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md#references)
+
+### [**G027** - K3s cluster setup 10 ~ Deploying the MetalLB load balancer](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md)
+
+- [Considerations before deploying MetalLB](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#considerations-before-deploying-metallb)
+- [Choosing the IP ranges for MetalLB](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#choosing-the-ip-ranges-for-metallb)
+- [Deploying MetalLB on your K3s cluster](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#deploying-metallb-on-your-k3s-cluster)
+- [MetalLB's Kustomize project attached to this guide](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#metallbs-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#relevant-system-paths)
+- [References](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#references)
+
+### [**G028** - K3s cluster setup 11 ~ Deploying the metrics-server service](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md)
+
+- [Deploy a metrics-server service that you can fully configure](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#deploy-a-metric-server-service-that-you-can-fully-configure)
+- [Checking the metrics-server's manifest](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#checking-the-metrics-servers-manifest)
+- [Deployment of metrics-server](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#deployment-of-metrics-server)
+- [Checking the metrics-server service](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#checking-the-metrics-server-service)
+- [Metrics-server's Kustomize project attached to this guide](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#metrics-servers-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#relevant-system-paths)
+- [References](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#references)
+
+### [**G029** - K3s cluster setup 12 ~ Setting up cert-manager and self-signed CA](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md)
+
+- [Use cert-manager to handle certificates in your cluster](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md#use-cert-manager-to-handle-certificates-in-your-cluster)
+- [Deploying cert-manager](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md#deploying-cert-manager)
+- [Setting up a self-signed CA for your cluster](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md#setting-up-a-self-signed-ca-for-your-cluster)
+- [Checking your certificates with the cert-manager command line tool](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md#checking-your-certificates-with-the-cert-manager-command-line-tool)
+- [Cert-manager's Kustomize project attached to this guide](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md#cert-managers-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md#relevant-system-paths)
+- [References](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md#references)
+
+### [**G030** - K3s cluster setup 13 ~ Enabling the Traefik dashboard](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md)
+
+- [Traefik is the embedded ingress controller of K3s](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#traefik-is-the-embedded-ingress-controller-of-k3s)
+- [Enabling access to the Traefik dashboard](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#enabling-access-to-the-traefik-dashboard)
+- [Getting into the Traefik dashboard](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#getting-into-the-traefik-dashboard)
+- [What to do if Traefik's dashboard has bad performance](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#what-to-do-if-traefiks-dashboard-has-bad-performance)
+- [Traefik dashboard's Kustomize project attached to this guide](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#traefik-dashboards-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#relevant-system-paths)
+- [References](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#references)
+
+### [**G031** - K3s cluster setup 14 ~ Deploying the Headlamp dashboard](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md)
+
+- [Headlamp is an alternative to the Kubernetes Dashboard](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md#headlamp-is-an-alternative-to-the-kubernetes-dashboard)
+- [Deploying Headlamp](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md#deploying-headlamp)
+- [Testing Headlamp](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md#testing-headlamp)
+- [Headlamp's Kustomize project attached to this guide](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md#headlamps-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md#relevant-system-paths)
+- [References](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md#references)
+
+### [**G032** - Deploying services 01 ~ Considerations](G032%20-%20Deploying%20services%2001%20~%20Considerations.md)
+
+- [Upcoming chapters are about deploying services in your K3s cluster](G032%20-%20Deploying%20services%2001%20~%20Considerations.md#upcoming-chapters-are-about-deploying-services-in-your-k3s-cluster)
+- [Be watchful of your system's resources usage](G032%20-%20Deploying%20services%2001%20~%20Considerations.md#be-watchful-of-your-systems-resources-usage)
+- [Do not fill your cluster up to the brim](G032%20-%20Deploying%20services%2001%20~%20Considerations.md#do-not-fill-your-cluster-up-to-the-brim)
### [**G033** - Deploying services 02 ~ **Nextcloud - Part 1** - Outlining setup, arranging storage and choosing service IPs](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md#g033-deploying-services-02-nextcloud-part-1-outlining-setup-arranging-storage-and-choosing-service-ips)
-- [_Outlining Nextcloud's setup_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md#outlining-nextclouds-setup)
-- [_Setting up new storage drives in the K3s agent_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md#setting-up-new-storage-drives-in-the-k3s-agent)
-- [_Choosing static cluster IPs for Nextcloud related services_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md#choosing-static-cluster-ips-for-nextcloud-related-services)
-- [_Relevant system paths_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md#relevant-system-paths)
+- [Outlining Nextcloud's setup](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md#outlining-nextclouds-setup)
+- [Setting up new storage drives in the K3s agent](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md#setting-up-new-storage-drives-in-the-k3s-agent)
+- [Choosing static cluster IPs for Nextcloud related services](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md#choosing-static-cluster-ips-for-nextcloud-related-services)
+- [Relevant system paths](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md#relevant-system-paths)
### [**G033** - Deploying services 02 ~ **Nextcloud - Part 2** - Redis cache server](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#g033-deploying-services-02-nextcloud-part-2-redis-cache-server)
-- [_Kustomize project folders for Nextcloud and Redis_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#kustomize-project-folders-for-nextcloud-and-redis)
-- [_Redis configuration file_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-configuration-file)
-- [_Redis password_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-password)
-- [_Redis Deployment resource_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-deployment-resource)
-- [_Redis Service resource_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-service-resource)
-- [_Redis Kustomize project_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-kustomize-project)
-- [_Don't deploy this Redis project on its own_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#dont-deploy-this-redis-project-on-its-own)
-- [_Relevant system paths_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#relevant-system-paths)
-- [_References_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#references)
+- [Kustomize project folders for Nextcloud and Redis](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#kustomize-project-folders-for-nextcloud-and-redis)
+- [Redis configuration file](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-configuration-file)
+- [Redis password](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-password)
+- [Redis Deployment resource](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-deployment-resource)
+- [Redis Service resource](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-service-resource)
+- [Redis Kustomize project](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-kustomize-project)
+- [Don't deploy this Redis project on its own](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#dont-deploy-this-redis-project-on-its-own)
+- [Relevant system paths](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#relevant-system-paths)
+- [References](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%202%20-%20Redis%20cache%20server.md#references)
### [**G033** - Deploying services 02 ~ **Nextcloud - Part 3** - MariaDB database server](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#g033-deploying-services-02-nextcloud-part-3-mariadb-database-server)
-- [_MariaDB Kustomize project's folders_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-kustomize-projects-folders)
-- [_MariaDB configuration files_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-configuration-files)
-- [_MariaDB passwords_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-passwords)
-- [_MariaDB storage_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-storage)
-- [_MariaDB StatefulSet resource_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-statefulset-resource)
-- [_MariaDB Service resource_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-service-resource)
-- [_MariaDB Kustomize project_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-kustomize-project)
-- [_Don't deploy this MariaDB project on its own_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#dont-deploy-this-mariadb-project-on-its-own)
-- [_Relevant system paths_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#relevant-system-paths)
-- [_References_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#references)
+- [MariaDB Kustomize project's folders](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-kustomize-projects-folders)
+- [MariaDB configuration files](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-configuration-files)
+- [MariaDB passwords](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-passwords)
+- [MariaDB storage](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-storage)
+- [MariaDB StatefulSet resource](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-statefulset-resource)
+- [MariaDB Service resource](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-service-resource)
+- [MariaDB Kustomize project](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#mariadb-kustomize-project)
+- [Don't deploy this MariaDB project on its own](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#dont-deploy-this-mariadb-project-on-its-own)
+- [Relevant system paths](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#relevant-system-paths)
+- [References](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md#references)
### [**G033** - Deploying services 02 ~ **Nextcloud - Part 4** - Nextcloud server](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#g033-deploying-services-02-nextcloud-part-4-nextcloud-server)
-- [_Considerations about the Nextcloud server_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#considerations-about-the-nextcloud-server)
-- [_Nextcloud server Kustomize project's folders_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-kustomize-projects-folders)
-- [_Nextcloud server configuration files_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-configuration-files)
-- [_Nextcloud server password_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-password)
-- [_Nextcloud server storage_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-storage)
-- [_Nextcloud server Stateful resource_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-stateful-resource)
-- [_Nextcloud server Service resource_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-service-resource)
-- [_Nextcloud server Kustomize project_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-kustomize-project)
-- [_Don't deploy this Nextcloud server project on its own_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#dont-deploy-this-nextcloud-server-project-on-its-own)
-- [_Background jobs on Nextcloud_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#background-jobs-on-nextcloud)
-- [_Relevant system paths_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#relevant-system-paths)
-- [_References_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#references)
+- [Considerations about the Nextcloud server](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#considerations-about-the-nextcloud-server)
+- [Nextcloud server Kustomize project's folders](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-kustomize-projects-folders)
+- [Nextcloud server configuration files](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-configuration-files)
+- [Nextcloud server password](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-password)
+- [Nextcloud server storage](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-storage)
+- [Nextcloud server Stateful resource](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-stateful-resource)
+- [Nextcloud server Service resource](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-service-resource)
+- [Nextcloud server Kustomize project](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#nextcloud-server-kustomize-project)
+- [Don't deploy this Nextcloud server project on its own](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#dont-deploy-this-nextcloud-server-project-on-its-own)
+- [Background jobs on Nextcloud](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#background-jobs-on-nextcloud)
+- [Relevant system paths](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#relevant-system-paths)
+- [References](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%204%20-%20Nextcloud%20server.md#references)
### [**G033** - Deploying services 02 ~ **Nextcloud - Part 5** - Complete Nextcloud platform](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#g033-deploying-services-02-nextcloud-part-5-complete-nextcloud-platform)
-- [_Preparing pending Nextcloud platform elements_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#preparing-pending-nextcloud-platform-elements)
-- [_Kustomize project for Nextcloud platform_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#kustomize-project-for-nextcloud-platform)
-- [_Logging and checking the background jobs configuration on your Nextcloud platform_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#logging-and-checking-the-background-jobs-configuration-on-your-nextcloud-platform)
-- [_Security considerations in Nextcloud_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#security-considerations-in-nextcloud)
-- [_Nextcloud platform's Kustomize project attached to this guide series_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#nextcloud-platforms-kustomize-project-attached-to-this-guide-series)
-- [_Relevant system paths_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#relevant-system-paths)
-- [_References_](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#references)
+- [Preparing pending Nextcloud platform elements](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#preparing-pending-nextcloud-platform-elements)
+- [Kustomize project for Nextcloud platform](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#kustomize-project-for-nextcloud-platform)
+- [Logging and checking the background jobs configuration on your Nextcloud platform](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#logging-and-checking-the-background-jobs-configuration-on-your-nextcloud-platform)
+- [Security considerations in Nextcloud](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#security-considerations-in-nextcloud)
+- [Nextcloud platform's Kustomize project attached to this guide series](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#nextcloud-platforms-kustomize-project-attached-to-this-guide-series)
+- [Relevant system paths](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#relevant-system-paths)
+- [References](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%205%20-%20Complete%20Nextcloud%20platform.md#references)
### [**G034** - Deploying services 03 ~ **Gitea - Part 1** - Outlining setup and arranging storage](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#g034-deploying-services-03-gitea-part-1-outlining-setup-and-arranging-storage)
-- [_Outlining Gitea's setup_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#outlining-giteas-setup)
-- [_Setting up new storage drives in the K3s agent_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#setting-up-new-storage-drives-in-the-k3s-agent)
-- [_FQDNs for Gitea related services_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#fqdns-for-gitea-related-services)
-- [_Relevant system paths_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#relevant-system-paths)
-- [_References_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#references)
+- [Outlining Gitea's setup](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#outlining-giteas-setup)
+- [Setting up new storage drives in the K3s agent](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#setting-up-new-storage-drives-in-the-k3s-agent)
+- [FQDNs for Gitea related services](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#fqdns-for-gitea-related-services)
+- [Relevant system paths](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#relevant-system-paths)
+- [References](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#references)
### [**G034** - Deploying services 03 ~ **Gitea - Part 2** - Redis cache server](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#g034-deploying-services-03-gitea-part-2-redis-cache-server)
-- [_Kustomize project folders for Gitea and Redis_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#kustomize-project-folders-for-gitea-and-redis)
-- [_Redis configuration file_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-configuration-file)
-- [_Redis password_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-password)
-- [_Redis Deployment resource_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-deployment-resource)
-- [_Redis Service resource_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-service-resource)
-- [_Redis Kustomize project_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-kustomize-project)
-- [_Don't deploy this Redis project on its own_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#dont-deploy-this-redis-project-on-its-own)
-- [_Relevant system paths_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#relevant-system-paths)
-- [_References_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#references)
+- [Kustomize project folders for Gitea and Redis](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#kustomize-project-folders-for-gitea-and-redis)
+- [Redis configuration file](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-configuration-file)
+- [Redis password](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-password)
+- [Redis Deployment resource](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-deployment-resource)
+- [Redis Service resource](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-service-resource)
+- [Redis Kustomize project](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#redis-kustomize-project)
+- [Don't deploy this Redis project on its own](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#dont-deploy-this-redis-project-on-its-own)
+- [Relevant system paths](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#relevant-system-paths)
+- [References](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%202%20-%20Redis%20cache%20server.md#references)
### [**G034** - Deploying services 03 ~ **Gitea - Part 3** - PostgreSQL database server](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#g034-deploying-services-03-gitea-part-3-postgresql-database-server)
-- [_PostgreSQL Kustomize project's folders_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-kustomize-projects-folders)
-- [_PostgreSQL configuration files_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-configuration-files)
-- [_PostgreSQL passwords_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-passwords)
-- [_PostgreSQL storage_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-storage)
-- [_PostgreSQL StatefulSet resource_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-statefulset-resource)
-- [_PostgreSQL Service resource_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-service-resource)
-- [_PostgreSQL Kustomize project_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-kustomize-project)
-- [_Don't deploy this PostgreSQL project on its own_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#dont-deploy-this-postgresql-project-on-its-own)
-- [_Relevant system paths_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#relevant-system-paths)
-- [_References_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#references)
+- [PostgreSQL Kustomize project's folders](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-kustomize-projects-folders)
+- [PostgreSQL configuration files](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-configuration-files)
+- [PostgreSQL passwords](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-passwords)
+- [PostgreSQL storage](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-storage)
+- [PostgreSQL StatefulSet resource](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-statefulset-resource)
+- [PostgreSQL Service resource](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-service-resource)
+- [PostgreSQL Kustomize project](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#postgresql-kustomize-project)
+- [Don't deploy this PostgreSQL project on its own](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#dont-deploy-this-postgresql-project-on-its-own)
+- [Relevant system paths](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#relevant-system-paths)
+- [References](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%203%20-%20PostgreSQL%20database%20server.md#references)
### [**G034** - Deploying services 03 ~ **Gitea - Part 4** - Gitea server](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#g034-deploying-services-03-gitea-part-4-gitea-server)
-- [_Considerations about the Gitea server_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#considerations-about-the-gitea-server)
-- [_Gitea server Kustomize project's folders_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-kustomize-projects-folders)
-- [_Gitea server configuration file_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-configuration-file)
-- [_Gitea server storage_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-storage)
-- [_Gitea server Stateful resource_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-stateful-resource)
-- [_Gitea server Service resource_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-service-resource)
-- [_Gitea server's Kustomize project_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-servers-kustomize-project)
-- [_Don't deploy this Gitea server project on its own_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#dont-deploy-this-gitea-server-project-on-its-own)
-- [_Relevant system paths_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#relevant-system-paths)
-- [_References_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#references)
+- [Considerations about the Gitea server](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#considerations-about-the-gitea-server)
+- [Gitea server Kustomize project's folders](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-kustomize-projects-folders)
+- [Gitea server configuration file](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-configuration-file)
+- [Gitea server storage](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-storage)
+- [Gitea server Stateful resource](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-stateful-resource)
+- [Gitea server Service resource](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-server-service-resource)
+- [Gitea server's Kustomize project](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#gitea-servers-kustomize-project)
+- [Don't deploy this Gitea server project on its own](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#dont-deploy-this-gitea-server-project-on-its-own)
+- [Relevant system paths](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#relevant-system-paths)
+- [References](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%204%20-%20Gitea%20server.md#references)
### [**G034** - Deploying services 03 ~ **Gitea - Part 5** - Complete Gitea platform](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#g034-deploying-services-03-gitea-part-5-complete-gitea-platform)
-- [_Declaring the pending Gitea platform elements_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#declaring-the-pending-gitea-platform-elements)
-- [_Kustomize project for Gitea platform_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#kustomize-project-for-gitea-platform)
-- [_Finishing Gitea platform's setup_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#finishing-gitea-platforms-setup)
-- [_Security considerations in Gitea_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#security-considerations-in-gitea)
-- [_Gitea platform's Kustomize project attached to this guide series_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#gitea-platforms-kustomize-project-attached-to-this-guide-series)
-- [_Relevant system paths_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#relevant-system-paths)
-- [_References_](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#references)
+- [Declaring the pending Gitea platform elements](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#declaring-the-pending-gitea-platform-elements)
+- [Kustomize project for Gitea platform](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#kustomize-project-for-gitea-platform)
+- [Finishing Gitea platform's setup](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#finishing-gitea-platforms-setup)
+- [Security considerations in Gitea](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#security-considerations-in-gitea)
+- [Gitea platform's Kustomize project attached to this guide series](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#gitea-platforms-kustomize-project-attached-to-this-guide-series)
+- [Relevant system paths](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#relevant-system-paths)
+- [References](G034%20-%20Deploying%20services%2003%20~%20Gitea%20-%20Part%205%20-%20Complete%20Gitea%20platform.md#references)
### [**G035** - Deploying services 04 ~ **Monitoring stack - Part 1** - Outlining setup and arranging storage](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#g035-deploying-services-04-monitoring-stack-part-1-outlining-setup-and-arranging-storage)
-- [_Outlining your monitoring stack setup_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#outlining-your-monitoring-stack-setup)
-- [_Setting up new storage drives in the K3s agents_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#setting-up-new-storage-drives-in-the-k3s-agents)
-- [_Relevant system paths_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#relevant-system-paths)
-- [_References_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#references)
+- [Outlining your monitoring stack setup](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#outlining-your-monitoring-stack-setup)
+- [Setting up new storage drives in the K3s agents](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#setting-up-new-storage-drives-in-the-k3s-agents)
+- [Relevant system paths](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#relevant-system-paths)
+- [References](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%201%20-%20Outlining%20setup%20and%20arranging%20storage.md#references)
### [**G035** - Deploying services 04 ~ **Monitoring stack - Part 2** - Kube State Metrics service](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#g035-deploying-services-04-monitoring-stack-part-2-kube-state-metrics-service)
-- [_Kustomize project folders for your monitoring stack and Kube State Metrics_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kustomize-project-folders-for-your-monitoring-stack-and-kube-state-metrics)
-- [_Kube State Metrics ServiceAccount resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-serviceaccount-resource)
-- [_Kube State Metrics ClusterRole resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-clusterrole-resource)
-- [_Kube State Metrics ClusterRoleBinding resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-clusterrolebinding-resource)
-- [_Kube State Metrics Deployment resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-deployment-resource)
-- [_Kube State Metrics Service resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-service-resource)
-- [_Kube State Metrics Kustomize project_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-kustomize-project)
-- [_Don't deploy this Kube State Metrics project on its own_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#dont-deploy-this-kube-state-metrics-project-on-its-own)
-- [_Relevant system paths_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#relevant-system-paths)
-- [_References_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#references)
+- [Kustomize project folders for your monitoring stack and Kube State Metrics](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kustomize-project-folders-for-your-monitoring-stack-and-kube-state-metrics)
+- [Kube State Metrics ServiceAccount resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-serviceaccount-resource)
+- [Kube State Metrics ClusterRole resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-clusterrole-resource)
+- [Kube State Metrics ClusterRoleBinding resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-clusterrolebinding-resource)
+- [Kube State Metrics Deployment resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-deployment-resource)
+- [Kube State Metrics Service resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-service-resource)
+- [Kube State Metrics Kustomize project](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#kube-state-metrics-kustomize-project)
+- [Don't deploy this Kube State Metrics project on its own](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#dont-deploy-this-kube-state-metrics-project-on-its-own)
+- [Relevant system paths](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#relevant-system-paths)
+- [References](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%202%20-%20Kube%20State%20Metrics%20service.md#references)
### [**G035** - Deploying services 04 ~ **Monitoring stack - Part 3** - Prometheus Node Exporter service](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#g035-deploying-services-04-monitoring-stack-part-3-prometheus-node-exporter-service)
-- [_Kustomize project folders for Prometheus Node Exporter_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#kustomize-project-folders-for-prometheus-node-exporter)
-- [_Prometheus Node Exporter DaemonSet resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#prometheus-node-exporter-daemonset-resource)
-- [_Prometheus Node Exporter Service resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#prometheus-node-exporter-service-resource)
-- [_Prometheus Node Exporter Kustomize project_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#prometheus-node-exporter-kustomize-project)
-- [_Don't deploy this Prometheus Node Exporter project on its own_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#dont-deploy-this-prometheus-node-exporter-project-on-its-own)
-- [_Relevant system paths_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#relevant-system-paths)
-- [_References_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#references)
+- [Kustomize project folders for Prometheus Node Exporter](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#kustomize-project-folders-for-prometheus-node-exporter)
+- [Prometheus Node Exporter DaemonSet resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#prometheus-node-exporter-daemonset-resource)
+- [Prometheus Node Exporter Service resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#prometheus-node-exporter-service-resource)
+- [Prometheus Node Exporter Kustomize project](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#prometheus-node-exporter-kustomize-project)
+- [Don't deploy this Prometheus Node Exporter project on its own](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#dont-deploy-this-prometheus-node-exporter-project-on-its-own)
+- [Relevant system paths](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#relevant-system-paths)
+- [References](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%203%20-%20Prometheus%20Node%20Exporter%20service.md#references)
### [**G035** - Deploying services 04 ~ **Monitoring stack - Part 4** - Prometheus server](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#g035-deploying-services-04-monitoring-stack-part-4-prometheus-server)
-- [_Kustomize project folders for Prometheus server_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#kustomize-project-folders-for-prometheus-server)
-- [_Prometheus configuration files_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-configuration-files)
-- [_Prometheus server storage_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-server-storage)
-- [_Prometheus server StatefulSet resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-server-statefulset-resource)
-- [_Prometheus server Service resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-server-service-resource)
-- [_Prometheus server Traefik IngressRoute resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-server-traefik-ingressroute-resource)
-- [_Prometheus server's Kustomize project_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-servers-kustomize-project)
-- [_Don't deploy this Prometheus server project on its own_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#dont-deploy-this-prometheus-server-project-on-its-own)
-- [_Relevant system paths_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#relevant-system-paths)
-- [_References_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#references)
+- [Kustomize project folders for Prometheus server](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#kustomize-project-folders-for-prometheus-server)
+- [Prometheus configuration files](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-configuration-files)
+- [Prometheus server storage](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-server-storage)
+- [Prometheus server StatefulSet resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-server-statefulset-resource)
+- [Prometheus server Service resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-server-service-resource)
+- [Prometheus server Traefik IngressRoute resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-server-traefik-ingressroute-resource)
+- [Prometheus server's Kustomize project](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#prometheus-servers-kustomize-project)
+- [Don't deploy this Prometheus server project on its own](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#dont-deploy-this-prometheus-server-project-on-its-own)
+- [Relevant system paths](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#relevant-system-paths)
+- [References](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%204%20-%20Prometheus%20server.md#references)
### [**G035** - Deploying services 04 ~ **Monitoring stack - Part 5** - Grafana](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#g035-deploying-services-04-monitoring-stack-part-5-grafana)
-- [_Kustomize project folders for Grafana_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#kustomize-project-folders-for-grafana)
-- [_Grafana data storage_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-data-storage)
-- [_Grafana Stateful resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-stateful-resource)
-- [_Grafana Service resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-service-resource)
-- [_Grafana Traefik IngressRoute resource_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-traefik-ingressroute-resource)
-- [_Grafana Kustomize project_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-kustomize-project)
-- [_Don't deploy this Grafana project on its own_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#dont-deploy-this-grafana-project-on-its-own)
-- [_Relevant system paths_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#relevant-system-paths)
-- [_References_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#references)
+- [Kustomize project folders for Grafana](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#kustomize-project-folders-for-grafana)
+- [Grafana data storage](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-data-storage)
+- [Grafana Stateful resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-stateful-resource)
+- [Grafana Service resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-service-resource)
+- [Grafana Traefik IngressRoute resource](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-traefik-ingressroute-resource)
+- [Grafana Kustomize project](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#grafana-kustomize-project)
+- [Don't deploy this Grafana project on its own](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#dont-deploy-this-grafana-project-on-its-own)
+- [Relevant system paths](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#relevant-system-paths)
+- [References](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%205%20-%20Grafana.md#references)
### [**G035** - Deploying services 04 ~ **Monitoring stack - Part 6** - Complete monitoring stack setup](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#g035-deploying-services-04-monitoring-stack-part-6-complete-monitoring-stack-setup)
-- [_Declaring the remaining monitoring stack components_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#declaring-the-remaining-monitoring-stack-components)
-- [_Kustomize project for the monitoring setup_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#kustomize-project-for-the-monitoring-setup)
-- [_Checking Prometheus_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#checking-prometheus)
-- [_Finishing Grafana's setup_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#finishing-grafanas-setup)
-- [_Security concerns on Prometheus and Grafana_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#security-concerns-on-prometheus-and-grafana)
-- [_Monitoring stack's Kustomize project attached to this guide series_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#monitoring-stacks-kustomize-project-attached-to-this-guide-series)
-- [_Relevant system paths_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#relevant-system-paths)
-- [_References_](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#references)
+- [Declaring the remaining monitoring stack components](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#declaring-the-remaining-monitoring-stack-components)
+- [Kustomize project for the monitoring setup](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#kustomize-project-for-the-monitoring-setup)
+- [Checking Prometheus](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#checking-prometheus)
+- [Finishing Grafana's setup](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#finishing-grafanas-setup)
+- [Security concerns on Prometheus and Grafana](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#security-concerns-on-prometheus-and-grafana)
+- [Monitoring stack's Kustomize project attached to this guide series](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#monitoring-stacks-kustomize-project-attached-to-this-guide-series)
+- [Relevant system paths](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#relevant-system-paths)
+- [References](G035%20-%20Deploying%20services%2004%20~%20Monitoring%20stack%20-%20Part%206%20-%20Complete%20monitoring%20stack%20setup.md#references)
### [**G036** - Host and K3s cluster ~ Monitoring and diagnosis](G036%20-%20Host%20and%20K3s%20cluster%20~%20Monitoring%20and%20diagnosis.md#g036-host-and-k3s-cluster-monitoring-and-diagnosis)
-- [_Monitoring resources usage_](G036%20-%20Host%20and%20K3s%20cluster%20~%20Monitoring%20and%20diagnosis.md#monitoring-resources-usage)
-- [_Checking the logs_](G036%20-%20Host%20and%20K3s%20cluster%20~%20Monitoring%20and%20diagnosis.md#checking-the-logs)
-- [_Shell access into your containers_](G036%20-%20Host%20and%20K3s%20cluster%20~%20Monitoring%20and%20diagnosis.md#shell-access-into-your-containers)
-- [_References_](G036%20-%20Host%20and%20K3s%20cluster%20~%20Monitoring%20and%20diagnosis.md#references)
+- [Monitoring resources usage](G036%20-%20Host%20and%20K3s%20cluster%20~%20Monitoring%20and%20diagnosis.md#monitoring-resources-usage)
+- [Checking the logs](G036%20-%20Host%20and%20K3s%20cluster%20~%20Monitoring%20and%20diagnosis.md#checking-the-logs)
+- [Shell access into your containers](G036%20-%20Host%20and%20K3s%20cluster%20~%20Monitoring%20and%20diagnosis.md#shell-access-into-your-containers)
+- [References](G036%20-%20Host%20and%20K3s%20cluster%20~%20Monitoring%20and%20diagnosis.md#references)
### [**G037** - Backups 01 ~ Considerations](G037%20-%20Backups%2001%20~%20Considerations.md#g037-backups-01-considerations)
-- [_What to backup. Identifying your data concerns_](G037%20-%20Backups%2001%20~%20Considerations.md#what-to-backup-identifying-your-data-concerns)
-- [_How to backup. Backup tools_](G037%20-%20Backups%2001%20~%20Considerations.md#how-to-backup-backup-tools)
-- [_Where to store the backups. Backup storage_](G037%20-%20Backups%2001%20~%20Considerations.md#where-to-store-the-backups-backup-storage)
-- [_When to do the backups. Backup scheduling_](G037%20-%20Backups%2001%20~%20Considerations.md#when-to-do-the-backups-backup-scheduling)
-- [_References_](G037%20-%20Backups%2001%20~%20Considerations.md#references)
+- [What to backup. Identifying your data concerns](G037%20-%20Backups%2001%20~%20Considerations.md#what-to-backup-identifying-your-data-concerns)
+- [How to backup. Backup tools](G037%20-%20Backups%2001%20~%20Considerations.md#how-to-backup-backup-tools)
+- [Where to store the backups. Backup storage](G037%20-%20Backups%2001%20~%20Considerations.md#where-to-store-the-backups-backup-storage)
+- [When to do the backups. Backup scheduling](G037%20-%20Backups%2001%20~%20Considerations.md#when-to-do-the-backups-backup-scheduling)
+- [References](G037%20-%20Backups%2001%20~%20Considerations.md#references)
### [**G038** - Backups 02 ~ Host platform backup with Clonezilla](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#g038-backups-02-host-platform-backup-with-clonezilla)
-- [_What gets inside this backup_](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#what-gets-inside-this-backup)
-- [_Why doing this backup_](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#why-doing-this-backup)
-- [_How it affects the host platform_](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#how-it-affects-the-host-platform)
-- [_When to do the backup_](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#when-to-do-the-backup)
-- [_How to backup with Clonezilla_](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#how-to-backup-with-clonezilla)
-- [_How to restore with Clonezilla_](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#how-to-restore-with-clonezilla)
-- [_Final considerations_](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#final-considerations)
-- [_References_](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#references)
+- [What gets inside this backup](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#what-gets-inside-this-backup)
+- [Why doing this backup](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#why-doing-this-backup)
+- [How it affects the host platform](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#how-it-affects-the-host-platform)
+- [When to do the backup](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#when-to-do-the-backup)
+- [How to backup with Clonezilla](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#how-to-backup-with-clonezilla)
+- [How to restore with Clonezilla](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#how-to-restore-with-clonezilla)
+- [Final considerations](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#final-considerations)
+- [References](G038%20-%20Backups%2002%20~%20Host%20platform%20backup%20with%20Clonezilla.md#references)
### [**G039** - Backups 03 ~ Proxmox VE backup job](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#g039-backups-03-proxmox-ve-backup-job)
-- [_What gets covered with the backup job_](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#what-gets-covered-with-the-backup-job)
-- [_Why scheduling a backup job_](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#why-scheduling-a-backup-job)
-- [_How it affects the K3s Kubernetes cluster_](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#how-it-affects-the-k3s-kubernetes-cluster)
-- [_When to do the backup job_](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#when-to-do-the-backup-job)
-- [_Scheduling the backup job in Proxmox VE_](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#scheduling-the-backup-job-in-proxmox-ve)
-- [_Restoring a backup in Proxmox VE_](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#restoring-a-backup-in-proxmox-ve)
-- [_Location of the backup files in the Proxmox VE system_](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#location-of-the-backup-files-in-the-proxmox-ve-system)
-- [_Relevant system paths_](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#relevant-system-paths)
-- [_References_](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#references)
+- [What gets covered with the backup job](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#what-gets-covered-with-the-backup-job)
+- [Why scheduling a backup job](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#why-scheduling-a-backup-job)
+- [How it affects the K3s Kubernetes cluster](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#how-it-affects-the-k3s-kubernetes-cluster)
+- [When to do the backup job](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#when-to-do-the-backup-job)
+- [Scheduling the backup job in Proxmox VE](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#scheduling-the-backup-job-in-proxmox-ve)
+- [Restoring a backup in Proxmox VE](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#restoring-a-backup-in-proxmox-ve)
+- [Location of the backup files in the Proxmox VE system](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#location-of-the-backup-files-in-the-proxmox-ve-system)
+- [Relevant system paths](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#relevant-system-paths)
+- [References](G039%20-%20Backups%2003%20~%20Proxmox%20VE%20backup%20job.md#references)
### [**G040** - Backups 04 ~ UrBackup 01 - Server setup](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#g040-backups-04-urbackup-01-server-setup)
-- [_Setting up a new VM for the UrBackup server_](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#setting-up-a-new-vm-for-the-urbackup-server)
-- [_Deploying UrBackup_](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#deploying-urbackup)
-- [_Firewall configuration on Proxmox VE_](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#firewall-configuration-on-proxmox-ve)
-- [_Adjusting the UrBackup server configuration_](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#adjusting-the-urbackup-server-configuration)
-- [_UrBackup server log file_](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#urbackup-server-log-file)
-- [_About backing up the UrBackup server VM_](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#about-backing-up-the-urbackup-server-vm)
-- [_Relevant system paths_](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#relevant-system-paths)
-- [_References_](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#references)
+- [Setting up a new VM for the UrBackup server](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#setting-up-a-new-vm-for-the-urbackup-server)
+- [Deploying UrBackup](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#deploying-urbackup)
+- [Firewall configuration on Proxmox VE](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#firewall-configuration-on-proxmox-ve)
+- [Adjusting the UrBackup server configuration](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#adjusting-the-urbackup-server-configuration)
+- [UrBackup server log file](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#urbackup-server-log-file)
+- [About backing up the UrBackup server VM](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#about-backing-up-the-urbackup-server-vm)
+- [Relevant system paths](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#relevant-system-paths)
+- [References](G040%20-%20Backups%2004%20~%20UrBackup%2001%20-%20Server%20setup.md#references)
### [**G041** - Backups 05 ~ UrBackup 02 - Clients setup and configuring file backups](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#g041-backups-05-urbackup-02-clients-setup-and-configuring-file-backups)
-- [_Deploying the UrBackup client program_](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#deploying-the-urbackup-client-program)
-- [_UrBackup client log file_](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#urbackup-client-log-file)
-- [_UrBackup client uninstaller_](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#urbackup-client-uninstaller)
-- [_Configuring file backup paths on a client_](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#configuring-file-backup-paths-on-a-client)
-- [_Backups on the UrBackup server_](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#backups-on-the-urbackup-server)
-- [_Restoration from file backups_](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#restoration-from-file-backups)
-- [_Relevant system paths_](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#relevant-system-paths)
-- [_References_](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#references)
+- [Deploying the UrBackup client program](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#deploying-the-urbackup-client-program)
+- [UrBackup client log file](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#urbackup-client-log-file)
+- [UrBackup client uninstaller](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#urbackup-client-uninstaller)
+- [Configuring file backup paths on a client](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#configuring-file-backup-paths-on-a-client)
+- [Backups on the UrBackup server](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#backups-on-the-urbackup-server)
+- [Restoration from file backups](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#restoration-from-file-backups)
+- [Relevant system paths](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#relevant-system-paths)
+- [References](G041%20-%20Backups%2005%20~%20UrBackup%2002%20-%20Clients%20setup%20and%20configuring%20file%20backups.md#references)
### [**G042** - System update 01 ~ Considerations](G042%20-%20System%20update%2001%20~%20Considerations.md#g042-system-update-01-considerations)
-- [_What to update. Identifying your system's software layers_](G042%20-%20System%20update%2001%20~%20Considerations.md#what-to-update-identifying-your-systems-software-layers)
-- [_How to update. Update procedures_](G042%20-%20System%20update%2001%20~%20Considerations.md#how-to-update-update-procedures)
-- [_When to apply the updates_](G042%20-%20System%20update%2001%20~%20Considerations.md#when-to-apply-the-updates)
+- [What to update. Identifying your system's software layers](G042%20-%20System%20update%2001%20~%20Considerations.md#what-to-update-identifying-your-systems-software-layers)
+- [How to update. Update procedures](G042%20-%20System%20update%2001%20~%20Considerations.md#how-to-update-update-procedures)
+- [When to apply the updates](G042%20-%20System%20update%2001%20~%20Considerations.md#when-to-apply-the-updates)
### [**G043** - System update 02 ~ Updating Proxmox VE](G043%20-%20System%20update%2002%20~%20Updating%20Proxmox%20VE.md#g043-system-update-02-updating-proxmox-ve)
-- [_Examining your Proxmox VE system_](G043%20-%20System%20update%2002%20~%20Updating%20Proxmox%20VE.md#examining-your-proxmox-ve-system)
-- [_Updating Proxmox VE_](G043%20-%20System%20update%2002%20~%20Updating%20Proxmox%20VE.md#updating-proxmox-ve)
-- [_References_](G043%20-%20System%20update%2002%20~%20Updating%20Proxmox%20VE.md#references)
+- [Examining your Proxmox VE system](G043%20-%20System%20update%2002%20~%20Updating%20Proxmox%20VE.md#examining-your-proxmox-ve-system)
+- [Updating Proxmox VE](G043%20-%20System%20update%2002%20~%20Updating%20Proxmox%20VE.md#updating-proxmox-ve)
+- [References](G043%20-%20System%20update%2002%20~%20Updating%20Proxmox%20VE.md#references)
### [**G044** - System update 03 ~ Updating VMs and UrBackup](G044%20-%20System%20update%2003%20~%20Updating%20VMs%20and%20UrBackup.md#g044-system-update-03-updating-vms-and-urbackup)
-- [_Examining your VMs_](G044%20-%20System%20update%2003%20~%20Updating%20VMs%20and%20UrBackup.md#examining-your-vms)
-- [_Updating Debian on your VMs_](G044%20-%20System%20update%2003%20~%20Updating%20VMs%20and%20UrBackup.md#updating-debian-on-your-vms)
-- [_Updating the UrBackup software_](G044%20-%20System%20update%2003%20~%20Updating%20VMs%20and%20UrBackup.md#updating-the-urbackup-software)
-- [_References_](G044%20-%20System%20update%2003%20~%20Updating%20VMs%20and%20UrBackup.md#references)
+- [Examining your VMs](G044%20-%20System%20update%2003%20~%20Updating%20VMs%20and%20UrBackup.md#examining-your-vms)
+- [Updating Debian on your VMs](G044%20-%20System%20update%2003%20~%20Updating%20VMs%20and%20UrBackup.md#updating-debian-on-your-vms)
+- [Updating the UrBackup software](G044%20-%20System%20update%2003%20~%20Updating%20VMs%20and%20UrBackup.md#updating-the-urbackup-software)
+- [References](G044%20-%20System%20update%2003%20~%20Updating%20VMs%20and%20UrBackup.md#references)
### [**G045** - System update 04 ~ Updating K3s and deployed apps](G045%20-%20System%20update%2004%20~%20Updating%20K3s%20and%20deployed%20apps.md#g045-system-update-04-updating-k3s-and-deployed-apps)
-- [_Examining your K3s Kubernetes cluster_](G045%20-%20System%20update%2004%20~%20Updating%20K3s%20and%20deployed%20apps.md#examining-your-k3s-kubernetes-cluster)
-- [_Updating apps and K3s_](G045%20-%20System%20update%2004%20~%20Updating%20K3s%20and%20deployed%20apps.md#updating-apps-and-k3s)
-- [_References_](G045%20-%20System%20update%2004%20~%20Updating%20K3s%20and%20deployed%20apps.md#references)
+- [Examining your K3s Kubernetes cluster](G045%20-%20System%20update%2004%20~%20Updating%20K3s%20and%20deployed%20apps.md#examining-your-k3s-kubernetes-cluster)
+- [Updating apps and K3s](G045%20-%20System%20update%2004%20~%20Updating%20K3s%20and%20deployed%20apps.md#updating-apps-and-k3s)
+- [References](G045%20-%20System%20update%2004%20~%20Updating%20K3s%20and%20deployed%20apps.md#references)
### [**G046** - Cleaning the system](G046%20-%20Cleaning%20the%20system.md#g046-cleaning-the-system)
-- [_Checking your storage status_](G046%20-%20Cleaning%20the%20system.md#checking-your-storage-status)
-- [_Cleaning procedures_](G046%20-%20Cleaning%20the%20system.md#cleaning-procedures)
-- [_Reminder about the `apt` updates_](G046%20-%20Cleaning%20the%20system.md#reminder-about-the-apt-updates)
-- [_Relevant system paths_](G046%20-%20Cleaning%20the%20system.md#relevant-system-paths)
-- [_References_](G046%20-%20Cleaning%20the%20system.md#references)
+- [Checking your storage status](G046%20-%20Cleaning%20the%20system.md#checking-your-storage-status)
+- [Cleaning procedures](G046%20-%20Cleaning%20the%20system.md#cleaning-procedures)
+- [Reminder about the `apt` updates](G046%20-%20Cleaning%20the%20system.md#reminder-about-the-apt-updates)
+- [Relevant system paths](G046%20-%20Cleaning%20the%20system.md#relevant-system-paths)
+- [References](G046%20-%20Cleaning%20the%20system.md#references)
## Appendixes
### [**G901** - Appendix 01 ~ Connecting through SSH with PuTTY](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md#g901-appendix-01-connecting-through-ssh-with-putty)
-- [_Generating `.ppk` file from private key_](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md#generating-ppk-file-from-private-key)
-- [_Configuring the connection to the PVE node_](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md#configuring-the-connection-to-the-pve-node)
-- [_References_](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md#references)
+- [Generating `.ppk` file from private key](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md#generating-ppk-file-from-private-key)
+- [Configuring the connection to the PVE node](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md#configuring-the-connection-to-the-pve-node)
+- [References](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md#references)
### [**G902** - Appendix 02 ~ Vim vimrc configuration](G902%20-%20Appendix%2002%20~%20Vim%20vimrc%20configuration.md#g902-appendix-02-vim-vimrc-configuration)
-- [_References_](G902%20-%20Appendix%2002%20~%20Vim%20vimrc%20configuration.md#references)
+- [References](G902%20-%20Appendix%2002%20~%20Vim%20vimrc%20configuration.md#references)
### [**G903** - Appendix 03 ~ Customization of the motd file](G903%20-%20Appendix%2003%20~%20Customization%20of%20the%20motd%20file.md#g903-appendix-03-customization-of-the-motd-file)
-- [_References_](G903%20-%20Appendiz%2003%20~%20Customization%20of%20the%20motd%20file.md#references)
+- [References](G903%20-%20Appendix%2003%20~%20Customization%20of%20the%20motd%20file.md#references)
### [**G904** - Appendix 04 ~ Object by object Kubernetes deployments](G904%20-%20Appendix%2004%20~%20Object%20by%20object%20Kubernetes%20deployments.md#g904-appendix-04-object-by-object-kubernetes-deployments)
-- [_Example scenario: cert-manager deployment_](G904%20-%20Appendix%2004%20~%20Object%20by%20object%20Kubernetes%20deployments.md#example-scenario-cert-manager-deployment)
-- [_Relevant system paths_](G904%20-%20Appendix%2004%20~%20Object%20by%20object%20Kubernetes%20deployments.md#relevant-system-paths)
-- [_References_](G904%20-%20Appendix%2004%20~%20Object%20by%20object%20Kubernetes%20deployments.md#references)
+- [Example scenario: cert-manager deployment](G904%20-%20Appendix%2004%20~%20Object%20by%20object%20Kubernetes%20deployments.md#example-scenario-cert-manager-deployment)
+- [Relevant system paths](G904%20-%20Appendix%2004%20~%20Object%20by%20object%20Kubernetes%20deployments.md#relevant-system-paths)
+- [References](G904%20-%20Appendix%2004%20~%20Object%20by%20object%20Kubernetes%20deployments.md#references)
### [**G905** - Appendix 05 ~ Cloning storage drives with Clonezilla](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#g905-appendix-05-cloning-storage-drives-with-clonezilla)
-- [_Preparing the Clonezilla Live USB_](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#preparing-the-clonezilla-live-usb)
-- [_Cloning a storage drive with Clonezilla_](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#cloning-a-storage-drive-with-clonezilla)
-- [_Restoring a Clonezilla image_](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#restoring-a-clonezilla-image)
-- [_Considerations about Clonezilla_](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#considerations-about-clonezilla)
-- [_Alternative to Clonezilla:_ Rescuezilla](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#alternative-to-clonezilla-rescuezilla)
-- [_References_](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#references)
+- [Preparing the Clonezilla Live USB](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#preparing-the-clonezilla-live-usb)
+- [Cloning a storage drive with Clonezilla](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#cloning-a-storage-drive-with-clonezilla)
+- [Restoring a Clonezilla image](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#restoring-a-clonezilla-image)
+- [Considerations about Clonezilla](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#considerations-about-clonezilla)
+- [Alternative to Clonezilla: Rescuezilla](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#alternative-to-clonezilla-rescuezilla)
+- [References](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md#references)
### [**G906** - Appendix 06 ~ Handling VM or VM template volumes](G906%20-%20Appendix%2006%20~%20Handling%20VM%20or%20VM%20template%20volumes.md#g906-appendix-06-handling-vm-or-vm-template-volumes)
-- [_Installing the `libguestfs-tools` package_](G906%20-%20Appendix%2006%20~%20Handling%20VM%20or%20VM%20template%20volumes.md#installing-the-libguestfs-tools-package)
-- [_Locating and checking a VM or VM template's hard disk volume_](G906%20-%20Appendix%2006%20~%20Handling%20VM%20or%20VM%20template%20volumes.md#locating-and-checking-a-vm-or-vm-templates-hard-disk-volume)
-- [_Relevant system paths_](G906%20-%20Appendix%2006%20~%20Handling%20VM%20or%20VM%20template%20volumes.md#relevant-system-paths)
-- [_References_](G906%20-%20Appendix%2006%20~%20Handling%20VM%20or%20VM%20template%20volumes.md#references)
+- [Installing the `libguestfs-tools` package](G906%20-%20Appendix%2006%20~%20Handling%20VM%20or%20VM%20template%20volumes.md#installing-the-libguestfs-tools-package)
+- [Locating and checking a VM or VM template's hard disk volume](G906%20-%20Appendix%2006%20~%20Handling%20VM%20or%20VM%20template%20volumes.md#locating-and-checking-a-vm-or-vm-templates-hard-disk-volume)
+- [Relevant system paths](G906%20-%20Appendix%2006%20~%20Handling%20VM%20or%20VM%20template%20volumes.md#relevant-system-paths)
+- [References](G906%20-%20Appendix%2006%20~%20Handling%20VM%20or%20VM%20template%20volumes.md#references)
### [**G907** - Appendix 07 ~ Resizing a root LVM volume](G907%20-%20Appendix%2007%20~%20Resizing%20a%20root%20LVM%20volume.md#g907-appendix-07-resizing-a-root-lvm-volume)
-- [_Resizing the storage drive on Proxmox VE_](G907%20-%20Appendix%2007%20~%20Resizing%20a%20root%20LVM%20volume.md#resizing-the-storage-drive-on-proxmox-ve)
-- [_Extending the root LVM filesystem on a live VM_](G907%20-%20Appendix%2007%20~%20Resizing%20a%20root%20LVM%20volume.md#extending-the-root-lvm-filesystem-on-a-live-vm)
-- [_Final note_](G907%20-%20Appendix%2007%20~%20Resizing%20a%20root%20LVM%20volume.md#final-note)
-- [_References_](G907%20-%20Appendix%2007%20~%20Resizing%20a%20root%20LVM%20volume.md#references)
+- [Resizing the storage drive on Proxmox VE](G907%20-%20Appendix%2007%20~%20Resizing%20a%20root%20LVM%20volume.md#resizing-the-storage-drive-on-proxmox-ve)
+- [Extending the root LVM filesystem on a live VM](G907%20-%20Appendix%2007%20~%20Resizing%20a%20root%20LVM%20volume.md#extending-the-root-lvm-filesystem-on-a-live-vm)
+- [Final note](G907%20-%20Appendix%2007%20~%20Resizing%20a%20root%20LVM%20volume.md#final-note)
+- [References](G907%20-%20Appendix%2007%20~%20Resizing%20a%20root%20LVM%20volume.md#references)
### [**G908** - Appendix 08 ~ K3s cluster with two or more server nodes](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#g908-appendix-08-k3s-cluster-with-two-or-more-server-nodes)
-- [_Add a new VM to act as the second server node_](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#add-a-new-vm-to-act-as-the-second-server-node)
-- [_Adapt the Proxmox VE firewall setup_](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#adapt-the-proxmox-ve-firewall-setup)
-- [_Setup of the FIRST K3s server node_](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#setup-of-the-first-k3s-server-node)
-- [_Setup of the SECOND K3s server node_](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#setup-of-the-second-k3s-server-node)
-- [_Regarding the K3s agent nodes_](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#regarding-the-k3s-agent-nodes)
+- [Add a new VM to act as the second server node](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#add-a-new-vm-to-act-as-the-second-server-node)
+- [Adapt the Proxmox VE firewall setup](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#adapt-the-proxmox-ve-firewall-setup)
+- [Setup of the FIRST K3s server node](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#setup-of-the-first-k3s-server-node)
+- [Setup of the SECOND K3s server node](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#setup-of-the-second-k3s-server-node)
+- [Regarding the K3s agent nodes](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md#regarding-the-k3s-agent-nodes)
### [**G909** - Appendix 09 ~ Kubernetes object stuck in Terminating state](G909%20-%20Appendix%2009%20~%20Kubernetes%20object%20stuck%20in%20Terminating%20state.md#g909-appendix-09-kubernetes-object-stuck-in-terminating-state)
-- [_Scenario_](G909%20-%20Appendix%2009%20~%20Kubernetes%20object%20stuck%20in%20Terminating%20state.md#scenario)
-- [_Solution_](G909%20-%20Appendix%2009%20~%20Kubernetes%20object%20stuck%20in%20Terminating%20state.md#solution)
-- [_References_](G909%20-%20Appendix%2009%20~%20Kubernetes%20object%20stuck%20in%20Terminating%20state.md#references)
+- [Scenario](G909%20-%20Appendix%2009%20~%20Kubernetes%20object%20stuck%20in%20Terminating%20state.md#scenario)
+- [Solution](G909%20-%20Appendix%2009%20~%20Kubernetes%20object%20stuck%20in%20Terminating%20state.md#solution)
+- [References](G909%20-%20Appendix%2009%20~%20Kubernetes%20object%20stuck%20in%20Terminating%20state.md#references)
### [**G910** - Appendix 10 ~ Setting up virtual network with Open vSwitch](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md#g910-appendix-10-setting-up-virtual-network-with-open-vswitch)
-- [_Installation_](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md#installation)
-- [_Replacing the Linux bridge with the OVS one_](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md#replacing-the-linux-bridge-with-the-ovs-one)
-- [_Relevant system paths_](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md#relevant-system-paths)
-- [_References_](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md#references)
+- [Installation](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md#installation)
+- [Replacing the Linux bridge with the OVS one](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md#replacing-the-linux-bridge-with-the-ovs-one)
+- [Relevant system paths](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md#relevant-system-paths)
+- [References](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md#references)
-### [**G911** - Appendix 11 ~ Alternative Nextcloud web server setups](G911%20-%20Appendix%2011%20~%20Alternative%20Nextcloud%20web%20server%20setups.md#g911-appendix-11-alternative-nextcloud-web-server-setups)
+### [**G911** - Appendix 11 ~ Alternative Nextcloud web server setups](G911%20-%20Appendix%2011%20~%20Alternative%20Nextcloud%20web%20server%20setups.md)
- [Ideas for the Apache setup](G911%20-%20Appendix%2011%20~%20Alternative%20Nextcloud%20web%20server%20setups.md#ideas-for-the-apache-setup)
- [Nginx setup](G911%20-%20Appendix%2011%20~%20Alternative%20Nextcloud%20web%20server%20setups.md#nginx-setup)
- [Relevant system paths](G911%20-%20Appendix%2011%20~%20Alternative%20Nextcloud%20web%20server%20setups.md#relevant-system-paths)
- [References](G911%20-%20Appendix%2011%20~%20Alternative%20Nextcloud%20web%20server%20setups.md#references)
-### [**G912** - Appendix 12 ~ Adapting MetalLB config to CR](G912%20-%20Appendix%2012%20~%20Adapting%20MetalLB%20config%20to%20CR.md#g912-appendix-12-adapting-metallb-config-to-cr)
-
-- [_References_](G912%20-%20Appendix%2012%20~%20Adapting%20MetalLB%20config%20to%20CR.md#references)
-
-### [**G913** - Appendix 13 ~ Checking the K8s API endpoints' status](G913%20-%20Appendix%2013%20~%20Checking%20the%20K8s%20API%20endpoints%20status.md)
+### [**G912** - Appendix 12 ~ Checking the K8s API endpoints' status](G912%20-%20Appendix%2012%20~%20Checking%20the%20K8s%20API%20endpoints%20status.md)
-- [_References_](G913%20-%20Appendix%2013%20~%20Checking%20the%20K8s%20API%20endpoints%20status.md#references)
+- [References](G912%20-%20Appendix%2012%20~%20Checking%20the%20K8s%20API%20endpoints%20status.md#references)
-### [**G914** - Appendix 14 ~ Post-update manual maintenance tasks for Nextcloud](G914%20-%20Appendix%2014%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md)
+### [**G913** - Appendix 13 ~ Post-update manual maintenance tasks for Nextcloud](G913%20-%20Appendix%2013%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md)
-- [_Concerns_](G914%20-%20Appendix%2014%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md#concerns)
-- [_Procedure_](G914%20-%20Appendix%2014%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md#procedure)
-- [_References_](G914%20-%20Appendix%2014%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md#references)
+- [Concerns](G913%20-%20Appendix%2013%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md#concerns)
+- [Procedure](G913%20-%20Appendix%2013%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md#procedure)
+- [References](G913%20-%20Appendix%2013%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md#references)
-### [**G915** - Appendix 15 ~ Updating MariaDB to a newer major version](G915%20-%20Appendix%2015%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md)
+### [**G914** - Appendix 14 ~ Updating MariaDB to a newer major version](G914%20-%20Appendix%2014%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md)
-- [_Concerns_](G915%20-%20Appendix%2015%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md#concerns)
-- [_Enabling the update procedure_](G915%20-%20Appendix%2015%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md#enabling-the-update-procedure)
-- [_References_](G915%20-%20Appendix%2015%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md#references)
+- [Concerns](G914%20-%20Appendix%2014%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md#concerns)
+- [Enabling the update procedure](G914%20-%20Appendix%2014%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md#enabling-the-update-procedure)
+- [References](G914%20-%20Appendix%2014%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md#references)
-### [**G916** - Appendix 16 ~ Updating PostgreSQL to a newer major version](G916%20-%20Appendix%2016%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md)
+### [**G915** - Appendix 15 ~ Updating PostgreSQL to a newer major version](G915%20-%20Appendix%2015%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md)
-- [_Concerns_](G916%20-%20Appendix%2016%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#concerns)
-- [_Upgrade procedure (for Gitea's PostgreSQL instance)_](G916%20-%20Appendix%2016%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#upgrade-procedure-for-gitea-s-postgresql-instance)
-- [_Kustomize project only for updating PostgreSQL included in this guide series_](G916%20-%20Appendix%2016%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#kustomize-project-only-for-updating-postgresql-included-in-this-guide-series)
-- [Relevant system paths](G916%20-%20Appendix%2016%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#relevant-system-paths)
-- [_References_](G916%20-%20Appendix%2016%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#references)
+- [Concerns](G915%20-%20Appendix%2015%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#concerns)
+- [Upgrade procedure (for Gitea's PostgreSQL instance)](G915%20-%20Appendix%2015%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#upgrade-procedure-for-giteas-postgresql-instance)
+- [Kustomize project only for updating PostgreSQL included in this guide series](G915%20-%20Appendix%2015%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#kustomize-project-only-for-updating-postgresql-included-in-this-guide-series)
+- [Relevant system paths](G915%20-%20Appendix%2015%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#relevant-system-paths)
+- [References](G915%20-%20Appendix%2015%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md#references)
## Navigation
diff --git a/G001 - Hardware setup.md b/G001 - Hardware setup.md
index 88d0714..e24fce7 100644
--- a/G001 - Hardware setup.md
+++ b/G001 - Hardware setup.md
@@ -1,57 +1,87 @@
# G001 - Hardware setup
-The first thing you need to have is a capable computer. In the [README](README.md) I've talked about a small or low-end consumer computer, meaning that you don't need the latest and fastest machine available in the market. Any relatively modern small tower PC, or even a normal laptop, could be adequate. Still, your computer must meet certain minimum requirements, or it won't be able to run the Kubernetes cluster the way it's explained in this guide series.
+- [You just need a capable enough computer](#you-just-need-a-capable-enough-computer)
+- [The reference hardware setup](#the-reference-hardware-setup)
+ - [Why this hardware setup?](#why-this-hardware-setup)
+- [References](#references)
+ - [Hardware](#hardware)
+- [Navigation](#navigation)
+
+## You just need a capable enough computer
+
+In the [README](README.md) I talk about a small or low-end consumer-grade computer, meaning that you don't need the latest and fastest machine available on the market. Any relatively modern small tower or mini PC, or even a normal laptop, could be adequate. Still, your computer must meet certain minimum requirements, or it won't be able to run the Kubernetes cluster the way it's explained in this guide.
+
+> [!NOTE]
+> **Virtualizing the Proxmox VE setup is problematic**\
+> It is possible to run the Proxmox VE platform in a VM but, in my experience with VirtualBox at least, configuring the networking to enable access to the VMs running within Proxmox VE is not straightforward. Depending on the virtualization platform you use, you may be forced to try different configurations and, even then, you might not be able to reach the VMs running in your Proxmox VE server.
+>
+> In short, it is better if you use real hardware to avoid extra pains with the networking aspects of this guide's setup.
## The reference hardware setup
-The hardware setup that serves as platform and reference for this guide series is a slightly upgraded [Lenovo H30-00 desktop computer](https://pcsupport.lenovo.com/us/en/products/desktops-and-all-in-ones/lenovo-h-series-desktops/lenovo-h30-00-desktop) from 2014. After the upgrade, it has the following specs.
+The hardware used in this guide is an upgraded [Packard Bell iMedia S2883 desktop computer](https://archive.org/details/manualzilla-id-7098831) from around 2014. This quite old and rather limited computer has the following specifications (after the upgrade):
+
+- The BIOS firmware is UEFI (Secure Boot) but also provides a CSM mode.
+
+- The **CPU** is an [Intel Celeron J1900](https://www.intel.com/content/www/us/en/products/sku/78867/intel-celeron-processor-j1900-2m-cache-up-to-2-42-ghz/specifications.html). This is a CPU with **four single-threaded cores**, built on a **64-bit architecture**, that also comes with **VT-x virtualization technology**.
+
+- The **GPU** is the Intel® HD Graphics for Intel Atom® Processor Z3700 Series, integrated in the J1900 CPU.
-- **CPU** [Intel Pentium J2900](https://ark.intel.com/content/www/us/en/ark/products/78868/intel-pentium-processor-j2900-2m-cache-up-to-2-67-ghz.html): This is a **four one-thread cores** cpu built on a **64 bits architecture** that also comes with **VT-x virtualization technology**.
+- The **RAM** is made up of two 4 GiB DDR3 SDRAM modules (8 GiB in total), the maximum allowed by the motherboard and the J1900 CPU.
-- **GPU** [Intel® HD Graphics for Intel Atom® Processor Z3700 Series](https://ark.intel.com/content/www/us/en/ark/products/78868/intel-pentium-processor-j2900-2m-cache-up-to-2-67-ghz.html#tab-blade-1-0-4), integrated in the CPU.
+- The **storage** is composed of the following drives:
-- **RAM** is one DDR3 8 GiB module, the maximum allowed by the motherboard and the J2900 CPU.
+ - One internal, 1 TiB, SSD drive, linked to a SATA port.
+ - One internal, 1 TiB, HDD drive, linked to a SATA port.
+  - One external, 2 TiB, HDD drive, plugged into a USB 3 port.
-- The storage has the following setup:
- - One internal, 1 TiB, SSD drive, linked to a SATA 2 port.
- - One internal, 1 TiB, HDD drive, linked to a SATA 2 port .
- - One external, 2 TiB, HDD drive, plugged to a USB 3 port.
+- For **networking**, it has one Realtek gigabit Ethernet controller.
-- The computer also has a bunch of USB 2 connectors plus one USB 3 plug.
+- The computer also has several USB 2 connectors plus one USB 3 port.
-- One Realtek fast/gigabit Ethernet controller, integrated in the motherboard.
+- The UPS is an [Eaton 3S700D](https://www.eaton.com/at/en-gb/skuPage.3S700D.html) unit.
-- One Realtek wireless network adapter, also integrated in the motherboard.
+This rather cheap rig is somewhat close to what a basic modern NUC or mini PC could come with at the time of writing this.
-- Power supply happens to be an external small brick like the ones used in laptops. Good for keeping the power supply's heat out of the computer.
+### Why this hardware setup?
-- UPS [APC Back-UPS ES 700](https://www.apc.com/shop/es/es/products/Back-UPS-700-de-bajo-consumo-de-APC-230-V-CEE-7-7/P-BE700G-SP).
+Let me explain why you should consider a hardware configuration like this as your bare minimum:
-This rather cheap rig is close to what a basic modern NUC or mini PC can come with (at the time of writing this). Now, let me explain why you should consider a hardware configuration like this as your bare minimum.
+- It has a UEFI (Secure Boot) BIOS, necessary to boot the EFI-based bootloader of Proxmox VE.
- The CPU must be 64 bits since Proxmox VE only runs on 64 bits CPUs.
-- The CPU should have virtualization technology embedded or the virtual machines' performance could be awful. Proxmox VE also expects the CPU to have this capability available.
+- The CPU must have virtualization technology embedded, or the virtual machines' performance could be awful. Proxmox VE also expects the CPU to have this capability available (a quick way of checking this from a running Linux system is sketched at the end of this section).
-- Having less than 8 GiB of RAM won't cut it, the virtual machines you'll use as Kubernetes nodes will require at least 1 GiB each. So, starting from 8 GiB, the more RAM you can put in your computer the better.
+ > [!IMPORTANT]
+ > **Ensure your CPU's virtualization technology is active**\
+ > Check in your computer's UEFI or BIOS to ensure that the virtualization instructions are enabled.
+ >
+ > On the other hand, if you are considering installing Proxmox VE in a virtual machine, do not forget to give that virtual machine access to the virtualization technology of your host's CPU. For instance, in VirtualBox there's an option named `Enable Nested VT-x/AMD-V` that allows you just that (although its activation is not straightforward).
-- Regarding storage, at least you'll need one big enough internal storage drive and another big external one.
- - The internal one should be SSD so you can get the best performance possible out of your system, meaning that in this drive is where you should install the Proxmox VE platform and where you must put the root filesystems of your VMs.
- - The external one could be a 7200 RPM HDD, pluggable through USB 3 (if possible). This drive would serve you as the backup storage.
- - If you happen to have another big storage drive that you can put inside your computer, as I set up in mine, you could use it as data storage.
+- Having less than 8 GiB of RAM won't cut it: the virtual machines you will use as Kubernetes nodes will use 2 GiB each. So, starting from 8 GiB, the more RAM you can put in your computer, the better.
+
+- Regarding storage, you will need at least one sufficiently large internal drive and another big external one.
+
+  - The internal one should be an SSD, so you can get the best performance possible out of your system. This is the drive where you should install the Proxmox VE platform and where you must put the root filesystems of your VMs.
+
+  - The external one could be a 7200 RPM HDD, plugged into the fastest USB port available. This drive would serve as your backup storage.
+
+  - If you happen to have another big storage drive that you can fit inside your computer, as I did in mine, you could use it as data storage.
- If you don't have it already, get an UPS. Running a server without one is risking damage or, at least, data losses in case of outages or electric spikes.
-So, although a hardware setup like this won't allow you to use things usually found in professional environments (RAID storage configurations, high availability, etc), you'll get a decent small homelab for your personal usage.
+A hardware setup like this won't allow you to use features usually found in professional environments such as RAID storage or high availability. Still, it will be enough for you to build a decent personal homelab.
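+
+Before committing to the installation, you can verify most of the points above from any Linux live session booted on the candidate machine. The commands below are a minimal sketch of such a check, assuming a reasonably recent distribution; they only read system information and change nothing:
+
+```bash
+# Pre-install sanity checks; run from a Linux live USB on the target computer.
+lscpu | grep -E 'Architecture|Virtualization'   # expect x86_64, plus VT-x or AMD-V
+grep -Ec '(vmx|svm)' /proc/cpuinfo              # a count above 0 means the CPU exposes its virtualization flags
+grep MemTotal /proc/meminfo                     # roughly 8 GiB or more is the practical minimum for this setup
+[ -d /sys/firmware/efi ] && echo "Booted in UEFI mode" || echo "Booted in legacy BIOS/CSM mode"
+lsblk -d -o NAME,SIZE,ROTA,TRAN,MODEL           # ROTA 0 = SSD; TRAN usb = externally attached drive
+```
+
+If, despite the warnings above, you experiment with Proxmox VE inside VirtualBox, recent VirtualBox releases also expose the nested VT-x/AMD-V toggle on the command line, for example `VBoxManage modifyvm "<vm-name>" --nested-hw-virt on`; take the VM name placeholder and the exact flag as assumptions to double-check against your VirtualBox version.
+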
## References
-### _Hardware_
+### Hardware
+
+- [Packard Bell iMedia S2883 desktop computer](https://archive.org/details/manualzilla-id-7098831)
+
+- [Intel Celeron J1900](https://www.intel.com/content/www/us/en/products/sku/78867/intel-celeron-processor-j1900-2m-cache-up-to-2-42-ghz/specifications.html)
-- [Lenovo H30-00 desktop computer](https://pcsupport.lenovo.com/us/en/products/desktops-and-all-in-ones/lenovo-h-series-desktops/lenovo-h30-00-desktop)
-- [Intel Pentium J2900](https://ark.intel.com/content/www/us/en/ark/products/78868/intel-pentium-processor-j2900-2m-cache-up-to-2-67-ghz.html)
-- [Intel® HD Graphics for Intel Atom® Processor Z3700 Series](https://ark.intel.com/content/www/us/en/ark/products/78868/intel-pentium-processor-j2900-2m-cache-up-to-2-67-ghz.html#tab-blade-1-0-4)
-- [APC Back-UPS ES 700](https://www.apc.com/shop/es/es/products/Back-UPS-700-de-bajo-consumo-de-APC-230-V-CEE-7-7/P-BE700G-SP)
+- [Eaton 3S700D](https://www.eaton.com/at/en-gb/skuPage.3S700D.html)
## Navigation
diff --git a/G002 - Proxmox VE installation.md b/G002 - Proxmox VE installation.md
index 845fa39..723c51c 100644
--- a/G002 - Proxmox VE installation.md
+++ b/G002 - Proxmox VE installation.md
@@ -1,28 +1,46 @@
# G002 - Proxmox VE installation
-This guide explains how to install a Proxmox VE **7.0** platform into the hardware detailed in the [**G001** guide](G001%20-%20Hardware%20setup.md). This procedure follows a straightforward path, meaning that only some basic parameters will be configured here. Any advanced stuff will be left for later guides.
+- [A procedure to install Proxmox VE in limited consumer hardware](#a-procedure-to-install-proxmox-ve-in-limited-consumer-hardware)
+- [System Requirements](#system-requirements)
+ - [Minimum requirements](#minimum-requirements)
+ - [Recommended requirements](#recommended-requirements)
+- [Installation procedure](#installation-procedure)
+ - [Prepare the Proxmox VE installation media](#prepare-the-proxmox-ve-installation-media)
+ - [Clear your storage drives](#clear-your-storage-drives)
+ - [Installing Proxmox VE](#installing-proxmox-ve)
+ - [Failed installation due to a bootloader setup error](#failed-installation-due-to-a-bootloader-setup-error)
+- [After the installation](#after-the-installation)
+- [Connecting remotely](#connecting-remotely)
+- [References](#references)
+ - [Proxmox](#proxmox)
+ - [Ventoy](#ventoy)
+- [Navigation](#navigation)
+
+## A procedure to install Proxmox VE in limited consumer hardware
+
+This chapter explains how to install a Proxmox VE **9.0** platform into a consumer-grade computer like the one detailed in the [**G001** chapter](G001%20-%20Hardware%20setup.md). This procedure follows a straightforward path, meaning that only some basic but necessary parameters will be configured here. Any advanced configuration will be left for later chapters.
## System Requirements
-I've copied below the minimum and recommended requirements for Proxmox VE 7.0, to compare them with the hardware I'm using.
+Here I've copied the [minimum and recommended requirements for Proxmox VE](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_system_requirements).
-### _Minimum requirements_
+### Minimum requirements
-According to the Proxmox VE manual, these minimum requirements are **for evaluation purposes only**, **not for setting up an enterprise-grade production server**.
+According to the Proxmox VE manual, these minimum requirements are **for evaluation purposes only**, **not for setting up an enterprise-grade production server**:
-- CPU: 64bit (Intel EMT64 or AMD64).
-- Intel VT/AMD-V capable CPU/Mainboard for KVM full virtualization support.
-- RAM: 1 GiB, plus additional RAM needed for guests (virtual machines and containers).
+- CPU: 64bit (Intel 64 or AMD64).
+- Intel VT/AMD-V capable CPU/motherboard for KVM full virtualization support.
+- RAM: 1 GB RAM, plus additional RAM needed for guests.
- Hard drive.
- One network card (NIC).
-As shown above, my hardware fits the minimum requirements.
+As you can see, a computer matching the [reference hardware](G001%20-%20Hardware%20setup.md#the-reference-hardware-setup) fits the minimum requirements.
-### _Recommended requirements_
+### Recommended requirements
-Below you can find the minimum requirements for a proper Proxmox VE production server.
+See below the [recommended system requirements for a proper Proxmox VE production server](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#install_recommended_requirements):
-- Intel EMT64 or AMD64 with Intel VT/AMD-V CPU flag.
+- Intel 64 or AMD64 with Intel VT/AMD-V CPU flag.
- Memory: Minimum 2 GB for the OS and Proxmox VE services, plus designated memory for guests. For Ceph and ZFS, additional memory is required; approximately 1GB of memory for every TB of used storage.
@@ -31,193 +49,239 @@ Below you can find the minimum requirements for a proper Proxmox VE production s
- OS storage: Use a hardware RAID with battery protected write cache (“BBU”) or non-RAID with ZFS (optional SSD for ZIL).
- VM storage:
- - For local storage, use either a hardware RAID with battery backed write cache (BBU) or non-RAID for ZFS and Ceph. Neither ZFS nor Ceph are compatible with a hardware RAID controller.
- - Shared and distributed storage is possible.
+
+ - For local storage, use either a hardware RAID with battery backed write cache (BBU) or non-RAID for ZFS and Ceph. Neither ZFS nor Ceph are compatible with a hardware RAID controller.
+
+ - Shared and distributed storage is possible.
+
+ - SSDs with Power-Loss-Protection (PLP) are recommended for good performance. Using consumer SSDs is discouraged.
- Redundant (Multi-)Gbit NICs, with additional NICs depending on the preferred storage technology and cluster setup.
- For PCI(e) passthrough the CPU needs to support the VT-d/AMD-d flag.
-My hardware mostly complies with the CPU requirements, except on the VT-d flag part. Regarding RAM it's not in a bad shape either, specially since I'm not planning to use ZFS or Ceph given their high RAM needs. In storage terms I don't have any fancy enterprise-level thing like redundancy or hardware RAID, but at least I won't spend that much electricity either. Also, let's not forget that I have one SSD on which I'll install the Proxmox VE platform. The thing about the redundant network cards won't be really necessary for me either.
+When comparing these requirements with the [reference hardware](G001%20-%20Hardware%20setup.md#the-reference-hardware-setup), don't lose sight of the goal here: running a small **standalone Proxmox VE node** in limited consumer hardware for personal use. From that point of view, most of these recommendations, although illustrative, do not really apply to the problem at hand in this guide.
-Overall, I've got enough hardware to run a small **standalone Proxmox VE node**.
+In short, just make sure you use a computer that, at least, matches the [reference hardware](G001%20-%20Hardware%20setup.md#the-reference-hardware-setup). In particular, make sure that your CPU has virtualization capabilities and that you have at least 8 GiB of RAM.
## Installation procedure
-### _Preparing the Proxmox VE installation media_
+Installing Proxmox VE in your computer is not complicated, but requires some preparation.
-Proxmox VE 7.0 is provided as an ISO image file, which you have to either burn in a CD or DVD or write in a USB drive. Since my computer has no DVD drive anymore, I'll show you the USB path. Proxmox provides some instructions about how to do it from a Linux, MacOS or Windows environment. [Check them out right here](https://pve.proxmox.com/pve-docs/chapter-pve-installation.html#installation_prepare_media)!
+### Prepare the Proxmox VE installation media
-I'll do it from a Windows 10 system, using [**Rufus**](https://rufus.ie/) to write the Proxmox VE ISO into an USB pen drive.
+Proxmox VE is provided as an ISO image file, which you have to either burn to a DVD or write to a USB drive. Get the ISO for Proxmox VE 9.0 from the [_Proxmox Virtual Environment_ section](https://www.proxmox.com/en/downloads/proxmox-virtual-environment) of the [Proxmox site's Downloads page](https://www.proxmox.com/en/downloads). Then, I recommend using a tool like [Ventoy](https://www.ventoy.net/en/doc_start.html) to put the ISO on a USB drive and boot it in your computer from there.
-#### **Writing the Proxmox VE ISO from a Windows 10 system with Rufus**
+> [!NOTE]
+> [**Proxmox also provides some instructions about how to write the ISO image from a Linux, MacOS or Windows environment**](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#installation_prepare_media)\
+> Still, using a tool like [Ventoy](https://www.ventoy.net/en/index.html) is much easier and more straightforward.
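+
+As a hedged sketch of the Ventoy route on a Linux system, the commands below install Ventoy on a USB drive and copy the ISO onto it. The device name `/dev/sdX` and the mount point are placeholders; double-check the device with `lsblk` first, because installing Ventoy wipes that drive:
+
+~~~sh
+# From inside the extracted Ventoy release directory, install Ventoy
+# on the USB drive (THIS ERASES THE WHOLE DRIVE):
+sudo sh Ventoy2Disk.sh -i /dev/sdX
+
+# Then copy the Proxmox VE ISO into the data partition Ventoy created,
+# which usually auto-mounts as a volume labelled "Ventoy":
+cp proxmox-ve_*.iso /media/your-user/Ventoy/
+~~~
+
+On Windows, the same Ventoy release archive ships an equivalent graphical tool, so the idea is the same: install Ventoy on the drive once, then simply copy ISOs onto it.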
-1. Get the latest Proxmox VE 7.0 ISO from the [official site](https://www.proxmox.com/en/). You'll have to look for it in the site's [_downloads_ section](https://www.proxmox.com/en/downloads). Download the ISO found in the [_Proxmox Virtual Environment_ section](https://www.proxmox.com/en/downloads/category/proxmox-virtual-environment). The [Proxmox VE 7.0-2 ISO Installer](https://www.proxmox.com/en/downloads/item/proxmox-ve-7-0-iso-installer) weights around **1 GiB**.
+### Clear your storage drives
-2. Download the [Rufus](https://rufus.ie/) tool.
+Remember to completely erase the storage drives of your Proxmox VE server-to-be computer. You have to leave them empty of data, filesystems and partitions to avoid any potential conflict such as, for instance, an outdated but still bootable Linux installation lingering on one of them.
-3. Ready an USB drive with **4 GiB** at least, just to be sure that the Proxmox VE ISO has enough room to be written on it.
+Therefore, be sure to clear those drives with a tool like GParted or KDE Partition Manager from a Linux distribution that can be run in Live mode, such as the official Debian one or any of the Ubuntu-based ones.
-4. Plug your USB into your Windows 10 computer and open Rufus.
+> [!NOTE]
+> **Use a tool like Ventoy to be able to boot several ISOs from the same USB drive**\
+> By taking advantage of [Ventoy](https://www.ventoy.net/en/index.html) or a similar tool, you can first boot a Linux Mint ISO, clear your storage drives with GParted in the Live environment, reboot, and then launch the Proxmox VE installer ISO.
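+
+If you are comfortable with a terminal, the clearing can also be done from the Live session's shell with standard tools. This is only a minimal sketch: the device name `/dev/sdX` is a placeholder, and these commands destroy all data on the drive they are pointed at, so verify the device with `lsblk` before running them:
+
+~~~sh
+# Identify the drives attached to the computer:
+lsblk -o NAME,SIZE,MODEL
+
+# Remove all filesystem, RAID and partition-table signatures from a drive:
+sudo wipefs --all /dev/sdX
+
+# Optionally, also zap any leftover GPT/MBR structures (needs the gdisk package):
+sudo sgdisk --zap-all /dev/sdX
+~~~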
-5. In Rufus, choose the Proxmox VE ISO you just downloaded and adjust the `Partition scheme` depending if your target computer is a UEFI system (switch to `GPT`) or not (leave `MBR`). Leave the rest of the parameters with their default values.
+### Installing Proxmox VE
- 
+The Proxmox site has two guides explaining Proxmox VE's installation, [linked in the _References_ section at the end of this document](#proxmox). The steps below are my custom take on this install procedure, adapted to the [reference hardware](G001%20-%20Hardware%20setup.md#the-reference-hardware-setup) used in this guide:
- > **BEWARE!**
- > If you don't configure the `Partition scheme` properly, the Proxmox VE installer won't boot up when you try to launch it in your computer.
+1. Plug the USB drive containing the Proxmox VE installer ISO in your computer, then boot the ISO up. You will eventually be greeted by the following screen:
-6. With the configuration set, press the `START` button. Rufus will ask you what mode to use for writing the Proxmox VE installer ISO. Choose `Write in DD Image mode` and press `OK`.
+ 
- 
+2. Leave the **Install Proxmox VE (Graphical)** option selected and press Enter. You'll get into a shell screen, where you'll see some lines from the installer doing things like recognizing devices:
-7. Rufus will warn you that the procedure will destroy **all data** on your USB device.
+ 
- 
+ After a few seconds, you'll reach the Proxmox VE installer's graphical interface.
- If you're sure that you want to proceed, press `OK`.
+3. **This is not a step**, just a warning the installer could show you if your CPU lacks the virtualization support that Proxmox VE needs to run its KVM virtualization engine:
-8. Rufus will then write the Proxmox VE ISO in your USB drive.
+ 
- 
+ If you see this warning, **abort** the installation and boot in your server's BIOS to check if your CPU's virtualization technology support is disabled. If so, enable it and reboot back into the installer again.
-9. Rufus will take a couple of minutes to do its job. When it finishes, you'll see the message `READY` written in the green progress bar.
+ > [!IMPORTANT]
+    > **If your computer's CPU does not support virtualization, you can still install Proxmox VE on it**\
+    > I haven't seen anything in the official Proxmox VE documentation forbidding it. Still, bear in mind that the performance of the virtualized systems you'll create inside Proxmox VE could end up being sluggish or too demanding on your hardware. Or they may just not work at all.
- 
+4. Usually, the first thing the installer will present you with is the **EULA** screen:
-With the ISO properly written in the USB drive, you can take it finally start the installation.
+ 
-### _Prepare your storage drives_
+ Nothing to do here, except clicking on the **I agree** button and move on.
-Remember to empty the storage drives in your server-to-be computer, meaning that you have to leave them completely void of data, filesystems and partitions. This is to avoid any potential conflicts like, for instance, having an old installation of some outdated, but still bootable, Linux installation. So, be sure of clearing those drives, using some Linux distribution that can be run in Live mode, such as the official Debian one or any of the Ubuntu-based ones. Then use a tool like GParted or KDE Partition Manager to just remove all the partitions present on those drives and you'll be good to go.
+5. Here you'll meet the very first thing you'll have to configure, **the storage drive where you want to install Proxmox VE**:
-### _Installing Proxmox VE_
+ 
-The Proxmox site has two guides explaining the Proxmox VE installer, which I've linked to in the _References_ section at the end of this guide. But the steps you'll find below are my own take on this install procedure.
+ In the _Target Harddisk_ list you have to choose on which storage drive you want to install Proxmox VE, and you want it to be the SSD drive to get the best performance out of Proxmox VE. So, assuming the SSD is the first device in the list, choose `/dev/sda` but **do not click** on the **Next** button yet!
-> **BEWARE!**
-> Since I couldn't take screenshots of the installer screens while installing Proxmox VE in my computer, I used a small virtual machine in a Virtual Box environment just to make the captures. That's why you'll see a couple of slightly "odd" things in the screen captures used in the following steps.
+ 
-1. Plug the Proxmox VE USB in the computer, and make it boot from the USB drive.
+    Also notice that, depending on the method you are using to run the Proxmox VE installer ISO, the USB drive may also appear as an option in the target harddisk list (in my setup, it appears as the `/dev/sdd` unit listed last in the snapshot above). Just be sure NOT to choose it for installing Proxmox VE and you will be fine.
-2. After successfully booting the computer up from the USB drive, you'll eventually be greeted by the following screen.
+6. With the `/dev/sda` device chosen as target harddisk, push the **Options** button to see the _Harddisk options_ window:
- 
+ 
- From the four options available, the only ones you'll ever use are the first (**Install Proxmox VE**) and the third (**Rescue Boot**). The first one is rather self-explanatory, and the third will help you start your installed Proxmox VE in case you happen to have problems booting it up.
+ There you'll see that you can change the filesystem, and also edit a few parameters.
-3. Select the **Install Proxmox VE** option and press enter. You'll get into a shell screen, where you'll see some lines in which the installer is doing stuff like recognizing devices and such. After a few seconds, you'll reach the installer's graphical interface.
+ - `Filesystem`\
+ Leave it as **ext4**, since it's the only adequate one for the hardware available.
-4. **This is not a step**, just a warning the installer could raise you if your CPU doesn't have the support for virtualization Proxmox VE needs to have for executing its virtualization stuff with the KVM virtualization engine.
+ - `hdsize`\
+ By default, the installer assigns the entire space available in the chosen storage drive to the Proxmox VE system. This is not optimal since Proxmox VE does not need that much space by itself (remember, the reference hardware's SSD has 1 TiB), so it's better to adjust this parameter to a much lower value. Leaving it at something like 63 GiB should be more than enough. The rest of the space in the storage drive will be left unpartitioned, something we'll worry about in a later guide.
- 
+ > [!WARNING]
+ > **The `hdsize` is the total size of the filesystem assigned to Proxmox VE**\
+ > The other parameters are contained within this `hdsize` value.
- If you see this warning, **abort** the installation and boot in your server's BIOS to check if your CPU's virtualization technology support is disabled. If so, enable it and reboot back to the installer again.
+ - `swapsize`\
+     To adjust the swap size on any computer, I always use the following rule of thumb: a swap partition should reserve at least **1.5 times** the amount of RAM available in the system. In this case, since the computer has 8 GiB of RAM, that means reserving 12 GiB for the swap.
- > **IMPORTANT!**
- If your server's CPU doesn't have virtualization support, you still can keep going on with the installation. I haven't seen anything in the official documentation forbidding it, but bear in mind that the performance of the virtualized systems you'll create inside Proxmox VE could end being sluggish or too demanding on your hardware. Or just not work at all.
+ - `maxroot`, `minfree` and `maxvz`\
+ These three are left empty, to let the installer handle them with whatever defaults it uses.
-5. Usually, the first thing you should see of the installer is the **EULA** screen.
+ 
- 
+ See in this snapshot how I've configured the `hdsize` to 63 GB with 12 GB of them reserved for the swap, which leaves around 51 GB for the Proxmox VE installation and data.
- Nothing to do here, except clicking on **I agree** and move on.
+ When you have everything ready in this _Harddisk options_ window, close it by clicking on **OK**, then press on **Next**.
-6. Here you'll meet the very first thing you'll have to configure, **where you want to install Proxmox VE**.
+7. The next screen is the **Localization and Time Zone selection** for your system:
- 
+ 
- The screenshot above is from a VirtualBox machine, in which I had set up three virtual storage drives as stand-ins for the drives in the real computer.
+ Just choose the timezone and keyboard layout fitting your needs and move on.
- 
+8. Now, you'll have to input a proper password and a valid email for the `root` user:
- In the Target Harddisk list you have to choose on which storage drive you want to install Proxmox VE, and you want it in the SSD drive. So, assuming the SSD is the first device in the list, choose `/dev/sda` but **don't click** on the **Next** button yet!
+ 
-7. With the `/dev/sda` device chosen as target harddisk, push the **Options** button, there's something else to configure there.
+    Be aware that this screen will validate both the password and the email values when you click on **Next**. You will not be able to advance in the installation unless you comply with the restrictions the installer imposes in this step.
- 
+ > [!NOTE]
+    > **The email value can be a fictitious one**\
+ > The email must look realistic, but it does not have to be a real one because the installer only cares about the string itself. The installer will not try to send anything to the specified email.
+ >
+ > A Proxmox VE server can send notifications to that email, but that is a feature not enabled by default.
- There you'll see that you can change the filesystem, and also edit a few parameters.
+9. This step is about setting up your network configuration:
- - `Filesystem`: Leave it as **ext4**, since it's the only adequate one for the hardware available.
+ 
- - `hdsize`: By default, the installer assigns the entire space available in the chosen storage drive to the Proxmox VE system. This is not optimal since Proxmox VE doesn't need that much space by itself (remember, my real SSD has 1 TiB), so it's better to adjust this parameter to a much lower value. Leaving it at something like 50 GiB should be more than enough. The rest of the space in the storage drive will be left unpartitioned, something we'll worry about in a later guide.
- > **BEWARE!**
- > The `hdsize` is the total size of the filesystem assigned to Proxmox VE, and that includes all the parameters below this one.
+    What you're configuring here is through which network controller and network you want to reach the Proxmox VE management console. The installer will try to autodetect and fill in the values (it has not always worked in my experience), but some adjustment may be required. In particular, you will always have to specify the hostname FQDN you want for your Proxmox VE server. You can see a sketch of the network configuration file that results from this step right after these installation steps.
- - `swapsize`: to adjust the swap size on any computer, I always use the following rule of thumb. A swap partition should have reserved at least **1.5 times** the amount of RAM available in the system. In this case, since the computer has 8 GiB of RAM, that means reserving 12 GiB for the swap.
+ > [!NOTE]
+ > In this guide, the Proxmox VE hostname's FQDN will be `pve.homelab.cloud`.
- - `maxroot`, `minfree` and `maxvz` are left empty, to let the installer handle them with whatever defaults it uses.
+10. The **summary** screen will show you the configuration you've chosen:
- 
+ 
- When you have everything ready in this screen, click on **Next**.
+ Notice, at the bottom of the screen, the check about **Automatically reboot after successful installation**. If you prefer to reboot manually, uncheck it. Then, if you're happy with the setup, click on **Install**.
-8. The next screen is the **Localization and Time Zone selection** for your system.
+ > [!NOTE]
+ > **The email shown in the screenshot is just an example**\
+ > The `pveroot@homelab.cloud` value is a fake email with an obvious name for illustrative purposes. If you use a real one, Proxmox VE can be configured to send notifications to it.
- 
+11. The next screen will show you a progress bar and some information about the ongoing installation:
- Just choose whatever suits your needs and move on.
+ 
-9. Now, you'll have to input a proper password and a valid email for the `root` user.
+ > [!NOTE]
+ > The installer, on its own, will download and install a more recent version of the Proxmox VE platform, instead of just putting the one included in the Proxmox VE ISO.
- 
+12. The installation will be over after a few minutes. If you disabled the automatic reboot, you will see the `Installation successful!` screen:
- This screen will make some validation both over the password and the email fields when you click on **Next**.
+ 
- 
+    This screen also reminds you to reboot the system and tells you through which IP you can reach the web console of your newly installed Proxmox VE. At this point, reboot and unplug the USB drive you used to install Proxmox VE.
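+
+As mentioned in step 9, here is a hypothetical sketch of the kind of configuration the installer ends up writing to the node's `/etc/network/interfaces` file. The NIC name (`enp2s0`) and the addresses are placeholders that will differ in your system; what matters is the `vmbr0` Linux bridge that carries the management IP:
+
+~~~sh
+auto lo
+iface lo inet loopback
+
+iface enp2s0 inet manual
+
+auto vmbr0
+iface vmbr0 inet static
+        address 192.168.1.10/24
+        gateway 192.168.1.1
+        bridge-ports enp2s0
+        bridge-stp off
+        bridge-fd 0
+~~~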
-10. This step is about setting up your network configuration.
+### Failed installation due to a bootloader setup error
- 
+It might happen that the Proxmox VE installer fails at the very end, right when it is trying to write the EFI bootloader of the Proxmox VE system to the `/dev/sda` drive. When this happens, the installer will show you an error message like the following one:
- What you're configuring here is through which network controller and network you want to reach the Proxmox VE management console. The installer will autodetect the values, but some adjustment may be required.
+
- Be careful of which network controller you choose as `management interface`. Choosing the wrong one could make your Proxmox VE system unreachable remotely.
+> [!NOTE]
+> This snapshot comes from an old [reddit thread](https://www.reddit.com/r/homelab/comments/s9d2yg/proxmox_ve_install_failed_on_r720_due_to_efi_boot/) by [MattTheHuman](https://www.reddit.com/user/MattTheHuman/) dealing with this issue, although on a different hardware setup.
-11. The **summary** screen will show you the configuration you've chosen.
+In my case it happened, probably due to a bug or a bad UEFI implementation in the firmware of my computer's motherboard, but I cannot really tell. Just know that, if you face this issue, you will be forced to retry the installation of Proxmox VE, but do the following first:
- 
+1. Make the USB drive where you put the Proxmox VE installer ISO boot **in MBR mode**. With a tool like Ventoy this is very easy, although it will erase the drive and force you to copy the ISO to it again.
- Notice, at the bottom of the screen, the check about **Automatically reboot after successful installation**. If you prefer to reboot manually, uncheck it. Then, if you're happy with the setup, click on **Install**.
+2. Turn on your computer and get into its BIOS.
+
+ > [!WARNING]
+   > **The next steps done within the BIOS are only indicative**\
+ > The options available in your computer's firmware may differ significantly, although they should be somewhat similar.
+
+3. Go to the screen where you can configure the Secure Boot mode and:
-12. The next screen will show you a progress bar and some information while doing the installation.
+ - Disable the loading of security keys when the computer boots up. In my computer this was the default behavior.
- 
+ - Enable the custom mode and clear all security keys.
- With an SSD as the target hard disk, the installation process will go really fast.
+4. Still in the BIOS, go to the screen where you can enable the CSM mode and turn it on. CSM is the legacy BIOS behavior you need to boot your USB drive in MBR mode.
- 
+5. Save the changes.
- > **BEWARE!**
- > The installer, on its own, will download and install a more recent version of the Proxmox VE platform, instead of just putting the one present in the USB.
+6. Assuming you have the USB drive already plugged in the computer, boot it up and get into the Proxmox VE installer.
-13. When the installation is done, it will ask for a reboot. Unplug the USB in that moment to avoid booting into the Proxmox installation program again.
+7. Complete the installation of Proxmox VE. If it fails again, maybe there is another EFI-related option that is messing up your installation process.
+
+8. If Proxmox VE gets installed successfully, return to the BIOS when you reboot the system.
+
+9. In the BIOS, enable Secure Boot but with the custom configuration, and disable the CSM mode. This is necessary to allow Proxmox VE to boot up, since it is an EFI-based system and it won't run in CSM mode.
+
+10. Save the changes in the firmware, reboot and Proxmox VE should start running.
+
+Hopefully, these indications will save you from spending days of (almost) fruitless research if you face this very same problem.
## After the installation
-You have installed Proxmox VE and your server has rebooted. Proxmox VE comes with a web console which is accessible through the **port 8006**. So, open a browser and navigate to `https://your.host.ip.address:8006` and you'll reach the Proxmox VE web console.
+You have installed Proxmox VE and your server has rebooted. Proxmox VE comes with a web console which you can access through **port 8006**. So, open a browser and navigate to the URL the installer indicated when it finished, and you'll reach Proxmox VE's web console.
-
+
-Log in as `root` and take a look around, confirming that the installation it's done. At the time of writing this, the installation procedure left my host with a Proxmox VE **7.0-11** (the version in the ISO was _7.0-2_) **standalone node** running on a **Debian 11 "Bullseye"**.
+> [!NOTE]
+> **Proxmox VE's web console loads a light or dark color theme automatically**\
+> By default, the login screen will use the theme that corresponds to your browser's color scheme preference.
+>
+> Going forward, any other web console snapshot present in the upcoming chapters will be shown using the light color theme.
+
+Log in as `root` and take a look around to confirm that the installation is done. At the time of writing this, the installation procedure left my host with a Proxmox VE **9.0.3 standalone node** running on a **Debian GNU/Linux 13 (trixie)**.
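+
+If you prefer to confirm this from a shell rather than the web console, a couple of standard commands are enough. A minimal sketch; the exact versions you will see depend on when you install:
+
+~~~sh
+# Show the installed Proxmox VE version and running kernel:
+pveversion
+
+# Show the underlying Debian release:
+cat /etc/os-release
+~~~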
## Connecting remotely
-You can connect already to your standalone PVE node through any SSH client of your choosing, by using the username `root` and the password you set up in the installation process. This is not the most safe configuration, but you'll see how to improve it in a later guide.
+You can already connect to your standalone PVE node through any SSH client of your choosing, by using the username `root` and the password you set up in the installation process. **This is not the safest configuration**, but you'll see how to harden it in a later chapter.
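+
+For instance, from any Linux, macOS or recent Windows terminal with an OpenSSH client available, a connection attempt looks like the sketch below. The `pve.homelab.cloud` name is the FQDN assumed in this guide; use your node's IP address instead if you have not set up name resolution for it yet:
+
+~~~sh
+# Connect to the Proxmox VE node as root (password authentication):
+ssh root@pve.homelab.cloud
+~~~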
## References
-### _Proxmox_
+### [Proxmox](https://www.proxmox.com/en/)
+
+- [Downloads](https://www.proxmox.com/en/downloads)
+ - [Proxmox Virtual Environment](https://www.proxmox.com/en/downloads/proxmox-virtual-environment)
+
+- [Wiki. Installation](https://pve.proxmox.com/wiki/Installation)
+
+- [Proxmox VE Administration Guide](https://pve.proxmox.com/pve-docs/pve-admin-guide.html)
+ - [2. Installing Proxmox VE](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_installation)
+ - [2.1. System Requirements](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_system_requirements)
+ - [2.2. Prepare Installation Media](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#installation_prepare_media)
-- [Proxmox](https://www.proxmox.com/en/)
-- [Proxmox VE installation guide](https://pve.proxmox.com/wiki/Installation)
-- [Proxmox VE admin guide. Installing Proxmox VE](https://pve.proxmox.com/pve-docs/chapter-pve-installation.html)
-- [Proxmox VE. System Requirements](https://www.proxmox.com/en/proxmox-ve/requirements)
+- [Reddit. Proxmox VE install failed on R720 due to EFI boot using Grub on /dev/sda2 and /dev/sda - Completely Stumped](https://www.reddit.com/r/homelab/comments/s9d2yg/proxmox_ve_install_failed_on_r720_due_to_efi_boot/)
-### _Rufus_
+### [Ventoy](https://www.ventoy.net/en/index.html)
-- [Rufus](https://rufus.ie/)
+- [Document](https://www.ventoy.net/en/doc_start.html)
## Navigation
diff --git a/G003 - Host configuration 01 ~ Apt sources, updates and extra tools.md b/G003 - Host configuration 01 ~ Apt sources, updates and extra tools.md
index 944a649..d2916f9 100644
--- a/G003 - Host configuration 01 ~ Apt sources, updates and extra tools.md
+++ b/G003 - Host configuration 01 ~ Apt sources, updates and extra tools.md
@@ -1,207 +1,233 @@
# G003 - Host configuration 01 ~ Apt sources, updates and extra tools
-## Remember, Proxmox VE 7.0 runs on Debian 11 _bullseye_
-
-Bear always in mind that your Proxmox VE 7.0 runs on a **Debian** _GNU Linux version 11_ (_bullseye_).
+- [Proxmox VE 9.0 runs on Debian 13 "trixie"](#proxmox-ve-90-runs-on-debian-13-trixie)
+- [Editing the apt repository sources](#editing-the-apt-repository-sources)
+ - [Changing the apt repositories](#changing-the-apt-repositories)
+- [Update your system](#update-your-system)
+ - [Consideration about upgrades](#consideration-about-upgrades)
+ - [You can use `apt` directly](#you-can-use-apt-directly)
+- [Installing useful extra tools](#installing-useful-extra-tools)
+ - [Utilities for visualizing sensor information](#utilities-for-visualizing-sensor-information)
+ - [The lm\_sensors package](#the-lm_sensors-package)
+ - [The Stress Terminal UI: s-tui](#the-stress-terminal-ui-s-tui)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [Proxmox](#proxmox)
+ - [Tools](#tools)
+- [Navigation](#navigation)
+
+## Proxmox VE 9.0 runs on Debian 13 "trixie"
+
+Remember that your Proxmox VE 9.0 runs on **Debian** _GNU/Linux version 13_ (_trixie_).
The Debian version can be checked by opening the file `/etc/os-release` found in the system.
-> **BEWARE!**
-> For more details about Proxmox VE 7.0 itself, you can find a datasheet about it [in this page](https://www.proxmox.com/en/downloads/item/proxmox-ve-datasheet).
-
## Editing the apt repository sources
-Proxmox VE comes with its `apt` system configured to point at a repository for **enterprise subscriptions**. This is a problem because, unless you already have such subscription, you won't be able to perform system updates at all. Look what happens if, in a `root` shell, you try to execute `apt update` with the default setup.
+Proxmox VE comes with its `apt` system configured to point at two repositories for **enterprise subscriptions**: one for Proxmox VE itself, and the other for the Proxmox build of the Ceph distributed storage system. This is a problem because, unless you already have such a subscription, you won't be able to perform system updates at all. Look what happens if, in a `root` shell, you try to execute `apt update` with the default setup.
-~~~bash
+~~~sh
$ apt update
-Get:1 http://security.debian.org bullseye-security InRelease [44.1 kB]
-Get:2 http://ftp.es.debian.org/debian bullseye InRelease [154 kB]
-Get:3 http://security.debian.org bullseye-security/main amd64 Packages [16.8 kB]
-Get:4 http://security.debian.org bullseye-security/main Translation-en [8,244 B]
-Get:5 http://ftp.es.debian.org/debian bullseye-updates InRelease [40.1 kB]
-Get:6 http://ftp.es.debian.org/debian bullseye/main amd64 Packages [8,178 kB]
-Err:7 https://enterprise.proxmox.com/debian/pve bullseye InRelease
+Get:1 http://security.debian.org/debian-security trixie-security InRelease [43.4 kB]
+Hit:2 http://deb.debian.org/debian trixie InRelease
+Get:3 http://deb.debian.org/debian trixie-updates InRelease [47.1 kB]
+Get:4 http://security.debian.org/debian-security trixie-security/main amd64 Packages [11.6 kB]
+Get:5 http://security.debian.org/debian-security trixie-security/main Translation-en [10.6 kB]
+Err:6 https://enterprise.proxmox.com/debian/ceph-squid trixie InRelease
401 Unauthorized [IP: 51.91.38.34 443]
-Get:8 http://ftp.es.debian.org/debian bullseye/main Translation-en [6,241 kB]
-Get:9 http://ftp.es.debian.org/debian bullseye/contrib amd64 Packages [50.4 kB]
-Get:10 http://ftp.es.debian.org/debian bullseye/contrib Translation-en [46.9 kB]
-Reading package lists... Done
-E: Failed to fetch https://enterprise.proxmox.com/debian/pve/dists/bullseye/InRelease 401 Unauthorized [IP: 51.91.38.34 443]
-E: The repository 'https://enterprise.proxmox.com/debian/pve bullseye InRelease' is not signed.
-N: Updating from such a repository can't be done securely, and is therefore disabled by default.
-N: See apt-secure(8) manpage for repository creation and user configuration details.
+Err:7 https://enterprise.proxmox.com/debian/pve trixie InRelease
+ 401 Unauthorized [IP: 51.91.38.34 443]
+Error: Failed to fetch https://enterprise.proxmox.com/debian/ceph-squid/dists/trixie/InRelease 401 Unauthorized [IP: 51.91.38.34 443]
+Error: The repository 'https://enterprise.proxmox.com/debian/ceph-squid trixie InRelease' is not signed.
+Notice: Updating from such a repository can't be done securely, and is therefore disabled by default.
+Notice: See apt-secure(8) manpage for repository creation and user configuration details.
+Error: Failed to fetch https://enterprise.proxmox.com/debian/pve/dists/trixie/InRelease 401 Unauthorized [IP: 51.91.38.34 443]
+Error: The repository 'https://enterprise.proxmox.com/debian/pve trixie InRelease' is not signed.
+Notice: Updating from such a repository can't be done securely, and is therefore disabled by default.
+Notice: See apt-secure(8) manpage for repository creation and user configuration details.
~~~
-See the line Err:7 indicating an error in the process: since you don't have a valid enterprise subscription, your system is `Unauthorized` to get updates from the enterprise repository.
+See the `Err:6` and `Err:7` lines indicating errors in the process. Since you don't have a valid enterprise subscription, your system is `Unauthorized` to get updates from Proxmox's enterprise repositories.
+
+### Changing the apt repositories
-### _Changing the apt repositories_
+You need to disable the enterprise repositories and enable the repository for non-subscribers.
-You need to disable the enterprise repository and enable the repository for non-subscribers.
+1. Access your Proxmox VE web console as `root`, and browse to your `pve` node's `Updates > Repositories` section:
-1. Access your Proxmox VE web console as `root`, and browse to your `pve` node's `Updates > Repositories` section.
+ 
- 
+ Notice the yellow warnings about having the enterprise repositories enabled with no active subscription. This is related to the warning you saw when you logged in the web console.
- You'll notice the warning about having the enterprise repository enabled with no active subscription. This is related to the warning you saw when you logged in the web console.
+2. Notice that there are two enterprise repositories: one for the [Ceph distributed storage technology](https://ceph.io/en/) embedded in Proxmox, and the other for Proxmox VE itself. **You have to disable both**:
-2. Select the enterprise repository, the one with the URI `https://enterprise.proxmox.com/debian/pve`.
+ Begin with the Ceph enterprise repository, the one with the URI `https://enterprise.proxmox.com/debian/ceph-squid`.
- 
+ 
- See that the `Disable` button is now active, so press it to disable the enterprise repository.
+ See that the `Disable` button is now active, so press it to disable the enterprise repository. Then, do the same with the other enterprise repository, the one for Proxmox VE with the URI `https://enterprise.proxmox.com/debian/pve`.
-3. With the enterprise repository disabled, the web console will warn you that you won't get any updates for the Proxmox VE platform itself.
+ 
- 
+3. With both Proxmox enterprise repositories disabled, the web console will warn you that you won't get any updates for your Proxmox VE platform:
-4. Click on the `Add` button now. The web console will prompt the same warning you saw when you logged in.
+ 
- 
+4. Click on the `Add` button now. The web console will prompt the same warning you saw when you logged in:
+
+ 
Click on `OK` and you'll get to the windows where you can add apt repositories.
- 
+ 
-5. On that window, choose the `No-Subscription` option from the `Repository` list and then press on `Add`.
+5. On that window, choose the `No-Subscription` option from the `Repository` list and then press on `Add`:
- 
+ 
-6. With the `No-Subscription` repository added, you'll see a different status in the `Repositories` screen.
+6. With the `No-Subscription` repository added, you'll see a different status in the `Repositories` screen:
- 
+ 
- What the new warning means is that the no-subscription repository is not the safest one to use for real production use. Still, usually it'll be good enough for your personal needs.
+    What the new warning means is that the no-subscription repository is not recommended for real production use. Regardless, it should be good enough for your personal homelab needs.
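+
+If you want to double-check the result from a shell, you can inspect the apt source definitions that the web console manages. This is only a hedged sketch; the exact file names and their contents may differ in your installation:
+
+~~~sh
+# List the apt source definition files:
+ls -l /etc/apt/sources.list.d/
+
+# Review them; the enterprise entries should now appear disabled and a
+# pve-no-subscription entry (http://download.proxmox.com/debian/pve) present:
+cat /etc/apt/sources.list.d/*
+
+# Finally, confirm that apt no longer returns the 401 Unauthorized errors:
+apt update
+~~~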
## Update your system
-Now you can go to the `Updates` screen and see what's pending.
-
-1. Browse to the `Updates` tab, and click on the `Refresh` button to be sure that you're getting the most recent list of updates.
+Now you can browse to the `Updates` screen and see what's pending:
- 
+1. Browse to the `Updates` tab, and click on the `Refresh` button to be sure that you're getting the most recent list of updates:
- You'll see the warning window about not having a valid subscription.
+ 
- 
+ Pressing the refresh button will launch again the warning window about not having a valid subscription:
- Close that window and you'll meet a new one in which you'll see the `apt update` task's progress.
+ 
- 
+ Close that window and you'll meet a new one in which you'll see the `apt update` task's progress:
- When you see the line `TASK OK`, close the window to go back to the updates list.
+ 
- 
+ When you see the line `TASK OK`, close the window to go back to the updates list:
- Above, you can see that there are a lot of updates that have to be applied. In other times, this page may appear with just a few packages or empty.
+ 
-2. To apply all the updates, click on the `Upgrade` button.
+ See above that there are a number of updates, from different origins, to be applied. In future attempts, this page will show a different selection of pending packages or none at all.
- 
+2. To apply all the updates, click on the `Upgrade` button:
-3. By default, the web console will open a shell console, using your `root` user, in which it'll launch the `apt dist-upgrade` command.
+ 
- 
+3. By default, the web console will open a noVNC shell console, using your `root` user, in which it'll launch the `apt dist-upgrade` command:
- Pay attention to when the apt command asks you for confirmation so it can proceed with the update. Also, be aware that some packages may require your input for some reason or other.
+ 
-4. When the apt command finishes, it'll return the control to the prompt within the shell console.
+ > [!IMPORTANT]
+ > **Pay attention to when the `apt` command requests your confirmation to proceed!**\
+ > Also, be aware that some packages may require your input for some reason or other.
- 
+4. When the apt command finishes, it'll return the control to the prompt within the shell console:
- Type `exit` to get out of the shell console, or just close the window directly.
+ 
- 
+    Type `exit` to log out of the shell console and close its window, or just close the window directly:
-5. Back in the `Updates` screen of your `pve` node, you'll see that the updates list hasn't been refreshed. So, press again on the `Refresh` button to update the list.
+ 
- 
+5. Back in the `Updates` view of your `pve` node, you'll see that the updates list hasn't been refreshed. So, press again on the `Refresh` button to update the list:
-6. The `Updates` screen may or may not show more updates to apply after refreshing. So, keep on applying the upgrades until none appear listed in this screen.
+ 
- 
+6. The `Updates` view may or may not show more updates to apply after refreshing. So, keep on applying the upgrades until none appears listed in this page:
-7. If you've applied many updates, or some of them were kernel related, it's better if you reboot the system. Just press on the `Reboot` button while having your `pve` node selected.
+ 
- 
+7. If you've applied many updates, or if some of them were kernel-related, it's better if you reboot the system. Just press on the `Reboot` button while having your `pve` node selected:
- The Proxmox VE web console will ask you to confirm the action, so click on `Yes` to proceed.
+ 
- 
+ The Proxmox VE web console will ask you to confirm the action. Click on `Yes` to proceed:
-8. After the reboot, just log back in the web console and check that Proxmox VE is running fine.
+ 
-While the installation left me with Proxmox VE in its version 7.0-11, after the first update, Proxmox VE got upgraded to the 7.0-14 version.
+8. After the reboot, just log back in the web console as `root` and check that Proxmox VE still runs fine.
-### _Consideration about upgrades_
+### Consideration about upgrades
-As you've seen before, you can end having to apply several updates at once in your system. In theory, a good administrator has to be diligent and verify that each update is safe to apply. In reality, trying to do that usually is not doable. Still, you should at least be aware of the updates that directly affect the Proxmox VE platform, the ones that can update to a more recent minor or major version. Those are the ones that could break things in your setup, specially the major ones (for instance, when going from a version 6.4-x to a 7.0-x one).
+As you have seen before, you can end up having to apply several updates at once in your system. In theory, a good administrator has to be diligent and verify that each update is safe to apply. In reality, doing that is usually not feasible. Still, you should at least be aware of the updates that directly affect the Proxmox VE platform, the ones that can update it to a more recent minor or major version. Those are the ones that could break things in your setup, especially the major ones (for instance, when going from a version 8.y.z to a 9.y.z one).
-So, my advice here is, since you only have one standalone node so, before you apply such updates, you should make a clone of your only node's Proxmox VE root filesystem (or the entire drive) with a tool like **Clonezilla**. This way, if something goes south in the upgrade, you can always go back to the previous stable state.
+My advice here is: since you only have one standalone node, before you apply such updates you should make a clone of that node's Proxmox VE root filesystem (or the entire drive) with a tool like **Clonezilla**. This way, if something goes south in the upgrade, you can always go back to the previous stable state.
-Check out the [**G905** appendix guide](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md) to see how to use Clonezilla to backup your host's storage drives.
+> [!NOTE]
+> Check out the [**G905** appendix chapter](G905%20-%20Appendix%2005%20~%20Cloning%20storage%20drives%20with%20Clonezilla.md) to see how to use Clonezilla to backup your host's storage drives.
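+
+A quick, hedged way of spotting whether a pending batch of updates touches the Proxmox VE platform itself, before applying anything:
+
+~~~sh
+# Current Proxmox VE version installed on the node:
+pveversion
+
+# Refresh the package lists and check the pending upgrades,
+# filtering for Proxmox-related packages:
+apt update
+apt list --upgradable 2>/dev/null | grep -Ei 'pve|proxmox'
+~~~
+
+If that filtered list shows a jump in the Proxmox VE version itself, that is the moment to make the Clonezilla clone mentioned above before upgrading.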
-### _You can use `apt` directly_
+### You can use `apt` directly
-Instead of using the `Updates` screen in the web console, you could just use the `apt` command directly through an SSH shell or by opening a shell directly from the web console, as in any other Debian-based Linux system.
+Instead of using the `Updates` screen in the web console, you can use the `apt` command directly through an SSH shell or by opening a shell directly from the web console.
-Personally, I prefer to connect through a SSH client to the server, as it's explained in the [**G901** appendix guide about PuTTY](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md). If you prefer to open the shell from the Proxmox VE web console, just know that it has three different options.
+Personally, I prefer to connect to the server through an SSH client, as explained in the [**G901** appendix chapter about PuTTY](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md). If you prefer to open the shell from the Proxmox VE web console, know that it offers three different options:
-
+
-I recommend the `xterm.js` option, since that shell allows you to copy and paste, unlike the **noVCN** one. `SPICE` doesn't open you a shell, it gives you a file that you have to use in a special client prepared to use the SPICE protocol.
+I recommend the `xterm.js` option, since that shell allows you to copy and paste, unlike the **noVNC** one. `SPICE` does not open a shell; it gives you a file that you have to open with a special client that supports the SPICE protocol.
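+
+As a reference, this is roughly the same routine that the web console's `Refresh` and `Upgrade` buttons run for you, sketched as plain shell commands you can execute as `root`:
+
+~~~sh
+# Refresh the package lists from the configured repositories:
+apt update
+
+# Apply all pending upgrades, including the Proxmox VE packages:
+apt dist-upgrade
+~~~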
## Installing useful extra tools
-Now that you can use the `apt` command properly, it's time to install some useful tools for different purposes. Those packages are the following:
-
-- `ethtool` : Tool to query and change settings such as speed, auto-
- negotiation and checksum offload on many network devices, in particular on
- Ethernet devices.
+Now that you can use the `apt` command properly, it's time to install some useful tools for different purposes:
-- `htop`: Interactive text-based process viewer, similar to `top` but much more user friendly and colorful (on terminals that support color).
+- `htop`\
+ Interactive text-based process viewer, similar to `top` but much more user friendly and colorful (on terminals that support color).
-- `net-tools`: Package that includes several useful commands for network management.
+- `net-tools`\
+ Package that includes several useful commands for network management.
-- `sudo`: A command that allows a sysadmin to give limited `root`
- privileges to users and log root activity.
+- `sudo`\
+ A command that allows a sysadmin to give limited `root` privileges to users and log root activity.
-- `tree`: Is a recursive directory listing command that produces a depth indented listing of files.
+- `tree`\
+  A recursive directory listing command that produces a depth-indented listing of files.
-- `vim`: A more complete version of the `vi` editor, which includes fancy things like syntax coloring.
+- `vim`\
+ A more complete version of the `vi` editor, which includes fancy things like syntax coloring.
To install all of the above at once, open a shell terminal as `root` and use the following command:
-~~~bash
-$ apt install -y ethtool htop net-tools sudo tree vim
+~~~sh
+$ apt install -y htop net-tools sudo tree vim
~~~
-### _Utilities for visualizing sensor information_
+### Utilities for visualizing sensor information
-Any modern computer comes with a bunch of integrated sensors, usually ones that return CPU's cores temperatures, fan speeds and voltages. Sure you'd like to see those values through the shell easily, right? There are a bunch of tools which do that, but here I'll show you the two that I found more interesting.
+Any modern computer comes with a bunch of integrated sensors, usually ones that report CPU core temperatures, fan speeds and voltages. Surely you'd like to see those values easily through the shell, right? There are a bunch of tools that give such information, but here I'll propose the two that I found most interesting.
-#### **The lm_sensors package**
+#### The lm_sensors package
-The lm_sensors package provides a `sensors` command that allows you to see the values returned by the sensors integrated in a Linux host like yours. To be able to use that command, you'll need to install and configure the `lm_sensors` package as follows.
+The `lm_sensors` package provides a `sensors` command that allows you to see the values returned by the sensors integrated in a Linux host like your PVE server.
-1. Open a shell in your main `pve` node as `root` (or as a `sudo`-able user if you already got one), then execute the next `apt` command.
+> [!NOTE]
+> **The `lm_sensors` package is no longer updated**\
+> It still works, in particular with old hardware like the one used in this guide, but don't be surprised if it stops working in the future.
- ~~~bash
+To be able to use the `sensors` command, you'll need to install and configure the `lm_sensors` package as follows:
+
+1. Open a shell in your main `pve` node as `root` (or as a `sudo`-able user if you already got one), then execute the next `apt` command:
+
+ ~~~sh
$ apt install -y lm-sensors
~~~
-2. Execute `sensors-detect`. This will launch a scan on your system looking for all the sensors available in it, so it can determine which kernel modules `lm_sensors` has to use. This scan is automatic, but the command will ask you on every step of the procedure.
+2. Execute `sensors-detect`. This will launch a scan on your system looking for all the sensors available in it, so it can determine which kernel modules `lm_sensors` has to use. This scan is automatic, but the command will ask for your confirmation at every step of the procedure:
- > **BEWARE!**
- > It might be possible that a step could give some trouble if executed in your system, so read the question asked on each step and, in case of doubt, answer `no` to the step you feel unsure of.
+ > [!WARNING]
+ > **Some steps might give trouble if executed in your system!**\
+ > Read the question asked on each step and, in case of doubt, answer `no` to the step you feel unsure of.
- ~~~bash
- $ sensors-detect
- # sensors-detect revision $Revision$
- # System: LENOVO 90C2001USP [Lenovo H30-00]
- # Board: LENOVO Aptio CRB
- # Kernel: 5.4.124-1-pve x86_64
- # Processor: Intel(R) Pentium(R) CPU J2900 @ 2.41GHz (6/55/8)
+ ~~~sh
+ # sensors-detect version 3.6.2
+ # System: Packard Bell iMedia S2883
+ # Kernel: 6.14.8-2-pve x86_64
+ # Processor: Intel(R) Celeron(R) CPU J1900 @ 1.99GHz (6/55/8)
This program will help you determine which kernel modules you need
to load to use lm_sensors most effectively. It is generally safe
@@ -209,7 +235,7 @@ The lm_sensors package provides a `sensors` command that allows you to see the v
unless you know what you're doing.
Some south bridges, CPUs or memory controllers contain embedded sensors.
- Do you want to scan for them? This is totally safe. (YES/no):
+ Do you want to scan for them? This is totally safe. (YES/no):
Module cpuid loaded successfully.
Silicon Integrated Systems SIS5595... No
VIA VT82C686 Integrated Sensors... No
@@ -223,6 +249,8 @@ The lm_sensors package provides a `sensors` command that allows you to see the v
AMD Family 17h thermal sensors... No
AMD Family 15h power sensors... No
AMD Family 16h power sensors... No
+ Hygon Family 18h thermal sensors... No
+ AMD Family 19h thermal sensors... No
Intel digital thermal sensor... Success!
(driver `coretemp')
Intel AMB FB-DIMM thermal sensor... No
@@ -232,13 +260,13 @@ The lm_sensors package provides a `sensors` command that allows you to see the v
Some Super I/O chips contain embedded sensors. We have to write to
standard I/O ports to probe them. This is usually safe.
- Do you want to scan for Super I/O sensors? (YES/no):
+ Do you want to scan for Super I/O sensors? (YES/no):
Probing for Super-I/O at 0x2e/0x2f
Trying family `National Semiconductor/ITE'... No
Trying family `SMSC'... No
Trying family `VIA/Winbond/Nuvoton/Fintek'... No
Trying family `ITE'... Yes
- Found `ITE IT8728F Super IO Sensors' Success!
+ Found `ITE IT8772E Super IO Sensors' Success!
(address 0xa30, driver `it87')
Probing for Super-I/O at 0x4e/0x4f
Trying family `National Semiconductor/ITE'... No
@@ -246,51 +274,7 @@ The lm_sensors package provides a `sensors` command that allows you to see the v
Trying family `VIA/Winbond/Nuvoton/Fintek'... No
Trying family `ITE'... No
- Some systems (mainly servers) implement IPMI, a set of common interfaces
- through which system health data may be retrieved, amongst other things.
- We first try to get the information from SMBIOS. If we don't find it
- there, we have to read from arbitrary I/O ports to probe for such
- interfaces. This is normally safe. Do you want to scan for IPMI
- interfaces? (YES/no):
- Probing for `IPMI BMC KCS' at 0xca0... No
- Probing for `IPMI BMC SMIC' at 0xca8... No
-
- Some hardware monitoring chips are accessible through the ISA I/O ports.
- We have to write to arbitrary I/O ports to probe them. This is usually
- safe though. Yes, you do have ISA I/O ports even if you do not have any
- ISA slots! Do you want to scan the ISA I/O ports? (yes/NO): yes
- Probing for `National Semiconductor LM78' at 0x290... No
- Probing for `National Semiconductor LM79' at 0x290... No
- Probing for `Winbond W83781D' at 0x290... No
- Probing for `Winbond W83782D' at 0x290... No
-
- Lastly, we can probe the I2C/SMBus adapters for connected hardware
- monitoring devices. This is the most risky part, and while it works
- reasonably well on most systems, it has been reported to cause trouble
- on some systems.
- Do you want to probe the I2C/SMBus adapters now? (YES/no):
- Using driver `i2c-i801' for device 0000:00:1f.3: BayTrail (SOC)
-
- Next adapter: SMBus I801 adapter at f000 (i2c-0)
- Do you want to scan it? (yes/NO/selectively): yes
- Client found at address 0x50
- Probing for `Analog Devices ADM1033'... No
- Probing for `Analog Devices ADM1034'... No
- Probing for `SPD EEPROM'... Yes
- (confidence 8, not a hardware monitoring chip)
- Probing for `EDID EEPROM'... No
-
- Next adapter: i915 gmbus ssc (i2c-1)
- Do you want to scan it? (yes/NO/selectively): yes
-
- Next adapter: i915 gmbus vga (i2c-2)
- Do you want to scan it? (yes/NO/selectively): yes
-
- Next adapter: i915 gmbus panel (i2c-3)
- Do you want to scan it? (yes/NO/selectively): yes
-
- Next adapter: i915 gmbus dpc (i2c-4)
- Do you want to scan it? (yes/NO/selectively): yes
+ [...]
Next adapter: i915 gmbus dpb (i2c-5)
Do you want to scan it? (yes/NO/selectively): yes
@@ -298,16 +282,19 @@ The lm_sensors package provides a `sensors` command that allows you to see the v
Next adapter: i915 gmbus dpd (i2c-6)
Do you want to scan it? (yes/NO/selectively): yes
+ Next adapter: AUX B/DP B (i2c-7)
+ Do you want to scan it? (yes/NO/selectively): yes
- Now follows a summary of the probes I have just done.
- Just press ENTER to continue:
- Driver `coretemp':
- * Chip `Intel digital thermal sensor' (confidence: 9)
+ Now follows a summary of the probes I have just done.
+ Just press ENTER to continue:
Driver `it87':
- * ISA bus, address 0xa30
- Chip `ITE IT8728F Super IO Sensors' (confidence: 9)
+ * ISA bus, address 0xa30
+ Chip `ITE IT8772E Super IO Sensors' (confidence: 9)
+
+ Driver `coretemp':
+ * Chip `Intel digital thermal sensor' (confidence: 9)
To load everything that is needed, add this to /etc/modules:
#----cut here----
@@ -318,7 +305,7 @@ The lm_sensors package provides a `sensors` command that allows you to see the v
If you have some drivers built into your kernel, the list above will
contain too many modules. Skip the appropriate ones!
- Do you want to add these lines automatically to /etc/modules? (yes/NO))yes
+ Do you want to add these lines automatically to /etc/modules? (yes/NO)yes
Successful!
Monitoring programs won't work until the needed modules are
@@ -328,113 +315,133 @@ The lm_sensors package provides a `sensors` command that allows you to see the v
Unloading cpuid... OK
~~~
- You can see, in the output above, that I've allowed the `sensors-detect` command to execute all of its steps on my system, and that there were no issues. See how the final question asks for your permission to write some lines in the `/etc/modules` file. Say `yes` to it, but bear in mind that, if you uninstall the `lm_sensors` package later, those lines will remain in `/etc/modules`.
+   I've omitted a big chunk of the `sensors-detect` command's output, since it turned out to be very long on my reference hardware, but know that I executed all its steps except one without issues. See how the final question asks for your permission to write some lines in the `/etc/modules` file. Say `yes` to it, but bear in mind that, if you uninstall the `lm_sensors` package later, those lines will remain written in `/etc/modules`.
- Below you can see the lines sensors-detect wrote, in my PVE host, in the `/etc/modules` file. Bear in mind that this lines may be different in your system.
+ Below you can see the lines sensors-detect wrote in the `/etc/modules` file of my PVE host. Bear in mind that these lines may be different in your system.
- ~~~bash
- # Generated by sensors-detect on Tue Nov 9 17:45:42 2021
+ ~~~sh
+ # /etc/modules is obsolete and has been replaced by /etc/modules-load.d/.
+ # Please see modules-load.d(5) and modprobe.d(5) for details.
+ #
+ # Updating this file still works, but it is undocumented and unsupported.
+
+ # Generated by sensors-detect on Wed Aug 20 10:57:42 2025
# Chip drivers
coretemp
it87
~~~
-3. To ensure that all the modules configured by `sensors-detect` are loaded, reboot your system.
+ > [!NOTE]
+ > **The `/etc/modules` file has been replaced by the `/etc/modules-load.d/` directory**\
+   > Notice the related warning about this change at the beginning of the `/etc/modules` file. If you check inside the `/etc/modules-load.d/` directory, you will find a `modules.conf` symlink pointing to the `/etc/modules` file, which keeps backward compatibility with packages, like `lm_sensors`, that haven't adapted to this change yet (a quick way to verify the symlink is sketched right after this list of steps).
+
+3. To ensure that all the modules configured by `sensors-detect` are loaded, reboot your system:
- ~~~bash
+ ~~~sh
$ reboot
~~~
-4. After the reboot, open a new shell and try the `sensors` command.
+4. After the reboot, open a new shell and try the `sensors` command:
- ~~~bash
+ ~~~sh
$ sensors
soc_dts1-virtual-0
Adapter: Virtual device
- temp1: +37.0°C
+ temp1: +36.0°C
- it8728-isa-0a30
+ it8772-isa-0a30
Adapter: ISA adapter
- in0: 684.00 mV (min = +1.58 V, max = +0.35 V) ALARM
- in1: 1.37 V (min = +1.98 V, max = +3.02 V) ALARM
- in2: 2.84 V (min = +0.02 V, max = +1.78 V) ALARM
- +3.3V: 3.29 V (min = +5.88 V, max = +0.65 V) ALARM
- in4: 1.99 V (min = +2.40 V, max = +2.40 V) ALARM
- in5: 768.00 mV (min = +2.56 V, max = +1.38 V) ALARM
- in6: 312.00 mV (min = +0.16 V, max = +2.45 V)
- 3VSB: 3.26 V (min = +0.94 V, max = +0.65 V) ALARM
- Vbat: 3.19 V
- fan1: 1834 RPM (min = 10 RPM)
- fan2: 3835 RPM (min = 64 RPM)
- fan3: 0 RPM (min = 19 RPM) ALARM
- temp1: +30.0°C (low = -107.0°C, high = +58.0°C) sensor = thermal diode
- temp2: +32.0°C (low = +48.0°C, high = -80.0°C) ALARM sensor = thermal diode
- temp3: -128.0°C (low = -82.0°C, high = +1.0°C) ALARM sensor = disabled
+ in0: 708.00 mV (min = +2.56 V, max = +1.22 V) ALARM
+ in1: 1.37 V (min = +1.50 V, max = +0.84 V) ALARM
+ in2: 2.06 V (min = +1.69 V, max = +2.46 V)
+ in3: 2.00 V (min = +2.05 V, max = +2.63 V) ALARM
+ in4: 2.03 V (min = +2.27 V, max = +2.18 V) ALARM
+ in5: 2.03 V (min = +1.24 V, max = +0.76 V) ALARM
+ in6: 2.98 V (min = +1.78 V, max = +1.32 V) ALARM
+ 3VSB: 3.36 V (min = +4.66 V, max = +2.90 V) ALARM
+ Vbat: 3.26 V
+ fan2: 0 RPM (min = 32 RPM) ALARM
+ fan3: 1555 RPM (min = 14 RPM)
+ temp1: +31.0°C (low = +61.0°C, high = -11.0°C) ALARM sensor = thermistor
+ temp2: +34.0°C (low = -17.0°C, high = -93.0°C) sensor = thermistor
+ temp3: -70.0°C (low = +127.0°C, high = -2.0°C) sensor = Intel PECI
+ pwm1: 64% (freq = 23437 Hz) MANUAL CONTROL
+ pwm2: 64% (freq = 23437 Hz)
+ pwm3: 58% (freq = 23437 Hz)
intrusion0: ALARM
acpitz-acpi-0
Adapter: ACPI interface
- temp1: +26.8°C (crit = +90.0°C)
+ temp1: +37.0°C
soc_dts0-virtual-0
Adapter: Virtual device
- temp1: +36.0°C
+ temp1: +35.0°C
coretemp-isa-0000
Adapter: ISA adapter
- Core 0: +35.0°C (high = +105.0°C, crit = +105.0°C)
- Core 1: +35.0°C (high = +105.0°C, crit = +105.0°C)
+ Core 0: +34.0°C (high = +105.0°C, crit = +105.0°C)
+ Core 1: +34.0°C (high = +105.0°C, crit = +105.0°C)
Core 2: +35.0°C (high = +105.0°C, crit = +105.0°C)
Core 3: +35.0°C (high = +105.0°C, crit = +105.0°C)
~~~
- Notice how the command outputs all sorts of information from the system: different temperature measurements from different adapters and interfaces, the speed of the fans present in my host and also some voltage information. Also see how the command has printed `ALARM` on several lines, which are warnings of things the command is finding odd. Since my computer is working fine, this is more probably a question of configuring the command so it evaluates the values properly. Also, as you may imagine, the output of this command will be quite different in your machine.
+   Notice how the command outputs all sorts of information from the system: different temperature measurements from different adapters and interfaces, the speed of the fans present in my host and also some voltage readings. Also see how the command has printed `ALARM` on several lines, which are warnings about readings the command finds odd. Since my computer is working fine, this is most probably a matter of configuring `sensors` so it evaluates those values properly. As you may imagine, the output of this command will be quite different on your machine.
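+
+By the way, a quick way to verify the backward-compatibility symlink mentioned in the note of step 2 (an optional check, not needed for the rest of this section) is listing the `/etc/modules-load.d/` directory:
+
+~~~sh
+$ ls -l /etc/modules-load.d/
+~~~
+
+You should see the `modules.conf` entry shown as a symlink to the `/etc/modules` file, as described in that note.
+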
-#### **The Stress Terminal UI: s-tui**
+#### The Stress Terminal UI: s-tui
-The Stress Terminal UI, or just `s-tui`, is a command that gives you a much more graphical vision of the current performance of your hardware. To get it, just install its package with `apt`.
+The _Stress Terminal UI_, or just `s-tui`, is a command that gives you a much more graphical view of the current performance of your hardware. To get it, just install its package with `apt`:
-~~~bash
+~~~sh
$ apt install -y s-tui
~~~
With the package installed, just execute the `s-tui` command.
-> **BEWARE!**
+> [!IMPORTANT]
> When using a **non-root** user, execute this command with `sudo` so it can access all the system sensors.
-~~~bash
+~~~sh
$ s-tui
~~~
You should see the main screen of `s-tui` immediately.
-
+
You can use the arrows or the Page Up/Down keys to browse in the left-side menu and even change some options. Going down in the menu, you'll see all the sensors this command is able to read. The settings of `s-tui` are kept in the user's `.config/s-tui` folder.
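+
+If you are curious about that settings folder, you can simply list it (it may not exist until `s-tui` has been run at least once by that user):
+
+~~~sh
+$ ls -a ~/.config/s-tui
+~~~
+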
## Relevant system paths
-### _Directories_
+### Directories
- `$HOME/.config/s-tui`
+- `/etc/`
+- `/etc/modules-load.d/`
+
+### Files
+
+- `/etc/modules`
+- `/etc/modules-load.d/modules.conf`
## References
-### _Proxmox VE_
+### [Proxmox](https://www.proxmox.com/en/)
+
+- [Proxmox VE Wiki](https://pve.proxmox.com/wiki/Main_Page)
+ - [Proxmox Package Repositories](https://pve.proxmox.com/wiki/Package_Repositories)
+ - [Proxmox VE No-Subscription Repository](https://pve.proxmox.com/wiki/Package_Repositories#sysadmin_no_subscription_repo)
+ - [Roadmap](https://pve.proxmox.com/wiki/Roadmap)
-- [Proxmox VE 7.0 Datasheet](https://www.proxmox.com/en/downloads/item/proxmox-ve-datasheet)
-- [Proxmox Package Repositories](https://pve.proxmox.com/wiki/Package_Repositories)
-- [Proxmox VE No-Subscription Repository](https://pve.proxmox.com/wiki/Package_Repositories#sysadmin_no_subscription_repo)
- [How to: Fix Proxmox/PVE update failed(Failed to fetch 401 Unauthorized) (TASK ERROR: command ‘apt-get update’ failed: exit code 100)](https://dannyda.com/2020/06/19/how-to-fix-proxmox-pve6-1-26-1-7-update-failedfailed-to-fetch-401-unauthorized-task-error-command-apt-get-update-failed-exit-code-100/)
-- [Proxmox roadmap](https://pve.proxmox.com/wiki/Roadmap)
-### _Tools_
+### Tools
+- [Ceph](https://ceph.io/en/)
- [Clonezilla](https://clonezilla.org/)
- [Find fan speed and cpu temp in Linux](https://unix.stackexchange.com/questions/328906/find-fan-speed-and-cpu-temp-in-linux)
- [Lm-sensors: Monitoring CPU And System Hardware Temperature](https://www.unixmen.com/lm-sensors-monitoring-cpu-system-hardware-temperature/)
-- [Lm_sensors - Linux hardware monitoring](https://hwmon.wiki.kernel.org/lm_sensors)
+- [Lm_sensors - Linux hardware monitoring](https://archive.kernel.org/oldwiki/hwmon.wiki.kernel.org/lm_sensors.html)
- [The lm-sensors package on GitHub](https://github.com/lm-sensors/lm-sensors)
- [The Stress Terminal UI: s-tui on GitHub](https://github.com/amanusk/s-tui)
diff --git a/G004 - Host configuration 02 ~ UPS management with NUT.md b/G004 - Host configuration 02 ~ UPS management with NUT.md
index 02a1ea3..9c7541e 100644
--- a/G004 - Host configuration 02 ~ UPS management with NUT.md
+++ b/G004 - Host configuration 02 ~ UPS management with NUT.md
@@ -1,60 +1,68 @@
# G004 - Host configuration 02 ~ UPS management with NUT
-Any server must be always connected to an UPS! You really need to protect your server from electrical surges, power cuts or outages. Here we'll assume that you already have one UPS plugged by USB to your computer.
+- [Any server must be always connected to an UPS unit](#any-server-must-be-always-connected-to-an-ups-unit)
+- [Connecting your UPS with your PVE node using NUT](#connecting-your-ups-with-your-pve-node-using-nut)
+- [Checking the NUT logs](#checking-the-nut-logs)
+- [Executing instant commands on your UPS unit](#executing-instant-commands-on-your-ups-unit)
+- [Other possibilities with NUT](#other-possibilities-with-nut)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [Network UPS Tools (NUT)](#network-ups-tools-nut)
+ - [Related NUT contents](#related-nut-contents)
+ - [About UPS units](#about-ups-units)
+- [Navigation](#navigation)
-The program we're going to use is a generic package called **NUT (Network UPS Tool)**, although be aware that your UPS brand may already have their own particular software for UPS management (like in the case of **APC** and its related `apcupsd` tool for Linux systems).
+## Any server must be always connected to an UPS unit
-> **BEWARE!**
-> It could be that NUT doesn't support your concrete UPS model, but it may have compatible drivers for your UPS brand. Check on the [NUT hardware compatibility list](https://networkupstools.org/stable-hcl.html) to verify if your brand or model line has compatible drivers there.
+You really need to protect your server from electrical surges, power cuts or outages. Here we'll assume that you already have a UPS unit connected by USB to your Proxmox VE server.
-## Connecting your UPS with your pve node using NUT
+In this chapter you'll learn how to make your PVE node monitor the UPS unit with a generic package called **NUT (Network UPS Tools)**. However, be aware that your UPS unit's brand may already have its own particular software for UPS management.
-In this guide, I'll work with my _APC Back-UPS_ unit. This model (and similar ones) comes with a **special USB cable** to connect the UPS with any computer. If your UPS doesn't have this cable, **you won't be able to proceed with the instructions explained below**.
+> [!IMPORTANT]
+> **NUT may not support your specific UPS unit**\
+> Still, it may have compatible drivers for your UPS unit's brand. Check the [NUT hardware compatibility list](https://networkupstools.org/stable-hcl.html) to verify whether your brand or model line has compatible drivers there.
-Assuming your UPS unit has the required USB cable, here's how to proceed with its configuration in your server.
+## Connecting your UPS with your PVE node using NUT
-1. First, check the following.
- - Your server is plugged in one of your UPS's _protected_ sockets.
- - You have your UPS connected with its special cable to an USB plug in your server.
+In this chapter, I'll work with my _Eaton 3S700D_ UPS unit. This model (and similar ones) comes with a **USB 2.0 cable** to connect the UPS unit with any computer.
+
+> [!WARNING]
+> If your UPS unit does not come with a USB cable, **you won't be able to proceed with the instructions explained below**.
+
+Assuming your UPS unit has the required USB cable, here's how to proceed with its configuration in your server:
+
+1. First, check the following:
+
+   - Your server is plugged into one of your UPS unit's _battery-protected_ sockets.
+   - You have your UPS connected with its USB cable to a USB port in your server.
- The UPS unit is _on_. Obviously, if it weren't, you wouldn't be able to switch your server on.
-2. With your Proxmox VE server running, get into the `pve` node through a `root` shell and execute the `lsusb` command.
+2. With your Proxmox VE server running, get into the `pve` node through a `root` shell and execute the `lsusb` command:
- ~~~bash
+ ~~~sh
$ lsusb
- Bus 002 Device 002: ID 0bc2:3330 Seagate RSS LLC Raptor 3.5" USB 3.0
- Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
- Bus 001 Device 003: ID 0bda:0129 Realtek Semiconductor Corp. RTS5129 Card Reader Controller
- Bus 001 Device 005: ID 051d:0002 American Power Conversion Uninterruptible Power Supply
- Bus 001 Device 004: ID 046d:c52b Logitech, Inc. Unifying Receiver
- Bus 001 Device 002: ID 05e3:0610 Genesys Logic, Inc. Hub
Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
+ Bus 001 Device 003: ID 05e3:0608 Genesys Logic, Inc. Hub
+ Bus 001 Device 004: ID 0bda:0129 Realtek Semiconductor Corp. RTS5129 Card Reader Controller
+ Bus 001 Device 005: ID 046d:c52b Logitech, Inc. Unifying Receiver
+ Bus 001 Device 006: ID 0463:ffff MGE UPS Systems UPS
+ Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
+ Bus 002 Device 002: ID 152d:0578 JMicron Technology Corp. / JMicron USA Technology Corp. JMS578 SATA 6Gb/s
~~~
- The `lsusb` gives you a list of the USB devices currently connected to your physical server. Please notice how my _APC Back-UPS_ unit is listed in the `Bus 001 Device 005` line, confirming the correct connection of the unit to the `pve` node.
+   The `lsusb` command gives you a list of the USB devices currently connected to your physical server. Please notice how my _Eaton 3S700D_ UPS unit is listed in the `Bus 001 Device 006` line, under the rather misleading name `MGE UPS Systems UPS`, confirming the correct connection of the unit to the `pve` node.
-3. Now you need to install the **NUT** software package.
+3. Now you need to install the **NUT** software package:
- ~~~bash
+ ~~~sh
$ apt install -y nut
~~~
- In the `apt` output, notice the following lines pointing at an issue with the NUT service.
-
- ~~~bash
- Created symlink /etc/systemd/system/multi-user.target.wants/nut-monitor.service → /lib/systemd/system/nut-monitor.service.
- Job for nut-monitor.service failed because the service did not take the steps required by its unit configuration.
- See "systemctl status nut-monitor.service" and "journalctl -xe" for details.
- Setting up nut-server (2.7.4-13) ...
- Created symlink /etc/systemd/system/multi-user.target.wants/nut-server.service → /lib/systemd/system/nut-server.service.
- nut-driver.service is a disabled or a static unit, not starting it.
- ~~~
+4. You need to configure NUT properly so it can manage your UPS unit. There are several files to change; let's start by editing `/etc/nut/nut.conf`. But before you change it, first make a backup of it:
- Don't worry about this during the `apt` installation, it's happening because you still have to configure NUT.
-
-4. You need to configure NUT properly so it can manage your UPS unit. There are several files to change, let's start editing `/etc/nut/nut.conf`. But before you change it, first make a backup of it.
-
- ~~~bash
+ ~~~sh
$ cd /etc/nut
$ cp nut.conf nut.conf.orig
~~~
@@ -71,73 +79,105 @@ Assuming your UPS unit has the required USB cable, here's how to proceed with it
MODE=standalone
~~~
- By putting `standalone` you're enabling NUT to work in local mode or, in other words, it will "worry" only about your `pve` node. Please check the comments in the `nut.conf` to have a better idea of which are the modes that NUT supports.
+   By setting the `standalone` mode you're telling NUT to work in local mode or, in other words, to "worry" only about your `pve` node.
-5. You need to tell NUT about the UPS unit connected to your system. To do that, you must edit the file `/etc/nut/ups.conf`, so make a backup of it.
+ > [!NOTE]
+   > Check the comments in the `nut.conf` file to get a better idea of which modes NUT supports.
- ~~~bash
+5. You need to tell NUT about the UPS unit connected to your system. To do that, you must edit the file `/etc/nut/ups.conf`, so make a backup of it:
+
+ ~~~sh
$ cp ups.conf ups.conf.orig
~~~
- Then, **append** the following parameters, although with values fitting for your UPS unit.
+   Then, **append** the following parameters to the `ups.conf` file, adjusting the values to fit your UPS unit:
~~~properties
- [apc]
- driver = usbhid-ups
- port = auto
- desc = "APC Back-UPS ES 700"
+ [eaton]
+ driver = usbhid-ups
+ port = auto
+ desc = "Eaton 3S700D"
~~~
I'll explain the parameters above:
- - `[apc]`: this is the name that will identify your UPS unit in NUT. This can be any string of characters, but **without spaces**. It's recommended you type something there that truly identifies your UPS in a clear manner.
+ - `[eaton]`\
+ This is the name that will identify your UPS unit in NUT. This can be any string of characters, but **without spaces**. It's recommended you type something there that truly identifies your UPS in a clear manner.
- - `driver`: here you indicate NUT how to connect to your UPS unit. For UPS units with their data port plugged in a USB, the correct value is `usbhid-ups`.
+ - `driver`\
+   Here you tell NUT how to connect to your UPS unit. For UPS units with their data port connected through USB, the correct value is `usbhid-ups`.
- - `port`: for an USB connection this has to be left to `auto`.
+ - `port`\
+   For a USB connection this has to be left as `auto`.
- - `desc`: this is just a description string in which you can type the full name and model of your UPS unit, or anything else you might prefer.
+ - `desc`\
+ This is just a descriptive string where you can type the full name and model of your UPS unit, or anything else you might prefer.
-6. At this point, we can check if NUT can start its driver and detect your UPS unit properly.
+6. At this point, we can check if NUT can start its driver and detect your UPS unit properly:
- ~~~bash
+ ~~~sh
$ upsdrvctl start
- Network UPS Tools - UPS driver controller 2.7.4
- Network UPS Tools - Generic HID driver 0.41 (2.7.4)
- USB communication driver 0.33
- Using subdriver: APC HID 0.96
+ Network UPS Tools - UPS driver controller 2.8.1
+ Network UPS Tools - Generic HID driver 0.52 (2.8.1)
+ USB communication driver (libusb 1.0) 0.46
+ Using subdriver: MGE HID 1.46
+ ~~~
+
+   The command tells you what components it's using and, in the last line, also indicates which driver NUT used to connect to your particular UPS unit. In this guide's example, you can see it's using the correct MGE "subdriver".
+
+ On the other hand, if you get the following warnings:
+
+ ~~~sh
+ libusb1: Could not open any HID devices: insufficient permissions on everything
+ No matching HID UPS found
+ upsnotify: notify about state 4 with libsystemd: was requested, but not running as a service unit now, will not spam more about it
+ upsnotify: failed to notify about state 4: no notification tech defined, will not spam more about it
+ Driver failed to start (exit status=1)
~~~
- The command tells you what components it's using and, in the last line, also indicates you what driver NUT used to connect to your particular UPS. In this guide's example, you can see it's using the correct APC "subdriver".
+   Just reboot your Proxmox VE server and try the `upsdrvctl start` command again; it should run fine this time.
-7. Next, let's configure the NUT daemon, so edit the file `/etc/nut/upsd.conf`. Again, make a backup first.
+ > [!NOTE]
+ > **Don't worry if you get a "Duplicate driver" warning**\
+ > In the output of the `upsdrvctl start` command you may get a warning like this one:
+ >
+ > `Duplicate driver instance detected (PID file /run/nut/usbhid-ups-eaton.pid exists)! Terminating other driver!`
+ >
+   > It seems that NUT had already started a driver instance for my UPS unit automatically the moment I configured it in the `ups.conf` file. This is not an issue at this point; NUT itself takes care of the duplicate.
- ~~~bash
+7. Next, let's configure the NUT daemon, so edit the file `/etc/nut/upsd.conf`. Again, make a backup first:
+
+ ~~~sh
$ cp upsd.conf upsd.conf.orig
~~~
- Then, uncomment only the `LISTEN` line referred to the IPv4 connection. It should end looking as follows.
+   Edit the `upsd.conf` file to uncomment only the `LISTEN` line that refers to the IPv4 connection. It should end up looking as follows:
~~~properties
- # LISTEN
[]
+ # LISTEN []
LISTEN 127.0.0.1 3493
# LISTEN ::1 3493
~~~
The `LISTEN` lines declares on which ports the `upsd` daemon will listen, and provides a basic access control mechanism. Uncomment the IPv6 line when you use this protocol in your network setup.
-8. In NUT there are also users, which are **NOT** the same ones as in the `passwd` file of your `pve` node. At this point, you'll require two user: one for the NUT monitor agent (`upsmon`) and other for acting as a NUT ups administrator(`upsadmin`). To add them, you must edit the file `/etc/nut/upsd.users`. First, back it up.
+8. In NUT there are also users, which are **NOT** the same ones as in the `passwd` file of your `pve` node. At this point, you'll require two users:
+
+   - One for the NUT monitor agent (`upsmon`).
+   - Another one to act as a NUT UPS administrator (`upsadm`).
- ~~~bash
+ To add them, you must edit the file `/etc/nut/upsd.users`. First, back it up:
+
+ ~~~sh
$ cp upsd.users upsd.users.orig
~~~
- Then, **append** the following configuration block.
+ Then, **append** to `upsd.users` the following configuration block:
~~~properties
[upsmon]
password = s3c4R3_p4sSw0rD!
- upsmon master
+ upsmon primary
[upsadm]
password = D1Ff3rEnT_s3c4R3_p4sSw0rD!
@@ -147,19 +187,27 @@ Assuming your UPS unit has the required USB cable, here's how to proceed with it
The parameters in the lines above mean the following.
- - `[upsmon]`/`[upsadm]` : this is the user's name. It can be any string, **but with no spaces**.
+ - `[upsmon]`/`[upsadm]`\
+ This is the NUT user's name. It can be any string, **but with no spaces**.
- - `password` : the user's password. Please bear in mind that this is an **unencrypted** configuration file, be careful with who can access it.
+ - `password`\
+   The NUT user's password. Please bear in mind that this is an **unencrypted** configuration file, so be careful about who can access it.
- - `upsmon master` : roughly speaking, this line says the user is from a machine directly connected to the UPS unit, and that NUT should be run with `master` privileges there.
+ - `upsmon primary`\
+ Roughly speaking, this line means that the NUT user is from a machine directly connected to the UPS unit, and that NUT should be run with high privileges there.
+
+ > [!NOTE]
+ > The UPS primary and secondary types are further explained [in the NUT `upsmon` man documentation](https://networkupstools.org/docs/man/upsmon.html), in the **UPS types** section.
- - `actions = SET` : allows the user to set values on the managed UPS unit.
+ - `actions = SET`\
+ Allows the NUT user to set values on the managed UPS unit.
- - `instcmds = ALL` : allows the user to execute all available instant commands supported by the managed UPS unit.
+ - `instcmds = ALL`\
+ Allows the NUT user to execute all available instant commands supported by the managed UPS unit.
-9. Finally, you need to configure the NUT monitoring service that watches over the UPS unit. The configuration file you have to edit is `/etc/nut/upsmon.conf`, so back it up.
+9. Finally, you need to configure the NUT monitoring service that watches over the UPS unit. The configuration file you have to edit is `/etc/nut/upsmon.conf`, so back it up:
- ~~~bash
+ ~~~sh
$ cp upsmon.conf upsmon.conf.orig
~~~
@@ -187,7 +235,22 @@ Assuming your UPS unit has the required USB cable, here's how to proceed with it
#
# upsmon runs this command when the system needs to be brought down.
#
- # This should work just about everywhere ... if it doesn't, well, change it.
+ # This should work just about everywhere ... if it doesn't, well, change it,
+ # perhaps to a more complicated custom script.
+ #
+ # Note that while you experiment with the initial setup and want to test how
+ # your configuration reacts to power state changes and ultimately when power
+ # is reported to go critical, but do not want your system to actually turn
+ # off, consider setting the SHUTDOWNCMD temporarily to do something benign -
+ # such as posting a message with 'logger' or 'wall' or 'mailx'. Do be careful
+ # to plug the UPS back into the wall in a timely fashion.
+ #
+ # For Windows setup use something like:
+ # SHUTDOWNCMD "C:\\WINDOWS\\system32\\shutdown.exe -s -t 0"
+ # If you have command line using space character you have to add double quote to them, like this:
+ # SHUTDOWNCMD "\"C:\\Program Files\\some command.bat\" -first_arg -second_arg"
+ # Or use the old DOS 8.3 file name, like this:
+ # SHUTDOWNCMD "C:\\PROGRA~1\\SOMECO~1.bat -first_arg -second_arg"
#SHUTDOWNCMD "/sbin/shutdown -h +0"
~~~
@@ -198,7 +261,7 @@ Assuming your UPS unit has the required USB cable, here's how to proceed with it
# --------------------------------------------------------------------------
# Customized settings
- MONITOR apc@localhost 1 upsmon s3c4R3_p4sSw0rD! master
+ MONITOR eaton@localhost 1 upsmon s3c4R3_p4sSw0rD! primary
SHUTDOWNCMD "logger -t upsmon.conf \"SHUTDOWNCMD calling /sbin/shutdown to shut down system\" ; /sbin/shutdown -h +0"
NOTIFYMSG ONLINE "UPS %s: On line power."
@@ -226,21 +289,26 @@ Assuming your UPS unit has the required USB cable, here's how to proceed with it
RBWARNTIME 7200 # 2 hours
~~~
- Here's an explanation for the parameters shown above:
+ Here's an explanation for all the added parameters in the configuration snippet:
- - **`MONITOR`** : this is the line where you must type the name you gave to your UPS unit (`apc` in the example), the username (`upsmon`) and your NUT user's password (`s3c4R3_p4sSw0rD!`). The number `1` indicates the number of power supplies feeding your system through the connected UPS.
+ - `MONITOR`\
+ This is the line where you must specify your UPS unit (`eaton@localhost`), the username (`upsmon`) and your NUT user's password (`s3c4R3_p4sSw0rD!`). The number `1` indicates the number of power supplies feeding your system through the connected UPS.
- - **`SHUTDOWN`** : declares the command that is to be used to shut down the host. In the line shown above, it's prepared to write a log before it executes the system's shut down.
+ - `SHUTDOWNCMD`\
+   Declares the command to be used to shut down the host. In the line shown in the snippet, the command is prepared to write a log entry before it executes the system shutdown.
- - **`NOTIFYMSG`** : each of these lines are assigning a text message to each NOTIFY event. Within each message, the marker `%s` is replaced by the **name of the UPS** which has produced this event. `upsmon` passes this message to program wall to notify the system administrator of the event.
+ - `NOTIFYMSG`\
+   Each of these lines assigns a text message to a NOTIFY event. Within each message, the marker `%s` is replaced by the **name of the UPS unit** producing the event. `upsmon` passes this message to the `wall` program to notify the system administrator of the event.
- - **`NOTIFYFLAG`** : each line declare what is to be done at each `NOTIFY` event. Up to three flags can be specified for each event. In the example, all events will be registered in the host's `/var/log/syslog` log file.
+ - `NOTIFYFLAG`\
+   Each line declares what is to be done at each `NOTIFY` event. Up to three flags can be specified for each event. In the example, all events will be registered in the host's `/var/log/syslog` log file.
- - **`RBWARNTIME`** : when an UPS says it needs to have its battery replaced, `upsmon` will generate a `NOTIFY_REPLBATT` event. This line says that this warning will be repeated every this many **seconds**.
+ - `RBWARNTIME`\
+   When a UPS unit says it needs to have its battery replaced, `upsmon` will generate a `NOTIFY_REPLBATT` event. This parameter sets how many **seconds** must pass before that warning is repeated.
10. Before you can try the configuration, restart the NUT-related services to be sure that they load with the right parameters.
- ~~~bash
+ ~~~sh
$ systemctl restart nut-client.service nut-server.service
~~~
@@ -248,86 +316,112 @@ Assuming your UPS unit has the required USB cable, here's how to proceed with it
11. Check if the NUT services are working properly with your UPS unit.
- - `upscmd -l upsname`: this will list you the instant commands supported by the UPS unit named `upsname` (replace this with the name you gave to your UPS in point 5). Below you can see its output with my UPS unit.
+ - `upscmd -l upsunitname`\
+ Lists the instant commands supported by the UPS unit named `upsunitname` (replace the placeholder string with the name you gave to your UPS back in point 5). Below you can see its output with this guide's UPS unit:
- ~~~bash
- $ upscmd -l apc
- Instant commands supported on UPS [apc]:
+ ~~~sh
+ $ upscmd -l eaton
+ Instant commands supported on UPS [eaton]:
beeper.disable - Disable the UPS beeper
beeper.enable - Enable the UPS beeper
beeper.mute - Temporarily mute the UPS beeper
beeper.off - Obsolete (use beeper.disable or beeper.mute)
beeper.on - Obsolete (use beeper.enable)
+ driver.killpower - Tell the driver daemon to initiate UPS shutdown; should be unlocked with driver.flag.allow_killpower option or variable setting
+ driver.reload - Reload running driver configuration from the file system (only works for changes in some options)
+ driver.reload-or-error - Reload running driver configuration from the file system (only works for changes in some options); return an error if something changed and could not be applied live (so the caller can restart it with new options)
+ driver.reload-or-exit - Reload running driver configuration from the file system (only works for changes in some options); exit the running driver if something changed and could not be applied live (so service management framework can restart it with new options)
load.off - Turn off the load immediately
load.off.delay - Turn off the load with a delay (seconds)
- shutdown.reboot - Shut down the load briefly while rebooting the UPS
+ load.on - Turn on the load immediately
+ load.on.delay - Turn on the load with a delay (seconds)
+ shutdown.return - Turn off the load and return when power is back
+ shutdown.stayoff - Turn off the load and remain off
shutdown.stop - Stop a shutdown in progress
- test.panel.start - Start testing the UPS panel
- test.panel.stop - Stop a UPS panel test
~~~
- - `upsc upsname`: this returns the statistics of the UPS unit specified as `upsname` (again, here you would put your own UPS unit's name). See below the output it returns in my system.
+ - `upsc upsunitname`\
+   Returns the statistics from the UPS unit specified as `upsunitname` (again, replace this string with your own UPS unit's name). See below the output this command returns for this guide's reference hardware:
- ~~~bash
- $ upsc apc
+ ~~~sh
+ $ upsc eaton
Init SSL without certificate database
battery.charge: 100
- battery.charge.low: 10
- battery.charge.warning: 50
- battery.date: not set
- battery.mfr.date: 2018/04/22
- battery.runtime: 2205
- battery.runtime.low: 120
+ battery.charge.low: 20
+ battery.runtime: 3840
battery.type: PbAc
- battery.voltage: 13.7
- battery.voltage.nominal: 12.0
- device.mfr: APC
- device.model: Back-UPS ES 700G
- device.serial: 5B1816T44974
+ device.mfr: EATON
+ device.model: Eaton 3S 700
+ device.serial: Blank
device.type: ups
+ driver.debug: 0
+ driver.flag.allow_killpower: 0
driver.name: usbhid-ups
driver.parameter.pollfreq: 30
driver.parameter.pollinterval: 2
driver.parameter.port: auto
- driver.parameter.synchronous: no
- driver.version: 2.7.4
- driver.version.data: APC HID 0.96
- driver.version.internal: 0.41
- input.sensitivity: low
- input.transfer.high: 266
- input.transfer.low: 180
- input.voltage: 230.0
- input.voltage.nominal: 230
- ups.beeper.status: disabled
+ driver.parameter.synchronous: auto
+ driver.state: quiet
+ driver.version: 2.8.1
+ driver.version.data: MGE HID 1.46
+ driver.version.internal: 0.52
+ driver.version.usb: libusb-1.0.28 (API: 0x100010a)
+ input.transfer.high: 264
+ input.transfer.low: 184
+ outlet.1.desc: PowerShare Outlet 1
+ outlet.1.id: 1
+ outlet.1.status: on
+ outlet.1.switchable: no
+ outlet.desc: Main Outlet
+ outlet.id: 0
+ outlet.switchable: yes
+ output.frequency.nominal: 50
+ output.voltage: 230.0
+ output.voltage.nominal: 230
+ ups.beeper.status: enabled
ups.delay.shutdown: 20
- ups.firmware: 871.O4 .I
- ups.firmware.aux: O4
- ups.load: 1
- ups.mfr: APC
- ups.mfr.date: 2018/04/22
- ups.model: Back-UPS ES 700G
- ups.productid: 0002
- ups.serial: 5B1816T44974
+ ups.delay.start: 30
+ ups.firmware: 02.08.0010
+ ups.load: 4
+ ups.mfr: EATON
+ ups.model: Eaton 3S 700
+ ups.power.nominal: 700
+ ups.productid: ffff
+ ups.realpower: 22
+ ups.serial: Blank
ups.status: OL
- ups.timer.reboot: 0
ups.timer.shutdown: -1
- ups.vendorid: 051d
+ ups.timer.start: -1
+ ups.type: offline / line interactive
+ ups.vendorid: 0463
~~~
-> **BEWARE!**
-> Don't forget to check the `/var/log/syslog` file to detect any problems with the NUT services!
+## Checking the NUT logs
+
+NUT's services write their logs to the journal of the Proxmox VE system. Look for them with the `journalctl` command; these are the identifiers to watch for:
+
+- `nut-server`: log entries from the NUT server.
+- `nut-monitor`: log entries from the NUT monitor.
+- `upsd`: log entries from the UPS daemon.
-## Executing instant commands on the UPS unit
+Remember to check them out, especially when you have created or modified your NUT configuration. For instance, if NUT cannot connect with your UPS unit, you'll see warning logs from the NUT monitor in your PVE system's journal:
-To execute the so called **instant commands** on your UPS unit, you have to use the command `upscmd` with your **NUT administrator user** defined previously. A safe way to test this is by disabling and enabling the beeper usually embedded in any UPS.
+~~~sh
+Aug 26 10:53:52 pve nut-monitor[6684]: UPS eaton@localhost: Not available.
+Aug 26 10:53:57 pve nut-monitor[6684]: Poll UPS [eaton@localhost] failed - Poll UPS [eaton@localhost] failed - [eaton] does not exist on server localhost
+~~~
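+
+A quick way to pull these entries out of the journal is to filter by unit with `journalctl` (the unit names below are the usual ones on Debian-based installs such as this one; adjust them if yours differ):
+
+~~~sh
+$ journalctl -u nut-server.service -u nut-monitor.service --since "1 hour ago"
+~~~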
+
+## Executing instant commands on your UPS unit
-~~~bash
-$ upscmd apc beeper.disable
+To execute the so-called **instant commands** on your UPS unit (which you can list with `upscmd -l upsunitname`), you have to use the `upscmd` command with the **NUT administrator user** [defined in the previous section](#connecting-your-ups-with-your-pve-node-using-nut). A safe way to test this is by disabling and enabling the beeper usually embedded in any UPS unit:
+
+~~~sh
+$ upscmd eaton beeper.disable
Username (root): upsadm
Password:
OK
-$ upscmd apc beeper.enable
+$ upscmd eaton beeper.enable
Username (root): upsadm
Password:
OK
@@ -335,31 +429,32 @@ OK
You can also execute the whole `upscmd` command in just one line:
-~~~bash
-$ upscmd -u upsadm -p D1Ff3rEnT_s3c4R3_p4sSw0rD! apc beeper.disable
+~~~sh
+$ upscmd -u upsadm -p D1Ff3rEnT_s3c4R3_p4sSw0rD! eaton beeper.disable
~~~
-> **BEWARE!**
-> Don't execute the `upscmd` like that in your normal shell, to avoid exposing your password in the shell history (in bash is the `.bash_history` text file). Use this one-line format **only for tasks automatizations in shell scripts**.
+> [!WARNING]
+> **Use this one-line format only for task automation with shell scripts**\
+> Do not execute `upscmd` this way in your normal shell, to avoid exposing the password in your user's shell history (the `.bash_history` text file in Bash shells).
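+
+One way to follow that advice in an automation script (just a sketch; the `/root/.nut-upsadm-pass` path is a made-up example, and the UPS and user names are the ones used in this guide) is to keep the password in a file only `root` can read, created beforehand with your text editor so it never touches the shell history:
+
+~~~sh
+# Restrict the password file so only root can read it.
+$ chmod 600 /root/.nut-upsadm-pass
+
+# In your script, read the password from that file instead of hardcoding it.
+$ upscmd -u upsadm -p "$(cat /root/.nut-upsadm-pass)" eaton beeper.disable
+~~~
+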
Also, remember that:
1. You'll have to check out first with `upscmd -l upsunitname` which are the instant commands supported by your UPS unit.
-2. The NUT software doesn't known if the instant command has really been executed on the UPS, it only sees what the UPS unit is answering back in response to the command's request.
+2. The NUT software does not know whether the instant command has really been executed on the UPS unit; it only sees what the UPS unit answers back in response to the command's request.
## Other possibilities with NUT
-If you feel curious about what else you can do with NUT, there's a pdf document that provides a good number of configuration examples. [Get it in this GitHub page](https://github.com/networkupstools/ConfigExamples/releases/tag/book-2.0-20210521-nut-2.7.4).
+If you feel curious about what else you can do with NUT, there's a PDF document that provides a good number of configuration examples. [Get it from this GitHub page](https://github.com/networkupstools/ConfigExamples/releases/tag/book-3.0-20230319-nut-2.8.0).
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/nut`
- `/var/log`
-### _Files_
+### Files
- `/etc/nut/nut.conf`
- `/etc/nut/nut.conf.orig`
@@ -375,14 +470,16 @@ If you feel curious about what else you can do with NUT, there's a pdf document
## References
-### _NUT_
+### [Network UPS Tools (NUT)](https://networkupstools.org/)
+
+- [Hardware compatibility list](https://networkupstools.org/stable-hcl.html)
+- [User manual](https://networkupstools.org/docs/user-manual.chunked/index.html)
+- [Config Examples book (version 3.0)](https://github.com/networkupstools/ConfigExamples/releases/tag/book-3.0-20230319-nut-2.8.0)
+- [Man for `upsmon`](https://networkupstools.org/docs/man/upsmon.html)
-- [NUT (Network UPS Tool)](https://networkupstools.org/)
-- [NUT Hardware compatibility list](https://networkupstools.org/stable-hcl.html)
-- [NUT User manual (chunked)](https://networkupstools.org/docs/user-manual.chunked/index.html)
-- [NUT documentation and scripts](http://rogerprice.org/NUT/)
-- [NUT config examples document on GitHub](https://github.com/networkupstools/ConfigExamples/releases/tag/book-2.0-20210521-nut-2.7.4)
-- [Monitorización de un SAI con GNU/Debian Linux](http://index-of.co.uk/SISTEMAS-OPERATIVOS/NUT%20Debian%20UPS%20Monitor.pdf) (in Spanish)
+### Related NUT contents
+
+- [NUT Configuration Examples](http://rogerprice.org/NUT/)
- [Instalar y configurar NUT por SNMP](https://blog.ichasco.com/instalar-y-configurar-nut-por-snmp/) (in Spanish)
- [Monitoring a UPS with nut on Debian or Ubuntu Linux](https://blog.shadypixel.com/monitoring-a-ups-with-nut-on-debian-or-ubuntu-linux/)
- [Can't get upsmon service started to monitor (and respond to) remote UPS](https://serverfault.com/questions/865147/cant-get-upsmon-service-started-to-monitor-and-respond-to-remote-ups)
@@ -390,6 +487,9 @@ If you feel curious about what else you can do with NUT, there's a pdf document
- [HOWTO: Configure A UPS on Proxmox 5.x](https://diyblindguy.com/howto-configure-ups-on-proxmox/)
- [UPS Server on Raspberry Pi](https://www.reddit.com/r/homelab/comments/5ssb5h/ups_server_on_raspberry_pi/)
- [Configuring NUT for the Eaton 3S UPS on Ubuntu Linux](https://srackham.wordpress.com/2013/02/27/configuring-nut-for-the-eaton-3s-ups-on-ubuntu-linux/)
+
+### About UPS units
+
- [UPS HowTo](https://tldp.org/HOWTO/UPS-HOWTO/)
## Navigation
diff --git a/G005 - Host configuration 03 ~ LVM storage.md b/G005 - Host configuration 03 ~ LVM storage.md
index 0dca763..d8180e6 100644
--- a/G005 - Host configuration 03 ~ LVM storage.md
+++ b/G005 - Host configuration 03 ~ LVM storage.md
@@ -1,67 +1,81 @@
# G005 - Host configuration 03 ~ LVM storage
-After installing Proxmox VE with (almost) default settings, the storage is still not ready since it needs some reorganization.
-
-As a reference, I'll use in this guide my own server's storage setup.
+- [Your Proxmox VE server's storage needs to be reorganized](#your-proxmox-ve-servers-storage-needs-to-be-reorganized)
+- [Initial filesystem configuration (**PVE web console**)](#initial-filesystem-configuration-pve-web-console)
+- [Initial filesystem configuration (**shell as root**)](#initial-filesystem-configuration-shell-as-root)
+ - [Checking the filesystem with `fdisk`](#checking-the-filesystem-with-fdisk)
+ - [Visualizing the filesystem structure with `lsblk`](#visualizing-the-filesystem-structure-with-lsblk)
+ - [Investigating the LVM system with its own set of commands](#investigating-the-lvm-system-with-its-own-set-of-commands)
+- [Configuring the unused storage drives](#configuring-the-unused-storage-drives)
+ - [Seeing the new storage volumes in Proxmox VE's web console](#seeing-the-new-storage-volumes-in-proxmox-ves-web-console)
+- [LVM rearrangement in the main storage drive](#lvm-rearrangement-in-the-main-storage-drive)
+ - [Removing the `data` LVM thin pool](#removing-the-data-lvm-thin-pool)
+ - [Extending the `root` logical volume](#extending-the-root-logical-volume)
+ - [Creating a new partition and a new VG in the unallocated space on the `sda` drive](#creating-a-new-partition-and-a-new-vg-in-the-unallocated-space-on-the-sda-drive)
+- [References](#references)
+ - [Logical Volume Management (LVM)](#logical-volume-management-lvm)
+- [Navigation](#navigation)
+
+## Your Proxmox VE server's storage needs to be reorganized
+
+After installing Proxmox VE with (almost) default settings, the storage is still not ready since it needs some reorganization. As a reminder, the storage available in this guide's reference hardware is the following:
- One internal, 1 TiB, SSD drive, linked to a SATA 2 port.
- One internal, 1 TiB, HDD drive, linked to a SATA 2 port .
- One external, 2 TiB, HDD drive, linked to a USB 3 port.
-Also, remember that...
+Also, keep in mind that:
-- Proxmox VE 7.0 is installed in the SSD drive, but only using 50 GiB of its storage space.
+- The Proxmox VE system is installed in the SSD drive, but only using 63 GiB of its available storage space.
- The filesystem is `ext4`.
-## Initial filesystem configuration (**web console**)
-
-Log in the **web console** as `root`. In a recently installed Proxmox VE node, you'll see that, at the `Datacenter` level, the `Storage` already has an initial configuration.
+## Initial filesystem configuration (**PVE web console**)
-
+Log in to the **web console** as `root`. In a recently installed Proxmox VE node, you'll see that, at the `Datacenter` level, the `Storage` already has an initial configuration:
-Meanwhile, at the `pve` **node** level you can see which storage drives you have available in your physical system. Remember, **one node represents one physical server**.
+
-
+Meanwhile, at the `pve` **node** level you can see which storage drives you have available in your physical system. Remember, **one node represents one physical server**:
-Proxmox shows some technical details from each disk, and also about the partitions present on each of them. At this point, only the `/dev/sda` ssd drive has partitions, the ones corresponding to the Proxmox VE installation. Additionally, notice that Proxmox VE is unable to tell what type of device is the `/dev/sdc` one. This is because that's the one connected through the **USB 3** connection, but this means no trouble at all.
+
-On the other hand, be aware that Proxmox VE has installed itself in a LVM structure, but the web console won't show you much information about it.
+Proxmox shows some technical details about each disk, and also about the partitions present on each of them. At this point, only the `/dev/sda` SSD drive has partitions, the ones corresponding to the Proxmox VE installation.
-Go to the `Disks > LVM` option at your **node** level.
+On the other hand, be aware that Proxmox VE has installed itself within an LVM structure, but the web console won't show you much information about it. Go to the `Disks > LVM` option at your **node** level:
-
+
-There's a volume group in your `sda` device which fills most of the space in the SSD drive. But this screen **doesn't show you the complete underlying LVM structure**.
+There is a volume group in your `sda` device which fills most of the space in the SSD drive. But this screen **does not show you the complete underlying LVM structure**.
-In the `Disks > LVM-Thin` screen you can see a bit more information regarding the **LVM-Thin pools** enabled in the system.
+In the `Disks > LVM-Thin` screen you can see a bit more information regarding the **LVM-Thin pools** enabled in the system:
-
+
The LVM thinpool created by the Proxmox VE installation is called `data` and appears unused. In Proxmox VE, an LVM thinpool is where the disk images for the virtual machines and containers can be stored and grow dynamically.
-Under the unfolded node in the `Datacenter` tree on the left, you can see two leafs. Those leafs are the storages shown at the `Datacenter` level.
-
-Clicking on the `local` one will show you a screen like the following.
+When you unfold the `pve` node under the `Datacenter` tree on the left, you can see three leaves. Two of those leaves are the storages shown at the `Datacenter` level. Clicking on the `local (pve)` one will show you a screen like the following:
-
+
-This page offers several tabs like `Summary`, which is the one shown above. The tabs shown will change depending on the type of storage, and they also depend on what Proxmox VE content types have been enabled on the storage. Notice how the `Usage` of `Storage 'local' on node 'pve'` has a capacity of 12.84 GiB. This is the main system partition, but it's just a portion of the `pve` LVM volume group's total capacity.
+This page offers several tabs like `Summary`, which is the one shown above. The tabs shown will change depending on the type of storage, and they also depend on what Proxmox VE content types have been enabled on the storage. Notice how the `Usage` statistic of the `Storage 'local' on the node 'pve'` indicates a total capacity of 25.71 GiB. This is the main system partition, but it's just a portion of the `pve` LVM volume group's total capacity.
-The rest of the space is assigned to the LVM-Thin pool, which can be seen by browsing into the `local-lvm` leaf.
+The rest of the space is assigned to the LVM-Thin pool, which can be seen by browsing into the `local-lvm (pve)` leaf:
-
+
-The `Storage 'local-lvm' on node 'pve'` shows in `Usage` that this thinpool has a capacity of 18.38 GiB, which is the rest of storage space available in the `pve` volume group (50 GiB), and is still empty.
+The `Storage 'local-lvm' on node 'pve'` shows in `Usage` that this thinpool has a capacity of 17.04 GiB, which is most of the storage space left in the `pve` volume group after the root and swap volumes, and is still empty.
-Finally, notice that the 12 GiB swap volume, also existing within the LVM structure, is not shown by the web console.
+Finally, notice that the web console doesn't show the 12 GiB swap volume that also exists within the LVM structure.
## Initial filesystem configuration (**shell as root**)
-To have a more complete idea of how the storage is organized in your recently installed Proxmox VE server, get into the shell as `root` (either remotely through **PuTTY** or by using one of the shells provided by the Proxmox web console).
+To have a more complete idea of how the storage is organized in your recently installed Proxmox VE server, get into the shell as `root` (either remotely through a client like **PuTTY** or by using one of the shells provided by the Proxmox web console).
-The first thing to do is check the filesystem structure in your connected storage drives.
+### Checking the filesystem with `fdisk`
-~~~bash
+The first thing to do is to check the filesystem structure in your available storage drives with `fdisk`:
+
+~~~sh
$ fdisk -l
Disk /dev/sda: 931.51 GiB, 1000204886016 bytes, 1953525168 sectors
Disk model: Samsung SSD 860
@@ -69,19 +83,21 @@ Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
-Disk identifier: BB9279D2-E0C0-4514-BC2D-4E074BE2071A
+Disk identifier: DE398227-5837-44FF-9422-173FEFB80BDC
Device Start End Sectors Size Type
/dev/sda1 34 2047 2014 1007K BIOS boot
-/dev/sda2 2048 1050623 1048576 512M EFI System
-/dev/sda3 1050624 104857600 103806977 49.5G Linux LVM
+/dev/sda2 2048 2099199 2097152 1G EFI System
+/dev/sda3 2099200 132120576 130021377 62G Linux LVM
Disk /dev/sdb: 931.51 GiB, 1000204886016 bytes, 1953525168 sectors
-Disk model: ST1000DM003-9YN1
+Disk model: WDC WD10JPVX-22J
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
+Disklabel type: gpt
+Disk identifier: 3E558705-367B-2444-949B-F5B848C14B9F
Disk /dev/mapper/pve-swap: 12 GiB, 12884901888 bytes, 25165824 sectors
@@ -90,82 +106,85 @@ Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
-Disk /dev/mapper/pve-root: 12.25 GiB, 13153337344 bytes, 25690112 sectors
+Disk /dev/mapper/pve-root: 24.5 GiB, 26302480384 bytes, 51372032 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
-Disk /dev/sdc: 1.82 TiB, 2000398931968 bytes, 3907029164 sectors
-Disk model: External
+Disk /dev/sdc: 1.82 TiB, 2000398934016 bytes, 3907029168 sectors
+Disk model: 015-2E8174
Units: sectors of 1 * 512 = 512 bytes
-Sector size (logical/physical): 512 bytes / 512 bytes
-I/O size (minimum/optimal): 512 bytes / 512 bytes
+Sector size (logical/physical): 512 bytes / 4096 bytes
+I/O size (minimum/optimal): 4096 bytes / 4096 bytes
+Disklabel type: gpt
+Disk identifier: 8E9192A6-1F3F-412A-9B0C-0B1D6CE8B414
~~~
-> **BEWARE!**
-> The `fdisk` command **doesn't** return an alphabetically ordered list.
-
-The `sda` device is the 1TiB SSD drive in which Proxmox VE is installed. In it's block, below its list of technical capabilities, you can also see the list of the **real partitions** (the `/dev/sda#` lines) created in it by the Proxmox VE installation. The `sda1` and `sda2` are partitions used essentially for booting the system up, and the `sda3` is the one that contains the whole `pve` LVM volume group for Proxmox VE.
+The `sda` device is the 1 TiB SSD drive in which Proxmox VE is installed. In its block, below its list of technical capabilities, you can also see the list of the **real partitions** (the `/dev/sda#` lines under **Device**) created in it by the Proxmox VE installation. The `sda1` and `sda2` are partitions used essentially for booting the system up, and the `sda3` is the one that contains the whole `pve` LVM volume group for Proxmox VE.
-Below the `sda` information block, you can see the details of the `sdb` and `sdc` storage devices (for instance, `fdisk` correctly recognizes the `sdc` drive as `External`, which is the HDD connected through the USB 3 plug). In this case, both of them are not partitioned at all and, therefore, completely empty. That's why they don't have a listing below each of them like in the case of the `sda` device.
+Below the `sda` information block, you can see the details of the `sdb` and `sdc` storage devices. In this case, neither of them is partitioned, so they're completely empty. That's why they don't have a device listing like the `sda` unit does.
-And let's not forget the two remaining devices seen by `fdisk`, one per each LVM **logical volumes** (kind of virtual partitions) present in the system. From the point of view of the `fdisk` command, they are just like any other storage devices, although the command gives a bit less information about them. Still, you can notice how these volumes are mounted on a `/dev/mapper` route instead of hanging directly from `/dev`, this is something related to their _logical_ nature. Also, notice the following regarding the LVM volumes shown in the previous listing:
+And let's not forget the two remaining devices seen by `fdisk`, one for each LVM **logical volume** (a kind of virtual partition) present in the system. From the point of view of the `fdisk` command, they are just like any other storage device, although the command gives a bit less information about them. Still, you can notice how these volumes appear under a `/dev/mapper` path instead of hanging directly from `/dev`; this is related to their _logical_ nature. Also, notice the following regarding the LVM volumes shown in the previous listing:
- The one called `pve-swap` is the swapping partition, as it names implies.
-- The one called `pve-root` is the one in which the whole Debian 11 system is installed, the so called `/` filesystem.
+- The one called `pve-root` is the one in which the whole Debian system is installed, the so-called `/` filesystem.
- There's no mention at all of the **LVM-Thin pool** called `data` you saw in your PVE web console.
-Thanks to the `fdisk` command, now you really have a good picture of what's going on inside your storage drives, but it's still a bit lacking. There are a couple commands more that will help you visualize better the innards of your server's filesystem.
+Thanks to the `fdisk` command, now you really have a good picture of what's going on inside your storage drives, but it is still a bit lacking. There are other commands that will help you better visualize the innards of your server's filesystem.
+
+### Visualizing the filesystem structure with `lsblk`
-The first one is `lsblk`:
+With the `lsblk` command you can see your PVE node's filesystem structure in branch format:
-~~~bash
+~~~sh
$ lsblk
-NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 931.5G 0 disk
├─sda1 8:1 0 1007K 0 part
-├─sda2 8:2 0 512M 0 part /boot/efi
-└─sda3 8:3 0 49.5G 0 part
- ├─pve-swap 253:0 0 12G 0 lvm [SWAP]
- ├─pve-root 253:1 0 12.3G 0 lvm /
- ├─pve-data_tmeta 253:2 0 1G 0 lvm
- │ └─pve-data 253:7 0 17.1G 0 lvm
- └─pve-data_tdata 253:3 0 17.1G 0 lvm
- └─pve-data 253:7 0 17.1G 0 lvm
+├─sda2 8:2 0 1G 0 part
+└─sda3 8:3 0 62G 0 part
+ ├─pve-swap 252:0 0 12G 0 lvm [SWAP]
+ ├─pve-root 252:1 0 24.5G 0 lvm /
+ ├─pve-data_tmeta 252:2 0 1G 0 lvm
+ │ └─pve-data 252:4 0 15.9G 0 lvm
+ └─pve-data_tdata 252:3 0 15.9G 0 lvm
+ └─pve-data 252:4 0 15.9G 0 lvm
sdb 8:16 0 931.5G 0 disk
sdc 8:32 0 1.8T 0 disk
~~~
This command not only sees all the physical storage drives available (`TYPE disk`) in the system and their partitions (`TYPE part` that, at this point, are only inside the `sda` device), it also gives information about the LVM filesystem itself (`TYPE lvm`).
-It correctly shows the `pve-swap`, the `pve-root` and their mount points, but also lists the two elements that compose an LVM-Thin pool: its metadata (`pve-data_tmeta`) and the reserved space itself (`pve-data`). Also, the default branch format is really helpful to see where is what, although the `lsblk` command supports a few other output formats as well.
+It correctly shows the `pve-swap`, the `pve-root` and their mount points, but also lists the two elements that compose an LVM-Thin pool: its metadata (`pve-data_tmeta`) and the reserved space itself (`pve-data`). Know that, although the default branch format is really helpful to see what is where, the `lsblk` command supports a few other output formats as well, like the column selection shown right below.
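+
+For instance, you can choose exactly which columns to print with the `-o` option (the column names used here are standard `lsblk` ones; run `lsblk --help` to see the full list available in your version):
+
+~~~sh
+$ lsblk -o NAME,TYPE,SIZE,FSTYPE,MOUNTPOINTS
+~~~
+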
-With the two previous commands you get the Linux's point of view, but you also need to know how the LVM system sees itself. For this, LVM has its own set of commands, and one of them is `vgs`.
+### Investigating the LVM system with its own set of commands
-~~~bash
+With the two previous commands you get Linux's point of view, but you also need to know how the LVM system sees itself. For this, LVM has its own set of commands, and one of them is `vgs`:
+
+~~~sh
$ vgs
VG #PV #LV #SN Attr VSize VFree
- pve 1 3 0 wz--n- <49.50g 6.12g
+ pve 1 3 0 wz--n- <62.00g <7.63g
~~~
-This commands informs about the LVM _volume groups_ present on the system. In the example there's only one called `pve` (also prefix for the light volumes names as in `pve-root`).
+This command reports the LVM _volume groups_ present on the system. In the snippet there's only one, called `pve` (which is also the prefix of the logical volumes' names, as in `pve-root`).
Another interesting command is `lvs`:
-~~~bash
+~~~sh
$ lvs
- LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
- data pve twi-a-tz-- 17.12g 0.00 1.58
- root pve -wi-ao---- 12.25g
- swap pve -wi-ao---- 12.00g
+ LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
+ data pve twi-a-tz-- 15.87g 0.00 1.58
+ root pve -wi-ao---- <24.50g
+ swap pve -wi-ao---- 12.00g
~~~
-In the output above, the command lists the logical volumes and also the thin pool. Notice how the names (`LV` column) have changed: instead of being listed as `pve-something`, the list shows them indicating their volume group (`pve`) in a different column (`VG`). Also, notice how the real available space in the thin `data` pool is shown properly (17.12 GiB, under the `LSize` column).
+In the output above, the command lists the logical volumes and also the thin pool. Notice how the names (`LV` column) have changed: instead of being listed as `pve-something`, they appear with their volume group (`pve`) indicated in a separate column (`VG`). Also, notice how the real size of the thin `data` pool is shown properly (15.87 GiB, under the `LSize` column).
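+
+If you also want `lvs` to list the hidden internal volumes backing the thin pool (the ones `lsblk` showed as `pve-data_tmeta` and `pve-data_tdata`), you can add the `-a` option; a quick sketch:
+
+~~~sh
+$ lvs -a pve
+~~~
+
+Internal volumes should appear printed between square brackets, such as `[data_tmeta]` and `[data_tdata]`.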
-The last command I'll show you here is `vgdisplay`.
+The last command I'll show you here is `vgdisplay`:
-~~~bash
+~~~sh
$ vgdisplay
--- Volume group ---
VG Name pve
@@ -181,34 +200,36 @@ $ vgdisplay
Max PV 0
Cur PV 1
Act PV 1
- VG Size <49.50 GiB
+ VG Size <62.00 GiB
PE Size 4.00 MiB
- Total PE 12671
- Alloc PE / Size 11103 / 43.37 GiB
- Free PE / Size 1568 / 6.12 GiB
- VG UUID z2FKMR-3mDj-p59m-1X8X-Ezgw-CLH5-4sLBou
+ Total PE 15871
+ Alloc PE / Size 13918 / <54.37 GiB
+ Free PE / Size 1953 / <7.63 GiB
+ VG UUID QLLDZO-5nRt-ye9r-1xtq-KBl9-drYB-iTHKRw
~~~
-This command gives back details from the volume groups present in the system. In this case, its output its only related to the sole volume group present now, the `pve` group.
-
-See the different parameters shown in the output, and notice how the commands gives you the count of logical (`LV`) and **physical** (`PV`) volumes present in the group. Bear in mind that a LVM physical volume can be either a **whole** storage drive or just a **real** partition.
+This command outputs details from the volume groups present in the system. In this case, it only shows information from the sole volume group present at this point in the reference setup, the `pve` group.
-Now that you have the whole picture of how the storage setup is organized in a newly installed Proxmox VE server, let's rearrange it to a more convenient one.
+See the different parameters shown in the output, and notice how the command gives you the count of logical (`LV`) and **physical** (`PV`) volumes present in the group. Bear in mind that an LVM physical volume can be either a **whole** storage drive or just a partition of the drive.
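+
+If you need that same level of detail but for a particular physical volume rather than a volume group, LVM also provides a `pvdisplay` command. For instance, on this reference setup you could run something like:
+
+~~~sh
+$ pvdisplay /dev/sda3
+~~~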
## Configuring the unused storage drives
-Let's begin by making the two empty HDDs' storage available for the LVM system.
+Now that you have the whole picture of how the storage setup is organized in a newly installed Proxmox VE server, let's rearrange it into a more convenient layout. Begin by making the two empty HDDs' storage available to the LVM system.
-1. First, make a partition on each empty storage device that takes the **whole space** available. You'll do this with `sgdisk`.
+1. First, make a partition on each empty storage device that takes the **whole space** available. You'll do this with `sgdisk`:
- ~~~bash
+ ~~~sh
$ sgdisk -N 1 /dev/sdb
+ Creating new GPT entries in memory.
+ The operation has completed successfully.
$ sgdisk -N 1 /dev/sdc
+ Creating new GPT entries in memory.
+ The operation has completed successfully.
~~~
- The `sgdisk` command may return lines like the following.
+ The `sgdisk` command may also print warnings like the ones in the following snippets:
- ~~~bash
+ ~~~sh
Warning: Partition table header claims that the size of partition table
entries is 0 bytes, but this program supports only 128-byte entries.
Adjusting accordingly, but partition table may be garbage.
@@ -216,96 +237,95 @@ Let's begin by making the two empty HDDs' storage available for the LVM system.
The operation has completed successfully.
~~~
- Usually you should expect seeing only the last line, but depending on what you has been done to the storage drives previously, you might also see the partition table warning or anything else detected by `sgdisk`.
+ ~~~sh
+ Warning: The kernel is still using the old partition table.
+ The new table will be used at the next reboot or after you
+ run partprobe(8) or kpartx(8)
+ The operation has completed successfully.
+ ~~~
+
+ Usually, you should expect to see only the success line but, depending on what has been done to the storage drives previously, you may also get warnings about issues detected by `sgdisk`.
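+
+ If you get the warning about the kernel still using the old partition table, you can make the kernel reread it without rebooting by using the `partprobe` tool the warning itself mentions. A minimal sketch (the tool may require installing the `parted` package):
+
+ ~~~sh
+ $ partprobe /dev/sdb
+ $ partprobe /dev/sdc
+ ~~~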
-2. Then, you can check the new partitions with `fdisk -l`.
+2. Then, you can check the new partitions with `fdisk -l`:
- ~~~bash
+ ~~~sh
$ fdisk -l /dev/sdb /dev/sdc
Disk /dev/sdb: 931.51 GiB, 1000204886016 bytes, 1953525168 sectors
- Disk model: ST1000DM003-9YN1
+ Disk model: WDC WD10JPVX-22J
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disklabel type: gpt
- Disk identifier: 018E5948-DFFC-42E9-917F-21BEA499603B
+ Disk identifier: 3E558705-367B-2444-949B-F5B848C14B9F
Device Start End Sectors Size Type
/dev/sdb1 2048 1953525134 1953523087 931.5G Linux filesystem
- Disk /dev/sdc: 1.82 TiB, 2000398931968 bytes, 3907029164 sectors
- Disk model: External
+ Disk /dev/sdc: 1.82 TiB, 2000398934016 bytes, 3907029168 sectors
+ Disk model: 015-2E8174
Units: sectors of 1 * 512 = 512 bytes
- Sector size (logical/physical): 512 bytes / 512 bytes
- I/O size (minimum/optimal): 512 bytes / 512 bytes
+ Sector size (logical/physical): 512 bytes / 4096 bytes
+ I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disklabel type: gpt
- Disk identifier: 3DB0F854-3FA0-45DF-9F75-0DF25D369D00
+ Disk identifier: 8E9192A6-1F3F-412A-9B0C-0B1D6CE8B414
Device Start End Sectors Size Type
- /dev/sdc1 2048 3907029130 3907027083 1.8T Linux filesystem
+ /dev/sdc1 2048 3907029134 3907027087 1.8T Linux filesystem
~~~
Remember how the `sdb` and `sdc` devices didn't have a partition listing under their technical details? Now you see that they have one partition each.
-3. Next, lets create a _physical volume_ (or PV) with each of those new partitions. For this operation, you'll need to use the `pvcreate` command.
+3. Next, let's create a _physical volume_ (or PV) with each of those new partitions. For this operation, you'll need to use the `pvcreate` command:
- > **BEWARE!**
- > The `pvcreate` command will fail if it finds references to a previous LVM structure in the storage drive its trying to turn into a physical volume. So, if pvcreate returns a message like `Can't open /dev/sdb1 exclusively. Mounted filesystem?`, you'll need to remove all the LVM structure that might be lingering in your storage device. [Follow this guide](https://www.thegeekdiary.com/lvm-error-cant-open-devsdx-exclusively-mounted-filesystem/) to know more about this issue.
+ > [!WARNING]
+ > **The `pvcreate` command will fail if it finds references to a previous LVM structure in the storage drive it is trying to turn into a physical volume**\
+ > If `pvcreate` returns a message like `Can't open /dev/sdb1 exclusively. Mounted filesystem?`, you'll need to remove all the LVM structure that might be lingering in your storage device.
+ >
+ > [Follow this guide](https://www.thegeekdiary.com/lvm-error-cant-open-devsdx-exclusively-mounted-filesystem/) to know more about this issue.
- ~~~bash
+ ~~~sh
$ pvcreate --metadatasize 1g -y -ff /dev/sdb1
- Wiping ntfs signature on /dev/sdb1.
- Wiping atari signature on /dev/sdb1.
Physical volume "/dev/sdb1" successfully created.
$ pvcreate --metadatasize 2g -y -ff /dev/sdc1
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
- Wiping zfs_member signature on /dev/sdc1.
Physical volume "/dev/sdc1" successfully created.
~~~
- > **BEWARE!**
- > The `Wiping` lines mean that the command is removing any references, or signatures, of previous filesystems that were used in the storage devices. And, after the signatures wiping, `pvcreate` returns a success message if everything has gone right.
+ > [!NOTE]
+ > **Don't worry about any `Wiping` lines returned by `pvcreate`**\
+ > The `Wiping` lines mean that the command is removing any references, or signatures, of previous filesystems that were used in the storage devices. These lines may look like this:
+ >
+ > ~~~sh
+ > Wiping ntfs signature on /dev/sdb1.
+ > Wiping zfs_member signature on /dev/sdc1.
+ > ~~~
Notice that, in the commands above, and following the rule of thumb of 1 MiB of metadata per 1 GiB of storage, I've assigned 1 GiB on the `sdb1` PV and 2 GiB on the `sdc1` PV as the size for their LVM metadata space. If more is required in the future, this can be reconfigured later.
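+
+ If you ever want to verify how much metadata space a PV actually got, the LVM reporting options should let you check it; a hedged sketch, using field names as found in recent LVM versions:
+
+ ~~~sh
+ $ pvs -o pv_name,pv_mda_size,pv_mda_free
+ ~~~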
-4. Check your new physical volumes executing `pvs`.
+4. Check your new physical volumes by executing `pvs`:
- ~~~bash
+ ~~~sh
$ pvs
PV VG Fmt Attr PSize PFree
- /dev/sda3 pve lvm2 a-- <49.50g 6.12g
+ /dev/sda3 pve lvm2 a-- <62.00g <7.63g
/dev/sdb1 lvm2 --- 931.51g 931.51g
/dev/sdc1 lvm2 --- <1.82t <1.82t
~~~
See how the new physical volumes, `sdb1` and `sdc1`, appear under the already present one, `sda3`.
-5. The next step is to create a _volume group_ (or VG) for each one of the new PVs. Do this with `vgcreate`.
+5. The next step is to create a _volume group_ (or VG) for each one of the new PVs. Do this with `vgcreate`:
- ~~~bash
+ ~~~sh
$ vgcreate hddint /dev/sdb1
+ Volume group "hddint" successfully created
$ vgcreate hddusb /dev/sdc1
+ Volume group "hddusb" successfully created
~~~
-6. To check the new VGs you can use `vgdisplay`.
+6. To check the new VGs you can use `vgdisplay`:
- ~~~bash
+ ~~~sh
$ vgdisplay
--- Volume group ---
VG Name hddusb
@@ -326,70 +346,70 @@ Let's begin by making the two empty HDDs' storage available for the LVM system.
Total PE 476419
Alloc PE / Size 0 / 0
Free PE / Size 476419 / <1.82 TiB
- VG UUID TCRKW4-r3yN-eE1G-J3gj-Jj0L-252h-tlTXzS
+ VG UUID 0Yf1Dw-Wjsc-oSr4-RDGY-dgzh-USUZ-Wy9Yia
--- Volume group ---
- VG Name hddint
+ VG Name pve
System ID
Format lvm2
Metadata Areas 1
- Metadata Sequence No 1
+ Metadata Sequence No 7
VG Access read/write
VG Status resizable
MAX LV 0
- Cur LV 0
- Open LV 0
+ Cur LV 3
+ Open LV 2
Max PV 0
Cur PV 1
Act PV 1
- VG Size <930.51 GiB
+ VG Size <62.00 GiB
PE Size 4.00 MiB
- Total PE 238210
- Alloc PE / Size 0 / 0
- Free PE / Size 238210 / <930.51 GiB
- VG UUID kxf4wG-iBgs-IZkC-AExc-CKx2-zCuv-WftidX
+ Total PE 15871
+ Alloc PE / Size 13918 / <54.37 GiB
+ Free PE / Size 1953 / <7.63 GiB
+ VG UUID QLLDZO-5nRt-ye9r-1xtq-KBl9-drYB-iTHKRw
--- Volume group ---
- VG Name pve
+ VG Name hddint
System ID
Format lvm2
Metadata Areas 1
- Metadata Sequence No 7
+ Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
- Cur LV 3
- Open LV 2
+ Cur LV 0
+ Open LV 0
Max PV 0
Cur PV 1
Act PV 1
- VG Size <49.50 GiB
+ VG Size <930.51 GiB
PE Size 4.00 MiB
- Total PE 12671
- Alloc PE / Size 11103 / 43.37 GiB
- Free PE / Size 1568 / 6.12 GiB
- VG UUID z2FKMR-3mDj-p59m-1X8X-Ezgw-CLH5-4sLBou
+ Total PE 238210
+ Alloc PE / Size 0 / 0
+ Free PE / Size 238210 / <930.51 GiB
+ VG UUID HABjIK-BEER-NphN-cBML-TwXR-qfXQ-2kBszZ
~~~
- A shorter way of checking the volume groups is with the command `vgs`.
+ A shorter way of checking the volume groups is with the command `vgs`:
- ~~~bash
+ ~~~sh
$ vgs
VG #PV #LV #SN Attr VSize VFree
hddint 1 0 0 wz--n- <930.51g <930.51g
hddusb 1 0 0 wz--n- <1.82t <1.82t
- pve 1 3 0 wz--n- <49.50g 6.12g
+ pve 1 3 0 wz--n- <62.00g <7.63g
~~~
-### _Seeing the new storage volumes on Proxmox VE_
+### Seeing the new storage volumes in Proxmox VE's web console
-If you're wondering if any of this changes appear in the Proxmox web console, just open it and browse to the `pve` node level. There, open the `Disks` screen:
+If you're wondering if any of this changes appear in the Proxmox web console, just open it and browse to the `pve` node level. There, open the `Disks` view:
-
+
-You can see how the `sdb` and `sdc` devices now show their new LVM partitions. And in the `Disks > LVM` section:
+You can see how the `sdb` and `sdc` devices now show their new LVM partitions. Meanwhile, in the `Disks > LVM` section:
-
+
The new volume groups `hddint` and `hddusb` appear above the system's `pve` one (by default, they appear alphabetically ordered by the Volume Group _Name_ column).
@@ -399,88 +419,87 @@ At this point, you have an initial arrangement for your unused storage devices.
The main storage drive, the SSD unit, has an LVM arrangement that is not optimal for the small server you want to build. Let's create a new, differentiated storage space on the SSD drive, one that is not the `pve` volume group used by the Proxmox VE system itself. This way, you'll keep things separated and reduce the chance of messing up anything directly related to your Proxmox VE installation.
-### _Removing the `data` LVM thin pool_
+### Removing the `data` LVM thin pool
-The installation process of Proxmox VE created a LVM-thin pool called `data`, which is part of the `pve` volume group. You want to reclaim this space, so let's remove this `data`:
+The installation process of Proxmox VE created an LVM-thin pool called `data`, which is part of the `pve` volume group. You need to reclaim this space, so let's remove this `data` pool:
-1. On the Proxmox VE web console, go to the `Datacenter > Storage` option and take our the `local-lvm` volume from the list by selecting it and pressing on the `Remove` button.
+1. On the Proxmox VE web console, go to the `Datacenter > Storage` option and take out the `local-lvm` volume from the list by selecting it and pressing the `Remove` button:
- 
+ 
- A dialog will ask for your confirmation.
+ A dialog will ask for your confirmation:
- 
+ 
- The volume will be immediately taken out from the storage list.
+ The volume will be immediately taken out from the storage list:
- 
+ 
- This hasn't erased the LVM thinpool volume itself, only made it unavailable for Proxmox VE.
+ This action hasn't erased the LVM thin pool volume itself, it has only made it unavailable to Proxmox VE.
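+
+ By the way, this removal from Proxmox VE's storage configuration can also be done from the command line with the `pvesm` storage manager tool; a minimal sketch, assuming the storage ID is `local-lvm` as in this setup:
+
+ ~~~sh
+ $ pvesm remove local-lvm
+ ~~~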
2. To remove the `data` thin pool volume, get into the shell as `root` and execute the following:
- ~~~bash
+ ~~~sh
$ lvs
- LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
- data pve twi-a-tz-- 17.12g 0.00 1.58
- root pve -wi-ao---- 12.25g
- swap pve -wi-ao---- 12.00g
+ LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
+ data pve twi-a-tz-- 15.87g 0.00 1.58
+ root pve -wi-ao---- <24.50g
+ swap pve -wi-ao---- 12.00g
~~~
The `data` volume is under the `pve` volume group (`VG`). Knowing this, execute `lvremove` to delete this logical volume (`LV`).
- ~~~bash
+ ~~~sh
$ lvremove pve/data
~~~
Notice how you have to specify the VG before the name of the LV you want to remove. The command will ask you to confirm the removal. Answer with `y`:
- ~~~bash
- Do you really want to remove and DISCARD active logical volume pve/data? [y/n]: y
- Logical volume "data" successfully removed
+ ~~~sh
+ Do you really want to remove active logical volume pve/data? [y/n]: y
+ Logical volume "data" successfully removed.
~~~
3. To verify that the `data` volume doesn't exist anymore, you can check it with the `lvs` command.
- ~~~bash
+ ~~~sh
$ lvs
- LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
- root pve -wi-ao---- 12.25g
- swap pve -wi-ao---- 12.00g
+ LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
+ root pve -wi-ao---- <24.50g
+ swap pve -wi-ao---- 12.00g
~~~
- Only two logical volumes are listed now, and with the `pvs` command you can see how much space you have available now in the physical volume where the `data` thin pool was present. In this case it was in the `sda3` unit.
+ Only two logical volumes are listed now, and with the `pvs` command you can see how much space you have available in the physical volume where the `data` thin pool was present. In this case it was in the `sda3` unit.
- ~~~bash
+ ~~~sh
$ pvs
- PV VG Fmt Attr PSize PFree
- /dev/sda3 pve lvm2 a-- <49.50g <25.25g
+ PV VG Fmt Attr PSize PFree
+ /dev/sda3 pve lvm2 a-- <62.00g 25.50g
/dev/sdb1 hddint lvm2 a-- <930.51g <930.51g
/dev/sdc1 hddusb lvm2 a-- <1.82t <1.82t
~~~
- Also, if you go back to the Proxmox VE web console, you'll see there how the thin pool is not present anymore in your `pve` node's `Disks > LVM-Thin` screen.
+ Also, if you go back to the Proxmox VE web console, you'll see that the thin pool is no longer present, neither as a leaf under your `pve` node nor in the `Disks > LVM-Thin` screen.
- 
+ 
-### _Extending the `root` logical volume_
+### Extending the `root` logical volume
Now that you have a lot of free space in the `/dev/sda3` physical volume, let's give more room to your system's `root` volume.
-1. First locate, with `lvs`, where in your system's LVM structure the `root` LV is.
+1. First locate with `lvs` where in your system's LVM structure the `root` LV is:
- ~~~bash
+ ~~~sh
$ lvs
- LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
- root pve -wi-ao---- 12.25g
- swap pve -wi-ao---- 12.00g
+ LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
+ root pve -wi-ao---- <24.50g
+ swap pve -wi-ao---- 12.00g
~~~
- It's in the `pve` VG.
+ It is in the `pve` VG.
-2. Now you need to know exactly how much space you have available in the `pve` VG. Use `vgdisplay` to see all the details of this group.
+2. Now you need to know exactly how much space you have available in the `pve` VG. Use `vgdisplay` to see all the details of this group:
- ~~~bash
+ ~~~sh
$ vgdisplay pve
--- Volume group ---
VG Name pve
@@ -496,132 +515,167 @@ Now that you have a lot of free space in the /dev/sda3, let's give more room to
Max PV 0
Cur PV 1
Act PV 1
- VG Size <49.50 GiB
+ VG Size <62.00 GiB
PE Size 4.00 MiB
- Total PE 12671
- Alloc PE / Size 6208 / 24.25 GiB
- Free PE / Size 6463 / <25.25 GiB
- VG UUID z2FKMR-3mDj-p59m-1X8X-Ezgw-CLH5-4sLBou
+ Total PE 15871
+ Alloc PE / Size 9343 / <36.50 GiB
+ Free PE / Size 6528 / 25.50 GiB
+ VG UUID QLLDZO-5nRt-ye9r-1xtq-KBl9-drYB-iTHKRw
~~~
The line you must pay attention to is `Free PE / Size`. **PE** stands for _Physical Extent_, and the value is the number of extents you have available in the volume group. The previous `Alloc PE` line gives you the _allocated_ extents, or storage already in use.
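+
+ As a quick sanity check of those numbers: each extent is 4 MiB (see the `PE Size` line), so the free space should be roughly the free extent count multiplied by 4 MiB. For instance:
+
+ ~~~sh
+ $ echo $(( 6528 * 4 )) MiB
+ 26112 MiB
+ ~~~
+
+ And 26112 MiB divided by 1024 comes to 25.50 GiB, matching the `Free PE / Size` line above.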
-3. You need to use the `lvextend` command to expand the `root` LV in the free space you have in the `pve` group.
-
- ~~~bash
- $ lvextend -l +6463 -r pve/root
- Size of logical volume pve/root changed from 12.25 GiB (3136 extents) to <37.50 GiB (9599 extents).
+3. You need to use the `lvextend` command to expand the `root` LV in the free space you have in the `pve` group:
+
+ ~~~sh
+ $ lvextend -l +6528 -r pve/root
+ File system ext4 found on pve/root mounted at /.
+ Size of logical volume pve/root changed from <24.50 GiB (6271 extents) to <50.00 GiB (12799 extents).
+ Extending file system ext4 to <50.00 GiB (53682896896 bytes) on pve/root...
+ resize2fs /dev/pve/root
+ resize2fs 1.47.2 (1-Jan-2025)
+ Filesystem at /dev/pve/root is mounted on /; on-line resizing required
+ old_desc_blocks = 4, new_desc_blocks = 7
+ The filesystem on /dev/pve/root is now 13106176 (4k) blocks long.
+
+ resize2fs done
+ Extended file system ext4 on pve/root.
Logical volume pve/root successfully resized.
- resize2fs 1.46.2 (28-Feb-2021)
- Filesystem at /dev/mapper/pve-root is mounted on /; on-line resizing required
- old_desc_blocks = 2, new_desc_blocks = 5
- The filesystem on /dev/mapper/pve-root is now 9829376 (4k) blocks long.
~~~
Two options are specified to the `lvextend` command; a handy shorthand alternative is sketched right after this list.
- - `-l +6463` : specifies the number of extents (`+6463`) you want to give to the logical volume.
+ - `-l +6528`\
+ Specifies the number of extents (`+6528`) you want to give to the logical volume.
- - `-r` : tells the `lvextend` command to call the `resizefs` procedure, after extending the volume, to expand the filesystem within the volume over the newly assigned storage space.
+ - `-r`\
+ Orders the `lvextend` command to call the `resizefs` procedure, after extending the volume, to expand the filesystem within the volume over the newly assigned storage space.
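+
+ A common shorthand for this operation, when you simply want the LV to take up all the remaining free space in its VG without counting extents yourself, is the `+100%FREE` form; a sketch that, in this scenario, should be equivalent to the command above:
+
+ ~~~sh
+ $ lvextend -l +100%FREE -r pve/root
+ ~~~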
-4. As a final verification, check with the commands `lvs`, `vgs`, `pvs` and `df` that the root partition has taken up the entire free space available in the `pve` volume group.
+4. As a final verification, check with the commands `lvs`, `vgs`, `pvs` and `df` that the root partition has taken up the entire free space available in the `pve` volume group:
- ~~~bash
+ ~~~sh
$ lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
- root pve -wi-ao---- <37.50g
+ root pve -wi-ao---- <50.00g
swap pve -wi-ao---- 12.00g
+ ~~~
+
+ ~~~sh
$ vgs
VG #PV #LV #SN Attr VSize VFree
hddint 1 0 0 wz--n- <930.51g <930.51g
hddusb 1 0 0 wz--n- <1.82t <1.82t
- pve 1 2 0 wz--n- <49.50g 0
+ pve 1 2 0 wz--n- <62.00g 0
+ ~~~
+
+ ~~~sh
$ pvs
PV VG Fmt Attr PSize PFree
- /dev/sda3 pve lvm2 a-- <49.50g 0
+ /dev/sda3 pve lvm2 a-- <62.00g 0
/dev/sdb1 hddint lvm2 a-- <930.51g <930.51g
/dev/sdc1 hddusb lvm2 a-- <1.82t <1.82t
+ ~~~
+
+ ~~~sh
$ df -h
Filesystem Size Used Avail Use% Mounted on
- udev 3.9G 0 3.9G 0% /dev
- tmpfs 785M 996K 784M 1% /run
- /dev/mapper/pve-root 37G 3.5G 32G 10% /
- tmpfs 3.9G 43M 3.8G 2% /dev/shm
+ udev 3.8G 0 3.8G 0% /dev
+ tmpfs 783M 1.3M 782M 1% /run
+ /dev/mapper/pve-root 50G 3.5G 44G 8% /
+ tmpfs 3.9G 46M 3.8G 2% /dev/shm
+ efivarfs 128K 39K 85K 32% /sys/firmware/efi/efivars
tmpfs 5.0M 0 5.0M 0% /run/lock
- /dev/sda2 511M 328K 511M 1% /boot/efi
+ tmpfs 1.0M 0 1.0M 0% /run/credentials/systemd-journald.service
+ tmpfs 3.9G 0 3.9G 0% /tmp
/dev/fuse 128M 16K 128M 1% /etc/pve
- tmpfs 785M 0 785M 0% /run/user/0
+ tmpfs 1.0M 0 1.0M 0% /run/credentials/getty@tty1.service
+ tmpfs 783M 4.0K 783M 1% /run/user/0
~~~
- You'll see that the `root` volume has grown from 12.25 GiB to 37.50 GiB, the `pve` VG doesn't have any empty space (`VFree`) available, the `/dev/sda3` physical volume also doesn't have any space (`PFree`) free either, and that the `/dev/mapper/pve-root` `Size` corresponds to what the `root` LV has free.
+ You'll see that the `root` volume has grown from 24.50 GiB to 50.00 GiB, the `pve` VG does not have any empty space (`VFree`) available, the `/dev/sda3` physical volume does not have any free space (`PFree`) left either, and that the `/dev/mapper/pve-root` filesystem's `Size` corresponds to the `root` LV's new size.
-### _Creating a new partition and a new VG in the unallocated space on the `sda` drive_
+### Creating a new partition and a new VG in the unallocated space on the `sda` drive
What remains to be done is to make all the still unallocated space within the `sda` drive usable. So, let's make a new partition in it.
-1. In a `root` shell, use the following `fdisk` command.
+1. In a `root` shell, use the following `fdisk` command:
- ~~~bash
+ ~~~sh
$ fdisk /dev/sda
- Welcome to fdisk (util-linux 2.36.1).
+ Welcome to fdisk (util-linux 2.41).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.
+ This disk is currently in use - repartitioning is probably a bad idea.
+ It's recommended to umount all file systems, and swapoff all swap
+ partitions on this disk.
+
Command (m for help):
~~~
-2. You're in the `fdisk` partition editor now. Be careful what you do here or you might mess your drive up! Input the command `F` to check the empty space available in the `sda` drive. You should see an output like the following.
+ > [!IMPORTANT]
+ > **The `fdisk` command warns you about messing with the `/dev/sda` unit while in use**\
+ > In this particular scenario, this will not be an issue since you are going to create a new partition in the free space available.
+ >
+ > Still, modifying the partition table of an active storage unit is not recommended.
+ >
+ > **Never do this in a real production environment!**
- ~~~bash
+2. You're in the `fdisk` partition editor now. Be careful what you do here or you might mess up your PVE node's root filesystem! Input the command `F` to check the empty space available in the `sda` drive:
+
+ ~~~sh
Command (m for help): F
- Unpartitioned space /dev/sda: 881.51 GiB, 946516729344 bytes, 1848665487 sectors
+
+ Unpartitioned space /dev/sda: 868.51 GiB, 932558085632 bytes, 1821402511 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
Start End Sectors Size
- 104859648 1953525134 1848665487 881.5G
+ 132122624 1953525134 1821402511 868.5G
~~~
-3. With the previous information you're sure now of where the free space begins and ends, and also its size (881.5 GiB in my case). Now you can create a new partition, type `n` as the command.
+3. With the previous information, you now know where the free space begins and ends, and also its size (868.5 GiB in my case). Now you can create a new partition by typing `n` as the command:
- ~~~bash
+ ~~~sh
Command (m for help): n
Partition number (4-128, default 4):
~~~
- It asks you the number you want for the new partition, and since there are already three other partitions in `sda`, it has to be any number between 4 and 128 (both included). The default value (4) is fine, so **just press enter** on this question.
+ The first thing `fdisk` asks you is the number you want for the new partition. Since there are already three other partitions in `sda`, it has to be any number between 4 and 128 (both included). The default value (4) is fine, so **just press enter** on this question.
-4. The next question `fdisk` asks you is about which `sector` the new `sda4` partition should **start** in your `sda` drive.
+4. The next question `fdisk` asks you is about which `sector` the new `sda4` partition should **start** in your `sda` drive:
- ~~~bash
- First sector (104857601-1953525134, default 104859648):
+ ~~~sh
+ First sector (132120577-1953525134, default 132122624):
~~~
- Notice how the first sector chosen by default is the same one you saw before with the `F` command as the `Start` of the free space. Again, the default value (104859648) is good, since you want the `sda4` partition to start at the very beginning of the available unallocated space. So **press enter** to accept the default value.
+ Notice how the first sector chosen by default is the same one you saw before with the `F` command as the `Start` of the free space. Again, the default value (`132122624`) is good, since you want the `sda4` partition to start at the very beginning of the available unallocated space. Therefore, **press enter** to accept the default value.
-5. The last question is about on which sector the new `sda4` partition has to **end**.
+5. The last question is about on which sector the new `sda4` partition has to **end**:
- ~~~bash
- Last sector, +/-sectors or +/-size{K,M,G,T,P} (104859648-1953525134, default 1953525134):
+ ~~~sh
+ Last sector, +/-sectors or +/-size{K,M,G,T,P} (132122624-1953525134, default 1953523711):
~~~
- Curiously enough, it offers you as a possibility the sector in which your partition is starting. Notice how the default value proposed is the free space's `End`. This value is right what we want so the new `sda4` partition takes all the available free space. **Just press enter** to accept the default value.
+ Curiously enough, the range offered starts at the very sector in which your new partition begins. Notice how the default value proposed is, give or take a small alignment adjustment, the free space's `End`. This value is exactly what you want so the new `sda4` partition takes up all the available free space. **Just press enter** to accept the default value.
-6. The `fdisk` program will warn you about the partition's creation and return you to its command line.
+6. The `fdisk` program will warn you about the partition's creation and return you to its command line:
- ~~~bash
- Created a new partition 4 of type 'Linux filesystem' and of size 881.5 GiB.
+ ~~~sh
+ Created a new partition 4 of type 'Linux filesystem' and of size 868.5 GiB.
Command (m for help):
~~~
- Bear in mind that, although `fdisk` says it has done it, in fact **the new partition table is only in memory**. The change still has to be saved in the real `sda`'s partition table.
+ > [!WARNING]
+ > **Although `fdisk` says it has created the partition, be aware that, in fact, the new partition table is only in memory**\
+ > The change still has to be saved in the real `sda`'s partition table.
7. To verify that the partition has been registered, use the command `p` to see the current partition table in `fdisk`'s memory.
- ~~~bash
+ ~~~sh
Command (m for help): p
Disk /dev/sda: 931.51 GiB, 1000204886016 bytes, 1953525168 sectors
Disk model: Samsung SSD 860
@@ -629,30 +683,34 @@ What remains to do is to make usable all the still unallocated space within the
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
- Disk identifier: BB9279D2-E0C0-4514-BC2D-4E074BE2071A
+ Disk identifier: DE398227-5837-44FF-9422-173FEFB80BDC
Device Start End Sectors Size Type
/dev/sda1 34 2047 2014 1007K BIOS boot
- /dev/sda2 2048 1050623 1048576 512M EFI System
- /dev/sda3 1050624 104857600 103806977 49.5G Linux LVM
- /dev/sda4 104859648 1953525134 1848665487 881.5G Linux filesystem
+ /dev/sda2 2048 2099199 2097152 1G EFI System
+ /dev/sda3 2099200 132120576 130021377 62G Linux LVM
+ /dev/sda4 132122624 1953523711 1821401088 868.5G Linux filesystem
~~~
- See how the new `sda4` partition is named correlatively to the other three already existing ones, and takes up the previously unassigned free space (881.8 GiB). Also notice how `fdisk` indicates that the partition is of the `Type Linux filesystem`, not `Linux LVM`.
+ See how the new `sda4` partition is numbered consecutively after the three already existing `sda` partitions, and takes up the previously unassigned free space (868.5 GiB). Also notice how `fdisk` indicates that the partition is of the `Linux filesystem` type, not `Linux LVM`.
8. To turn the partition into a Linux LVM type, use the `t` command.
- ~~~bash
+ ~~~sh
Command (m for help): t
Partition number (1-4, default 4):
~~~
- Notice how the command asks you first which partition you want to change, and it also offers by default the newest one (number `4`). In my case, the default value 4 is the correct one, so I just pressed enter here.
+ Notice how the command asks you first which partition you want to change, and it also offers by default the newest one (number `4`). In this case, the default value 4 is the correct one, so press enter here.
-9. The next question is about what type you want to change the partition into. If you don't know the numeric code of the type you want, type `L` on this question and you'll get a long list with all the types available. To exit the types listing press `q` and you'll return to the question.
+9. The next question is about what type you want to change the partition into. If you don't know the numeric code of the type you want, type `L` on this question and you'll get a long list with all the types available. To exit the types listing press `q` and you'll return to the question:
- ~~~bash
- Partition type (type L to list all types): L
+ > [!WARNING]
+ > **The number identifying the type can change between `fdisk` versions!**\
+ > Always check with `L` which number corresponds to the type you want to use.
+
+ ~~~sh
+ Partition type or alias (type L to list all): L
1 EFI System C12A7328-F81F-11D2-BA4B-00A0C93EC93B
2 MBR partition scheme 024DEE41-33E7-11D3-9D69-0008C781F39F
3 Intel Fast Flash D3BFE2DE-3DAF-11DF-BA40-E3A556D89593
@@ -660,49 +718,66 @@ What remains to do is to make usable all the still unallocated space within the
5 Sony boot partition F4019732-066E-4E12-8273-346C5641494F
6 Lenovo boot partition BFBFAFE7-A34F-448A-9A5B-6213EB736C22
7 PowerPC PReP boot 9E1A2D38-C612-4316-AA26-8B49521E5A8B
8 ONIE boot 7412F7D5-A156-4B13-81DC-867174929325
9 ONIE config D4E6E2CD-4469-46F3-B5CB-1BFF57AFC149
- 10 Microsoft reserved E3C9E316-0B5C-4DB8-817D-F92DF00215AE
- 11 Microsoft basic data EBD0A0A2-B9E5-4433-87C0-68B6B72699C7
- 12 Microsoft LDM metadata 5808C8AA-7E8F-42E0-85D2-E1E90434CFB3
- 13 Microsoft LDM data AF9B60A0-1431-4F62-BC68-3311714A69AD
- 14 Windows recovery environment DE94BBA4-06D1-4D40-A16A-BFD50179D6AC
- 15 IBM General Parallel Fs 37AFFC90-EF7D-4E96-91C3-2D7AE055B174
- 16 Microsoft Storage Spaces E75CAF8F-F680-4CEE-AFA3-B001E56EFC2D
- 17 HP-UX data 75894C1E-3AEB-11D3-B7C1-7B03A0000000
- 18 HP-UX service E2A1E728-32E3-11D6-A682-7B03A0000000
- 19 Linux swap 0657FD6D-A4AB-43C4-84E5-0933C84B4F4F
- 20 Linux filesystem 0FC63DAF-8483-4772-8E79-3D69D8477DE4
- 21 Linux server data 3B8F8425-20E0-4F3B-907F-1A25A76F98E8
- 22 Linux root (x86) 44479540-F297-41B2-9AF7-D131D5F0458A
- 23 Linux root (x86-64) 4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709
- 24 Linux root (ARM) 69DAD710-2CE4-4E3C-B16C-21A1D49ABED3
- 25 Linux root (ARM-64) B921B045-1DF0-41C3-AF44-4C6F280D3FAE
- 26 Linux root (IA-64) 993D8D3D-F80E-4225-855A-9DAF8ED7EA97
- 27 Linux reserved 8DA63339-0007-60C0-C436-083AC8230908
- 28 Linux home 933AC7E1-2EB4-4F13-B844-0E14E2AEF915
- 29 Linux RAID A19D880F-05FC-4D3B-A006-743F0F84911E
- 30 Linux LVM E6D6D379-F507-44C2-A23C-238F2A3DF928
+ 10 Microsoft reserved E3C9E316-0B5C-4DB8-817D-F92DF00215AE
+ 11 Microsoft basic data EBD0A0A2-B9E5-4433-87C0-68B6B72699C7
+ 12 Microsoft LDM metadata 5808C8AA-7E8F-42E0-85D2-E1E90434CFB3
+ 13 Microsoft LDM data AF9B60A0-1431-4F62-BC68-3311714A69AD
+ 14 Windows recovery environment DE94BBA4-06D1-4D40-A16A-BFD50179D6AC
+ 15 IBM General Parallel Fs 37AFFC90-EF7D-4E96-91C3-2D7AE055B174
+ 16 Microsoft Storage Spaces E75CAF8F-F680-4CEE-AFA3-B001E56EFC2D
+ 17 HP-UX data 75894C1E-3AEB-11D3-B7C1-7B03A0000000
+ 18 HP-UX service E2A1E728-32E3-11D6-A682-7B03A0000000
+ 19 Linux swap 0657FD6D-A4AB-43C4-84E5-0933C84B4F4F
+ 20 Linux filesystem 0FC63DAF-8483-4772-8E79-3D69D8477DE4
+ 21 Linux server data 3B8F8425-20E0-4F3B-907F-1A25A76F98E8
+ 22 Linux root (x86) 44479540-F297-41B2-9AF7-D131D5F0458A
+ 23 Linux root (x86-64) 4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709
+ 24 Linux root (Alpha) 6523F8AE-3EB1-4E2A-A05A-18B695AE656F
+ 25 Linux root (ARC) D27F46ED-2919-4CB8-BD25-9531F3C16534
+ 26 Linux root (ARM) 69DAD710-2CE4-4E3C-B16C-21A1D49ABED3
+ 27 Linux root (ARM-64) B921B045-1DF0-41C3-AF44-4C6F280D3FAE
+ 28 Linux root (IA-64) 993D8D3D-F80E-4225-855A-9DAF8ED7EA97
+ 29 Linux root (LoongArch-64) 77055800-792C-4F94-B39A-98C91B762BB6
+ 30 Linux root (MIPS-32 LE) 37C58C8A-D913-4156-A25F-48B1B64E07F0
+ 31 Linux root (MIPS-64 LE) 700BDA43-7A34-4507-B179-EEB93D7A7CA3
+ 32 Linux root (HPPA/PARISC) 1AACDB3B-5444-4138-BD9E-E5C2239B2346
+ 33 Linux root (PPC) 1DE3F1EF-FA98-47B5-8DCD-4A860A654D78
+ 34 Linux root (PPC64) 912ADE1D-A839-4913-8964-A10EEE08FBD2
+ 35 Linux root (PPC64LE) C31C45E6-3F39-412E-80FB-4809C4980599
+ 36 Linux root (RISC-V-32) 60D5A7FE-8E7D-435C-B714-3DD8162144E1
+ 37 Linux root (RISC-V-64) 72EC70A6-CF74-40E6-BD49-4BDA08E8F224
+ 38 Linux root (S390) 08A7ACEA-624C-4A20-91E8-6E0FA67D23F9
+ 39 Linux root (S390X) 5EEAD9A9-FE09-4A1E-A1D7-520D00531306
+ 40 Linux root (TILE-Gx) C50CDD70-3862-4CC3-90E1-809A8C93EE2C
+ 41 Linux reserved 8DA63339-0007-60C0-C436-083AC8230908
+ 42 Linux home 933AC7E1-2EB4-4F13-B844-0E14E2AEF915
+ 43 Linux RAID A19D880F-05FC-4D3B-A006-743F0F84911E
+ 44 Linux LVM E6D6D379-F507-44C2-A23C-238F2A3DF928
...
~~~
- When you've have located the type you want, in this case `Linux LVM`, return to the question by pressing `q` and type the type's index number. For the `Linux LVM` is `30`.
+ When you have located the type you want, in this case `Linux LVM`, return to the question by pressing `q` and type the type's index number. For `Linux LVM`, it is `44` in this `fdisk` version:
- ~~~bash
- Partition type (type L to list all types): 30
+ ~~~sh
+ Partition type or alias (type L to list all): 44
~~~
-10. After indicating the type and pressing enter, `fdisk` will indicate you if the change has been done properly and will exit the command.
+10. After indicating the type and pressing enter, `fdisk` will tell you if the change has been applied properly and will return you to its command prompt:
- ~~~bash
+ ~~~sh
Changed type of partition 'Linux filesystem' to 'Linux LVM'.
Command (m for help):
~~~
-11. Check again the partition table, with the `p` command, to verify that the change has been done.
+11. To verify that `fdisk` has applied the type change, check the partition table again with the `p` command:
- ~~~bash
+ ~~~sh
Command (m for help): p
Disk /dev/sda: 931.51 GiB, 1000204886016 bytes, 1953525168 sectors
Disk model: Samsung SSD 860
@@ -710,37 +785,37 @@ What remains to do is to make usable all the still unallocated space within the
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
- Disk identifier: BB9279D2-E0C0-4514-BC2D-4E074BE2071A
+ Disk identifier: DE398227-5837-44FF-9422-173FEFB80BDC
Device Start End Sectors Size Type
/dev/sda1 34 2047 2014 1007K BIOS boot
- /dev/sda2 2048 1050623 1048576 512M EFI System
- /dev/sda3 1050624 104857600 103806977 49.5G Linux LVM
- /dev/sda4 104859648 1953525134 1848665487 881.5G Linux LVM
+ /dev/sda2 2048 2099199 2097152 1G EFI System
+ /dev/sda3 2099200 132120576 130021377 62G Linux LVM
+ /dev/sda4 132122624 1953523711 1821401088 868.5G Linux LVM
~~~
-12. Now that you have the new `sda4` partition ready, exit the `fdisk` program with the `w` command. This will write the changes to the `sda` drive's partition table.
+12. Now that you have the new `sda4` partition ready, exit the `fdisk` program with the `w` command. **This will write the changes to the `sda` drive's partition table**:
- ~~~bash
+ ~~~sh
Command (m for help): w
The partition table has been altered.
Syncing disks.
~~~
- See how `fdisk` gives you some output about the update before returning you to the shell.
+ See how `fdisk` gives you some final output about the update before returning you to the shell.
-13. With the `lsblk` command you can see that the `sda4` now appears as another branch of the `sda` tree, right below the `sda3`'s structure.
+13. With the `lsblk` command you can see that the `sda4` now appears as another branch of the `sda` tree, right below the `sda3`'s structure:
- ~~~bash
+ ~~~sh
$ lsblk
- NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+ NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 931.5G 0 disk
├─sda1 8:1 0 1007K 0 part
- ├─sda2 8:2 0 512M 0 part /boot/efi
- ├─sda3 8:3 0 49.5G 0 part
- │ ├─pve-swap 253:0 0 12G 0 lvm [SWAP]
- │ └─pve-root 253:1 0 37.5G 0 lvm /
- └─sda4 8:4 0 881.5G 0 part
+ ├─sda2 8:2 0 1G 0 part
+ ├─sda3 8:3 0 62G 0 part
+ │ ├─pve-swap 252:0 0 12G 0 lvm [SWAP]
+ │ └─pve-root 252:1 0 50G 0 lvm /
+ └─sda4 8:4 0 868.5G 0 part
sdb 8:16 0 931.5G 0 disk
└─sdb1 8:17 0 931.5G 0 part
sdc 8:32 0 1.8T 0 disk
@@ -749,75 +824,74 @@ What remains to do is to make usable all the still unallocated space within the
Notice how the `sda4` TYPE is simply indicated as `part`.
-14. Now that the `sda4` partition is ready, let's create a new **LVM physical volume** with it. Use the `pvcreate` command.
+14. Now that the `sda4` partition is ready, let's create a new **physical LVM volume** with it. Use the `pvcreate` command:
- ~~~bash
+ ~~~sh
$ pvcreate /dev/sda4
Physical volume "/dev/sda4" successfully created.
~~~
-15. To verify that `sda4` is now also a PV, you can use the `lvmdiskscan` command.
+15. To verify that `sda4` is now also a PV, you can use the `lvmdiskscan` command:
- ~~~bash
+ ~~~sh
$ lvmdiskscan
- /dev/sda2 [ 512.00 MiB]
- /dev/sda3 [ <49.50 GiB] LVM physical volume
- /dev/sda4 [ 881.51 GiB] LVM physical volume
/dev/sdb1 [ 931.51 GiB] LVM physical volume
+ /dev/sda2 [ 1.00 GiB]
+ /dev/sda3 [ <62.00 GiB] LVM physical volume
/dev/sdc1 [ <1.82 TiB] LVM physical volume
+ /dev/sda4 [ 868.51 GiB] LVM physical volume
0 disks
1 partition
0 LVM physical volume whole disks
4 LVM physical volumes
~~~
- Also, you can execute the `pvs` command to see if the `sda4` PV is there (which should).
+ Also, you can execute the `pvs` command to see if the `sda4` PV is there:
- ~~~bash
+ ~~~sh
$ pvs
PV VG Fmt Attr PSize PFree
- /dev/sda3 pve lvm2 a-- <49.50g 0
- /dev/sda4 lvm2 --- 881.51g 881.51g
+ /dev/sda3 pve lvm2 a-- <62.00g 0
+ /dev/sda4 lvm2 --- 868.51g 868.51g
/dev/sdb1 hddint lvm2 a-- <930.51g <930.51g
/dev/sdc1 hddusb lvm2 a-- <1.82t <1.82t
~~~
-16. What's left to do is to create a **volume group** over the `sda4` PV. For this, execute a `vgcreate` command.
+16. What's left to do is to create a **volume group** over the `sda4` PV. For this, execute a `vgcreate` command:
- ~~~bash
- $ vgcreate ssdint /dev/sda4
+ ~~~sh
+ $ vgcreate ssdint /dev/sda4
Volume group "ssdint" successfully created
~~~
-17. Finally, check with the `vgs` command if this new `ssdint` VG has been really created.
+17. Finally, check with the `vgs` command if this new `ssdint` VG has been really created:
- ~~~bash
+ ~~~sh
$ vgs
VG #PV #LV #SN Attr VSize VFree
hddint 1 0 0 wz--n- <930.51g <930.51g
hddusb 1 0 0 wz--n- <1.82t <1.82t
- pve 1 2 0 wz--n- <49.50g 0
- ssdint 1 0 0 wz--n- <881.51g <881.51g
+ pve 1 2 0 wz--n- <62.00g 0
+ ssdint 1 0 0 wz--n- <868.51g <868.51g
~~~
-With all these steps you've got an independent space in your SSD storage unit, meant to separate the storage used for virtual machines and containers, services and other non-Proxmox VE system related files (like ISO images or container templates) from what's the Proxmox VE system itself.
+With all these steps you've got an independent space in your SSD storage unit, meant to separate the storage used for virtual machines and containers, services, and other files not directly tied to the Proxmox VE system (like ISO images or container templates) from the Proxmox VE system itself.
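+
+For the record, the whole `sda4` sequence could also be scripted non-interactively with `sgdisk`, the same tool used earlier on the HDDs. A minimal sketch, assuming `8E00` is the `sgdisk` type code for Linux LVM and that the defaults pick up the free space correctly:
+
+~~~sh
+$ sgdisk -n 4:0:0 -t 4:8E00 /dev/sda
+$ partprobe /dev/sda
+$ pvcreate /dev/sda4
+$ vgcreate ssdint /dev/sda4
+~~~
+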
## References
-### _LVM_
+### Logical Volume Management (LVM)
- [Logical Volume Management Explained on Linux](https://devconnected.com/logical-volume-management-explained-on-linux/)
- [Understanding LVM In Linux (Create Logical Volume) RHEL/CentOS 7&8](https://tekneed.com/understanding-lvm-with-examples-advantages-of-lvm/)
- [RED HAT ENTERPRISE LINUX **8** - CONFIGURING AND MANAGING LOGICAL VOLUMES](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/configuring_and_managing_logical_volumes/index)
-- [Setup Flexible Disk Storage with Logical Volume Management (LVM) in Linux – PART 1](https://www.tecmint.com/create-lvm-storage-in-linux/)
+- [How to Create Disk Storage with Logical Volume Management (LVM) in Linux – PART 1](https://www.tecmint.com/create-lvm-storage-in-linux/)
- [How to Extend/Reduce LVM’s (Logical Volume Management) in Linux – Part II](https://www.tecmint.com/extend-and-reduce-lvms-in-linux/)
- [How to Take ‘Snapshot of Logical Volume and Restore’ in LVM – Part III](https://www.tecmint.com/take-snapshot-of-logical-volume-and-restore-in-lvm/)
- [Setup Thin Provisioning Volumes in Logical Volume Management (LVM) – Part IV](https://www.tecmint.com/setup-thin-provisioning-volumes-in-lvm/)
- [Logical Volume Management in Linux](https://www.techtutsonline.com/logical-volume-management-in-linux/)
-- [Thin Provisioning in LVM2](https://www.theurbanpenguin.com/thin-provisioning-lvm2/)
-- [How to Manage and Use LVM (Logical Volume Management) in Ubuntu](https://www.howtogeek.com/howto/40702/how-to-manage-and-use-lvm-logical-volume-management-in-ubuntu/)
-- [`pvcreate` error : Can’t open /dev/sdx exclusively. Mounted filesystem?](https://www.thegeekdiary.com/lvm-error-cant-open-devsdx-exclusively-mounted-filesystem/)
+- [How to Manage and Use LVM (Logical Volume Management) in Ubuntu](https://www.howtogeek.com/40702/how-to-manage-and-use-lvm-logical-volume-management-in-ubuntu/)
+- [pvcreate error : Can’t open /dev/sdx exclusively. Mounted filesystem?](https://www.ucartz.com/clients/knowledgebase/1376/pvcreate-error--Cant-open-ordevorsdx-exclusively.-Mounted-filesystem.html)
## Navigation
-[<< Previous (**G004. Host configuration 02**)](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G006. Host configuration 04**) >>](G006%20-%20Host%20configuration%2004%20~%20Removing%20subscription%20warning.md)
+[<< Previous (**G004. Host configuration 02**)](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G006. Host configuration 04**) >>](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md)
diff --git a/G006 - Host configuration 04 ~ Removing subscription warning.md b/G006 - Host configuration 04 ~ Removing Proxmox's subscription warning.md
similarity index 50%
rename from G006 - Host configuration 04 ~ Removing subscription warning.md
rename to G006 - Host configuration 04 ~ Removing Proxmox's subscription warning.md
index 9f4fc7c..f15dced 100644
--- a/G006 - Host configuration 04 ~ Removing subscription warning.md
+++ b/G006 - Host configuration 04 ~ Removing Proxmox's subscription warning.md
@@ -1,38 +1,55 @@
-# G006 - Host configuration 04 ~ Removing subscription warning
+# G006 - Host configuration 04 ~ Removing Proxmox's subscription warning
-Every time you login into the Proxmox VE web console, you are met with the following warning.
+- [About the Proxmox subscription warning](#about-the-proxmox-subscription-warning)
+- [Removing the subscription warning](#removing-the-subscription-warning)
+- [Reverting the changes](#reverting-the-changes)
+- [Change executed in just one command line](#change-executed-in-just-one-command-line)
+- [Final note](#final-note)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+- [Navigation](#navigation)
-
+## About the Proxmox subscription warning
-This is bothersome, but has an easy solution.
+Every time you log into the Proxmox VE web console, or when you get into the updates section, you are met with the following warning:
-1. Open a root shell and execute the next `cd` command.
+
+
+This is bothersome, but there is a way to make the web console stop showing it.
+
+## Removing the subscription warning
+
+Follow this procedure to remove or disable Proxmox's subscription warning:
+
+1. Open a root shell and `cd` to `/usr/share/javascript/proxmox-widget-toolkit`:
~~~bash
$ cd /usr/share/javascript/proxmox-widget-toolkit
~~~
-2. In that `proxmox-widget-toolkit` directory there's a javascript library file called `proxmoxlib.js`. Make a backup of it.
+2. In that `proxmox-widget-toolkit` directory there's a javascript library file called `proxmoxlib.js`. Make a `.orig` backup of it:
~~~bash
$ cp proxmoxlib.js proxmoxlib.js.orig
~~~
-3. Open the `proxmoxlib.js` file with a proper text editor (vi, vim or nano). Then, in the javascript code, search for the following text.
+3. Open the `proxmoxlib.js` file with a proper text editor (vi, vim or nano). Then, in the javascript code, search for the following text:
~~~js
Ext.Msg.show({
- title: gettext('No valid subscription'),
+ title: gettext('No valid subscription'),
~~~
-4. When you locate it (just search the `No valid subscription` string, its unique in the code), replace `Ext.Msg.show` with `void`, as shown below.
+4. When you locate it (just search for the `No valid subscription` string, it's unique in the code), replace `Ext.Msg.show` with `void`:
~~~js
void({ //Ext.Msg.show({
- title: gettext('No valid subscription'),
+ title: gettext('No valid subscription'),
~~~
-5. Save the change and exit the editor, then restart the Proxmox web service.
+5. Save the change and exit the editor, then restart the Proxmox VE web service:
~~~bash
$ systemctl restart pveproxy.service
@@ -46,14 +63,15 @@ This is bothersome, but has an easy solution.
If you need to undo the change explained before, you have three options to revert it:
-1. Undoing manually the changes you made in the `proxmoxlib.js`file.
-2. Restoring the backup file you created of the file within the `proxmox-widget-toolkit` directory.
+1. Manually undoing the change you made in the `proxmoxlib.js` file.
+
+2. Restoring the `.orig` backup you created of the file within the `proxmox-widget-toolkit` directory:
~~~bash
$ mv proxmoxlib.js.orig proxmoxlib.js
~~~
-3. Reinstall the proxmox-widget-toolkit package from the repository
+3. Reinstalling the `proxmox-widget-toolkit` package from the repository:
~~~bash
$ apt-get install --reinstall proxmox-widget-toolkit
@@ -64,20 +82,20 @@ If you need to undo the change explained before, you have three options to rever
To apply the change in just one (long) command line, use the following shell command.
~~~bash
-$ sed -Ezi.bkp "s/(Ext.Msg.show\(\{\s+title: gettext\('No valid sub)/void\(\{ \/\/\1/g" /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && systemctl restart pveproxy.service
+$ sed -Ezi.orig "s/(Ext.Msg.show\(\{\s+title: gettext\('No valid sub)/void\(\{ \/\/\1/g" /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js && systemctl restart pveproxy.service
~~~
## Final note
-This fix is known to work on any version starting from Proxmox VE **5.1** up to **7.0-x**. Also, bear in mind that any Proxmox VE update will undo this change and restore the warning, so you'll be force to apply this modification again.
+This fix is known to work on any version starting from Proxmox VE **5.1** up to **9.0**. Also bear in mind that later Proxmox VE updates may undo this change and restore the warning, forcing you to apply this modification again.
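+
+A quick way to check whether the patch is still in place after an update is to count the occurrences of the modified string in `proxmoxlib.js`; a small sketch of that check:
+
+~~~bash
+$ grep -c "void({ //Ext.Msg.show" /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js
+~~~
+
+If the count printed is `0`, the warning is back and you'll have to apply the modification again.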
## Relevant system paths
-### _Directories_
+### Directories
- `/usr/share/javascript/proxmox-widget-toolkit`
-### _Files_
+### Files
- `/usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js`
diff --git a/G007 - Host hardening 01 ~ TFA authentication.md b/G007 - Host hardening 01 ~ TFA authentication.md
index ea1a572..f97c5dd 100644
--- a/G007 - Host hardening 01 ~ TFA authentication.md
+++ b/G007 - Host hardening 01 ~ TFA authentication.md
@@ -1,70 +1,111 @@
# G007 - Host hardening 01 ~ TFA authentication
-One of the first things you should do to harden your standalone PVE node is to enable the **TFA** (Two Factor Authentication, also known as _2FA_) for all your users, and specially for the `root` user.
+- [Enable Two Factor Authentication in your PVE system](#enable-two-factor-authentication-in-your-pve-system)
+- [Enabling TFA for SSH access](#enabling-tfa-for-ssh-access)
+ - [1. Installation of Google Authenticator's PAM package](#1-installation-of-google-authenticators-pam-package)
+ - [2. Configuration of a Google Authenticator TOTP token](#2-configuration-of-a-google-authenticator-totp-token)
+ - [2.1 Interactive execution of Google Authenticator program](#21-interactive-execution-of-google-authenticator-program)
+ - [2.2 Automated execution of Google authenticator program](#22-automated-execution-of-google-authenticator-program)
+ - [3. File with the generated authentication token configuration](#3-file-with-the-generated-authentication-token-configuration)
+ - [4. Enforcing TFA on remote ssh access](#4-enforcing-tfa-on-remote-ssh-access)
+- [Enforcing TFA TOTP for accessing the Proxmox VE web console](#enforcing-tfa-totp-for-accessing-the-proxmox-ve-web-console)
+- [Enforcing TFA TOTP as a default requirement for `pam` realm](#enforcing-tfa-totp-as-a-default-requirement-for-pam-realm)
+- [Incompatibility of PVE web console login with TFA enforced local shell access](#incompatibility-of-pve-web-console-login-with-tfa-enforced-local-shell-access)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [About TFA and Google Authenticator](#about-tfa-and-google-authenticator)
+ - [Configuring ssh login with password + TOTP](#configuring-ssh-login-with-password--totp)
+ - [About Linux PAM](#about-linux-pam)
+- [Navigation](#navigation)
+
+## Enable Two Factor Authentication in your PVE system
+
+One of the first things you should do to harden your standalone Proxmox VE node is to enable Two Factor Authentication (**TFA**, also known as _2FA_) for all your users, **and especially for the `root` user**.
## Enabling TFA for SSH access
-First, you must be aware that the Proxmox VE platform provides its own TFA system for login into its web console, but **it doesn't apply for local shell or remote ssh access**. So, in this section you'll see how to enable a TFA token system for ssh and web access, although **not for direct local shell login**.
+First, you must be aware that the Proxmox VE platform provides its own TFA system for logging into its web console, but **it does not apply to local shell or remote ssh access**. So, in this section you'll see how to enable a TFA token system valid for both ssh and web access, although **not for direct local shell login**.
-### _1. Installation of Google authenticator's PAM package_
+### 1. Installation of Google Authenticator's PAM package
Open a shell console as `root` (the only user you should have available yet) and execute the `apt` commands below.
- ~~~bash
+ ~~~sh
$ apt update
$ apt install -y libpam-google-authenticator
~~~
-This installs the `google-authenticator` program, which will allow you to generate TFA tokens for your Debian system's users.
+This installs the `google-authenticator` program, enabling you to generate TFA tokens for the users of your PVE server's underlying Debian system.
-### _2. Configuration of a Google authenticator TOTP token_
+### 2. Configuration of a Google Authenticator TOTP token
-At this point, be aware that the Google authenticator program offers two ways of being executed: one is fully interactive and the other is meant to be used in shell scripting. The latter **is not documented** in the related program's `man` page, although it's the one that allows you to set the TFA token configuration in a more precise manner.
+At this point, be aware that the Google Authenticator program offers two ways of being executed: one is fully interactive and the other is meant to be used in shell scripting.
-#### **2.1 Interactive execution of Google authenticator program**
+#### 2.1 Interactive execution of Google Authenticator program
-The interactive process is a step-by-step execution that asks the user to define some parameters for generating a new TFA token. Since those steps don't cover all the possible parameters handled by the command, this interactive method should be used only for a first-contact with the command. In following executions, and specially for shell scripting purposes, **the automated execution** (explained in the next subsection) **should be the one used always**.
+The interactive process is a step-by-step execution that asks the user to define some parameters for generating a new TFA token. Since those steps do not cover all the possible parameters handled by the command, this interactive method should be used only for a first contact with the command.
+
+> [!IMPORTANT]\
+> **Prefer the [automated execution method](#22-automated-execution-of-google-authenticator-program) over this interactive one**\
+> The automated method gives you access to all the options available for generating TFA tokens with the `google-authenticator` program, especially when using it in shell scripting.
To start the interactive process, just launch the Google Authenticator program as shown below.
- ~~~bash
- $ google-authenticator -Q UTF8
+ ~~~sh
+ $ google-authenticator -Q utf8
~~~
Notice how a parameter `-Q` is specified in the command above.
-- `-Q UTF8` : by default, the QR code generated by the `google-authenticator` command is printed with `ANSI` characters. This usually makes the printed QR too big for your shell screen and impossible to be copied or read by your authenticator app. Therefore, it's better to use the `UTF8` character set, so the QR is printed in a smaller and much more convenient character format.
+- `-Q utf8`\
+ By default, the QR code generated by the `google-authenticator` command is printed with ANSI characters. This usually makes the printed QR too big for your shell screen and impossible to be copied or read by your authenticator app. Therefore, it's better to use Unicode's UTF-8 character set, so the QR is printed in a smaller and much more convenient character format.
The program will ask for your input in the following steps:
-1. **Do you want authentication tokens to be time-based (y/n)**
- Answer `y` here. The time-based token (TOTP) it's the most common and convenient one.
+1. `Do you want authentication tokens to be time-based (y/n)`\
+ Answer `y` here. The time-based token is a _Time-based One-Time Password_, or _TOTP_, and is the most common and convenient token to use in TFA.
-2. In the following step, the `google-authenticator` program will show you a **QR code** and below:
+2. In the following step, the `google-authenticator` program will show you a **QR code** and below it:
- - **Your new secret key is**: this is the secret key associated to this new TFA code.
+ - `Your new secret key is:`\
+     This is the secret key associated with this new TFA token.
- - **Enter code from app (-1 to skip)**: here you can input the code generated by your TFA app to verify that you got the QR right, or just type `-1` to skip this check.
+   - `Enter code from app (-1 to skip):`\
+ Here you can input the code generated by your TFA app to verify that you got the QR right, or just type `-1` to skip this check.
   Regardless of whether you validate the TFA token or not, you'll get the emergency scratch codes associated with it. Take a screen capture of the QR and save it, and also copy all the codes (**secret key**, **verification code** and **emergency scratch codes**) to a safe storage (like a password manager).
-3. **Do you want me to update your "$HOME/.google_authenticator" file? (y/n)**
- Answer `y` so the command writes this authentication token's configuration in a `.google_authenticator` plain text file. By default, the command saves this file at the home directory of your current user, so **notice** that I've replaced with a `$HOME` placeholder the path to the user's home in the question. You could change the path and the file name so you could generate and save different TFA tokens on different files or in different paths.
+3. `Do you want me to update your "/root/.google_authenticator" file? (y/n)`\
+ Answer `y` so the command writes this authentication token's configuration in a `.google_authenticator` plain text file. By default, the command saves this file at the home directory of your current user (in this case, `root`).
+
+ > [!IMPORTANT]
+ > **Answering `n` to this question finishes the interactive procedure**\
+   > If you answer `n` here, the `google-authenticator` command will not ask you the questions shown in the next steps and will return you to the shell prompt.
+
+ > [!NOTE]
+ > **It is possible to use other paths and other file names**\
+   > You can change the path and the file name, allowing you to generate and save different TFA tokens in different files or paths.
+ >
+ > This possibility will neither be shown nor used in this guide.
-4. **Do you want to disallow multiple uses of the same authentication token? This restricts you to one login about every 30s, but it increases your chances to notice or even prevent man-in-the-middle attacks (y/n)**
+4. `Do you want to disallow multiple uses of the same authentication token? This restricts you to one login about every 30s, but it increases your chances to notice or even prevent man-in-the-middle attacks (y/n)`\
Answer `y` to limit to one login per 30 seconds the usage of each TOTP token.
-5. **By default, a new token is generated every 30 seconds by the mobile app. In order to compensate for possible time-skew between the client and the server, we allow an extra token before and after the current time. This allows for a time skew of up to 30 seconds between authentication server and client. If you experience problems with poor time synchronization, you can increase the window from its default size of 3 permitted codes (one previous code, the current code, the next code) to 17 permitted codes (the 8 previous codes, the current code, and the 8 next codes). This will permit for a time skew of up to 4 minutes between client and server. Do you want to do so? (y/n)**
- Answer `n` here. This option is for taking into account possible time differences between the server and the authentication app that gives you the timed codes. Nowadays, all systems are in proper sync thanks to time servers on internet.
+5. `By default, a new token is generated every 30 seconds by the mobile app. In order to compensate for possible time-skew between the client and the server, we allow an extra token before and after the current time. This allows for a time skew of up to 30 seconds between authentication server and client. If you experience problems with poor time synchronization, you can increase the window from its default size of 3 permitted codes (one previous code, the current code, the next code) to 17 permitted codes (the 8 previous codes, the current code, and the 8 next codes). This will permit for a time skew of up to 4 minutes between client and server. Do you want to do so? (y/n)`\
+   Answer `n` here. This option is for taking into account possible time differences between the server and the authentication app that gives you the timed codes. Nowadays, all systems are in proper sync thanks to time servers available on the internet.
-6. **If the computer that you are logging into isn't hardened against brute-force login attempts, you can enable rate-limiting for the authentication module. By default, this limits attackers to no more than 3 login attempts every 30s. Do you want to enable rate-limiting? (y/n)**
+6. `If the computer that you are logging into isn't hardened against brute-force login attempts, you can enable rate-limiting for the authentication module. By default, this limits attackers to no more than 3 login attempts every 30s. Do you want to enable rate-limiting? (y/n)`\
   Answer `y` to this final question. This limits the attempts to input the authentication code to 3 every 30 seconds.
-#### **2.2 Automated execution of Google authenticator program**
+The resulting configuration is stored by default in the `$HOME` directory of the current user, in this case the `root` superuser, as a `.google_authenticator` file. Remove the file when you want to regenerate the TOTP token and its associated keys.
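+
+For instance, a minimal sketch of regenerating the `root` user's token, assuming the default file location and the `-Q utf8` option used in this section:
+
+~~~sh
+# Delete the current TOTP token configuration...
+$ rm /root/.google_authenticator
+# ...then generate a new one, interactively or with the automated options shown next.
+$ google-authenticator -Q utf8
+~~~
+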
-The `google-authenticator` program can be executed with parameters so it can be included in automated procedures. For some reason, this is not documented in the `google-authenticator`'s corresponding `man` page. So, to see what options this command offers, execute `google-authenticator -h` and read it's output.
+#### 2.2 Automated execution of Google Authenticator program
-~~~bash
+The `google-authenticator` program can be executed with parameters so it can be included in automated procedures. You can also list those parameters with the `--help` option:
+
+~~~sh
$ google-authenticator -h
google-authenticator []
-h, --help Print this message
@@ -88,281 +129,326 @@ google-authenticator []
-e, --emergency-codes=N Number of emergency codes to generate
~~~
-> **BEWARE!**
-> If you've executed before the command in the interactive mode, remember that you'll already have a configured TOTP token defined in a `.google_authenticator` file within your user's home directory. That file will be overwritten when you execute the `google_authenticator` command again.
+> [!WARNING]
+> **If you already executed the `google-authenticator` command in the interactive mode, you'll have a configured TOTP token**\
+> This TOTP token's setup will be stored in a `.google_authenticator` file within your user's home directory.
+>
+> **The file `.google_authenticator` will be overwritten when you execute the `google-authenticator` command again for the same user and location!**
-Now that you know all the options available in the `google_authenticator` command, you can build a command line to replicate and enhance the TOTP token configuration made in the previously explained interactive method. Check it out below.
+Now that you know all the options available in the `google-authenticator` command, you can build a command line to replicate and enhance the TOTP token configuration made in the previously explained interactive method:
-> **BEWARE!**
-> The following command line is just an example, **don't execute it as is**. Among it's many options there are two, `-l` and `-i` , that you must always edit to fit your requirements.
+> [!IMPORTANT]
+> **The following command line is just an example, DO NOT execute it as is**\
+> Among its many options there are two, `-l` and `-i`, that you must always edit to fit your requirements.
-~~~bash
-$ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q UTF8 -i pve.deimos.cloud -l root@pam
+~~~sh
+$ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q utf8 -i pve.homelab.cloud -l root@pam
~~~
-Next I'll explain the options set in the example command line above.
+The options set in the example command line above are explained next:
-- `-t -d -f -r 3 -R 30 -w 3` : these options set up this way configure the TOTP token quite as it was set in the interactive procedure. Check their purpose in the `google-authenticator` command's help.
+- `-t -d -f -r 3 -R 30 -w 3`\
+ These options configure the TOTP token almost like it was set in the [interactive procedure](#21-interactive-execution-of-google-authenticator-program). Check their purpose in the `google-authenticator` command's help (`-h`) or `man` content.
-- `-Q UTF8` : by default, the QR code shown by the `google-authenticator` command is printed with `ANSI` characters. This can make the printed QR too big for your shell window and impossible to be copied or read by your authenticator app. Therefore, it's better to use the `UTF8` character set, so the QR is printed in a smaller but equally valid format.
+- `-Q utf8`\
+ By default, the QR code shown by the `google-authenticator` command is printed with `ANSI` characters. This can make the printed QR too big for your shell window and impossible to be copied or read by your authenticator app. Therefore, it's better to use Unicode's UTF-8 character set, printing the QR in a smaller but equally valid format.
-- `-i pve.deimos.cloud` : the **issuer** is the name of the entity that issued the authentication token. In your authenticator app, it helps you identify to which system corresponds each TOTP code. In this guide it would be the main sole PVE node itself, so this value is filled by default with whatever hostname it has, like `pve`. It's better to put a more descriptive string here, like your full `pve` node's FQDN to avoid confusion.
+- `-i pve.homelab.cloud`\
+  The **issuer** is the name of the entity that issued the authentication token. In your authenticator app, it helps you identify which system each TOTP code corresponds to. In this guide the issuer would be the sole PVE node itself, so this value is filled by default with whatever hostname it has, like `pve`. It's better to put a more descriptive string here, like your `pve` node's FQDN, to avoid confusion.
-- `-l root@pam` : the **label** is another identifying value shown by authenticator apps for each TOTP code. By default, the `google-authenticator` command fills this label with the username of your current user, plus your system's hostname. Instead of using the hostname, use the user's authentication realm, `pam` in this case, so this label also looks as how the Proxmox VE web console presents the username.
+- `-l root@pam`\
+  The **label** is another identifying value shown by authenticator apps for each TOTP code. By default, the `google-authenticator` command fills this label with the username of your current user, plus your system's hostname. Instead of using the hostname, use the user's authentication realm, `pam` in this case, so this label also matches how the Proxmox VE web console presents the username.
- 
+ 
When you have set up your command to your preferences, execute it and you'll get an output like the following:
-~~~bash
+~~~sh
Warning: pasting the following URL into your browser exposes the OTP secret to Google:
- https://www.google.com/chart?chs=200x200&chld=M|0&cht=qr&chl=otpauth://totp/root@pam%3Fsecret%BKWG32GWKV4HOTNTQEUYQIYNDE%26issuer%3Dpve.deimos.cloud
-
- █▀▀▀▀▀█ █ █▀█▄▄ ▄ █ ██ ▄▀█▄ █▀▀▀▀▀█
- █ ███ █ ██ ▄█ █▄▄▄ ▄▄▄ ▄█▄ █ ███ █
- █ ▀▀▀ █ ▄ ▀▀▄▄████ ▄▄▀▄▄███ █ ▀▀▀ █
- ▀▀▀▀▀▀▀ ▀ ▀ █▄▀▄▀ ▀▄▀▄▀ █ █ ▀ ▀▀▀▀▀▀▀
- █ ▀▄█▀▄▀▄▀▄ ▄█▄ █ █▄▄▀ ▄▀█▀▀▀ ▀ ▄
- ▀▀██▀ ▀█▀█ █ ▀▀▄▀▀▄ ▀▄█ ▀▄ ▄██▄▄▀ ▀▀
- █▀██ ▄▀ █▀▄ ▄ ▄██▀█▀▄ ▀ █▄█ ▄█▀▀
- ▀▀█▀ ▀▀█▀▄ ██ ███ ▀ █▄█▄▄▀▀▀▄█ ▀ █
- ███ █▀▀██▀█ █▄▄▄ ██▄▄▀▄▄ ▄▄▀▀ █ ██▄
- ▀▀█▀▄█▀▄ ▄▄▀▀▀▀▀██ █▀▄▄▀▄▄▀▀▄█▀ ▄▀█
- ▀ ▄ ▀█▀▀▄█ █▀▀█▄ █▄▄█▄ ███ ▄ ██▀█▀▄
- ▄ █▀█▀▄ ▀█▄ ▀█ █ ▄▄▄█▄▀▄▄▄ █▀▀ ▀▀
- ▀█▄▄█ ▀▀██▀█▄▀ █▄▄ ▀█▄▀ ▀▄▄▀▀▀█▀▀▄▄▄
- ▀▄██▄▀▀ ▀█▄▀█▀▄▄ ▄▀▀▄█▀▀▀▄▀ █▄▀▀ ▀▄▀
- ▀▀ ▀▀▀ ▄ ██ ▀▄▄ ▄▀▀▄▄█▄▀█▀▀▀█▀ ▀▀
- █▀▀▀▀▀█ ▄▀█▀█▀█ ▄█ █▀▄▄▀ ▄▀█ ▀ █▀██▀
- █ ███ █ ▄ █▀ ▀▄▄███▄▀▄ ▀▄█▀ █▀▀███ ▄▀
- █ ▀▀▀ █ ▀ ▄▀▄█▀▄▀▄█▄▄█ ▄███▀ ▄▄█▀ ▀▀
- ▀▀▀▀▀▀▀ ▀ ▀ ▀ ▀ ▀ ▀ ▀▀▀▀▀▀ ▀▀▀
-
-Your new secret key is: BKWG32GWKV4HOTNTQEUYQIYNDE
-Enter code from app (-1 to skip):
+ https://www.google.com/chart?chs=200x200&chld=M|0&cht=qr&chl=otpauth://totp/root@pam%3Fsecret%3DHPDSSXPJ5BFJIXIIZD7EYTMJVM%26issuer%3Dpve.homelab.cloud
+
+ █▀▀▀▀▀█ ▄ ▄ ▄▀▀▄█▀▀ ██ ▄▀█▄ █▀▀▀▀▀█
+ █ ███ █ ▄█▀█▄▀█ ███ ▄▄▄ ▄█▄ █ ███ █
+ █ ▀▀▀ █ ▄ ▀█▀▄▄ ██▄█▄▄▄▀▄▄███ █ ▀▀▀ █
+ ▀▀▀▀▀▀▀ ▀▄▀ █▄█▄█▄▀ ▀▄▀ █ █ ▀ ▀▀▀▀▀▀▀
+ ▀▄ ▀ █▀ █ ▄ █▀█ ▄ ▄█▀ ▄▀█▀▀▀ ▀ ▄▄ ▄
+ ▄▀▀██▀▀█▄▀ ▀▀█▄▀▀██▄▄█ ▀▄ ▄██▄ █ ▀▀
+ ▄▄ █▄▀▄██ █▀▄ ▄▀▄▄ ▄▀▄ ▀ ▀█▄█ ▄█▀▀
+ ██▀█ ▄▀█ █▀▄▄█▀ ▀▄█▄ █ █▄█▄▄▀▀ ▄█ ▀ ▀
+ ▀ ▄▄▀█▀ ▄▄ █▀▄ ▄▄▀ ▀█▀█▄ ▄▄█▀▀█▄█▄▄
+ ▄▄▄ ▀ █▄██▄▀▀▀▀██ ██▀▄▄▀▄ ▀▀▄█ ▀█▀█
+ ▄▀ ▀▄▄▀▀▄▀ ▄▄▀▀██▀▄█▄▀▄ ███ ▄ ██▀▄▀▄
+ █▄ ▄██▀█▀ ▄█▀▄▄█ █ ▄ ▄▀▄▄▀▀█▀▀ ▀█
+ ▀▀▀▀██▀▀▄ █▀█ ▀▄█▀ ███▄ ▀▄▄ █▀▀ █ ▄▄
+ ▀▄▀▀▀█▀▀▄▄█▀▀ █▄██▀▄██▀▀▀▄▀ ▀▄▀ ▀▀▄█
+ ▀▀ ▀▀ ▀ ▄█▀ ██ ▀▄▄▀▄▀▀▄██▄▀█▀▀▀█▀ ▀
+ █▀▀▀▀▀█ ▄ █▀▄ ▀ ██▄ ██ ▄▀ ▄▀█ ▀ █▀███
+ █ ███ █ ▄ ██ ▀█▀█▄▄ ▄█ ▀▄█▀ █▀▀███▄ ▀
+ █ ▀▀▀ █ █ ▀▀█▄█ ▀█ ▄█ ▄███▀▄▄▄█ ▀▀▀
+ ▀▀▀▀▀▀▀ ▀▀▀ ▀ ▀ ▀ ▀ ▀ ▀ ▀▀▀▀▀▀ ▀▀▀
+
+Your new secret key is: HPDSSXPJ5BFJIXIIZD7EYTMJVM
+Enter code from app (-1 to skip):
~~~
See that the command is asking you to verify the key, so scan the QR code with your authentication app and then input the code as the answer to the question (or just type `-1` if you want to skip this validation).
-~~~bash
+~~~sh
Enter code from app (-1 to skip): 384525
Code confirmed
Your emergency scratch codes are:
- 26550421
- 52555350
- 88197682
- 80294556
- 15953370
+ 68904426
+ 75567886
+ 58397698
+ 41263689
+ 78071899
~~~
With the code verified, take a screen capture of the QR (or just copy the QR text block itself into a plain UTF-8 text file). Save the **QR**, its **issuer** and **label**, and **all the related codes** in a safe place, like a password manager.
-### _3. File with the generated authentication token configuration_
+### 3. File with the generated authentication token configuration
-By default, the authentication token generated by the `google-authenticator` command is saved, together with its configuration, in a plain text file called `.google_authenticator`. This file is generated by the command itself (remember how it asked about saving in the file in the interactive procedure) and saved in the **home** directory of your **current user**, but with only a read-only permission (`400` mode) exclusive for your user. It's content will vary depending on how the token has been configured, but it will look something like the following.
+By default, the authentication token generated by the `google-authenticator` command is saved, together with its configuration, in a plain text file called `.google_authenticator`. This file is generated by the command itself and saved in the **home** directory of your **current user**, with read-only permission (`400` mode) restricted to your user. Its content will vary depending on how the token has been configured, but it will look something like the following.
~~~config
-S4ZTE7Q4GMBYN4XZG3IHKJN4YE
+HPDSSXPJ5BFJIXIIZD7EYTMJVM
" RATE_LIMIT 3 30
" WINDOW_SIZE 3
" DISALLOW_REUSE
" TOTP_AUTH
-26550421
-52555350
-88197682
-80294556
-15953370
+68904426
+75567886
+58397698
+41263689
+78071899
~~~
-- `S4ZTE7Q4GMBYN4XZG3IHKJN4YE`: this is your token's secret code, the one you have to input in your authentication app to get the corresponding TOTP codes.
+- `HPDSSXPJ5BFJIXIIZD7EYTMJVM`\
+ This is your TOTP token's secret code, the one you have to input in your authentication app to get the corresponding TOTP codes.
+
+- All the lines starting with `"` are configuration values for the TOTP token\
+ You could change these manually but, since how to do it is not properly documented, I do not recommend it.
-- All the lines starting with `"` are configuration values for the token. You could change these manually but, since how to do it is not properly documented, I wouldn't recommend it.
+- `68904426`\
+ This number and all the ones listed below it are _scratch recovery codes_. If you lose access to your TFA authentication app, you can use any of these recovery codes as TOTP codes.
-- `26550421`: this number and the ones below are scratch recovery codes, to use in case you lose access to your TFA authentication app.
+ > [!WARNING]
+ > **Scratch recovery codes are one-use only**\
+ > When used once, a scratch recovery code cannot be used again (hence, it has to be _scratched_ from the list of available recovery codes).
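+
+To double-check where the token file ended up and that it keeps the restrictive `400` mode, you could list it like this (a quick optional check, assuming the `root` user's home directory):
+
+~~~sh
+# The file should be readable only by its owner (-r--------).
+$ ls -l /root/.google_authenticator
+~~~
+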
-### _4. Enforcing TFA on remote ssh access_
+### 4. Enforcing TFA on remote ssh access
-Now that you have configured the TOTP token for your `root` user (still the only user you have), you can adjust your system's ssh configuration so it enforces the use of the TOTP token when accessing the system remotely.
+With the TOTP token configured for your `root` user (the only user you have at this point yet), you can adjust your system's ssh configuration so it enforces the use of the TOTP token when accessing the system remotely.
1. First, go to the `pam` configuration directory and make a backup of the ssh daemon's configuration file.
- ~~~bash
+ ~~~sh
$ cd /etc/pam.d
$ cp sshd sshd.orig
~~~
2. **Append** to the `sshd` file the following lines to make the ssh daemon aware of the `google-authenticator` method of authentication.
- ~~~bash
+ ~~~sh
# Enforcing TFA with Google Authenticator tokens
auth required pam_google_authenticator.so
~~~
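+
+   As a non-interactive alternative (handy if you ever script this setup), the same two lines could be appended with `tee`; a hedged sketch:
+
+   ~~~sh
+   # Append the Google Authenticator PAM rule to the sshd PAM configuration.
+   $ printf '%s\n' "# Enforcing TFA with Google Authenticator tokens" "auth required pam_google_authenticator.so" | tee -a /etc/pam.d/sshd
+   ~~~
+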
3. Go to the ssh configuration directory and make a backup of the original `sshd_config` file.
- ~~~bash
+ ~~~sh
$ cd /etc/ssh/
$ cp sshd_config sshd_config.orig
~~~
-4. Edit the `sshd_config` as follows.
- - Find the parameter `ChallengeResponseAuthentication` and change it to `yes`.
+4. Edit the `sshd_config` file as follows:
- ~~~bash
- # Change to yes to enable challenge-response passwords (beware issues with
- # some PAM modules and threads)
- ChallengeResponseAuthentication yes
- ~~~
+ - Find the parameter `KbdInteractiveAuthentication` and change it to `yes`.
+
+ ~~~sh
+ # Change to yes to enable challenge-response passwords (beware issues with
+ # some PAM modules and threads)
+ KbdInteractiveAuthentication yes
+ ~~~
- Append the following line to the file.
- ~~~bash
- AuthenticationMethods keyboard-interactive
- ~~~
+ ~~~sh
+ AuthenticationMethods keyboard-interactive
+ ~~~
- This parameter tells ssh which authentication methods are required. In this case, it'll ask for a password and the verification code.
+ This parameter tells ssh which authentication methods are required. In this case, it'll ask for a password and the verification code.
- > **BEWARE!**
- With `keyboard-interactive`, the authentication procedure will ask for all the inputs it requires to validate the login: in this case, the user password and the verification (TOTP) code. Therefore, don't add the `password` method before the `keyboard-interactive` one or the login procedure won't work properly (the `password` method will require the TOTP code, **NOT** the user password).
+ > [!WARNING]
+ > **The keyboard interactive method already asks for all the required authentication inputs**\
+ > With `keyboard-interactive`, the authentication procedure will ask for all the inputs it requires to validate the login. In this case, the user password and the verification (TOTP) code. Therefore, **do not add the `password` method before the `keyboard-interactive` one** or the login procedure won't work properly (the `password` method will require the TOTP code, **NOT** the user password).
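+
+   Before restarting the service in the next step, you can optionally validate the modified configuration for syntax errors:
+
+   ~~~sh
+   # Test mode: checks the sshd configuration for validity without starting the daemon.
+   $ sshd -t
+   ~~~
+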
5. With all the previous changes done, restart the `sshd` daemon to apply the changes.
- ~~~bash
+ ~~~sh
$ systemctl restart sshd.service
~~~
6. Using an ssh client, like [**PuTTY**](https://www.putty.org/) or [**Bitvise**](https://www.bitvise.com/), check that the server asks you for your user password and your _verification code_ (your TOTP code). Through **PuTTY**, the ssh login for the `root` user will look like the following.
- ~~~bash
- Using username "root".
+ ~~~sh
+ login as: root
Keyboard-interactive authentication prompts from server:
- | Password:
- | Verification code:
+ Password:
+ Verification code:
End of keyboard-interactive prompts from server
~~~
-You have enabled TFA authentication when connecting through SSH to your server for all users. This is better than just using the user password to log in through ssh, but we can improve on this by using ssh key pairs instead of passwords. I'll show you how in a later guide in which I'll explain how to harden the `sshd` configuration.
+You have enabled TFA authentication when connecting through SSH to your server for all users. This is better than just using the user password to log in through ssh, but we can improve on this by using ssh key pairs instead of passwords. I'll show you how in a later chapter where I'll explain how to harden the `sshd` configuration.
## Enforcing TFA TOTP for accessing the Proxmox VE web console
The Proxmox VE web console offers the option of enabling TFA for its users.
-> **BEWARE!**
-> This TFA configuration is only about the web console login, **it has nothing to do** with the local or remote SSH shell terminal access.
+> [!IMPORTANT]
+> **This TFA configuration is only about PVE's web console login**\
+> Proxmox VE's TFA configuration **has nothing to do** with the local or remote SSH shell terminal access.
-To increase the security of your PVE setup, let's enable the **two-factor authentication** (**TFA**) of the main `root` user on the PVE's web console. Also, you'll reuse the **TOTP** token already generated in the previous section with the `google-authenticator` command.
+To increase the security of your PVE setup, let's enable the _Two-Factor Authentication_ (_TFA_) of the main `root` user on the PVE's web console. Also, you'll reuse the TOTP token already generated with the `google-authenticator` command.
-1. Go to the **TFA** option available in the `root@pam` menu list, as shown below.
+> [!WARNING]
+> [Generate your TOTP token with the `google-authenticator` procedure first!](#2-configuration-of-a-google-authenticator-totp-token)
- 
+1. Go to the `TFA` option available in the `root@pam` menu:
-2. The window below offers two tabs. Stick to the one opened by default, the **TOTP** one.
+ 
- 
+2. You'll end up at the `Datacenter\Permissions` level feature called `Two Factor`:
- > **BEWARE!**
- > The **User name** field is the label applied by PVE to the generated TOTP token. Use this value as **label** for generating the TOTP token with the `google-authenticator` procedure explained in the previous section.
+ 
-3. With your previously `google-authenticator` generated TOTP token, input it's secret key in the `Secret` field and the issuer in the `Issuer Name`. The autogenerated QR in this TOTP window **won't be the same** as the one you got previously with the `google-authenticator` command. So, if you scan it, it may generate a new entry in your authenticator app, but **it will generate the same codes** as the TOTP token you generated first.
+3. Unfold the `Two Factor` page's `Add` menu and pick the `TOTP` option:
- > **BEWARE!**
- > Generate your TOTP token with the `google-authenticator` procedure first!
+ 
-4. To validate the TOTP token, input the **Verification Code** given by your TOTP app. After this, you'll have the TFA TOTP mode enabled in the `root` account for the web console with the **same TOTP token** that you have enabled for the remote ssh access.
+4. The `TOTP` option raises the `Add a TOTP login factor` window. Setup in it the TOTP-based TFA procedure for your `root` user:
-> **BEWARE!**
-> This procedure only enables TFA for login in the web console, **not for accessing through a shell terminal**.
+ 
-## Enforcing TFA TOTP as a default requirement for `pam` realm
+5. Fill the fields of the TOTP window [using the details of the TOTP token you previously generated with the `google-authenticator` command](#2-configuration-of-a-google-authenticator-totp-token):
+
+ - `User`\
+ Identifies the user affected by this TOTP method. This field corresponds with the `label` you specified in the `google-authenticator` method (`-l` option).
+
+ - `Description`\
+ Just a string describing this TOTP login factor. There's no correspondence with any of the parameters specified in the `google-authenticator` method.
-To ensure that all `pam` users use TFA when accessing the PVE's web console, you can configure that realm to have TFA enforced by default. Select the `Datacenter` level, open the `Permissions` options set and click on `Authentication`.
+ - `Secret`\
+ This is where the secret key must be set. This field corresponds with the secret key the `google-authenticator` command returns as the generated TOTP.
+
+ - `Issuer Name`\
+ The name identifying the issuer of this TOTP. This field is equivalent to the issuer value specified to the `google-authenticator` command (`-i` option).
+
+ > [!WARNING]
+   > **The autogenerated QR in this TOTP window won't be the same as the one you got previously with the `google-authenticator` command**\
+ > If you scan it, it may generate a new entry in your authenticator app, but **it will generate the same codes** as the TOTP token you generated first.
+
+6. To enable the `Add` button, **you must validate the TOTP token**. Enter the `Verify Code` given by your TOTP app, then click `Add`. Your new TOTP login factor will appear listed in the `Two Factor` page:
+
+ 
+
+After all these steps, you'll have the TFA TOTP mode enabled in the `root` account for the web console with the **same TOTP token** you enabled for the remote ssh access.
+
+> [!IMPORTANT]
+> This procedure only enables the TFA method for login through Proxmox VE's web console, **not for accessing through a shell terminal**.
+
+## Enforcing TFA TOTP as a default requirement for `pam` realm
-
+To ensure that all `pam` users use TFA when accessing the PVE's web console, you can configure that realm to have TFA enforced by default. Select the `Datacenter` level, open the `Permissions` options set and click on `Realms`.
-The screen you've just reached lists the authentication realms currently available in your PVE installation. Since we're working with a clean Proxmox VE installation, you'll only see two realms there: `pam` and `pve`. The main `root` user of your PVE system is in the `pam` realm.
+
-1. Choose the `pam` realm and press `Edit` in the button menu above. You'll get the following window.
+The screen you've just reached lists the authentication realms currently available in your PVE installation. In a clean Proxmox VE installation like the one used in this guide, you'll only have two realms: `pam` and `pve`. The main `root` user of your PVE system is in the `pam` realm.
- 
+1. Choose the `pam` realm and press `Edit` in the button menu above:
-2. Unfold the `Require TFA` list, currently set as `none`, and you'll see all the options available.
+ 
- 
+2. After pressing the `Edit` button, you will get into the `Edit: Linux PAM` window:
-3. Choose the `OATH/TOTP` option, and notice how the window changes by adding two new parameters related to TOTP: `Time Step` and `Secret Length`.
+ 
- 
+3. Unfold the `Require TFA` list, currently set as `none`, and you'll see all the options available.
-4. Leave those new parameters with their **default values**, they're good as they are. Accept the change to the `pam` realm by pressing `OK`, and you'll get back to the authentication realms list.
+ 
- 
+4. Choose the `OATH/TOTP` option. Notice how two new TOTP-related fields appear in the window: `Time Step` and `Secret Length`.
- Notice how, in the TFA column, the `pam` realm shows the string `oath`. This means that TFA is enforced in that realm with a **OATH/TOTP** token system. From now on, any user in that realm will be required to give a TOTP code, apart from their username and password, to access the PVE web console.
+ 
-### _In Proxmox VE **v7.2-z**, `Authentication` is now `Realms`_
+5. Leave the TOTP parameters at their **default values**; they're good as they are. Accept the change to the `pam` realm by pressing `OK`, and you'll get back to the authentication realms list.
-In the `v7.2-z` releases (and maybe also in the `v7.1-z` ones) of Proxmox VE have the `Permissions > Authentication` section, found at the `Datacenter` level, renamed to the more adequate `Permissions > Realms`.
+ 
-
+   See in the TFA column how the `pam` realm now shows the string `oath`. This means that TFA is enforced in that realm with an **OATH/TOTP** token system. From now on, any user in that realm will be required to authenticate with a TOTP code, together with their username and password, to access the PVE web console.
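+
+As a possible shell alternative to these web console steps, the same requirement can be set on the `pam` realm with the `pveum` tool. This is a hedged sketch; double-check the exact option syntax for your PVE release in `man pveum` before using it:
+
+~~~sh
+# Require an OATH/TOTP second factor for every user in the pam realm.
+$ pveum realm modify pam -tfa type=oath
+~~~
+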
## Incompatibility of PVE web console login with TFA enforced local shell access
At the beginning of this guide, I explicitly stated that I wouldn't enable the TFA token system for the local shell login. This is due to an incompatibility between the Proxmox VE web console login and the modification required in the underlying Debian system's `pam` configuration.
-Since this is not documented anywhere as far as I've seen, let me illustrate you the problem in a few steps.
+Since this is not documented anywhere as far as I've seen, I'll illustrate the problem here in a few steps.
1. Open a remote ssh terminal and log in as `root` (again, the only user we have at this point). Then `cd` to the `/etc/pam.d` directory.
- ~~~bash
+ ~~~sh
$ cd /etc/pam.d
~~~
2. The file to edit is `common-auth`, so first let's make a backup of it.
- ~~~bash
- $ cp common-auth common-auth.bkp
+ ~~~sh
+ $ cp common-auth common-auth.orig
~~~
3. **Append** to the `common-auth` file the following line.
- ~~~bash
+ ~~~sh
auth required pam_google_authenticator.so
~~~
- > **BEWARE!**
- > Don't close the remote session, and leave the file open but be sure to save the change above.
+ > [!WARNING]
+ > **Do not close the remote session**\
+ > Also, leave the `common-auth` file open but be sure to save the change above.
4. Now try to log in to the PVE web console. You'll notice the login takes longer than usual.
- 
+ 
5. After a few seconds, the login will raise a failure error.
- 
+ 
- Notice that you won't even reach the TFA form to input your TOTP code. PVE just raises this error window and doesn't go further than that.
+ Notice that you won't even reach the TFA form to input your TOTP code. PVE just raises this error window and does not allow you to go further than that.
6. Go back to your remote terminal session. Remove the line you added to the `common-auth` file and save the change, or just restore the backup.
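+
+   A quick way of restoring the backup made in step 2:
+
+   ~~~sh
+   # Overwrite the modified file with the untouched backup copy.
+   $ cp /etc/pam.d/common-auth.orig /etc/pam.d/common-auth
+   ~~~
+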
7. Try to log into the web console again. It should work as usual now.
-This conflict is not really that surprising. Proxmox VE web console needs some privileged access to its underlying Debian system, and adding the TFA restriction in the `pam` configuration for local authorization messes up with it.
+This conflict is not really that surprising. Proxmox VE web console needs some privileged access to its underlying Debian system, and adding the TFA restriction in the `pam` configuration for local authorization may interfere with it.
-Probably, this is just a problem of limiting the TFA restriction to the users and groups that should have it enforced. Still, this problem is not documented anywhere and messing with the system's PAM configuration is risky. Therefore it's better to leave it as it is, although being aware that the local access to the physical server **doesn't** have TFA enabled.
+This is probably just a problem of limiting the TFA restriction to the users and groups that should have it enforced. Still, this problem is not documented anywhere and messing with the system's PAM configuration is risky. Therefore, it's better to leave it as it is, while being aware that local access to the physical server **does not** have TFA enabled.
-> **BEWARE!**
-> This problem not only happens in the **7.0-14** version of Proxmox Virtual Environment, but also in previous versions too.
+> [!IMPORTANT]
+> **This problem does not only happen in the 9.0 releases of Proxmox Virtual Environment**\
+> I have seen this problem happening in previous versions too.
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/pam.d`
- `/etc/ssh`
- `/root`
-### _Files_
+### Files
+- `/etc/pam.d/common-auth`
+- `/etc/pam.d/common-auth.orig`
- `/etc/pam.d/sshd`
- `/etc/pam.d/sshd.orig`
- `/etc/ssh/sshd_config`
@@ -371,30 +457,27 @@ Probably, this is just a problem of limiting the TFA restriction to the users an
## References
-### _About TFA_
+### About TFA and Google Authenticator
- [How To Set Up Multi-Factor Authentication for ssh on Ubuntu 20.04](https://www.digitalocean.com/community/tutorials/how-to-set-up-multi-factor-authentication-for-ssh-on-ubuntu-20-04)
- [How to set up two-factor authentication in Linux](https://www.techrepublic.com/article/how-to-setup-two-factor-authentication-in-linux/)
- [How to add two-factor authentication to Linux with Google Authenticator](https://www.techradar.com/how-to/how-to-add-two-factor-authentication-to-linux-with-google-authenticator)
- [How To Set Up Multi-Factor Authentication for ssh on Ubuntu 16.04](https://www.digitalocean.com/community/tutorials/how-to-set-up-multi-factor-authentication-for-ssh-on-ubuntu-16-04)
-- [How to Set Up Two-Factor Authentication in Ubuntu?](https://www.ultimatetech.org/set-two-factor-authentication-ubuntu/)
-- [Proxmox VE User Management Two-factor authentication](https://pve.proxmox.com/wiki/User_Management#_two_factor_authentication)
-- [Proxmox VE OATH(TOTP) Authentication](https://pve.proxmox.com/wiki/OATH(TOTP)_Authentication)
+- [Proxmox VE User Management Two-factor authentication](https://pve.proxmox.com/wiki/User_Management#pveum_tfa_auth)
- [Set up two-factor authentication by using Google Authenticator](https://docs.rackspace.com/support/how-to/set-up-two-factor-authentication-by-using-google-authenticator/)
-- [Google Authenticator OpenSource](https://github.com/google/google-authenticator)
+- [Google Authenticator OpenSource (archived)](https://github.com/google/google-authenticator)
-### _Configuring ssh login with password + TOTP_
+### Configuring ssh login with password + TOTP
- [SSHD Denies access with password + google-authenticator combo](https://unix.stackexchange.com/questions/513011/sshd-denies-access-with-password-google-authenticator-combo)
-### _About PAM_
+### About Linux PAM
-- [The Linux-PAM System Administrators' Guide](http://linux-pam.org/Linux-PAM-html/Linux-PAM_SAG.html)
+- [Linux PAM (Pluggable Authentication Modules for Linux) project](https://github.com/linux-pam/linux-pam)
- [Debian Reference. 4.5. PAM and NSS](https://www.debian.org/doc/manuals/debian-reference/ch04.en.html#_pam_and_nss)
- [How to Configure and Use PAM in Linux](https://www.tecmint.com/configure-pam-in-centos-ubuntu-linux/)
-- [Enhance Linux Authentication with Linux-PAM](https://likegeeks.com/linux-pam-easy-guide/)
- [Autenticación PAM – Redes PYMES (in Spanish)](https://blog.desdelinux.net/autenticacion-pam-redes-pymes/)
## Navigation
-[<< Previous (**G006. Host configuration 04**)](G006%20-%20Host%20configuration%2004%20~%20Removing%20subscription%20warning.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G008. Host hardening 02**) >>](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md)
+[<< Previous (**G006. Host configuration 04**)](G006%20-%20Host%20configuration%2004%20~%20Removing%20Proxmox's%20subscription%20warning.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G008. Host hardening 02**) >>](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md)
diff --git a/G008 - Host hardening 02 ~ Alternative administrator user.md b/G008 - Host hardening 02 ~ Alternative administrator user.md
index 804c395..02eb656 100644
--- a/G008 - Host hardening 02 ~ Alternative administrator user.md
+++ b/G008 - Host hardening 02 ~ Alternative administrator user.md
@@ -1,63 +1,82 @@
# G008 - Host hardening 02 ~ Alternative administrator user
-In the previous guides, you've been using the `root` user to do all the set up in your Proxmox VE system. That's fine for earlier configuration steps, but it's not recommended to keep on using it as your server's everyday administrator user.
-
-Since `root` is **the superuser with all the privileges**, using it directly on any Linux server has always the potential for creating all sorts of problems (security related or of any other kind). To mitigate those problems, the recommended thing to do is to create an alternative administrator user with `sudo` privileges and use it instead of `root`.
+- [Avoid using the root user](#avoid-using-the-root-user)
+- [Understanding the Proxmox VE user management and the realms](#understanding-the-proxmox-ve-user-management-and-the-realms)
+ - [Always create your `pam` users at the OS level first](#always-create-your-pam-users-at-the-os-level-first)
+- [Creating a new system administrator user for a Proxmox VE node](#creating-a-new-system-administrator-user-for-a-proxmox-ve-node)
+ - [Creating the user with sudo privileges in Debian](#creating-the-user-with-sudo-privileges-in-debian)
+ - [Assigning a TOTP code to the new user](#assigning-a-totp-code-to-the-new-user)
+ - [Testing `sudo` with the new administrator user](#testing-sudo-with-the-new-administrator-user)
+ - [Creating a system administrator group in Proxmox VE](#creating-a-system-administrator-group-in-proxmox-ve)
+ - [Enabling the new administrator user in Proxmox VE](#enabling-the-new-administrator-user-in-proxmox-ve)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [Proxmox VE](#proxmox-ve)
+ - [PAM](#pam)
+- [Navigation](#navigation)
+
+## Avoid using the root user
+
+In the previous chapters, you've been using the `root` user to work in your Proxmox VE system's setup. That's fine for earlier configuration steps, but it is absolutely not recommended to keep on using it as your server's everyday administrator user.
+
+Since `root` is **the superuser with all the privileges**, using it directly on any Linux server risks inducing all sorts of security or other issues. To mitigate those potential problems, it is better to create an alternative administrator user with `sudo` privileges and use it instead of `root`.
## Understanding the Proxmox VE user management and the realms
-Proxmox VE can work with up to five authentication realms: pam, Proxmox VE authentication server, LDAP, Microsoft Active Directory and OpenId Connect. The last four can be considered external and shared among all the nodes of a PVE cluster. But what about the remaining `pam`?
+Proxmox VE can work with up to five authentication realms: pam, Proxmox VE authentication server, LDAP, Microsoft Active Directory and OpenID Connect. The last four can be considered external and shared among all the nodes of a PVE cluster. But what about the remaining `pam`?
+
+The `pam`, which stands for **Pluggable Authentication Module**, is the standard authentication system for any Linux distribution, including the Debian one running your standalone PVE node. But **this `pam` realm is strictly local**, bounded just to the node itself, and **not shared** in any way with any other computer.
-The `pam`, which stands for **Pluggable Authentication Module**, is the standard authentication system for any Linux distribution, including the Debian 11 running your standalone PVE node. But this realm is **strictly local**, bounded just to the node itself, and **not shared** in any way with any other computer.
+It is in the `pam` realm of the PVE node where the `root` user exists, like in any other Linux distribution. This same user is also registered within the user management of your Proxmox VE, and it is the only user you have initially to log in to the Proxmox VE web console. This means that the `root` user was created first in the `pam` realm, and then linked to your node's Proxmox VE authentication system by the installation process.
-Is in the `pam` realm of the PVE node where the `root` user exists, like in any other Linux distribution. But this user is also registered within the user management of your Proxmox VE, and it's the only user you have initially to log in the Proxmox VE web console. This means that the `root` user was created first in the `pam` realm, and then linked to your node's Proxmox VE authentication system by the installation process.
+Let me show you how to find the user management screen in your PVE's web console. It is available at the `Datacenter` level, as an option called `Users` under `Permissions`.
-At this point, let me show you how to find the user management screen in your PVE's web console. It's at the `Datacenter` level, as an option called `Users` under `Permissions`.
+
-
+So, if all the PVE's user management is handled at the `Datacenter` level, does that mean that PVE takes care somehow of syncing the `pam` realm among the nodes of a cluster (in case you were working on one)? Short answer, no. Changes in the `pam` realm only apply to the node you logged in to. Also, from what I've tested, if you create a user directly from the PVE's `Users` page and assign it to the `pam` realm, **it won't create the user at the Debian OS level**.
-So, if all the PVE's user management is handled at the `Datacenter` level, does that mean that PVE takes care somehow of syncing the `pam` realm among the nodes of a cluster (in case we were working on one)? Short answer, _it's not documented_. From what I've seen, if you create a user directly from the PVE's user management and assign it to the `pam` realm, **it won't create the user at the Debian OS level**.
+### Always create your `pam` users at the OS level first
-I infer that creating a user in the `pam` realm in Proxmox VE always implies two basic steps:
+In conclusion, creating a `pam` realm's user in Proxmox VE always implies two basic steps:
1. **Creating the user** directly in the node at the Debian OS level.
-2. **Enabling it in the Proxmox VE user management**, either through the web console or by the shell commands PVE also provides for this and other administrative tasks.
+2. **Enabling it in the Proxmox VE user management**, either through the web console or by the shell commands Proxmox VE also provides for this and other administrative tasks.
-With just one standalone node, creating one or two very particular system users is no big deal. In a cluster, you would need to automate this with some shell scripting or other tools.
+> [!NOTE]
+> **With just one standalone node, creating one or two very particular system users is no big deal**\
+> However, in a cluster you would need to automate this via shell scripting or some other tools.
## Creating a new system administrator user for a Proxmox VE node
-Here you're about to create an alternative, and a bit safer, administrator user for your system to use instead of `root`.
+Here you are about to create an alternative, and a bit safer, administrator user for your system to use instead of `root`.
-In a normal Linux-based server, you would just create a standard user and then give it `sudo` privileges. But such user also has to hold a certain role and concrete privileges within your Proxmox VE platform, which are security concerns at the PVE's `Datacenter` level, not of any particular node.
+In a normal Linux-based server, you would just create a standard user and then give it `sudo` privileges. But such a user also has to hold a certain role and concrete privileges within your Proxmox VE platform. Those privileges are security concerns at the PVE's `Datacenter` level, not just of any particular PVE node.
So, you'll need to perform a number of steps to create a new administrative user in your Proxmox VE's `pam` realm.
-### _Creating the user with sudo privileges in Debian_
+### Creating the user with sudo privileges in Debian
-1. Open a terminal as `root`. Then create a user called, for instance, `mgrsys` with the `adduser` command.
+1. Open a terminal as `root`. Then create a user called, for instance, `mgrsys` with the `adduser` command:
- ~~~bash
+ ~~~sh
$ adduser mgrsys
~~~
- > **BEWARE!**
- > Use a criteria for naming your users, and make those names personalized. This way you'll have a better chance to detect any strange behavior related to your users in the system's logs. For instance, you could follow a pattern like `[role][initials]` or `[role][name][surname]`.
+ > [!IMPORTANT]
+   > **Stick to a naming convention for your users!**\
+   > Follow a consistent convention for naming your users, and make those names individualized. This way you'll have a better chance to detect any strange behavior related to your users in the system's logs. For instance, you could follow a pattern like `[role][initials]` or `[role][name][surname]`.
- The command will ask you the password for the new user first, and then a few informative details like the user's full name. The output will be something like this.
+ The command will ask you the password for the new user first, and then a few informative details like the user's full name. The whole output should be something like this:
- ~~~bash
- Adding user `mgrsys' ...
- Adding new group `mgrsys' (1000) ...
- Adding new user `mgrsys' (1000) with group `mgrsys' ...
- Creating home directory `/home/mgrsys' ...
- Copying files from `/etc/skel' ...
+ ~~~sh
New password:
Retype new password:
passwd: password updated successfully
Changing the user information for mgrsys
Enter the new value, or press ENTER for the default
- Full Name []: PVE System Manager
+ Full Name []: PVE system's manager
Room Number []:
Work Phone []:
Home Phone []:
@@ -65,84 +84,80 @@ So, you'll need to perform a number of steps to create a new administrative user
Is the information correct? [Y/n]
~~~
-2. Add the new user to the `sudo` group.
+2. Add the new user to the `sudo` group:
- > **BEWARE!**
- > Before you proceed with this step, be sure of having the `sudo` package installed in your PVE node:
- `apt install sudo`
+ > [!WARNING]
+ > **Ensure you have `sudo` installed in your PVE node**\
+ > Before you proceed with this step, be sure of having the `sudo` package installed in your PVE node:
+ >
+ > ~~~sh
+ > $ apt install sudo
+ > ~~~
- ~~~bash
+ ~~~sh
$ adduser mgrsys sudo
Adding user `mgrsys' to group `sudo' ...
Adding user mgrsys to group sudo
Done.
~~~
-### _Assigning a TOTP code to the new user_
+### Assigning a TOTP code to the new user
-1. Switch to your new user by using the `su` command, and go to its `$HOME` directory.
+1. Switch to your new user by using the `su` command, and go to its `$HOME` directory:
- ~~~bash
+ ~~~sh
$ su mgrsys
$ cd
~~~
2. Create a TOTP token for the new user with the `google-authenticator` program. Use the automated method with a command like the following:
- ~~~bash
- $ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q UTF8 -i pve.deimos.cloud -l mgrsys@pam
+ ~~~sh
+ $ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q utf8 -i pve.homelab.cloud -l mgrsys@pam
~~~
- > **BEWARE!**
+ > [!NOTE]
+ > **Do not forget the `@pam` suffix in your TOTP token's label!**\
> Notice how the label (`-l`) has an `@pam` suffix after the username, like it is with `root`.
3. Copy all the codes given by the `google-authenticator` command in a safe location, like a password manager.
-### _Testing `sudo` with the new administrator user_
+### Testing `sudo` with the new administrator user
After you've checked that your new administrator user can connect through ssh, make a simple test to see if this user has sudo privileges. For instance, you could try to execute an `ls` with `sudo`.
- ~~~bash
- $ sudo ls -al
-
- We trust you have received the usual lecture from the local System
- Administrator. It usually boils down to these three things:
-
- #1) Respect the privacy of others.
- #2) Think before you type.
- #3) With great power comes great responsibility.
-
- [sudo] password for mgrsys:
- ~~~
-
-If `sudo` is working for your new user, the first time you use the command it'll show you a warning about the responsibility of using it and then it'll require the user's password.
+~~~sh
+$ sudo ls -al
+~~~
-### _Creating a system administrator group in Proxmox VE_
+### Creating a system administrator group in Proxmox VE
-The most convenient way of assigning roles and privileges to users within the Proxmox VE platform is by putting them in groups that already have the required roles and privileges. So, let's create a PVE platform managers group.
+The most convenient way of assigning roles and privileges to users within the Proxmox VE platform is by putting them in groups that already have the required roles and privileges. Hence, create a PVE platform managers group:
-1. Open a shell terminal as `root` and create the group with the following PVE command.
+1. Open a shell terminal as `root` and create the group with the following PVE command:
- ~~~bash
- $ pveum groupadd pvemgrs -comment "PVE System Managers"
+ ~~~sh
+ $ pveum groupadd pvemgrs -comment "PVE system's managers"
~~~
- > **BEWARE!**
- > Be careful of not using a name too similar or equal to the ones already used for **existing groups in the underlying Debian OS**, like `sys` or `adm`, to avoid possible confusions.
+ > [!NOTE]
+   > **Do not give confusing names to your groups**\
+   > Avoid using a name too similar or identical to the ones already used for **existing groups in the underlying Debian OS**, like `sys` or `adm`.
+ >
> You can check the existing Debian groups in the `/etc/group` file.
-2. Assign the Administrator role to the newly created group.
+2. Assign the Administrator role to the newly created group:
- ~~~bash
+ ~~~sh
$ pveum aclmod / -group pvemgrs -role Administrator
~~~
-3. Check the groups creation by opening the file `/etc/pve/user.cfg`. In it, you should see a content like the following.
+3. Check the group creation by opening the file `/etc/pve/user.cfg`. In it, you should see something like this:
- ~~~bash
- user:root@pam:1:0:::invalid@email.com::x!oath:
+ ~~~sh
+ user:root@pam:1:0:::pveroot@homelab.cloud::x:
- group:pvemgrs::PVE Managers:
+ group:pvemgrs::PVE system's managers:
@@ -150,66 +165,65 @@ The most convenient way of assigning roles and privileges to users within the Pr
~~~
   The file's content can be explained as follows:
+
- The `user:` line describes the PVE's `root` user.
- The `group:` line corresponds to your newly created system administrator group.
- The `acl:` line assigns the PVE role `Administrator` to your new group.
-This new group can also be seen in the user management section of your PVE's web console. Click on `Datacenter` and unfold the `Permissions` list, then click on `Groups`.
-
-
+ This new group can also be seen in the user management section of your PVE node's web console. Click on `Datacenter` and unfold the `Permissions` list, then click on `Groups`.
-> **BEWARE!**
-> This group you've just created is just a Proxmox VE one, **it's not part of the underlying Debian groups**. Thus, you won't see it listed in the `/etc/group` file.
+ 
-### _Enabling the new administrator user in Proxmox VE_
+ > [!IMPORTANT]
+ > **The new group you've just created is just a Proxmox VE one**\
+   > The `pvemgrs` group **is not part of the underlying Debian groups**. Therefore, you won't see it listed in the `/etc/group` file.
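+
+   If you prefer checking from the shell, the `pveum` listing subcommands should show the new group and its ACL too (an optional alternative check):
+
+   ~~~sh
+   # List the PVE groups and the access control entries.
+   $ pveum group list
+   $ pveum acl list
+   ~~~
+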
-The `mgrsys` user you created earlier exists within the Debian 11 OS, but not in the Proxmox VE platform yet. To do so, you have to create the same user within the PVE platform too.
+### Enabling the new administrator user in Proxmox VE
-1. To create the user in just one line, type a command line (as `root`) like the following.
+The `mgrsys` user you created earlier exists within the Debian OS, but not in the Proxmox VE platform yet. To do so, you have to create the same user within the PVE platform too.
- ~~~bash
- $ pveum user add mgrsys@pam -comment "PVE System Manager" -email "mgrsys@replace.this" -firstname "PVE" -lastname "SysManager" -groups pvemgrs
- ~~~
-
- The command line above has created the `mgrsys` user within the `pam` realm (`@pam`) and also included it in the `pvemgrs` group. You can check this on the PVE web console.
+1. To create the user in just one line, type a command line (as `root`) like the following:
- 
+ > [!WARNING]
+   > Do not forget to replace the `-email` example value shown here with the one you want!
-2. Since the pam realm has TFA enforced, the user needs their TOTP enabled too. This can be done directly from the user management screen, by selecting the user and clicking on the `TFA` button above.
+ ~~~sh
+ $ pveum user add mgrsys@pam -comment "PVE system's manager" -email "mgrsys@homelab.cloud" -firstname "PVE" -lastname "SysManager" -groups pvemgrs
+ ~~~
- 
+ The command line above creates the `mgrsys` user within the PVE node's `pam` realm (`@pam`), while also including it in the `pvemgrs` group. You can check this on the PVE web console:
-3. Fill the form like it was done for the root user in the previous [**G007** guide](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md), reusing the same TOTP secret generated before for this new user with the `google-authenticator` program.
+ 
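+
+   The same check can also be done from the shell by listing the PVE users (an optional alternative):
+
+   ~~~sh
+   # The new mgrsys@pam user should appear here, assigned to the pvemgrs group.
+   $ pveum user list
+   ~~~
+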
- 
+2. Since the PVE's pam realm has the two factor authentication enforced, the `mgrsys` user needs its TOTP enabled too. Go then to the `Datacenter\Permissions\Two Factor` page and `Add` a new `TOTP` entry:
- > **BEWARE!**
- > Remember that TOTP codes must not be shared at all!
+ 
-4. With the TOTP set up correctly, the new user can try to log in the Proxmox VE web console.
+3. Fill the TFA form for the `mgrsys` user reusing the same TOTP details and secret previously set with the `google-authenticator` program:
- > **BEWARE!**
- > The password is **the one** set up at the Debian OS level with the `adduser` command.
+ 
- 
+ > [!WARNING]
+   > Remember to never share your TOTP codes!
- The new `mgrsys` user will have access to the same tabs and options as `root`, thanks of being part of a PVE group with full administrative rights.
+4. With the TOTP set up correctly, you can try logging in to the Proxmox VE web console with your new `mgrsys` user:
-### _TFA has its own section in Proxmox VE **v7.2-z**_
+ > [!IMPORTANT]
+ > The `mgrsys` user's password is the one specified at the Debian OS level with the `adduser` command.
-There's no `TFA` button any more in the `Permissions > Users` section at the `Datacenter` level of the `v7.2-z` releases (and maybe also in the previous `v7.1-z` ones). Instead, you can manage all two factor tokens from its own separate section under `Permissions > Two Factor`.
+ 
-
+   The new `mgrsys` user has access to the same tabs and options as `root`, thanks to being part of a PVE group with full administrative rights.
## Relevant system paths
-### _Directories_
+### Directories
- `$HOME`
- `/etc/pve`
- `/etc/pve/priv`
-### _Files_
+### Files
- `$HOME/.google_authenticator`
- `/etc/pve/user.cfg`
@@ -218,14 +232,14 @@ There's no `TFA` button any more in the `Permissions > Users` section at the `Da
## References
-### _Proxmox VE user management_
+### [Proxmox VE](https://pve.proxmox.com/wiki/Main_Page)
-- [Proxmox VE, admin guide. User Management](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_user_management)
-- [Proxmox VE, admin guide. Authentication Realms](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pveum_authentication_realms)
-- [Proxmox VE, admin guide. User Management, command line tool](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_command_line_tool)
-- [Proxmox VE, admin guide. User Management, real world examples](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_real_world_examples)
+- [Administration Guide. User Management](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#user_mgmt)
+ - [Authentication Realms](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pveum_authentication_realms)
+ - [Command-line Tool](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_command_line_tool)
+ - [Real World Examples](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_real_world_examples)
-### _PAM_
+### PAM
- [PAM, Pluggable Authentication Module](https://en.wikipedia.org/wiki/Pluggable_authentication_module)
diff --git a/G009 - Host hardening 03 ~ SSH key pairs and sshd service configuration.md b/G009 - Host hardening 03 ~ SSH key pairs and sshd service configuration.md
index 6971937..174a07b 100644
--- a/G009 - Host hardening 03 ~ SSH key pairs and sshd service configuration.md
+++ b/G009 - Host hardening 03 ~ SSH key pairs and sshd service configuration.md
@@ -1,191 +1,253 @@
# G009 - Host hardening 03 ~ SSH key pairs and `sshd` service configuration
-To harden the ssh remote connections to your standalone PVE node there are two main things to do: use SSH key pairs instead of passwords, and adjust the `sshd` service configuration in a certain manner.
-
-## SSH key pairs
-
-### _Stronger key pair for the `root` user_
-
-The `root` user in your PVE host already comes with a generated ssh key pair. Open a terminal as `root` and list the contents of the `.ssh` directory.
-
-~~~bash
+- [Harden your SSH connections with key pairs](#harden-your-ssh-connections-with-key-pairs)
+- [Generating SSH key pairs](#generating-ssh-key-pairs)
+ - [Stronger key pair for the `root` user](#stronger-key-pair-for-the-root-user)
+ - [New key pair for non-`root` users](#new-key-pair-for-non-root-users)
+ - [Export your key pairs](#export-your-key-pairs)
+- [Hardening the `sshd` service](#hardening-the-sshd-service)
+ - [Disabling common pam authentication in SSH logins](#disabling-common-pam-authentication-in-ssh-logins)
+ - [Adjusting the `sshd` daemon configuration](#adjusting-the-sshd-daemon-configuration)
+ - [Disabling IPv6 protocol](#disabling-ipv6-protocol)
+ - [Reducing the login grace period](#reducing-the-login-grace-period)
+ - [Disabling the `root` user on ssh](#disabling-the-root-user-on-ssh)
+ - [Reducing the number of authentication attempts](#reducing-the-number-of-authentication-attempts)
+ - [Disabling password-based logins](#disabling-password-based-logins)
+ - [Disabling X11 forwarding](#disabling-x11-forwarding)
+ - [Setting up user specific authentication methods](#setting-up-user-specific-authentication-methods)
+ - [Other possible changes in SSH configuration](#other-possible-changes-in-ssh-configuration)
+ - [Consideration about hardening the `sshd` service](#consideration-about-hardening-the-sshd-service)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [General SSH configuration](#general-ssh-configuration)
+ - [About SSH key pairs](#about-ssh-key-pairs)
+ - [User specific authentication methods in SSH](#user-specific-authentication-methods-in-ssh)
+ - [Particular sshd parameters](#particular-sshd-parameters)
+ - [About the `authorized_keys` file](#about-the-authorized_keys-file)
+ - [About disabling the `root` user](#about-disabling-the-root-user)
+ - [About SSH key generation and cryptosystems](#about-ssh-key-generation-and-cryptosystems)
+ - [Proxmox VE](#proxmox-ve)
+- [Navigation](#navigation)
+
+## Harden your SSH connections with key pairs
+
+To harden the SSH remote connections to your standalone PVE node, there are two main steps to take:
+
+- **First step**\
+ To set up and use SSH key pairs instead of passwords.
+
+- **Second step**\
+ To harden the `sshd` service configuration in a certain manner.
+
+## Generating SSH key pairs
+
+Here you will see how to generate strong SSH key pairs for the administrative users that already exist in your Proxmox VE server.
+
+### Stronger key pair for the `root` user
+
+The `root` user in your PVE host already comes with a generated SSH key pair found in its `$HOME` path. Open a terminal as `root` and list the contents of the `.ssh` directory:
+
+~~~sh
$ ls -al .ssh/
-total 24
-drwxr-xr-x 2 root root 4096 Mar 12 19:31 .
-drwx------ 4 root root 4096 Mar 16 09:55 ..
-lrwxrwxrwx 1 root root 29 Mar 12 19:31 authorized_keys -> /etc/pve/priv/authorized_keys
--rw-r----- 1 root root 117 Mar 12 19:31 config
--rw------- 1 root root 1811 Mar 12 19:31 id_rsa
--rw-r--r-- 1 root root 390 Mar 12 19:31 id_rsa.pub
+total 20
+drwx------ 2 root root 4096 Aug 21 12:01 .
+drwx------ 4 root root 4096 Aug 27 09:39 ..
+lrwxrwxrwx 1 root root 29 Aug 15 23:41 authorized_keys -> /etc/pve/priv/authorized_keys
+-rw-r----- 1 root root 117 Aug 15 23:41 config
+-rw------- 1 root root 3369 Aug 15 23:41 id_rsa
+-rw-r--r-- 1 root root 734 Aug 15 23:41 id_rsa.pub
~~~
-The `id_rsa` file has the private key, while the `id_rsa.pub` has the public key. The strength of any ssh key pair is directly related to their length, and you can check out the length of this key pair with `ssh-keygen`.
+The `id_rsa` file has the private key, while the `id_rsa.pub` has the public key. Since this is a key pair based on the [RSA cryptosystem](https://en.wikipedia.org/wiki/RSA_cryptosystem), its strength is directly related to the number of bits used to create it. You can check the bit length of this key pair with `ssh-keygen`.
-~~~bash
-$ cd .ssh/
-$ ssh-keygen -lf id_rsa
-2048 SHA256:AZ4mJD4OZcna253AaECzD97nYkjthwoZr5vyXiSACAA root@pve (RSA)
+~~~sh
+$ ssh-keygen -lf .ssh/id_rsa
+4096 SHA256:+c40qYIXrTV+b0KHWenrgHaD4a8AEPeIHFMwZVAUk5U root@pve (RSA)
~~~
-The key pair's length is **2048**, measured always in **bits**. This is fine, but if you want a stronger key pair do the following.
+As you can see in the output above, the key pair's length is `4096` bits, which is strong enough (at the time of writing) for an RSA key pair. But if you want a key pair based on the newer [_Ed25519_ signature scheme](https://en.wikipedia.org/wiki/EdDSA#Ed25519), you can use the `ssh-keygen` command as follows:
-1. Make a backup of the current `root`'s key pair.
+1. Make a backup of the current `root`'s key pair:
- ~~~bash
+ ~~~sh
$ cd .ssh/
$ cp id_rsa id_rsa.orig
$ cp id_rsa.pub id_rsa.pub.orig
~~~
-2. Delete the current key pair.
+2. Delete the current key pair:
- ~~~bash
+ ~~~sh
$ rm -f id_rsa id_rsa.pub
~~~
-3. Generate a new key pair with the `ssh-keygen` command.
+3. Generate a new key pair with the `ssh-keygen` command:
- ~~~bash
- $ ssh-keygen -t rsa -b 4096 -C "root@pve"
+ ~~~sh
+ $ ssh-keygen -t ed25519 -a 250 -C "root@pve"
~~~
The options specified mean the following.
- - `-t rsa` : Specifies the type of key to create, `rsa` in this case.
+ - `-t ed25519`\
+ Specifies the type of key to create, `ed25519` in this case.
+
+ - `-a 250`\
+     Makes the command apply the specified number of key derivation function (KDF) rounds (`250` here) when protecting the private key with a passphrase, which makes the passphrase harder to crack by brute force should the key file be stolen.
- - `-b 4096` : Specifies the number of bits in the key to create, or length. Here the number is `4096` bits.
+ - `-C "root@pve"`\
+     Sets a comment associated with the generated key pair, most commonly a string following the `[username]@[hostname]` schema.
- - `-C "root@pve"` : To set a comment associated with the generated key pair, most commonly used to put a string following the schema `[username]@[hostname]`.
+4. The ssh-keygen command will ask you two things:
-4. The ssh-keygen command will ask you two things.
+ - `Enter file in which to save the key (/root/.ssh/id_ed25519):`\
+     Leave this question empty; the default path is the right one.
- - `Enter file in which to save the key (/root/.ssh/id_rsa):`
- - `Enter passphrase (empty for no passphrase):`
+ - `Enter passphrase for "/root/.ssh/id_ed25519" (empty for no passphrase):`\
+     In general, you want to enter a good passphrase here (especially in a professional environment). However, for a homelab setting with no sensitive data involved, you might consider this unnecessary and leave this question empty. If you change your mind later, see the tip at the end of this subsection.
- Leave questions both empty by pressing `enter`. Then, the whole command's output will look like the following.
+ After answering the questions, the whole command's output will look like this:
- ~~~bash
- Enter file in which to save the key (/root/.ssh/id_rsa):
- Enter passphrase (empty for no passphrase):
+ ~~~sh
+ Generating public/private ed25519 key pair.
+ Enter file in which to save the key (/root/.ssh/id_ed25519):
+ Enter passphrase for "/root/.ssh/id_ed25519" (empty for no passphrase):
Enter same passphrase again:
- Your identification has been saved in /root/.ssh/id_rsa.
- Your public key has been saved in /root/.ssh/id_rsa.pub.
+ Your identification has been saved in /root/.ssh/id_ed25519
+ Your public key has been saved in /root/.ssh/id_ed25519.pub
The key fingerprint is:
- SHA256:W+egCvEe3JffgWQaShiRMkUEyVaVdnFoKOpYyd2e7Co root@pve
+ SHA256:F6Vq9wL1wjAMTtaJRcnTxCtElnV5bz51BpbBc9guWVg root@pve
The key's randomart image is:
- +---[RSA 4096]----+
- | ..B*o.o.o. |
- | * o.+ +. |
- | o *.+ o |
- | = .o. |
- | +. .oS.+ + |
- | . .+ o+= X . |
- | . +.= + o . |
- | Eo o.. . . . |
- | .+. . . |
+ +--[ED25519 256]--+
+ | +B=Oo oo.BE|
+ | +.+O o+. X +|
+ | ..+.+. o O |
+ | .*.o o B|
+ | S.= . =o|
+ | . + o ..|
+ | . . .|
+ | . |
+ | |
+----[SHA256]-----+
~~~
-5. The `ssh-keygen` command has generated a new key pair in your `root`'s the `.ssh` directory. You can verify this also with `ssh-keygen`.
+5. The `ssh-keygen` command has generated a new key pair in your `root`'s `.ssh` directory. You can verify this also with `ssh-keygen`:
- ~~~bash
- $ ssh-keygen -lf id_rsa
- 4096 SHA256:W+egCvEe3JffgWQaShiRMkUEyVaVdnFoKOpYyd2e7Co root@pve (RSA)
+ ~~~sh
+ $ ssh-keygen -lf id_ed25519
+ 256 SHA256:F6Vq9wL1wjAMTtaJRcnTxCtElnV5bz51BpbBc9guWVg root@pve (ED25519)
~~~
- It should return the same key fingerprint you saw right after generating the new key pair. Also, the bits length is 4096, not just 2046 like in the original key pair set.
+   It should return the same key fingerprint you saw right after generating the new key pair. Also, notice how the reported bit length is 256. Do not worry about this detail: Ed25519 keys are always 256 bits long, and their strength is not comparable bit-for-bit with RSA key lengths.
-6. The last bit to do is to authorize the public key part in the `authorized_keys` file, while also removing the original public key from it.
+6. The last important bit to do is to authorize the public key part in the `authorized_keys` file, while also removing the original public key from it:
- ~~~bash
+ ~~~sh
$ > authorized_keys
- $ cat id_rsa.pub >> authorized_keys
+ $ cat id_ed25519.pub >> authorized_keys
~~~
- > **BEWARE!**
+ > [!WARNING]
 > In a Proxmox VE system, the `root`'s `authorized_keys` is a symlink to `/etc/pve/priv/authorized_keys`, which is used by Proxmox VE to allow nodes in a cluster to communicate with each other through ssh. So, in a clustered PVE environment, it is expected to see the public keys of other nodes' `root` users authorized in this file.
-As a final consideration, you could remove (with `rm`) the backup of the original ssh key pair since you won't need it anymore.
-
-### _New key pair for non-`root` users_
+7. As a final consideration, you could also remove (with `rm`) the `.orig` backup of the original RSA-based SSH key pair since you won't need it anymore.
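+
+   For instance, assuming you are still inside `root`'s `.ssh` directory, that would be just:
+
+   ~~~sh
+   $ rm id_rsa.orig id_rsa.pub.orig
+   ~~~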
-At this point, you just have another user apart from the `root` one: your administrative or `sudo` user, created as `mgrsys` in the previous [**G008** guide](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md). This one doesn't even have a `.ssh` directory, but that will also be taken care of by the `ssh-keygen` command.
+> [!WARNING]
+> **You won't be able to login as `root` with its new SSH key pair yet!**\
+> Since the `publickey` method is still not enabled in the `sshd` service's configuration, the SSH server in your Proxmox VE node will reject your key pair if you attempt to login with it.
+>
+> Just use the password and the TOTP code for now. In the section [Hardening the `sshd` service](#hardening-the-sshd-service) you will finally enable the `publickey` method, allowing you to log in remotely with your key pair.
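+
+By the way, if you change your mind about the passphrase later, you can add, change or remove it on an already generated key with `ssh-keygen -p`, for instance on `root`'s new key (adapt the path for any other user):
+
+~~~sh
+$ ssh-keygen -p -f /root/.ssh/id_ed25519
+~~~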
-1. Open a shell as `mgrsys`, and verify that there's no `.ssh` folder in its `/home/mgrsys` directory.
+### New key pair for non-`root` users
- ~~~bash
- $ ls -al
- total 28
- drwxr-xr-x 2 mgrsys mgrsys 4096 Aug 20 14:25 .
- drwxr-xr-x 3 root root 4096 Aug 20 14:17 ..
- -rw------- 1 mgrsys mgrsys 155 Aug 20 14:25 .bash_history
- -rw-r--r-- 1 mgrsys mgrsys 220 Aug 20 14:17 .bash_logout
- -rw-r--r-- 1 mgrsys mgrsys 3526 Aug 20 14:17 .bashrc
- -r-------- 1 mgrsys mgrsys 135 Aug 20 14:21 .google_authenticator
- -rw-r--r-- 1 mgrsys mgrsys 807 Aug 20 14:17 .profile
- ~~~
-
- There shouldn't be one since new users don't have such `.ssh` folder.
+At this point, you just have another user apart from the `root` one: your administrative or `sudo` user, created as `mgrsys` in the previous [**G008** chapter](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md). This one does not even have a `.ssh` directory, but that will also be taken care of by the `ssh-keygen` command.
-2. Execute the `ssh-keygen` command to generate a ssh key pair and, as with `root`, just press enter on the questions.
+1. Open a shell as `mgrsys`, and verify that there is no `.ssh` folder in its `/home/mgrsys` directory:
- ~~~bash
- $ ssh-keygen -t rsa -b 4096 -C "mgrsys@pve"
- Generating public/private rsa key pair.
- Enter file in which to save the key (/home/mgrsys/.ssh/id_rsa):
+ ~~~sh
+ $ ls -al
+ total 36
+ drwx------ 3 mgrsys mgrsys 4096 Jul 11 20:26 .
+ drwxr-xr-x 3 root root 4096 Jun 12 17:07 ..
+ -rw------- 1 mgrsys mgrsys 466 Jun 12 17:36 .bash_history
+ -rw-r--r-- 1 mgrsys mgrsys 220 Jun 12 17:07 .bash_logout
+ -rw-r--r-- 1 mgrsys mgrsys 3526 Jun 12 17:07 .bashrc
+ drwxr-xr-x 4 mgrsys mgrsys 4096 Aug 20 2021 .config
+ -r-------- 1 mgrsys mgrsys 155 Jul 11 20:26 .google_authenticator
+ -rw-r--r-- 1 mgrsys mgrsys 807 Jun 12 17:07 .profile
+ -rw-r--r-- 1 mgrsys mgrsys 0 Jun 12 17:33 .sudo_as_admin_successful
+ -rw-r--r-- 1 mgrsys mgrsys 293 Aug 20 2021 .vimrc
+ ~~~
+
+    There shouldn't be one, since new Debian Linux users are not created with such a `.ssh` folder.
+
+2. Execute the `ssh-keygen` command to generate an SSH key pair and, as with `root`, just press enter on the questions:
+
+ ~~~sh
+ $ ssh-keygen -t ed25519 -a 250 -C "mgrsys@pve"
+ Generating public/private ed25519 key pair.
+ Enter file in which to save the key (/home/mgrsys/.ssh/id_ed25519):
Created directory '/home/mgrsys/.ssh'.
- Enter passphrase (empty for no passphrase):
+ Enter passphrase for "/home/mgrsys/.ssh/id_ed25519" (empty for no passphrase):
Enter same passphrase again:
- Your identification has been saved in /home/mgrsys/.ssh/id_rsa.
- Your public key has been saved in /home/mgrsys/.ssh/id_rsa.pub.
+ Your identification has been saved in /home/mgrsys/.ssh/id_ed25519
+ Your public key has been saved in /home/mgrsys/.ssh/id_ed25519.pub
The key fingerprint is:
- SHA256:W+egCvEe3JffgWQaShiRMkUEyVaVdnFoKOpYyd2e7Co mgrsys@pam
+ SHA256:oW/pYlgU4eDMDpjCyCdXeNymuDsX8fBjP4das1G3rvQ mgrsys@pve
The key's randomart image is:
- +---[RSA 4096]----+
- | ..B*o.o.o. |
- | * o.+ +. |
- | o *.+ o |
- | = .o. |
- | +. .oS.+ + |
- | . .+ o+= X . |
- | . +.= + o . |
- | Eo o.. . . . |
- | .+. . . |
+ +--[ED25519 256]--+
+ | +.o. |
+ |+o =.=.o |
+ |=+.o* +.. |
+ |. ++ +.. . |
+ | o.* S . . |
+ | . ..* .. . . |
+ | .oo *+.. . |
+ | o..oo.+=.o |
+ | o. oo.o..E |
+----[SHA256]-----+
~~~
-3. The new ssh key pair has been saved in the `.ssh` folder created inside your user's `$HOME` path.
+3. The new SSH key pair has been saved in the `.ssh` folder created inside your user's `$HOME` path:
- ~~~bash
- $ cd .ssh
+ ~~~sh
+ $ cd .ssh/
$ ls
- id_rsa id_rsa.pub
+ id_ed25519 id_ed25519.pub
~~~
4. Create the `authorized_keys` file.
- ~~~bash
+ ~~~sh
$ touch authorized_keys ; chmod 600 authorized_keys
~~~
-5. Append the content of `id_rsa.pub` into `authorized_keys`.
+5. Append the content of `id_ed25519.pub` into `authorized_keys`.
- ~~~bash
- $ cat id_rsa.pub >> authorized_keys
+ ~~~sh
+ $ cat id_ed25519.pub >> authorized_keys
~~~
 6. Copy the `config` file from the `root` user, and change its ownership.
- ~~~bash
+ ~~~sh
$ sudo cp /root/.ssh/config config
$ sudo chown mgrsys:mgrsys config
~~~
    This `config` file was generated for `root` by the Proxmox VE installer with a predefined set of admitted ciphers for OpenSSH. Copying it to any other user is just an extra hardening measure.
-### _Export your key pairs_
+> [!WARNING]
+> **You won't be able to login as `mgrsys` with its new SSH key pair yet!**\
+> Since the `publickey` method is still not enabled in the `sshd` service's configuration, the SSH server in your Proxmox VE node will reject your key pair if you attempt to login with it.
+>
+> Just use the password and the TOTP code for now. In the section [Hardening the `sshd` service](#hardening-the-sshd-service) you will finally enable the `publickey` method, allowing you to log in remotely with your key pair.
+
+### Export your key pairs
-Don't forget to export those new key pairs so you can use them to connect to your standalone PVE node. Also remember that you'll need to generate the `.ppk` file from each private key so you can connect from Windows clients. Check out the [**G901** appendix guide](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md) to see how to connect through SSH with PuTTY.
+Don't forget to export those new key pairs so you can use them to connect to your standalone PVE node. Also remember that you'll need to generate a `.ppk` file from each private key to connect from Windows clients. Check out the [**G901** appendix chapter](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md) to see how to connect through SSH with PuTTY.
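+
+As a hint, and assuming a Linux or macOS client plus the `10.1.0.1` address used for the PVE node in this guide, you could pull the `mgrsys` key pair into your client with `scp` (password plus TOTP logins still work at this point):
+
+~~~sh
+$ scp 'mgrsys@10.1.0.1:.ssh/id_ed25519*' ~/.ssh/
+$ chmod 600 ~/.ssh/id_ed25519
+~~~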
## Hardening the `sshd` service
@@ -196,87 +258,90 @@ To do this hardening, you'll need to modify two files that you've already change
So, open a terminal as your administrative `sudo` user `mgrsys` and make a backup of your current `sshd` configuration.
-~~~bash
+~~~sh
$ cd /etc/pam.d
$ sudo cp sshd sshd.bkp
$ cd /etc/ssh
$ sudo cp sshd_config sshd_config.bkp
~~~
-### _Disabling common pam authentication in ssh logins_
+### Disabling common pam authentication in SSH logins
-To disable the common `pam` authentication when login through ssh, edit the `/etc/pam.d/sshd` file by commenting out the `@include common-auth` line found at at its top.
+To disable the common `pam` authentication when logging in through SSH, edit the `/etc/pam.d/sshd` file by commenting out the `@include common-auth` line found at its top:
-~~~bash
+~~~sh
# Standard Un*x authentication.
#@include common-auth
~~~
-### _Adjusting the `sshd` daemon configuration_
+### Adjusting the `sshd` daemon configuration
-#### **Disabling IPv6 protocol**
+The next `sshd` service hardening changes are all done in the `/etc/ssh/sshd_config` file.
-Nowadays IPv6 is still not really in use, and less in internal private networks like the one your system is in. Then, you can safely disable the protocol on the `sshd` service.
+#### Disabling IPv6 protocol
-1. Edit the current `/etc/ssh/sshd_config` file, enabling the `AddressFamily` parameter with the value `inet`. It should end looking like below.
+Although the use of IPv6 keeps growing, you will not really need it in an internal private network like the one your system is in. Therefore, you can safely disable the protocol on the `sshd` service.
- ~~~bash
+1. Edit the current `/etc/ssh/sshd_config` file, enabling the `AddressFamily` parameter with the value `inet`:
+
+ ~~~sh
#Port 22
AddressFamily inet
- ListenAddress 192.168.1.107
+ ListenAddress 10.1.0.1
#ListenAddress ::
~~~
- Notice that I've also put a concrete IP in the `ListenAddress` parameter, making the ssh daemon (called `sshd`) to listen only on the interface that corresponds to that IP. In this case, the IP corresponds with the physical Ethernet network card that is in use in the host. By default, the `sshd` process listens in all available interfaces in the host, when set with the default `ListenAddress 0.0.0.0` setting.
+    Notice that I have also specified a concrete IP in the `ListenAddress` parameter, making the SSH daemon (called `sshd`) listen only on the interface that corresponds to that IP. In this case, the IP corresponds to the physical Ethernet network card in use in the Proxmox VE host. By default, with the `ListenAddress 0.0.0.0` setting, the `sshd` process listens on all available interfaces in the host.
-2. Save the the change and restart the `ssh` service.
+2. Save the change and restart the `sshd` service:
- ~~~bash
+ ~~~sh
$ sudo systemctl restart sshd.service
~~~
-3. Using the `ss` and `grep` commands, check if there's any socket listening on the port `22` but with an IPv6 address like `[::1]`.
+3. Using the `ss` and `grep` commands, check if there is any socket listening on the port `22` but with an IPv6 address like `[::1]`:
- ~~~bash
+ ~~~sh
$ sudo ss -atlnup | grep :22
- tcp LISTEN 0 128 192.168.1.107:22 0.0.0.0:* users:(("sshd",pid=4822,fd=3))
+ tcp LISTEN 0 128 10.1.0.1:22 0.0.0.0:* users:(("sshd",pid=39495,fd=6))
~~~
- With the `ss` command above, you get a list of all the ports open in your system and on which address, among other details. With `grep` you filter out the lines returned by `ss` to get only the ones which, in this case, have the port `22` opened. In the output above you can see that there's only one line, which corresponds to the IPv4 address where `sshd` is listening, `192.168.1.107` in this case.
+    With the `ss` command above, you get a list of all the ports open in your system and on which address, among other details. With `grep` you filter the lines returned by `ss` to keep only the ones that, in this case, have port `22` open. In the output above you can see that there is only one line, which corresponds to the IPv4 address where `sshd` is listening, `10.1.0.1` in this case.
-#### **Reducing the login grace period**
+#### Reducing the login grace period
-By default, the sshd daemon gives any user two full minutes to authenticate. With modern ssh clients and ssh key pair authentication there's no need to have so much time to authenticate, so you can reduce it to just 45 seconds.
+By default, the `sshd` daemon gives any user two full minutes to authenticate. With modern SSH clients and SSH key pair authentication there's no need for so much time, so you can reduce it to a shorter period like **45 seconds**.
-1. Edit the current `/etc/ssh/sshd_config` file, uncomment the `LoginGraceTime` parameter and set it to `45`.
+1. Edit the current `/etc/ssh/sshd_config` file, uncomment the `LoginGraceTime` parameter and set it to `45`:
- ~~~bash
+ ~~~sh
# Authentication:
LoginGraceTime 45
PermitRootLogin yes
~~~
-2. Save the the change and restart the `ssh` service.
+2. Save the change and restart the `sshd` service:
- ~~~bash
+ ~~~sh
$ sudo systemctl restart sshd.service
~~~
-> **BEWARE!**
-> Careful of setting this value too low for your system. Take into account that, when your server receives several concurrent unauthenticated requests, it will need some time to process them. Also, a human user will need some time to type their **TOTP codes**.
+> [!WARNING]
+> **Do not set this timer to a value too low for your system**\
+> Be aware that, when your server receives several concurrent unauthenticated requests, it will need some time to process them. Also, a human user will need some time to get and enter their **TOTP code**.
-#### **Disabling the `root` user on ssh**
+#### Disabling the `root` user on ssh
-It's a common security measure in Linux servers to **disable** (_never deleting_, mind you) the `root` superuser after creating an administrative substitute `sudo`-enabled user (like `mgrsys`). This is fine for regular Linux servers (virtualized ones included) but not exactly peachy for Proxmox VE.
+It is a common security measure in Linux servers to **disable** (_never deleting_, mind you) the `root` superuser after creating an administrative substitute `sudo`-enabled user (like `mgrsys`). This is fine for regular Linux servers (virtualized ones included) but not exactly peachy for Proxmox VE.
-Proxmox VE uses `root` user for certain tasks like clustering functionality, but **it's not something specifically documented by Proxmox**. So, if you decide to disable the `root` superuser, be aware that you may face unexpected problems in your Proxmox VE system.
+Proxmox VE uses the `root` user for certain tasks like [its clustering functionality](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pvecm_cluster_requirements). Therefore, if you decide to disable the `root` superuser, be aware that you may face unexpected problems in your Proxmox VE system.
-Still, given that we're working on a standalone node, we can assume that a standalone Proxmox VE node may use `root` just in a local manner, never to remotely connect anywhere. Assuming this, disabling the ssh remote access for `root` is a simple modification to the `sshd` configuration.
+Still, given that your Proxmox VE server is just a standalone node, you can assume that it only needs the `root` user for local tasks, never to connect remotely anywhere. Hence, disabling the SSH remote access for `root` is a simple modification to the `sshd` configuration.
-1. Edit the current `/etc/ssh/sshd_config` file, look for the parameter `PermitRootLogin` and change its value to `no`.
+1. Edit the current `/etc/ssh/sshd_config` file, look for the parameter `PermitRootLogin` and change its value to `no`:
- ~~~bash
+ ~~~sh
# Authentication:
LoginGraceTime 45
@@ -284,15 +349,15 @@ Still, given that we're working on a standalone node, we can assume that a stand
#StrictModes yes
~~~
-2. Save the change and restart the sshd server.
+2. Save the change and restart the `sshd` service:
- ~~~bash
+ ~~~sh
$ sudo systemctl restart sshd.service
~~~
-3. Try to log in remotely as `root` through a ssh client. You should get an error message or warning indicating that the server has rejected the connection. In PuTTY you'll see the following line.
+3. Try to log in remotely as `root` through an SSH client. You should get an error message or warning indicating that the server has rejected the connection. In PuTTY you'll see something like the following output:
- ~~~bash
+ ~~~sh
Using username "root".
Keyboard-interactive authentication prompts from server:
| Verification code:
@@ -302,60 +367,61 @@ Still, given that we're working on a standalone node, we can assume that a stand
| Verification code:
~~~
- Notice the **Access denied** line between the two `Keyboard-interactive authentication prompts`. No matter that you input the correct TOTP verification code every time, the server will reject your request. On the other hand, if you try to connect authenticating with a ssh key pair, you'll see a different message.
+    Notice the **Access denied** line between the two `Keyboard-interactive authentication prompts`. No matter how many times you input the correct TOTP verification code, the server will reject your request. On the other hand, if you try to connect authenticating with an SSH key pair, you'll see a different message.
- ~~~bash
+ ~~~sh
Using username "root".
Authenticating with public key "root@pve"
Server refused public-key signature despite accepting key!
~~~
-Revert this change when you detect a problem related to a Proxmox VE functionality requiring `root` to connect through ssh.
+> [!NOTE]
+> Revert this change when you detect a problem related to a Proxmox VE functionality requiring `root` to connect through ssh.
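+>
+> In that case, a quick way to revert it is to set the parameter back to `yes` in `/etc/ssh/sshd_config` and restart the `sshd` service:
+>
+> ~~~sh
+> PermitRootLogin yes
+> ~~~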
-#### **Reducing the number of authentication attempts**
+#### Reducing the number of authentication attempts
 By default, the `sshd` daemon gives users up to six attempts to authenticate correctly. But those are too many tries for your SSH-key and TOTP-based setup.
-1. Edit the current `/etc/ssh/sshd_config` file, uncomment the `MaxAuthTries` parameter and set it to `3`.
+1. Edit the current `/etc/ssh/sshd_config` file, uncomment the `MaxAuthTries` parameter and set it to `3`:
- ~~~bash
+ ~~~sh
PermitRootLogin no
#StrictModes yes
MaxAuthTries 3
#MaxSessions 10
~~~
-2. Save the the change and restart the `ssh` service.
+2. Save the change and restart the `sshd` service:
- ~~~bash
+ ~~~sh
$ sudo systemctl restart sshd.service
~~~
-#### **Disabling password-based logins**
+#### Disabling password-based logins
-You've already disabled the password prompts in ssh logins when you changed the `/etc/pam.d/sshd` file before. Still, the sshd daemon has also an option in the `/etc/ssh/sshd_config` file to allow password-based authentication that is convenient to disable.
+You already disabled the password prompts in SSH logins when you changed the `/etc/pam.d/sshd` file before. Still, the `sshd` daemon also has an option in the `/etc/ssh/sshd_config` file to allow password-based authentication, which is convenient to disable as well.
1. In the `/etc/ssh/sshd_config`, uncomment the `PasswordAuthentication` parameter and set it to `no`.
- ~~~bash
- # To disable tunneled clear text passwords, change to no here!
+ ~~~sh
+ # To disable tunneled clear text passwords, change to "no" here!
PasswordAuthentication no
#PermitEmptyPasswords no
~~~
-2. Save the the change and restart the `ssh` service.
+2. Save the change and restart the `sshd` service:
- ~~~bash
+ ~~~sh
$ sudo systemctl restart sshd.service
~~~
-#### **Disabling X11 forwarding**
+#### Disabling X11 forwarding
-Proxmox VE doesn't come with a graphic (X11) environment. This means that you can disable the forwarding of the X11 system through ssh connections.
+Proxmox VE does not come with a graphic (X11) environment, so you can disable the forwarding of the X11 system through SSH connections.
1. Edit the current `/etc/ssh/sshd_config` file, setting the value of the `X11Forwarding` parameter to `no`.
- ~~~bash
+ ~~~sh
#GatewayPorts no
X11Forwarding no
#X11DisplayOffset 10
@@ -364,47 +430,47 @@ Proxmox VE doesn't come with a graphic (X11) environment. This means that you ca
 2. Save the change and restart the `sshd` service.
- ~~~bash
+ ~~~sh
$ sudo systemctl restart sshd.service
~~~
-#### **Setting up user specific authentication methods**
+#### Setting up user specific authentication methods
-It's possible to particularize the authentication methods per user, something that will solve us a problem with the `root` superuser. In our standalone node scenario, we have disabled the ssh access to the `root` user but, for more advanced (clustered) scenarios, you may need to enable the `root` remote access through `ssh`. In such scenarios, the TOTP token is problematic since Proxmox VE uses the `root` superuser for launching certain automated tasks (backups, clustering actions, etc), and automations cannot input TOTP codes.
+It is possible to particularize the authentication methods per user, something that solves a problem with the `root` superuser. In our standalone node scenario, we have disabled the SSH access for the `root` user but, in more advanced (clustered) scenarios, you may need to enable remote access for `root` through SSH. In such scenarios, the TOTP token is problematic since Proxmox VE uses the `root` superuser for launching certain automated tasks (backups, clustering actions, etc.), and automations cannot input TOTP codes.
-So, it's better to do the following:
+So, it is better to do the following:
- Generate a particular pam group for users authorized to connect through ssh, but not including `root`.
- Add your `mgrsys` user to this group.
-- Adjust the `sshd` configuration to enforce just the ssh keys authentication method for the `root` and any other accounts meant for launching automated tasks through ssh connections.
+- Adjust the `sshd` configuration to enforce just the SSH keys authentication method for the `root` and any other accounts meant for launching automated tasks through SSH connections.
-- Enforce both the ssh keys and the keyboard interactive (for TFA) methods to the ssh authorized group.
+- Enforce both the SSH keys and the keyboard interactive (for TFA) methods to the SSH authorized group.
- Disable any possible authentication method for any other unauthorized group or user.
-1. Create a new group called `sshauth`.
+1. Create a new group called `sshauth`:
- ~~~bash
+ ~~~sh
$ sudo addgroup sshauth
~~~
-2. Add the user you want to be able to connect remotely through ssh to this group.
+2. Add the user you want to be able to connect remotely through SSH to this group:
- ~~~bash
+ ~~~sh
$ sudo adduser mgrsys sshauth
~~~
-3. Edit once more the `/etc/ssh/sshd_config` file, replacing the line below...
+3. Once more, edit the `/etc/ssh/sshd_config` file, replacing the line below...
- ~~~bash
+ ~~~sh
AuthenticationMethods keyboard-interactive
~~~
- ... with the following configuration block.
+ ... with the following configuration block:
- ~~~bash
+ ~~~sh
# In Proxmox VE, root is used for automated tasks.
# This means that only the ssh keys can be used for
# the superuser authentication.
@@ -426,40 +492,42 @@ So, it's better to do the following:
 4. Save the change and restart the `sshd` service.
- ~~~bash
+ ~~~sh
$ sudo systemctl restart sshd.service
~~~
-You cannot log as `root` through ssh, because you've already disabled that possibility but, if that were still possible, you would see how the server doesn't ask you the TFA verification code anymore. On the other hand, you can try to open a new non-shared ssh connection with `mgrsys` and check out that it's still asking you for the TFA code.
+You cannot log in as `root` through SSH, because you have already disabled that possibility. But, if it were still possible, you would see that the server no longer asks you for the TFA verification code. On the other hand, you can try to open a new non-shared SSH connection with `mgrsys` and check that it still asks you for the TFA code.
-> **BEWARE!**
+> [!IMPORTANT]
> Managing SSH access with `Match` rules using `pam` groups is a more practical approach when handling many users.
-#### **Other possible changes in ssh configuration**
+#### Other possible changes in SSH configuration
There are many other possible adjustments that can be done in the `sshd` service configuration, but some of them can conflict with how Proxmox VE runs. So, beware of the following changes.
-- **Adjusting the `MaxStartups` value**.
+- **Adjusting the `MaxStartups` value**\
   When some user tries to connect to your server, that establishes a new unauthenticated or _startup_ connection. These connection attempts can come from automated processes running in your server, so be mindful of making this value just big enough for your needs (see the sketch after this list).
-- **Adjusting the `MaxSessions` value**.
- This parameter indicates how many sessions can be opened from a shared ssh connection. It could happen that some procedure requires to open two or more extra sessions branched out from its original ssh connection, so be careful of not making this value too small or unnecessarily big.
+- **Adjusting the `MaxSessions` value**\
+ This parameter indicates how many sessions can be opened from a shared SSH connection. It could happen that some procedure requires to open two or more extra sessions branched out from its original SSH connection, so be careful of not making this value too small or unnecessarily big.
-- **IP restrictions**.
+- **IP restrictions**\
You can specify which IPs can connect to your server through ssh, but this management is better left to the firewall embedded in your standalone PVE node.
-- **Changing the port number**.
- This is a common hardening-wise change, but not without it's own share of potential problems. Changing the port means that you'll also have to change the configuration of other systems and clients that may communicate with your server through ssh.
+- **Changing the port number**\
+  This is a common hardening change, but not without its own share of potential problems. Changing the port means that you'll also have to change the configuration of other systems and clients that may communicate with your Proxmox VE server through SSH.
- _My rule of thumb_: change it only if you're going to expose it directly to the public internet and you won't (_although you **absolutely** should_) put a firewall, reverse proxy or any other security solution between your ssh port and the wild.
+ > [!NOTE]
+ > **My rule of thumb regarding the SSH port change**\
+ > Change the SSH port only if you're going to expose it directly to the public internet and you won't (_although you **absolutely** should_) put a firewall, reverse proxy or any other security solution between your SSH port and the wild.
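+
+As an illustration of the first two parameters above, here is a minimal sketch of how they could look in `/etc/ssh/sshd_config`; the numbers are arbitrary examples rather than recommendations, so size them to your own needs:
+
+~~~sh
+# Start refusing 30% of new unauthenticated connections once 10 of them
+# are pending, and refuse all of them when they reach 60
+MaxStartups 10:30:60
+# Allow up to 5 sessions multiplexed over a single shared SSH connection
+MaxSessions 5
+~~~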
-#### **Consideration about hardening `sshd`**
+#### Consideration about hardening the `sshd` service
-Overall, be aware of the services or tasks in your server that require ssh connections to work, like it happens with Proxmox VE, and study the viability of any change to the `sshd` configuration in a **case-by-case** basis.
+Overall, be aware of the services or tasks in your server that require SSH connections to work, as it happens with Proxmox VE, and study the viability of any change to the `sshd` configuration on a **case-by-case** basis.
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/pam.d`
- `/etc/pve/priv`
@@ -467,7 +535,7 @@ Overall, be aware of the services or tasks in your server that require ssh conne
- `/root/.ssh`
- `$HOME/.ssh`
-### _Files_
+### Files
- `/etc/pam.d/sshd`
- `/etc/pam.d/sshd.bkp`
@@ -485,50 +553,70 @@ Overall, be aware of the services or tasks in your server that require ssh conne
## References
-### _General SSH configuration_
+### General SSH configuration
- [SSH Essentials: Working with SSH Servers, Clients, and Keys](https://www.digitalocean.com/community/tutorials/ssh-essentials-working-with-ssh-servers-clients-and-keys)
-- [5 Linux SSH Security Best Practices To Secure Your Systems](https://phoenixnap.com/kb/linux-ssh-security)
+- [How to Secure SSH Connections: 5 Best Practices](https://phoenixnap.com/kb/linux-ssh-security)
- [6 ssh authentication methods to secure connection (sshd_config)](https://www.golinuxcloud.com/openssh-authentication-methods-sshd-config/)
- [How to Enable/Disable SSH access for a particular user or user group in Linux](https://www.2daygeek.com/allow-deny-enable-disable-ssh-access-user-group-in-linux/)
-- [Asegurando SSH (haciendo ssh más seguro)](https://www.linuxtotal.com.mx/index.php?cont=info_seyre_004)
+- [Asegurando SSH (haciendo ssh más seguro)](https://www.linuxtotal.com.mx/index.php?cont=info_seyre_004) (in Spanish)
- [Proxmox VE Cluster when SSH Port is Non-standard and Root Login is Disabled](http://jaroker.com/technotes/operations/proxmox/proxmox_cluster/proxmox-ve-cluster-when-ssl-port-is-non-standard-root-login-is-disabled/)
-### _About SSH key pairs_
+### About SSH key pairs
-- [`ssh-keygen` - Generate a New SSH Key](https://www.ssh.com/ssh/keygen/)
-- [How to Generate & Set Up SSH Keys on Debian 10](https://phoenixnap.com/kb/generate-ssh-key-debian-10)
+- [How to Use ssh-keygen to Generate a New SSH Key?](https://www.ssh.com/academy/ssh/keygen)
+- [How to Generate & Set Up SSH Keys on Debian](https://phoenixnap.com/kb/generate-ssh-key-debian-10)
- [How to know the SSH key's length?](https://stackoverflow.com/questions/56827341/how-to-know-the-ssh-keys-length)
-### _User specific authentication methods in SSH_
+### User specific authentication methods in SSH
- [Creating user specific authentication methods in SSH](https://security.stackexchange.com/questions/18036/creating-user-specific-authentication-methods-in-ssh)
- [How can I setup OpenSSH per-user authentication methods?](https://serverfault.com/questions/150153/how-can-i-setup-openssh-per-user-authentication-methods)
-### _Particular sshd parameters_
+### Particular sshd parameters
-- [Ensure SSH `LoginGraceTime` is set to one minute or less](https://secscan.acron.pl/centos7/5/2/14)
-- [In `sshd_config` '`MaxAuthTries`' limits the number of auth failures per connection. What is a connection?](https://unix.stackexchange.com/questions/418582/in-sshd-config-maxauthtries-limits-the-number-of-auth-failures-per-connection)
-- [Ensure SSH `MaxAuthTries` is set to 4 or less](https://secscan.acron.pl/centos7/5/2/5)
-- [Difference between `maxstartups` and `maxsessions` in `sshd_config`](https://stackoverflow.com/questions/31114690/difference-between-maxstartups-and-maxsessions-in-sshd-config)
+- [Ensure SSH `LoginGraceTime` is set to one minute or less (Scored)](https://secscan.acron.pl/centos7/5/2/14)
+- [In sshd_config 'MaxAuthTries' limits the number of auth failures per connection. What is a connection?](https://unix.stackexchange.com/questions/418582/in-sshd-config-maxauthtries-limits-the-number-of-auth-failures-per-connection)
+- [Ensure SSH `MaxAuthTries` is set to 4 or less (Scored)](https://secscan.acron.pl/centos7/5/2/5)
+- [Difference between maxstartups and maxsessions in sshd_config](https://stackoverflow.com/questions/31114690/difference-between-maxstartups-and-maxsessions-in-sshd-config)
- [Systems Administrator’s Lab: OpenSSH MaxStartups](https://crunchtools.com/systems-administrators-lab-openssh-maxstartups/)
-- [`sshd_config MaxSessions` parameter](https://unix.stackexchange.com/questions/26170/sshd-config-maxsessions-parameter)
+- [sshd_config MaxSessions parameter](https://unix.stackexchange.com/questions/26170/sshd-config-maxsessions-parameter)
-### _About the `authorized_keys` file_
+### About the `authorized_keys` file
- [Proxmox v6 default ssh key in authorized_keys file](https://forum.proxmox.com/threads/proxmox-v6-default-ssh-key-in-authorized_keys-file.57898/#post-266842)
- > In a cluster of several nodes, PVE relies on ssh to perform certain tasks, a working communication in-between the nodes is therefore essential. This is why we share `/etc/pve/priv/known_hosts` as well as the keys via the `pmxcfs [0]` between the hosts.
- > The `/etc/ssh/ssh_known_hosts` is setup to symlink to `/etc/pve/priv/known_hosts`.
+
+ > [!NOTE]
+ > **Regarding ssh in Proxmox VE clusters**\
+ > In a cluster of several nodes, PVE relies on ssh to perform certain tasks, a working communication in-between the nodes is therefore essential. This is why we share `/etc/pve/priv/known_hosts` as well as the keys via the `pmxcfs [0]` between the hosts.
+ >
+ > The `/etc/ssh/ssh_known_hosts` is setup to symlink to `/etc/pve/priv/known_hosts`.
+ >
 > For a standalone node this does not really matter, but is setup anyway. You can check the fingerprint by running `ssh-keyscan -t rsa` and compare it if not sure.
-- [Adding own keys to authorized_keys](https://forum.proxmox.com/threads/adding-own-keys-to-authorized_keys.41812/)
-- [About `/etc/pve/priv/authorized_keys`](https://forum.proxmox.com/threads/etc-pve-priv-authorized_keys.18561/)
-- [Another `/etc/pve/priv/authorized_keys` question](https://forum.proxmox.com/threads/etc-pve-priv-authorized_keys-question.7671/)
-### _About disabling the `root` user_
+- [adding own keys to authorized_keys](https://forum.proxmox.com/threads/adding-own-keys-to-authorized_keys.41812/)
+- [/etc/pve/priv/authorized_keys](https://forum.proxmox.com/threads/etc-pve-priv-authorized_keys.18561/)
+- [/etc/pve/priv/authorized_keys question](https://forum.proxmox.com/threads/etc-pve-priv-authorized_keys-question.7671/)
+
+### About disabling the `root` user
- [Don't disable root ssh login to PVE as I did, you'll get locked out of containers](https://www.reddit.com/r/Proxmox/comments/dkozht/dont_disable_root_ssh_login_to_pve_as_i_did_youll/)
- [Disable root login](https://forum.proxmox.com/threads/disable-root-login.10512/)
+### About SSH key generation and cryptosystems
+
+- [What are ssh-keygen best practices?](https://security.stackexchange.com/questions/143442/what-are-ssh-keygen-best-practices)
+- [Secure Secure Shell](https://blog.stribik.technology/2015/01/04/secure-secure-shell.html)
+- [EdDSA. Ed25519](https://en.wikipedia.org/wiki/EdDSA#Ed25519)
+- [IANIX. Things that use Ed25519](https://ianix.com/pub/ed25519-deployment.html)
+- [RSA cryptosystem](https://en.wikipedia.org/wiki/RSA_cryptosystem)
+- [What is the -sk ending for ssh key types?](https://security.stackexchange.com/questions/240991/what-is-the-sk-ending-for-ssh-key-types)
+
+### [Proxmox VE](https://pve.proxmox.com/)
+
+- [Administration Guide](https://pve.proxmox.com/pve-docs/pve-admin-guide.html)
+ - [Cluster Manager. Requirements](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pvecm_cluster_requirements)
+
## Navigation
[<< Previous (**G008. Host hardening 02**)](G008%20-%20Host%20hardening%2002%20~%20Alternative%20administrator%20user.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G010. Host hardening 04**) >>](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md)
diff --git a/G010 - Host hardening 04 ~ Enabling Fail2Ban.md b/G010 - Host hardening 04 ~ Enabling Fail2Ban.md
index 94f95e9..5820283 100644
--- a/G010 - Host hardening 04 ~ Enabling Fail2Ban.md
+++ b/G010 - Host hardening 04 ~ Enabling Fail2Ban.md
@@ -1,139 +1,194 @@
# G010 - Host hardening 04 ~ Enabling Fail2Ban
-To harden both the ssh port and the web interface further, `fail2ban` should be installed and configured to protect from brute-force attacks against those interfaces.
-
-## Installing Fail2ban
+- [Harden your setup against intrusions with Fail2Ban](#harden-your-setup-against-intrusions-with-fail2ban)
+- [Installing Fail2Ban](#installing-fail2ban)
+- [Configuring Fail2Ban](#configuring-fail2ban)
+ - [Configuring the jail for the SSH service](#configuring-the-jail-for-the-ssh-service)
+ - [Testing the sshd jail configuration](#testing-the-sshd-jail-configuration)
+ - [Configuring the Proxmox VE jail](#configuring-the-proxmox-ve-jail)
+ - [Testing the Proxmox VE jail configuration](#testing-the-proxmox-ve-jail-configuration)
+- [Considerations regarding Fail2Ban](#considerations-regarding-fail2ban)
+ - [Fail2Ban client](#fail2ban-client)
+ - [Fail2Ban configuration files are read in order](#fail2ban-configuration-files-are-read-in-order)
+ - [Fail2Ban uses `nftables` to enforce the bans](#fail2ban-uses-nftables-to-enforce-the-bans)
+ - [Manual banning or unbanning of IPs](#manual-banning-or-unbanning-of-ips)
+ - [Checking a jail's filter](#checking-a-jails-filter)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [Fail2Ban](#fail2ban)
+- [Navigation](#navigation)
+
+## Harden your setup against intrusions with Fail2Ban
+
+To further harden both the SSH port and the web interface of your Proxmox VE standalone server, install the Fail2Ban intrusion prevention tool to protect those interfaces from anomalous login attempts trying to brute-force their way into your system.
+
+## Installing Fail2Ban
Login with `mgrsys`, and execute the `apt install` command.
-~~~bash
+~~~sh
$ sudo apt install -y fail2ban
~~~
-## Configuring Fail2ban
+## Configuring Fail2Ban
-The usual method for configuring Fail2ban is by making a `.local` version of the `/etc/fail2ban/jail.conf` file and just editing that version. This way, you have all your particular fail2ban rules in one file, but if you want to separate concerns on different files, you can do it by creating a file per concern under the `/etc/fail2ban/jail.d` folder.
+The usual method for configuring Fail2Ban is making a `.local` copy of the `/etc/fail2ban/jail.conf` file and editing only that copy. This way, you have all your particular Fail2Ban rules in one file. But, if you prefer to separate concerns into different files, you can do so by creating a file per concern under the `/etc/fail2ban/jail.d` folder.
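+
+For reference, that `.local` approach would be as simple as the following commands (although this guide uses the per-file approach under `jail.d` instead):
+
+~~~sh
+$ cd /etc/fail2ban
+$ sudo cp jail.conf jail.local
+~~~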
-### _Configuring the **ssh** jail_
+### Configuring the jail for the SSH service
-1. Open a shell with your `sudo` user, `cd` to `/etc/fail2ban/jail.d` and create an empty file called `01_sshd.conf`.
+1. Open a shell with your `sudo` user (`mgrsys` in this guide), `cd` to `/etc/fail2ban/jail.d` and create an empty file called `01_sshd.conf`:
- ~~~bash
+ ~~~sh
$ cd /etc/fail2ban/jail.d
$ sudo touch 01_sshd.conf
~~~
-2. Edit the `01_sshd.conf` file by inserting the configuration lines below.
+2. Edit the `01_sshd.conf` file by inserting the configuration lines below:
- ~~~bash
+ ~~~sh
[sshd]
enabled = true
+ backend = systemd
port = 22
maxretry = 3
~~~
- Regarding the lines above.
+ Regarding the configuration above:
+
+ - `[sshd]`\
+ Identifies the service this jail is applied to.
- - `[sshd]` identifies the service this jail is applied to.
+ - `enabled`\
+ Enables the jail for the `sshd` service. This line is also present in the `/etc/fail2ban/jail.d/defaults-debian.conf`.
- - `enabled` turns on the jail for the `sshd` service. This line is also present in the `/etc/fail2ban/jail.d/defaults-debian.conf`.
+ - `backend`\
+     The Debian system on which Proxmox VE is installed is _systemd_-based, which means that many of the traditional log files you could use with Fail2Ban to detect attacks have been replaced by the Journal logging system. Hence, you have to enable the `systemd` backend so Fail2Ban can access the logs it needs to monitor to do its job.
- - `port` should be the same as the one you've configured for your sshd service, in this case is the standard SSH one.
+ - `port`\
+     Should be the same as the one you've configured for your `sshd` service. In this guide's PVE setup, it is the standard SSH one.
- - `maxretry` is the number of failed authentication attempts allowed before applying a ban to an IP. Remember to make it the same as the `MaxAuthTries` in your `sshd` configuration, so they correlate.
+ - `maxretry`\
+ Number of failed authentication attempts allowed before applying a ban to an IP. Remember to make it the same as the `MaxAuthTries` in your `sshd` configuration, so they correlate.
3. Save the changes and restart the fail2ban service.
- ~~~bash
+ ~~~sh
$ sudo systemctl restart fail2ban.service
~~~
-To test the configuration, you should provoke a ban.
+#### Testing the sshd jail configuration
-1. For this test use another computer if you can, so your usual client system doesn't get banned.
+To test the jail configuration for the SSH service, you should provoke a ban:
-2. Then try to connect through ssh, with a non-existing test user, until you use up the attempts allowed in your configuration.
+1. **Use another computer for this test if you can**. This is to avoid banning the client system from which you usually connect to your PVE server.
-3. With the default configuration, the ban time lasts 600 seconds (10 minutes).
+2. Then try to connect through SSH **with a non-existing test user**, until you use up the attempts allowed in your configuration.
+
+3. With the default configuration, **the ban time lasts 3600 seconds (one hour)**.
4. After the banning time is over, the banned IP will be unbanned **automatically**.
-5. In your server, check the `/var/log/fail2ban.log`. At the end of the file you should find the lines that indicate you what has happened with the banned IP.
+5. In your server, check the `/var/log/fail2ban.log` file. At its end, you should find, as the most recent entries, the log lines indicating what has happened with the banned IP:
~~~log
- 2021-02-11 13:32:03,310 fail2ban.jail [28753]: INFO Jail 'sshd' started
- 2021-02-11 13:38:49,734 fail2ban.filter [28753]: INFO [sshd] Found 192.168.1.54 - 2021-02-11 13:38:49
- 2021-02-11 13:45:14,844 fail2ban.filter [28753]: INFO [sshd] Found 192.168.1.54 - 2021-02-11 13:45:14
- 2021-02-11 13:45:15,010 fail2ban.actions [28753]: NOTICE [sshd] Ban 192.168.1.54
- 2021-02-11 13:55:14,197 fail2ban.actions [28753]: NOTICE [sshd] Unban 192.168.1.54
+ 2025-08-27 16:43:52,822 fail2ban.filter [2825]: INFO [sshd] Found 10.3.0.1 - 2025-08-27 16:43:52
+ 2025-08-27 16:44:17,117 fail2ban.filter [2825]: INFO [sshd] Found 10.3.0.1 - 2025-08-27 16:44:17
+ 2025-08-27 16:44:17,117 fail2ban.filter [2825]: INFO [sshd] Found 10.3.0.1 - 2025-08-27 16:44:17
+ 2025-08-27 16:44:17,521 fail2ban.actions [2825]: NOTICE [sshd] Ban 10.3.0.1
+ ...
+ 2025-08-27 17:44:17,532 fail2ban.actions [2825]: NOTICE [sshd] Unban 10.3.0.1
~~~
- In the example above I only allowed two attempts, which correspond with the two `INFO` lines right below the `Jail 'sshd' started` one. After them you can see the warning of the same attempting IP being banned. Finally, ten minutes later, the IP is unbanned.
+   The configuration allows three login attempts through SSH, which correspond to the three `INFO` lines shown in the log output above. After them, you can see the `NOTICE` line that reports the attempting IP being banned. The final `NOTICE` log line corresponds to the unbanning of the IP after one hour.
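+
+   Also, while a ban is still active, you can check it live with the `fail2ban-client` tool (described in more detail later in this chapter):
+
+   ~~~sh
+   $ sudo fail2ban-client status sshd
+   ~~~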
-### _Configuring the Proxmox VE jail_
+### Configuring the Proxmox VE jail
-1. `cd` to `/etc/fail2ban/jail.d` and create an empty file called `02_proxmox.conf`.
+1. `cd` to `/etc/fail2ban/jail.d` and create an empty file called `02_proxmox.conf`:
- ~~~bash
+ ~~~sh
$ cd /etc/fail2ban/jail.d
$ sudo touch 02_proxmox.conf
~~~
-2. Edit the `02_proxmox.conf` file by inserting the configuration lines below.
+2. Edit the `02_proxmox.conf` file by inserting the configuration lines below:
- ~~~bash
+ ~~~sh
[proxmox]
enabled = true
port = https,http,8006
filter = proxmox
- logpath = /var/log/daemon.log
+ backend = systemd
maxretry = 3
- # 10 minutes
- bantime = 600
+ findtime = 2d
+ bantime = 1h
~~~
- Regarding the lines above.
+ Regarding the configuration above:
+
+ - `[proxmox]`\
+ Identifies the service this jail is applied to.
- - `[proxmox]` identifies the service this jail is applied to.
+ - `enabled`\
+ Enables the jail for the `proxmox` service.
- - `enabled` turns on the jail for the `proxmox` service.
+ - `backend`\
+     Tells Fail2Ban where to find the logging information it needs to run, in this case the `systemd` Journal.
- - `port` lists all the ports that your Proxmox VE platform is currently using.
+ - `port`\
+ Lists all the ports that your Proxmox VE platform is currently using.
- - `filter` indicates which filter to use to look for failed authentication attempts in the PVE platform.
+ - `filter`\
+ Specifies which filter to use to look for failed authentication attempts in the PVE platform.
- - `logpath` points to the log file that Fail2ban has to monitor, using the `filter` on it to identify failed attempts.
+ - `maxretry`\
+ Number of failed authentication attempts allowed before applying a ban to an IP.
- - `maxretry` is the number of failed authentication attempts allowed before applying a ban to an IP.
+ - `findtime`\
+ Time window Fail2Ban will monitor for repeated failed login attempts. In the configuration used in this guide, it will take into account all the attempts that happened in the last two days.
- - `bantime` indicates how long the ban should last for any banned IP. The value is in seconds, and 600 is the default for Fail2ban.
+ - `bantime`\
+ Indicates how long the ban should last for any banned IP.
3. `cd` to `/etc/fail2ban/filter.d` and create an empty file called `proxmox.conf`.
- ~~~bash
+ ~~~sh
$ cd /etc/fail2ban/filter.d
$ sudo touch proxmox.conf
~~~
4. Edit the `proxmox.conf` file by inserting the configuration lines below.
- ~~~bash
+ ~~~sh
[Definition]
- failregex = pvedaemon\[.*authentication (verification )?failure; rhost= user=.* msg=.*
+   failregex = pvedaemon\[.*authentication failure; rhost=<HOST> user=.* msg=.*
ignoreregex =
+ journalmatch = _SYSTEMD_UNIT=pvedaemon.service
~~~
- The `[Definition]` above establishes the filtering patterns (regular expressions), in the `failregex` parameter, to detect in the Proxmox VE log the failed authentication attempts. The parameter `ignoreregex` could be filled with patterns to detect false positives, in the case they could happen.
+   The `[Definition]` above establishes the filtering pattern used to detect anomalous login attempts on the Proxmox VE system:
+
+ - `failregex`\
+     To specify the regular expressions used to detect the anomalous authentication attempts in the Proxmox VE log.
+
+ - `ignoreregex`\
+ To set the regular expressions for detecting false positives, in case they are known to happen.
+
+ - `journalmatch`\
+     Indicates the `systemd` service to monitor in the Journal logs. In this case, it is Proxmox VE's daemon.
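+
+   If you want to see the exact Journal lines this filter will be matched against, you can query the same unit with `journalctl`; for example:
+
+   ~~~sh
+   $ sudo journalctl _SYSTEMD_UNIT=pvedaemon.service -n 20 --no-pager
+   ~~~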
5. Save the changes and restart the fail2ban service.
- ~~~bash
+ ~~~sh
$ sudo systemctl restart fail2ban.service
~~~
+#### Testing the Proxmox VE jail configuration
+
To test the configuration, you should provoke a ban.
-1. For this test use another computer if you can, so your usual client system doesn't get banned.
+1. For this test **use another computer if you can**, to avoid banning the usual client system from which you connect to your Proxmox VE server.
2. Then try to log in the PVE web console, with a non-existing test user, until you use up the attempts allowed in your configuration.
@@ -141,32 +196,32 @@ To test the configuration, you should provoke a ban.
4. After the banning time is over, the banned IP will be unbanned **automatically**.
-5. In your server, check the `/var/log/fail2ban.log`. At the end of the file you should find the lines that indicate you what happened with the banned IP.
+5. In your server, check the `/var/log/fail2ban.log`. At the end of the file you should find the lines that indicate you what happened with the banned IP:
~~~log
- 2021-02-11 14:38:37,626 fail2ban.jail [5970]: INFO Jail 'proxmox' started
- 2021-02-11 14:39:11,201 fail2ban.filter [5970]: INFO [proxmox] Found 192.168.1.54 - 2021-02-11 14:39:10
- 2021-02-11 14:39:16,011 fail2ban.filter [5970]: INFO [proxmox] Found 192.168.1.54 - 2021-02-11 14:39:15
- 2021-02-11 14:39:20,418 fail2ban.filter [5970]: INFO [proxmox] Found 192.168.1.54 - 2021-02-11 14:39:20
- 2021-02-11 14:39:20,909 fail2ban.actions [5970]: NOTICE [proxmox] Ban 192.168.1.54
- 2021-02-11 14:49:20,079 fail2ban.actions [5970]: NOTICE [proxmox] Unban 192.168.1.54
+ 2025-08-27 16:58:49,881 fail2ban.filter [5927]: INFO [proxmox] Found 10.3.0.1 - 2025-08-27 16:58:49
+ 2025-08-27 16:58:55,690 fail2ban.filter [5927]: INFO [proxmox] Found 10.3.0.1 - 2025-08-27 16:58:55
+ 2025-08-27 16:59:00,107 fail2ban.filter [5927]: INFO [proxmox] Found 10.3.0.1 - 2025-08-27 16:58:59
+ 2025-08-27 16:59:00,647 fail2ban.actions [5927]: NOTICE [proxmox] Ban 10.3.0.1
+ ...
+ 2025-08-27 17:59:00,653 fail2ban.actions [5927]: NOTICE [proxmox] Unban 10.3.0.1
~~~
- In the example I allowed three attempts, which correspond with the three `INFO` lines right below the `Jail 'proxmox' started` one. After them you can see the warning of the same attempting IP being banned. Finally, ten minutes later, the IP is unbanned.
+ The configuration for accessing Proxmox VE allows three attempts, which correspond with the three `INFO` lines shown in the log snippet above. After them, you can see the first `NOTICE` warning of the same attempting IP being banned. The last `NOTICE` entry informs you of the unbanning of the IP after one hour.
-## Considerations regarding Fail2ban
+## Considerations regarding Fail2Ban
First, to know more about how to configure `fail2ban`, check the manual for `jail.conf`.
-~~~bash
+~~~sh
$ man jail.conf
~~~
-### _Fail2ban client_
+### Fail2Ban client
-Fail2ban comes with the `fail2ban-client` program to monitor its status. For instance, after applying the configuration explained in this guide, you would see the following.
+Fail2Ban comes with the `fail2ban-client` program to monitor its status. For instance, after applying the configuration explained in this guide, you would see the following.
-~~~bash
+~~~sh
$ sudo fail2ban-client status
Status
|- Number of jail: 2
@@ -175,35 +230,35 @@ Status
Also, you can check each jail with the `fail2ban-client`.
-~~~bash
+~~~sh
$ sudo fail2ban-client status sshd
Status for the jail: sshd
|- Filter
| |- Currently failed: 0
| |- Total failed: 0
-| `- File list: /var/log/auth.log
+| `- Journal matches: _SYSTEMD_UNIT=ssh.service + _COMM=sshd
`- Actions
|- Currently banned: 0
- |- Total banned: 0
+ |- Total banned: 1
`- Banned IP list:
~~~
-~~~bash
+~~~sh
$ sudo fail2ban-client status proxmox
Status for the jail: proxmox
|- Filter
| |- Currently failed: 0
-| |- Total failed: 0
-| `- File list: /var/log/daemon.log
+| |- Total failed: 3
+| `- Journal matches: _SYSTEMD_UNIT=pvedaemon.service
`- Actions
- |- Currently banned: 0
- |- Total banned: 0
- `- Banned IP list:
+ |- Currently banned: 1
+ |- Total banned: 1
+ `- Banned IP list: 10.3.0.1
~~~
On the other hand, the `fail2ban-client` can also be used to handle the fail2ban underlying server (the `fail2ban-server` daemon that monitors the system logs).
-### _Fail2ban configuration files are read in order_
+### Fail2Ban configuration files are read in order
The fail2ban configuration files are read in a particular order: first the `.conf` files, then the `.local` ones. And the configuration files within the `.d/` folders are read in alphabetical order. So, the reading order for the `jail` files would be:
@@ -212,77 +267,100 @@ The fail2ban configuration files are read in a particular order: first the `.con
3. `jail.local`
4. `jail.d/*.local` (in alphabetical order).
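+
+If you ever need to see the final configuration that results from merging all those files, `fail2ban-client` can dump it for you:
+
+~~~sh
+$ sudo fail2ban-client -d
+~~~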
-### _Fail2ban uses `iptables` to enforce the bans_
+### Fail2Ban uses `nftables` to enforce the bans
-Fail2ban monitors the log files of the services you tell it to and, if it detects that an IP is banneable under the criteria of its configuration, it will block any offending IP in the `iptables` firewall of your server.
+Fail2Ban monitors the logs of the services you tell it to and, if it detects that an IP is bannable under the criteria of its configuration, it will block the offending IP with the `nftables` firewall integrated in your Proxmox VE server. The rules applied through nftables can be seen with the `nft` command:
-On the other hand, the fail2ban jails themselves will also temporarily appear in the `iptables` rule list when an IP is banned by a concrete jail.
+1. First, you must get the name of the table where the Fail2Ban rules are kept:
-To see the iptables rule list, use `sudo iptables -L`. This could give an output like the following if there has been a banning on each jail in your system.
+ ~~~sh
+ $ sudo nft list tables
+ table inet f2b-table
+ ~~~
-~~~bash
-$ sudo iptables -L
-Chain INPUT (policy ACCEPT)
-target prot opt source destination
-f2b-sshd tcp -- anywhere anywhere multiport dports ssh
-f2b-proxmox tcp -- anywhere anywhere multiport dports https,http,8006
+ Notice that there is only one table active in the `nftables` firewall, the one for Fail2Ban:
-Chain FORWARD (policy ACCEPT)
-target prot opt source destination
+ - `table`\
+ Just indicates that the object listed in the output is a table of rules.
-Chain OUTPUT (policy ACCEPT)
-target prot opt source destination
+ - `inet`\
+     Indicates to which address family the rules apply. The `inet` family covers both IPv4 and IPv6 traffic, although the sets and rules in this table only match IPv4 addresses.
-Chain f2b-sshd (1 references)
-target prot opt source destination
-RETURN all -- anywhere anywhere
+ - `f2b-table`\
+ The name of the Fail2Ban (`f2b`) table.
-Chain f2b-proxmox (1 references)
-target prot opt source destination
-RETURN all -- anywhere anywhere
-~~~
+2. Once you know the family and name of the Fail2Ban table, you can see the rules in it with the `nft` command:
+
+ ~~~sh
+ $ sudo nft list table inet f2b-table
+ table inet f2b-table {
+ set addr-set-sshd {
+ type ipv4_addr
+ }
+
+ set addr-set-proxmox {
+ type ipv4_addr
+ elements = { 10.3.0.1 }
+ }
+
+ chain f2b-chain {
+ type filter hook input priority filter - 1; policy accept;
+ tcp dport 22 ip saddr @addr-set-sshd reject with icmp port-unreachable
+ tcp dport { 80, 443, 8006 } ip saddr @addr-set-proxmox reject with icmp port-unreachable
+ }
+ }
+ ~~~
-Notice the `f2b-sshd` and `f2b-proxmox` names, they have the name for the Fail2Ban proxmox jails they're related to.
+   The output reveals both sets of banned addresses and the chain of rules applied:
-### _Manual banning or unbanning of IPs_
+ - The `addr-set-sshd` and `addr-set-proxmox` sets each correspond to one of the jails enabled in Fail2Ban. Notice how in the `addr-set-proxmox` there is already one IP included in the set, meaning that nftables has blocked its access to the Proxmox VE server ports.
-You can manually unban an IP with the following command.
+ - The `f2b-chain` block contains the rules that block access to the sshd and Proxmox VE servers to banned IPs.
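+
+   If you only care about one of the sets, `nft` can also list it on its own; for example, for the Proxmox VE one shown above:
+
+   ~~~sh
+   $ sudo nft list set inet f2b-table addr-set-proxmox
+   ~~~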
-~~~bash
+### Manual banning or unbanning of IPs
+
+You can manually unban an IP with the following command:
+
+~~~sh
$ sudo fail2ban-client set <JAIL> banip/unbanip <IP>
~~~
-Notice how you have to specify on which `` you want to place a ban or lift it. An example of unbanning an IP from the `sshd` jail would be like below.
+Notice how you have to specify on which `<JAIL>` you want to place a ban or lift it. An example of unbanning an IP from the `sshd` jail would be like this:
-~~~bash
-$ sudo fail2ban-client set sshd unbanip 192.168.1.54
+~~~sh
+$ sudo fail2ban-client set sshd unbanip 10.3.0.1
+1
~~~
-### _Checking a jail's filter_
+Notice that the `unbanip` command returns a number indicating how many IPs have been unbanned. In the example, just one has been released from the `sshd` jail.
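+
+Banning works the same way; for instance, to manually ban an address in the `proxmox` jail you could run something like this:
+
+~~~sh
+$ sudo fail2ban-client set proxmox banip 10.3.0.1
+~~~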
+
+### Checking a jail's filter
-The filter of a jail is just a regular expression that weeds out from a given log the lines that indicates to fail2ban if an IP should be banned.
+The filter of a jail is just a regular expression that picks out, from a given log, the lines that tell Fail2Ban whether an IP should be banned.
To check that the regular expression of a jail's filter works, you have to use the `fail2ban-regex` command.
-~~~bash
-fail2ban-regex /path/to/log.file /path/to/fail2ban/filter/configuration.file
+~~~sh
+fail2ban-regex [/path/to/log.file | systemd-journal] /path/to/fail2ban/filter/configuration.file
~~~
-Notice that you can test any filter against any log file. For instance, if you were to check the `proxmox` filter previously explained in this guide, you would do it like the following.
+Notice that you can test any filter against any log file, or the systemd Journal. For instance, if you were to check the `proxmox` filter previously explained in this guide, you would do this:
-~~~bash
-$ sudo fail2ban-regex /var/log/daemon.log /etc/fail2ban/filter.d/proxmox.conf
+~~~sh
+$ sudo fail2ban-regex systemd-journal /etc/fail2ban/filter.d/proxmox.conf
~~~
-The output of the command above will be something like the following.
+The output of the command above should be something like the following:
+
+~~~sh
-~~~bash
Running tests
=============
-Use failregex filter file : proxmox, basedir: /etc/fail2ban
-Use log file : /var/log/daemon.log
+Use filter file : proxmox, basedir: /etc/fail2ban
+Use systemd journal
Use encoding : UTF-8
+Use journal match : _SYSTEMD_UNIT=pvedaemon.service
Results
@@ -290,34 +368,28 @@ Results
Failregex: 8 total
|- #) [# of hits] regular expression
-| 1) [8] pvedaemon\[.*authentication (verification )?failure; rhost= user=.* msg=.*
+| 1) [8] pvedaemon\[.*authentication failure; rhost= user=.* msg=.*
`-
Ignoreregex: 0 total
-Date template hits:
-|- [# of hits] date format
-| [3421] {^LN-BEG}(?:DAY )?MON Day %k:Minute:Second(?:\.Microseconds)?(?: ExYear)?
-| [1] (?:DAY )?MON Day %k:Minute:Second(?:\.Microseconds)?(?: ExYear)?
-`-
-
-Lines: 3422 lines, 0 ignored, 8 matched, 3414 missed
-[processed in 0.49 sec]
+Lines: 356 lines, 0 ignored, 8 matched, 348 missed
+[processed in 0.14 sec]
-Missed line(s): too many to print. Use --print-all-missed to print all 3414 lines
+Missed line(s): too many to print. Use --print-all-missed to print all 348 lines
~~~
-Notice how it has detected 8 matches: those matches are authentication failures that I've provoked to test the `fail2ban` configuration.
+Notice how the `fail2ban-regex` command reports a total of eight matches in the `Failregex` line.
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/fail2ban`
- `/etc/fail2ban/filter.d`
- `/etc/fail2ban/jail.d`
-### _Files_
+### Files
- `/etc/fail2ban/filter.d/proxmox.conf`
- `/etc/fail2ban/jail.d/01_sshd.conf`
@@ -325,13 +397,14 @@ Notice how it has detected 8 matches: those matches are authentication failures
## References
-### _Fail2ban_
+### [Fail2Ban](https://github.com/fail2ban/fail2ban)
-- [How to install Fail2ban on Debian](https://upcloud.com/community/tutorials/install-fail2ban-debian/)
-- [How to Setup Fail2ban on Debian 9](https://www.vultr.com/docs/how-to-setup-fail2ban-on-debian-9-stretch)
+- [How Fail2Ban Works to Protect Services on a Linux Server](https://www.digitalocean.com/community/tutorials/how-fail2ban-works-to-protect-services-on-a-linux-server)
+- [How to configure fail2ban with systemd journal?](https://unix.stackexchange.com/questions/268357/how-to-configure-fail2ban-with-systemd-journal)
- [Proxmox VE wiki. Protecting the web interface with fail2ban](https://pve.proxmox.com/wiki/Fail2ban)
- [fail2ban conf file](https://unix.stackexchange.com/questions/456756/fail2ban-conf-file)
- [Fail2ban on Debian Buster - the right way to configure?](https://serverfault.com/questions/997099/fail2ban-on-debian-buster-the-right-way-to-configure)
+- [How to install Fail2ban on Debian](https://upcloud.com/resources/tutorials/install-fail2ban-debian/)
## Navigation
diff --git a/G011 - Host hardening 05 ~ Proxmox VE services.md b/G011 - Host hardening 05 ~ Proxmox VE services.md
index 58f5e1a..9ecca5b 100644
--- a/G011 - Host hardening 05 ~ Proxmox VE services.md
+++ b/G011 - Host hardening 05 ~ Proxmox VE services.md
@@ -1,203 +1,316 @@
# G011 - Host hardening 05 ~ Proxmox VE services
-Your standalone Proxmox VE node comes with a number of services running by default. Having in mind the target system's particularities assumed in this guide series, there's some reconfiguring or disabling of services that you can do to harden your Proxmox VE platform further.
-
-The basic idea is to reduce the exposition surface of your platform, and also save some RAM and CPU, by disabling unneeded Proxmox VE related processes.
+- [Reduce your Proxmox VE server's exposed surface](#reduce-your-proxmox-ve-servers-exposed-surface)
+- [Checking currently running services](#checking-currently-running-services)
+ - [Listing the contents of the `/etc/init.d/` folder](#listing-the-contents-of-the-etcinitd-folder)
+ - [Listing the unit files with `systemctl`](#listing-the-unit-files-with-systemctl)
+ - [Getting the status of running services with `systemctl`](#getting-the-status-of-running-services-with-systemctl)
+ - [Checking the systemd units loaded in memory](#checking-the-systemd-units-loaded-in-memory)
+ - [Inspecting the running services with `htop`](#inspecting-the-running-services-with-htop)
+- [Configuring the `pveproxy` service](#configuring-the-pveproxy-service)
+ - [Default 8006 port and listening interfaces](#default-8006-port-and-listening-interfaces)
+ - [Enforcing strong SSL/TLS ciphers](#enforcing-strong-ssltls-ciphers)
+ - [Host based access control rules](#host-based-access-control-rules)
+- [Disabling RPC services](#disabling-rpc-services)
+- [Disabling `zfs` and `ceph`](#disabling-zfs-and-ceph)
+- [Disabling the SPICE proxy](#disabling-the-spice-proxy)
+- [Disabling cluster and high availability related services](#disabling-cluster-and-high-availability-related-services)
+- [Considerations](#considerations)
+ - [Errors in the `apt upgrade` process](#errors-in-the-apt-upgrade-process)
+ - [View of services running in the Proxmox VE node](#view-of-services-running-in-the-proxmox-ve-node)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [Proxmox VE](#proxmox-ve)
+ - [Proxmox hardening and standalone node optimization](#proxmox-hardening-and-standalone-node-optimization)
+ - [Services management in Debian](#services-management-in-debian)
+ - [NFS and RPC](#nfs-and-rpc)
+ - [Default listening behaviour of `pveproxy`](#default-listening-behaviour-of-pveproxy)
+ - [Configuration of SSL/TSL ciphers in `pveproxy`](#configuration-of-ssltsl-ciphers-in-pveproxy)
+ - [ZFS and Ceph](#zfs-and-ceph)
+ - [SPICE proxy](#spice-proxy)
+- [Navigation](#navigation)
+
+## Reduce your Proxmox VE server's exposed surface
+
+Your standalone Proxmox VE node comes with a number of services running by default. Bearing in mind the particularities assumed for this guide's target system, there is some reconfiguring or disabling of services you can do to harden your Proxmox VE platform even more.
+
+The basic idea is to reduce your platform's surface exposed to possible attacks, while also saving some RAM and CPU in the process, by disabling unneeded processes related to Proxmox VE.
## Checking currently running services
To see what services are running in your system, you can do any of the following.
-- Listing the contents of the `/etc/init.d/` folder. You should get an output like the following.
-
- ~~~bash
- $ ls -al /etc/init.d/
- total 144
- drwxr-xr-x 2 root root 4096 Nov 10 16:41 .
- drwxr-xr-x 91 root root 4096 Nov 10 16:41 ..
- -rwxr-xr-x 1 root root 3740 Apr 3 2021 apparmor
- -rwxr-xr-x 1 root root 1897 May 13 16:51 chrony
- -rwxr-xr-x 1 root root 1232 Jul 23 19:05 console-setup.sh
- -rwxr-xr-x 1 root root 3059 Feb 22 2021 cron
- -rwxr-xr-x 1 root root 3152 Feb 21 2021 dbus
- -rwxr-xr-x 1 root root 7033 Jul 12 06:52 fail2ban
- -rwxr-xr-x 1 root root 1748 Jul 28 21:09 hwclock.sh
- -rwxr-xr-x 1 root root 1503 Apr 28 2021 iscsid
- -rwxr-xr-x 1 root root 1479 Oct 29 2018 keyboard-setup.sh
- -rwxr-xr-x 1 root root 2044 Jan 7 2021 kmod
- -rwxr-xr-x 1 root root 883 May 17 2016 lm-sensors
- -rwxr-xr-x 1 root root 695 Feb 22 2021 lvm2
- -rwxr-xr-x 1 root root 586 Feb 22 2021 lvm2-lvmpolld
- -rwxr-xr-x 1 root root 5658 Jun 28 09:15 nfs-common
- -rwxr-xr-x 1 root root 5329 Oct 15 2020 nut-client
- -rwxr-xr-x 1 root root 5316 Oct 15 2020 nut-server
- -rwxr-xr-x 1 root root 2433 Apr 28 2021 open-iscsi
- -rwxr-xr-x 1 root root 3368 Aug 2 2020 postfix
- -rwxr-xr-x 1 root root 924 Apr 6 2021 procps
- -rwxr-xr-x 1 root root 2507 Jul 13 2020 rpcbind
- -rwxr-xr-x 1 root root 5246 Sep 1 2019 rrdcached
- -rwxr-xr-x 1 root root 4417 Feb 3 2021 rsync
- -rwxr-xr-x 1 root root 2864 Feb 17 2021 rsyslog
- -rwxr-xr-x 1 root root 3088 Oct 10 2019 smartmontools
- -rwxr-xr-x 1 root root 4056 Mar 13 2021 ssh
- -rwxr-xr-x 1 root root 1030 Feb 27 2021 sudo
- -rwxr-xr-x 1 root root 6871 Jul 13 19:28 udev
- lrwxrwxrwx 1 root root 10 Oct 15 2020 ups-monitor -> nut-client
- ~~~
+### Listing the contents of the `/etc/init.d/` folder
+
+Check out the `/etc/init.d/` folder with `ls -al` to get an output like the following:
+
+~~~sh
+$ ls -al /etc/init.d/
+total 112
+drwxr-xr-x 2 root root 4096 Aug 27 16:28 .
+drwxr-xr-x 100 root root 4096 Aug 27 16:28 ..
+-rwxr-xr-x 1 root root 3740 Jul 21 11:30 apparmor
+-rwxr-xr-x 1 root root 1897 Jun 3 17:16 chrony
+-rwxr-xr-x 1 root root 1235 Jul 20 06:30 console-setup.sh
+-rwxr-xr-x 1 root root 3100 Apr 3 12:18 cron
+-rwxr-xr-x 1 root root 3152 Mar 8 20:04 dbus
+-rwxr-xr-x 1 root root 7013 May 9 12:19 fail2ban
+-rwxr-xr-x 1 root root 1511 Mar 9 18:18 iscsid
+-rwxr-xr-x 1 root root 1482 Aug 7 2022 keyboard-setup.sh
+-rwxr-xr-x 1 root root 883 Apr 12 14:35 lm-sensors
+-rwxr-xr-x 1 root root 5619 Mar 31 20:13 nfs-common
+-rwxr-xr-x 1 root root 5329 Jun 27 23:04 nut-client
+-rwxr-xr-x 1 root root 5316 Jun 27 23:04 nut-server
+-rwxr-xr-x 1 root root 2472 Mar 9 18:18 open-iscsi
+-rwxr-xr-x 1 root root 2161 Dec 17 2024 postfix
+-rwxr-xr-x 1 root root 959 Jul 30 13:58 procps
+-rwxr-xr-x 1 root root 2529 Mar 18 01:43 rpcbind
+-rwxr-xr-x 1 root root 4417 Jul 26 11:26 rsync
+-rwxr-xr-x 1 root root 3088 Oct 10 2019 smartmontools
+-rwxr-xr-x 1 root root 4060 Aug 1 17:02 ssh
+-rwxr-xr-x 1 root root 1161 Jun 30 07:55 sudo
+lrwxrwxrwx 1 root root 10 Jun 27 23:04 ups-monitor -> nut-client
+-rwxr-xr-x 1 root root 1221 May 4 19:39 wtmpdb-update-boot
+~~~
+
+The problem with this approach is that not all running processes or services have an executable file in this folder, so this listing gives you a very incomplete reference.
+
+### Listing the unit files with `systemctl`
+
+A much more detailed listing is the one offered by this `systemctl` command:
+
+~~~sh
+$ sudo systemctl list-unit-files
+~~~
+
+This command will give you a long and exhaustive interactive read-only list of processes and services:
+
+~~~sh
+UNIT FILE STATE PRESET
+efi.automount generated -
+proc-sys-fs-binfmt_misc.automount static -
+-.mount generated -
+dev-hugepages.mount static -
+dev-mqueue.mount static -
+efi.mount generated -
+proc-fs-nfsd.mount static -
+proc-sys-fs-binfmt_misc.mount disabled disabled
+run-lock.mount disabled enabled
+run-rpc_pipefs.mount generated -
+sys-fs-fuse-connections.mount static -
+sys-kernel-config.mount static -
+sys-kernel-debug.mount static -
+sys-kernel-tracing.mount static -
+tmp.mount static -
+nut-driver-enumerator.path enabled enabled
+postfix-resolvconf.path disabled enabled
+systemd-ask-password-console.path static -
+systemd-ask-password-wall.path static -
+session-1.scope transient -
+apparmor.service enabled enabled
+apt-daily-upgrade.service static -
+apt-daily.service static -
+apt-listchanges.service static -
+auth-rpcgss-module.service static -
+autovt@.service alias -
+blk-availability.service enabled enabled
+capsule@.service static -
+ceph-fuse@.service disabled enabled
+chrony-dnssrv@.service static -
+chrony-wait.service disabled enabled
+chrony.service enabled enabled
+chronyd-restricted.service disabled enabled
+chronyd.service alias -
+console-getty.service disabled disabled
+console-setup.service enabled enabled
+...
+~~~
+
+### Getting the status of running services with `systemctl`
+
+Another way of listing all the running services with `systemctl` is:
+
+~~~sh
+$ sudo systemctl status
+~~~
+
+The command will print an interactive read-only list:
+
+~~~sh
+● pve
+ State: running
+ Units: 486 loaded (incl. loaded aliases)
+ Jobs: 0 queued
+ Failed: 0 units
+ Since: Thu 2025-08-28 16:09:43 CEST; 10min ago
+ systemd: 257.7-1
+ Tainted: unmerged-bin
+ CGroup: /
+ ├─init.scope
+ │ └─1 /sbin/init
+ ├─system.slice
+ │ ├─chrony.service
+ │ │ ├─842 /usr/sbin/chronyd -F 1
+ │ │ └─863 /usr/sbin/chronyd -F 1
+ │ ├─cron.service
+ │ │ └─1106 /usr/sbin/cron -f
+ │ ├─dbus.service
+ │ │ └─705 /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation --syslog-only
+ │ ├─fail2ban.service
+ │ │ └─958 /usr/bin/python3 /usr/bin/fail2ban-server -xf start
+ │ ├─ksmtuned.service
+ │ │ ├─ 724 /bin/bash /usr/sbin/ksmtuned
+ │ │ └─2741 sleep 60
+ │ ├─lxc-monitord.service
+ │ │ └─959 /usr/libexec/lxc/lxc-monitord --daemon
+ │ ├─lxcfs.service
+ │ │ └─739 /usr/bin/lxcfs /var/lib/lxcfs
+ │ ├─nfs-blkmap.service
+ │ │ └─674 /usr/sbin/blkmapd
+ │ ├─nut-monitor.service
+ │ │ ├─1159 /lib/nut/upsmon -F
+ │ │ └─1160 /lib/nut/upsmon -F
+ │ ├─nut-server.service
+ │ │ └─1156 /lib/nut/upsd -F
+ │ ├─postfix.service
+ │ │ ├─1146 /usr/lib/postfix/sbin/master -w
+...
+~~~
+
+### Checking the systemd units loaded in memory
+
+Also with `systemctl`, you can see the "units" that `systemd` currently has in memory and their state:
+
+~~~sh
+$ sudo systemctl list-units
+ UNIT LOAD ACTIVE SUB DESCRIPTION
+ efi.automount loaded active waiting EFI System Partition Automount
+ proc-sys-fs-binfmt_misc.automount loaded active running Arbitrary Executable File Formats File System Automount Point
+● dev-fuse.device loaded activating tentative /dev/fuse
+ sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda-sda1.device loaded active plugged Samsung_SSD_860_QVO_1TB 1
+ sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda-sda2.device loaded active plugged Samsung_SSD_860_QVO_1TB 2
+ sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda-sda3.device loaded active plugged Samsung_SSD_860_QVO_1TB 3
+ sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda-sda4.device loaded active plugged Samsung_SSD_860_QVO_1TB 4
+ sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda.device loaded active plugged Samsung_SSD_860_QVO_1TB
+ sys-devices-pci0000:00-0000:00:13.0-ata2-host1-target1:0:0-1:0:0:0-block-sdb-sdb1.device loaded active plugged WDC_WD10JPVX-22JC3T0 1
+ sys-devices-pci0000:00-0000:00:13.0-ata2-host1-target1:0:0-1:0:0:0-block-sdb.device loaded active plugged WDC_WD10JPVX-22JC3T0
+ sys-devices-pci0000:00-0000:00:14.0-usb2-2\x2d1-2\x2d1:1.0-host2-target2:0:0-2:0:0:0-block-sdc-sdc1.device loaded active plugged ST2000LM015-2E8174 1
+ sys-devices-pci0000:00-0000:00:14.0-usb2-2\x2d1-2\x2d1:1.0-host2-target2:0:0-2:0:0:0-block-sdc.device loaded active plugged ST2000LM015-2E8174
+ sys-devices-pci0000:00-0000:00:1b.0-sound-card0-controlC0.device loaded active plugged /sys/devices/pci0000:00/0000:00:1b.0/sound/card0/controlC0
+ sys-devices-pci0000:00-0000:00:1c.2-0000:03:00.0-net-enp3s0.device loaded active plugged RTL8111/8168/8211/8411 PCI Express Gigabit Ethernet Controller
+ sys-devices-pci0000:00-0000:00:1f.0-intel\x2dspi-spi_master-spi0-spi0.0-mtd-mtd0.device loaded active plugged /sys/devices/pci0000:00/0000:00:1f.0/intel-spi/spi_master/spi0/spi0.0/mtd/mtd0
+ sys-devices-pci0000:00-0000:00:1f.0-intel\x2dspi-spi_master-spi0-spi0.0-mtd-mtd0ro.device loaded active plugged /sys/devices/pci0000:00/0000:00:1f.0/intel-spi/spi_master/spi0/spi0.0/mtd/mtd0ro
+ sys-devices-platform-serial8250-serial8250:0-serial8250:0.0-tty-ttyS0.device loaded active plugged /sys/devices/platform/serial8250/serial8250:0/serial8250:0.0/tty/ttyS0
+ sys-devices-platform-serial8250-serial8250:0-serial8250:0.1-tty-ttyS1.device loaded active plugged /sys/devices/platform/serial8250/serial8250:0/serial8250:0.1/tty/ttyS1
+ sys-devices-platform-serial8250-serial8250:0-serial8250:0.2-tty-ttyS2.device loaded active plugged /sys/devices/platform/serial8250/serial8250:0/serial8250:0.2/tty/ttyS2
+ sys-devices-platform-serial8250-serial8250:0-serial8250:0.3-tty-ttyS3.device loaded active plugged /sys/devices/platform/serial8250/serial8250:0/serial8250:0.3/tty/ttyS3
+ sys-devices-virtual-block-dm\x2d0.device loaded active plugged /sys/devices/virtual/block/dm-0
+ sys-devices-virtual-block-dm\x2d1.device loaded active plugged /sys/devices/virtual/block/dm-1
+ sys-devices-virtual-misc-rfkill.device loaded active plugged /sys/devices/virtual/misc/rfkill
+ sys-devices-virtual-net-vmbr0.device loaded active plugged /sys/devices/virtual/net/vmbr0
+ sys-devices-virtual-tty-ttyprintk.device loaded active plugged /sys/devices/virtual/tty/ttyprintk
+ sys-module-configfs.device loaded active plugged /sys/module/configfs
+ sys-module-fuse.device loaded active plugged /sys/module/fuse
+ sys-subsystem-net-devices-enp3s0.device loaded active plugged RTL8111/8168/8211/8411 PCI Express Gigabit Ethernet Controller
+ sys-subsystem-net-devices-vmbr0.device loaded active plugged /sys/subsystem/net/devices/vmbr0
+ -.mount loaded active mounted Root Mount
+ dev-hugepages.mount loaded active mounted Huge Pages File System
+ dev-mqueue.mount loaded active mounted POSIX Message Queue File System
+ etc-pve.mount loaded active mounted /etc/pve
+ proc-sys-fs-binfmt_misc.mount loaded active mounted Arbitrary Executable File Formats File System
+ run-lock.mount loaded active mounted Legacy Locks Directory /run/lock
+ run-rpc_pipefs.mount loaded active mounted RPC Pipe File System
+...
+~~~
+
+### Inspecting the running services with `htop`
+
+If you installed it, you can use `htop` to monitor in real time all the services currently running in your Proxmox VE server and their resource usage:
+
+~~~sh
+$ htop
+~~~
+
+This program offers an interactive text-based interface that, after some tinkering (press `F2` to enter its `Setup` menu), can be configured to look like the following snapshot:
+
+
- The problem is that not all running processes or services have an executable file in this folder, so this listing gives you a very incomplete reference.
+## Configuring the `pveproxy` service
-- A much more detailed listing is the one offered by the following `systemctl` command.
+The PVEProxy is the component responsible for exposing the Proxmox VE API through HTTPS. It is a specific reverse proxy that gives access to the API and its web console on port 8006.
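+
+As a quick way of seeing `pveproxy` in action, you could request its web console's login page with `curl` (the IP below is just the example value used later in this chapter; the `-k` flag skips the verification of PVE's self-signed certificate):
+
+~~~sh
+$ curl -k -s -o /dev/null -w '%{http_code}\n' https://10.1.0.1:8006/
+~~~
+
+An HTTP code `200` in the answer should mean the proxy is up and serving the console.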
- ~~~bash
- $ sudo systemctl list-unit-files
- ~~~
+### Default 8006 port and listening interfaces
- This will give you an interactive read-only long and exhaustive list of processes and services.
-
- ~~~bash
- UNIT FILE STATE VENDOR PRESET
- proc-sys-fs-binfmt_misc.automount static -
- -.mount generated -
- boot-efi.mount generated -
- dev-hugepages.mount static -
- dev-mqueue.mount static -
- proc-fs-nfsd.mount static -
- proc-sys-fs-binfmt_misc.mount disabled disabled
- run-rpc_pipefs.mount static -
- sys-fs-fuse-connections.mount static -
- sys-kernel-config.mount static -
- sys-kernel-debug.mount static -
- sys-kernel-tracing.mount static -
- systemd-ask-password-console.path static -
- systemd-ask-password-wall.path static -
- session-7.scope transient -
- apparmor.service enabled enabled
- apt-daily-upgrade.service static -
- ...
- ~~~
+By default, `pveproxy` listens on all your system's network interfaces on port `8006`. You can adjust this behavior to make `pveproxy` listen only on one specific IP, although you cannot change the listening port:
-- Another way of listing all the running services with `systemctl` is the following.
+1. Open a shell with `mgrsys`, `cd` to `/etc/default/` and create an empty `pveproxy` file:
- ~~~bash
- $ sudo systemctl status
- ~~~
-
- The command will output an interactive read-only list like the following excerpt.
-
- ~~~bash
- ● pve
- State: running
- Jobs: 0 queued
- Failed: 0 units
- Since: Wed 2021-11-10 15:49:06 CET; 1h 22min ago
- CGroup: /
- ├─715 bpfilter_umh
- ├─user.slice
- │ └─user-1000.slice
- │ ├─user@1000.service …
- │ │ └─init.scope
- │ │ ├─7388 /lib/systemd/systemd --user
- │ │ └─7389 (sd-pam)
- │ └─session-7.scope
- │ ├─ 7384 sshd: mgrsys [priv]
- │ ├─ 7411 sshd: mgrsys@pts/0
- │ ├─ 7412 -bash
- │ ├─11656 sudo systemctl status
- │ ├─11657 systemctl status
- │ └─11658 less
- ├─init.scope
- │ └─1 /sbin/init
- └─system.slice
- ├─fail2ban.service
- │ └─8660 /usr/bin/python3 /usr/bin/fail2ban-server -xf start
- ...
- ~~~
-
-- Also with `systemctl`, you can see the "units" systemd has currently in memory and their state.
-
- ~~~bash
- $ sudo systemctl list-units
- UNIT LOAD ACTIVE SUB DESCRIPTION
- proc-sys-fs-binfmt_misc.automount loaded active waiting Arbitrary Executable File Formats File System Automount Point
- dev-fuse.device loaded activating tentative /dev/fuse
- sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda-sda1.device loaded active plugged Samsung_SSD_860_QVO_1TB 1
- sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda-sda2.device loaded active plugged Samsung_SSD_860_QVO_1TB 2
- sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda-sda3.device loaded active plugged Samsung_SSD_860_QVO_1TB 3
- sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda-sda4.device loaded active plugged Samsung_SSD_860_QVO_1TB 4
- sys-devices-pci0000:00-0000:00:13.0-ata1-host0-target0:0:0-0:0:0:0-block-sda.device loaded active plugged Samsung_SSD_860_QVO_1TB
- sys-devices-pci0000:00-0000:00:13.0-ata2-host1-target1:0:0-1:0:0:0-block-sdb-sdb1.device loaded active plugged ST1000DM003-9YN162 1
- sys-devices-pci0000:00-0000:00:13.0-ata2-host1-target1:0:0-1:0:0:0-block-sdb.device loaded active plugged ST1000DM003-9YN162
- sys-devices-pci0000:00-0000:00:14.0-usb2-2\x2d1-2\x2d1:1.0-host2-target2:0:0-2:0:0:0-block-sdc-sdc1.device loaded active plugged WDC_WD20EARX-00PASB0 1
- sys-devices-pci0000:00-0000:00:14.0-usb2-2\x2d1-2\x2d1:1.0-host2-target2:0:0-2:0:0:0-block-sdc.device loaded active plugged WDC_WD20EARX-00PASB0
- sys-devices-pci0000:00-0000:00:1b.0-sound-card0-controlC0.device loaded active plugged /sys/devices/pci0000:00/0000:00:1b.0/sound/card0/controlC0
- sys-devices-pci0000:00-0000:00:1c.2-0000:02:00.0-net-enp2s0.device loaded active plugged RTL810xE PCI Express Fast Ethernet controller
- sys-devices-pci0000:00-0000:00:1c.3-0000:03:00.0-net-wlp3s0.device loaded active plugged RTL8723BE PCIe Wireless Network Adapter
- sys-devices-platform-serial8250-tty-ttyS1.device loaded active plugged /sys/devices/platform/serial8250/tty/ttyS1
- sys-devices-platform-serial8250-tty-ttyS10.device loaded active plugged /sys/devices/platform/serial8250/tty/ttyS10
- sys-devices-platform-serial8250-tty-ttyS11.device loaded active plugged /sys/devices/platform/serial8250/tty/ttyS11
- sys-devices-platform-serial8250-tty-ttyS12.device loaded active plugged /sys/devices/platform/serial8250/tty/ttyS12
- ...
+ ~~~sh
+ $ cd /etc/default/
+ $ sudo touch pveproxy
~~~
-- If you installed it, you can use `htop` to monitor in real time all the services running and the resources usage.
+2. Edit the new `/etc/default/pveproxy` file to add a line like the following:
- ~~~bash
- $ htop
+ ~~~sh
+ LISTEN_IP="10.1.0.1"
~~~
- This program offers an interactive text-based interface, that can be configured to look like below.
+ > [!WARNING]
+ > **Ensure you specify your PVE system's IP!**\
+ > Do not just blindly copy this configuration line and forget about it, or you might very well disable your access to your Proxmox VE web interface!
- 
-
-## Configuring the `pveproxy` service
-
-The PVEProxy is the component responsible for the Proxmox WEB interface communication. It's just a specific reverse proxy that gives access to the web console at the 8006 port.
-
-### _Default 8006 port and listening interfaces_
-
-By default, `pveproxy` listens on all your system's network interfaces through the `8006` port. This behavior **cannot be changed in any way**, or is not documented how to do so. Hence you'll need to rely on other techniques, like firewalling or use an extra reverse proxy, to protect the PVE's proxy.
+3. Save the changes and restart the `pveproxy` service.
-### _Enforcing strong SSL/TLS ciphers_
+ ~~~sh
+ $ sudo systemctl restart pveproxy.service
+ ~~~
-To make the `pveproxy` use only strong SSL/TLS ciphers when its negotiating TLS connections (to avoid _Man In The Middle_, or _MITM_, attacks), do the following.
+> [!NOTE]
+> **This change also affects PVE's `spiceproxy` service**\
+> Since `spiceproxy` is a service you will disable later in this chapter, it is not necessary to restart it here.
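+
+To verify that the change took effect, you can check which address the `pveproxy` socket is now bound to; a quick sketch using `ss`:
+
+~~~sh
+$ sudo ss -tlnp | grep 8006
+~~~
+
+In the `Local Address:Port` column, `pveproxy` should now appear bound only to the IP you set, instead of the `*` wildcard.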
-1. Open a shell with `mgrsys`, `cd` to `/etc/default/` and create an empty `pveproxy` file.
+### Enforcing strong SSL/TLS ciphers
- ~~~bash
- $ cd /etc/default/
- $ sudo touch pveproxy
- ~~~
+To make the `pveproxy` use only strong SSL/TLS ciphers (for TLS 1.2 and below) and cipher suites (for TLS 1.3 and above) when it is negotiating TLS connections (to avoid _Man In The Middle_, or _MITM_, attacks), do the following:
-2. Add to the `pveproxy` file the following lines.
+1. Add to the `pveproxy` file the following lines:
- ~~~bash
- CIPHERS="ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256"
+ ~~~sh
+ CIPHERS="ECDHE-ARIA128-GCM-SHA256:ECDHE-ARIA256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-ARIA128-GCM-SHA256:ECDHE-ECDSA-ARIA256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-PSK-CHACHA20-POLY1305:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-CHACHA20-POLY1305"
+ CIPHERSUITES="TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_GCM_SHA256"
HONOR_CIPHER_ORDER="1"
~~~
-3. Save the changes and restart the `pveproxy` service.
+2. Save the changes and restart the `pveproxy` service:
- ~~~bash
+ ~~~sh
$ sudo systemctl restart pveproxy.service
~~~
-The list in the CIPHERS parameter is derived from the list of supported ciphers you can find [in Cryptcheck](https://cryptcheck.fr/ciphers). The chosen ones are at the top of the list and with a green mark, hinting their strength.
+The set in the `CIPHERS` parameter is derived from the list of supported ciphers you can find [in Cryptcheck](https://cryptcheck.fr/ciphers). The chosen ciphers are those at the top of the list, branded with a green mark but not of the _CCM_ type. The set in `CIPHERSUITES` is the default one, a selection from the five cipher suites currently available for TLS 1.3.
-> **BEWARE!**
-> Over time, the ciphers will become outdated or deemed weak. You should check this list or a similar one to update the list of ciphers admitted in your system.
+> [!WARNING]
+> **Ciphers evolve over time**\
+> Over time, the ciphers will become outdated or deemed weak. You should check this list or a similar one from time to time to update the set of ciphers admitted in your system.
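+
+If you want to confirm that the restriction is working, you can probe the proxy with `openssl s_client`, forcing one specific cipher per attempt (the IP is just the example value used elsewhere in this chapter):
+
+~~~sh
+$ openssl s_client -connect 10.1.0.1:8006 -tls1_2 -cipher 'ECDHE-RSA-AES256-GCM-SHA384' </dev/null
+~~~
+
+An allowed cipher completes the handshake and is printed in the output, while a cipher outside your set should end in a handshake failure.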
-### _Host based access control rules_
+### Host based access control rules
You can apply some degree of access control by specifying a list of allowed IP addresses or ranges in the `pveproxy` configuration.
-> **BEWARE!**
-> This approach forces you to remember to what program did you apply certain rules. The `sshd` and other services also offer this kind of access control in their configuration, but makes managing all those rules harder.
-It's better if you centralize those rules in the Proxmox VE firewall or putting a similar solution "in front" of all your services.
+> [!IMPORTANT]
+> **This approach forces you to remember which program you applied certain rules to**\
+> The `sshd` and other services also offer this kind of access control in their configuration, but managing all those rules in such a dispersed way is just unnecessarily hard.
+>
+> **Better centralize all the access control rules in the Proxmox VE firewall, or put a similar solution "in front" of all your services.**
+
+If you really need, or find it useful, to enforce some hard access control directly in the PVE proxy's configuration:
1. Edit the `pveproxy` file and append to it the following parameters.
- ~~~bash
+ ~~~sh
DENY_FROM="all"
ALLOW_FROM="YOUR.PRIVATE.IP.RANGE/24,YOUR.HOME.IP.ADDRESS"
POLICY="allow"
@@ -205,45 +318,49 @@ It's better if you centralize those rules in the Proxmox VE firewall or putting
For example, you could put in the `ALLOW_FROM` parameter something like the following.
- ~~~bash
+ ~~~sh
ALLOW_FROM="10.0.0.1-10.0.0.5,192.168.0.0/22"
~~~
2. Save the changes and restart the `pveproxy` service.
- ~~~bash
+ ~~~sh
$ sudo systemctl restart pveproxy.service
~~~
## Disabling RPC services
-Since we're not using NFS in our standalone node, here you'll see how to disable all of its related services or daemons.
+Since you will not use NFS in your standalone PVE node, here you'll see how to disable all of its related services or daemons:
-1. Open a shell with your `mgrsys` user, and prevent with `systemctl` the `rpcbind` services from starting up when the system boots up.
+1. Open a shell with your `mgrsys` user, and prevent with `systemctl` the `rpcbind` services from starting up when the system boots up:
- ~~~bash
+ ~~~sh
$ sudo systemctl disable --now rpcbind.target rpcbind.socket rpcbind.service
~~~
- The command will return an output like below.
+ The command will output this:
- ~~~bash
- Synchronizing state of rpcbind.service with SysV service script with /lib/systemd/systemd-sysv-install.
- Executing: /lib/systemd/systemd-sysv-install disable rpcbind
- Removed /etc/systemd/system/multi-user.target.wants/rpcbind.service.
- Removed /etc/systemd/system/sockets.target.wants/rpcbind.socket.
+ ~~~sh
+ Synchronizing state of rpcbind.service with SysV service script with /usr/lib/systemd/systemd-sysv-install.
+ Executing: /usr/lib/systemd/systemd-sysv-install disable rpcbind
+ Removed '/etc/systemd/system/sockets.target.wants/rpcbind.socket'.
+ Removed '/etc/systemd/system/multi-user.target.wants/rpcbind.service'.
+ Disabling 'rpcbind.service', but its triggering units are still active:
+ rpcbind.socket
~~~
-2. `cd` to `/etc/defaults/` and make a backup of the `nfs-common` file.
+   Never mind the warning about the `rpcbind.socket` unit. After the reboot you'll perform in a later step, that unit and the other rpcbind ones will remain disabled.
- ~~~bash
- $ cd /etc/defaults
+2. `cd` to `/etc/default/` and make a backup of the `nfs-common` file:
+
+ ~~~sh
+ $ cd /etc/default/
$ sudo cp nfs-common nfs-common.orig
~~~
- Then, edit `nfs-common` and set its parameters as follows.
+ Then, edit `nfs-common` and set its `NEED_` parameters as follows:
- ~~~bash
+ ~~~sh
# If you do not set values for the NEED_ options, they will be attempted
# autodetected; this should be sufficient for most people. Valid alternatives
# for the NEED_ options are "yes" and "no".
@@ -251,13 +368,6 @@ Since we're not using NFS in our standalone node, here you'll see how to disable
# Do you want to start the statd daemon? It is not needed for NFSv4.
NEED_STATD=no
- # Options for rpc.statd.
- # Should rpc.statd listen on a specific port? This is especially useful
- # when you have a port-based firewall. To use a fixed port, set this
- # this variable to a statd argument like: "--port 4000 --outgoing-port 4001".
- # For more information, see rpc.statd(8) or http://wiki.debian.org/SecuringNFS
- STATDOPTS=
-
# Do you want to start the idmapd daemon? It is only needed for NFSv4.
NEED_IDMAPD=no
@@ -265,85 +375,90 @@ Since we're not using NFS in our standalone node, here you'll see how to disable
NEED_GSSD=no
~~~
- Notice that I've set only the `NEED_` parameters as `no`, disabling certain daemons that won't be used in the setup explained in this guide series.
+   Notice that I've set the `NEED_` parameters to `no`, disabling certain daemons that won't be used in this guide's PVE setup.
-3. There's also a unit loaded in systemd which offers nfs client services. Let's disable it.
+3. There's also a unit loaded in systemd which offers nfs client services. Let's disable it:
- ~~~bash
+ ~~~sh
$ sudo systemctl disable --now nfs-client.target
~~~
-4. Reboot your system.
+4. Reboot your system:
- ~~~bash
+ ~~~sh
$ sudo reboot
~~~
-5. Finally, check what sockets are currently listening in your system.
+5. Finally, check what sockets are currently listening in your system:
- ~~~bash
+ ~~~sh
$ sudo ss -atlnup
~~~
- This will print you an output like below.
-
- ~~~bash
- Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
- udp UNCONN 0 0 127.0.0.1:323 0.0.0.0:* users:(("chronyd",pid=739,fd=5))
- udp UNCONN 0 0 [::1]:323 [::]:* users:(("chronyd",pid=739,fd=6))
- tcp LISTEN 0 16 127.0.0.1:3493 0.0.0.0:* users:(("upsd",pid=827,fd=4))
- tcp LISTEN 0 4096 127.0.0.1:85 0.0.0.0:* users:(("pvedaemon worke",pid=965,fd=6),("pvedaemon worke",pid=964,fd=6),("pvedaemon worke",pid=963,fd=6),("pvedaemon",pid=962,fd=6))
- tcp LISTEN 0 128 192.168.1.107:22 0.0.0.0:* users:(("sshd",pid=727,fd=3))
- tcp LISTEN 0 100 127.0.0.1:25 0.0.0.0:* users:(("master",pid=923,fd=13))
- tcp LISTEN 0 4096 *:8006 *:* users:(("pveproxy worker",pid=979,fd=6),("pveproxy worker",pid=978,fd=6),("pveproxy worker",pid=977,fd=6),("pveproxy",pid=976,fd=6))
- tcp LISTEN 0 4096 *:3128 *:* users:(("spiceproxy work",pid=985,fd=6),("spiceproxy",pid=984,fd=6))
- tcp LISTEN 0 100 [::1]:25 [::]:* users:(("master",pid=923,fd=14))
+   This will print an output like this:
+
+ ~~~sh
+ Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
+ udp UNCONN 0 0 127.0.0.1:323 0.0.0.0:* users:(("chronyd",pid=834,fd=5))
+ udp UNCONN 0 0 [::1]:323 [::]:* users:(("chronyd",pid=834,fd=6))
+ tcp LISTEN 0 128 10.1.0.1:22 0.0.0.0:* users:(("sshd",pid=972,fd=6))
+ tcp LISTEN 0 4096 127.0.0.1:85 0.0.0.0:* users:(("pvedaemon worke",pid=1182,fd=6),("pvedaemon worke",pid=1181,fd=6),("pvedaemon worke",pid=1180,fd=6),("pvedaemon",pid=1179,fd=6))
+ tcp LISTEN 0 100 127.0.0.1:25 0.0.0.0:* users:(("master",pid=1132,fd=13))
+ tcp LISTEN 0 16 127.0.0.1:3493 0.0.0.0:* users:(("upsd",pid=1141,fd=4))
+ tcp LISTEN 0 4096 10.1.0.1:3128 0.0.0.0:* users:(("spiceproxy work",pid=1242,fd=6),("spiceproxy",pid=1241,fd=6))
+ tcp LISTEN 0 4096 10.1.0.1:8006 0.0.0.0:* users:(("pveproxy worker",pid=1213,fd=6),("pveproxy worker",pid=1212,fd=6),("pveproxy worker",pid=1211,fd=6),("pveproxy",pid=1210,fd=6))
+ tcp LISTEN 0 100 [::1]:25 [::]:* users:(("master",pid=1132,fd=14))
~~~
- The nfs service use several ports in both TCP and UDP protocols: 111, 1110, 2049, 4045. But, as you can see in the output above, none of them appear as being in use by any active socket.
+   The NFS service uses several ports in both TCP and UDP protocols: 111, 1110, 2049, 4045. But, as you can see in the output above, none of them appears to be in use by any active socket.
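+
+   If you prefer a check focused only on those NFS-related ports, a possible one-liner would be:
+
+   ~~~sh
+   $ sudo ss -atlnup | grep -E ':(111|1110|2049|4045)\b'
+   ~~~
+
+   An empty result means that none of those ports has a listening socket behind it.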
## Disabling `zfs` and `ceph`
-ZFS requires a lot of RAM we cannot afford in such a small server as the one used in this guide series. Ceph, on the other hand, is impossible to use with just one standalone node. So let's disable all their related services.
+ZFS requires a lot of RAM, which we cannot afford in such a small server as the one used in this guide. Ceph, on the other hand, is impossible to use with just one standalone node. So let's disable all their related services.
-1. Open a shell with you administrator user, and disable the `zfs` and `ceph` related services so they don't start when the node boots up.
+1. Open a shell with your administrator user, and disable the `zfs` and `ceph` related services so they don't start when the node boots up:
- ~~~bash
- $ sudo systemctl disable --now zfs-mount.service zfs-share.service zfs-volume-wait.service zfs-zed.service zfs-import.target zfs-volumes.target zfs.target ceph-fuse.target ceph.target
+ ~~~sh
+ $ sudo systemctl disable --now zfs-mount.service zfs-share.service zfs-volume-wait.service zfs-zed.service zfs-import.target zfs-volumes.target zfs.target ceph-fuse.target
~~~
- > **BEWARE!**
- > If you're working on a Proxmox VE `v7.2-z` or later (and maybe also in a previous `v7.1-z` release), you'll have to **mask** the `ceph.target` service instead of disabling it. Otherwise, it'll start running again after a reboot. Mask it with the following command.
- > `sudo systemctl mask --now ceph.target`
+2. The `ceph.target` unit cannot just be disabled, **it has to be masked**, or it will run again after a reboot:
-2. Reboot the system.
+ ~~~sh
+ $ sudo systemctl mask --now ceph.target
+ ~~~
+
+3. Reboot the system.
- ~~~bash
+ ~~~sh
$ sudo reboot
~~~
-3. Check with `htop` or `systemctl` that the `zfs` services are not running.
+4. Check with `htop` or `systemctl` that the `zfs` services are not running.
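+
+   For instance, one way of doing that check with `systemctl` could be:
+
+   ~~~sh
+   $ systemctl list-units --type=service --state=running | grep -E 'zfs|ceph'
+   ~~~
+
+   The command should print nothing if all those services are properly stopped.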
-> **BEWARE!**
-> Disabling the `zfs` services **won't remove** the ZFS tab found in the PVE web console at the `pve` node level, under the `Disks` section.
-> Also, Ceph wasn't really installed on your system, something notified by the PVE web console at the `Datacenter > Ceph` section, although Proxmox VE is installed with a couple of Ceph-related services.
+> [!NOTE]
+> **The ZFS tab will remain visible in PVE's web console**\
+> Disabling the `zfs` services **will not remove** the ZFS tab found in the PVE web console at the `pve` node level, under the `Disks` section.
+>
+> Also, Ceph was not really installed on your system, something notified by the PVE web console at the `Datacenter > Ceph` section, although Proxmox VE is installed with a couple of Ceph-related services.
## Disabling the SPICE proxy
-The `spiceproxy` service allows SPICE clients to connect to virtual machines and containers with graphical environments. Since in this guide series we're not planning to have graphical environments running in our VMs or containers, let's disable this proxy.
+The `spiceproxy` service allows SPICE clients to connect to virtual machines and containers with graphical environments. Since this guide does not contemplate having a graphical environment running in any VM or container, let's disable this proxy.
1. Open a shell with your administrator user, and mask the `spiceproxy` service.
- ~~~bash
+ ~~~sh
$ sudo systemctl mask --now spiceproxy
~~~
- > **BEWARE!**
- > The Proxmox VE process called `pve-manager` depends on this `spiceproxy` service somehow, and that forces us to `mask` rather than `disable` it with `systemctl`. If you just `disable` the `spiceproxy`, it will be started by the `pve-manager` (probable behaviour, but not documented).
+ > [!IMPORTANT]
+ > **The `pve-manager` service launches `spiceproxy`**\
+ > The Proxmox VE startup script called `pve-manager` starts this `spiceproxy` service, and that forces us to `mask` rather than `disable` it with `systemctl`. If you just `disable` the `spiceproxy`, it will be started again by the `pve-manager` after a reboot.
2. Reboot the system.
- ~~~bash
+ ~~~sh
$ sudo reboot
~~~
@@ -351,62 +466,69 @@ The `spiceproxy` service allows SPICE clients to connect to virtual machines and
4. Check with `ss` that there's no `spiceproxy` process with a socket open at the `3128` port.
- ~~~bash
+ ~~~sh
$ sudo ss -atlnup | grep spiceproxy
~~~
The command above should return no result whatsoever.
-> **BEWARE!**
+> [!NOTE]
+> **The `SPICE` option will remain available**\
> The `SPICE` option offered by the PVE web console will still be present in the `Shell` list, although of course you won't be able to connect to the server through that method now.
-
-
+>
+> 
## Disabling cluster and high availability related services
-Since we're working just with a standalone node, doesn't make much sense to have cluster or high availability related services running for nothing.
+Since we're working just with a standalone node, it doesn't make much sense to have cluster or high availability related services running for nothing.
+
+> [!WARNING]
+> **Do not attempt to disable the `pve-cluster` daemon**\
+> [The Proxmox VE documentation explicitly says](https://pve.proxmox.com/wiki/Service_daemons#pve-cluster) that this service is **needed** even when not running a cluster.
1. Open a shell with your administrator user, and with the `systemctl` command disable and stop the services as follows.
- ~~~bash
+ ~~~sh
$ sudo systemctl disable --now pve-ha-crm pve-ha-lrm corosync
~~~
2. Reboot the system.
- ~~~bash
+ ~~~sh
$ sudo reboot
~~~
-3. Check with `htop` or `systemctl` that the `pve-ha-crm` and `pve-ha-lrm` services are not running. Corosync wasn't running since your node is a standalone one.
-
-> **BEWARE!**
-> Don't disable the **pve-cluster** daemon, [the Proxmox VE documentation explicitly says](https://pve.proxmox.com/wiki/Service_daemons#pve-cluster) that this service is **required** even when not running a cluster.
+3. Check either with `htop` or `systemctl` that the `pve-ha-crm` and `pve-ha-lrm` services are not running. Corosync wasn't running since your node is a standalone one.
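+
+   A quick way of doing this check with `systemctl`:
+
+   ~~~sh
+   $ systemctl is-active pve-ha-crm pve-ha-lrm corosync
+   ~~~
+
+   All three services should be reported as `inactive`.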
## Considerations
-### _Errors in the `apt upgrade` process_
+Here are a couple of things to be aware of regarding Proxmox VE services.
+
+### Errors in the `apt upgrade` process
+
+Disabling or masking PVE related services may provoke errors during the `apt upgrade` process of Proxmox VE packages, as can happen with `pve-manager` when `apt` does not find the `spiceproxy` daemon available. The update process for some of those PVE packages may expect a certain service to be present or running in the system, and their installation scripts may try to restart those services as part of the update.
-Disabling or masking PVE related services may provoke errors during the `apt upgrade` process of Proxmox VE packages, as it can happen with the `pve-manager` when `apt` doesn't find the `spiceproxy` daemon available. The update process of some of those PVE packages may expect a certain service to be present or running in the system, and they may try to restart them as part of the update process.
+You should expect these errors to happen anytime you run an update of your system, although your Proxmox VE standalone node should keep on running just fine. Any time an upgrade process ends with errors, try the command `sudo apt autoremove` to make `apt` or `dpkg` deal with them.
-You should expect these error happening anytime you run an update of your system, although your Proxmox VE standalone node should keep on running fine. Any time you end an upgrade process with errors, try the command `sudo apt autoremove` to make `apt` or `dpkg` treat them somehow.
+### View of services running in the Proxmox VE node
-### _View of services running in the Proxmox VE node_
+The Proxmox VE web console has a `System` view, at the PVE node level, where you can see the current status of the Proxmox VE-related service units running in your host.
-The Proxmox VE web console has a view, at the node level, where you can see the current status of the Proxmox VE-related service units running in your host.
+
-
+For example, notice how the `corosync`, `pve-ha-crm` and `pve-ha-lrm` services are reported _dead_ with their systemd units _disabled_. Meanwhile, the `spiceproxy` service is greyed out as _disabled_ with its systemd unit _masked_.
-> **BEWARE!**
-> The services shown in this view are only the ones Proxmox VE is directly concerned with. Other services running in your PVE host won't appear here.
+> [!IMPORTANT]
+> **The `System` view only shows PVE-related services**\
+> The services shown in this `System` view are only the ones Proxmox VE is directly concerned with. Other system services running in your PVE host will not appear listed here.
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/default`
-### _Files_
+### Files
- `/etc/default/nfs-common`
- `/etc/default/nfs-common.orig`
@@ -414,47 +536,50 @@ The Proxmox VE web console has a view, at the node level, where you can see the
## References
-### _Services management in Debian_
+### [Proxmox VE](https://pve.proxmox.com/)
-- [How to Start, Stop and Restart Services in Debian 10](https://vitux.com/how-to-start-stop-and-restart-services-in-debian-10/)
-- [The directory `/etc/default/`](https://www.linuxquestions.org/questions/slackware-14/the-directory-etc-default-723942/)
-- [What is the purpose of /etc/default?](https://superuser.com/questions/354944/what-is-the-purpose-of-etc-default)
+- [Wiki. Service daemons](https://pve.proxmox.com/wiki/Service_daemons)
+
+- [Administration Guide. Important Service Daemons](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_important_service_daemons)
+ - [pveproxy - Proxmox VE API Proxy Daemon](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_pveproxy_proxmox_ve_api_proxy_daemon)
-### _Proxmox hardening and standalone node optimization_
+### Proxmox hardening and standalone node optimization
-- [Hardening Proxmox, some in one place](https://blog.samuel.domains/blog/security/hardening-proxmox-some-in-one-place)
- [Proxmox Lite](https://www.lowendtalk.com/discussion/165481/proxmox-lite)
- [PROXMOX Standalone or Cluster](https://www.reddit.com/r/Proxmox/comments/i3mupd/proxmox_standalone_or_cluster/)
-### _Proxmox VE service daemons_
+### Services management in Debian
-- [Proxmox VE wiki. Service daemons](https://pve.proxmox.com/wiki/Service_daemons)
+- [How to Start, Stop and Restart Services in Debian 10](https://vitux.com/how-to-start-stop-and-restart-services-in-debian-10/)
+- [The directory `/etc/default/`](https://www.linuxquestions.org/questions/slackware-14/the-directory-etc-default-723942/)
+- [What is the purpose of /etc/default?](https://superuser.com/questions/354944/what-is-the-purpose-of-etc-default)
-### _NFS and RPC_
+### NFS and RPC
- [Is it possible to disable rpcbind (port 111) and how to do it?](https://forum.proxmox.com/threads/is-it-possible-to-disable-rpcbind-port-111-and-how-to-do-it.33590/)
-- [Is rpcbind needed for an NFS client?
-](https://serverfault.com/questions/1015970/is-rpcbind-needed-for-an-nfs-client)
+- [Is rpcbind needed for an NFS client?](https://serverfault.com/questions/1015970/is-rpcbind-needed-for-an-nfs-client)
- [What is the purpose of rpcbind service on Linux Systems?](https://www.quora.com/What-is-the-purpose-of-rpcbind-service-on-Linux-Systems)
- [Which ports do I need to open in the firewall to use NFS?](https://serverfault.com/questions/377170/which-ports-do-i-need-to-open-in-the-firewall-to-use-nfs)
-### _Default listening behaviour of `pveproxy`_
+### Default listening behaviour of `pveproxy`
- [pveproxy LISTEN address](https://forum.proxmox.com/threads/pveproxy-listen-address.75376/).
-### _Configuration of SSL/TSL ciphers in `pveproxy`_
+### Configuration of SSL/TSL ciphers in `pveproxy`
+- [CryptCheck. Supported cipher suites](https://cryptcheck.fr/ciphers)
- [pveproxy - Disable weak SSL ciphers?](https://forum.proxmox.com/threads/pveproxy-disable-weak-ssl-ciphers.14794/page-3)
-- [Supported cipher suites - List of SSL ciphers and it's quality](https://cryptcheck.fr/ciphers)
+- [Cipher Suites Explained in Simple Terms: Unlocking the Code](https://www.ssldragon.com/blog/cipher-suites/)
+- [Cipher Suites: Ciphers, Algorithms and Negotiating Security Settings](https://www.thesslstore.com/blog/cipher-suites-algorithms-security-settings/)
-### _ZFS and Ceph_
+### ZFS and Ceph
- [ZFS on Linux](https://pve.proxmox.com/wiki/ZFS_on_Linux).
- [Deploy Hyper-Converged Ceph Cluster](https://pve.proxmox.com/wiki/Deploy_Hyper-Converged_Ceph_Cluster).
-### _SPICE proxy_
+### SPICE proxy
-- [PVE admin manual. spiceproxy - SPICE Proxy Service](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_spiceproxy_spice_proxy_service)
+- [spiceproxy - SPICE Proxy Service](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_spiceproxy_spice_proxy_service)
- [disable spiceproxy](https://forum.proxmox.com/threads/disable-spiceproxy.36638/)
- [pve-manager and other updates with optional services masked](https://forum.proxmox.com/threads/pve-manager-and-other-updates-with-optional-services-masked.63652/)
diff --git a/G012 - Host hardening 06 ~ Network hardening with sysctl.md b/G012 - Host hardening 06 ~ Network hardening with sysctl.md
index 9767344..e0da290 100644
--- a/G012 - Host hardening 06 ~ Network hardening with sysctl.md
+++ b/G012 - Host hardening 06 ~ Network hardening with sysctl.md
@@ -1,48 +1,77 @@
# G012 - Host hardening 06 ~ Network hardening with `sysctl`
-You can harden your server's network connections with a proper `sysctl` configuration. This will help you mitigate or nullify certain attacks, while also fitting your network configuration better to your needs.
+- [Harden your PVE's networking with a `sysctl` configuration](#harden-your-pves-networking-with-a-sysctl-configuration)
+- [About `sysctl`](#about-sysctl)
+ - [Some `sysctl` values are managed by the Proxmox VE firewall](#some-sysctl-values-are-managed-by-the-proxmox-ve-firewall)
+- [TCP/IP stack hardening with `sysctl`](#tcpip-stack-hardening-with-sysctl)
+ - [Disabling Chrony's IPv6 socket](#disabling-chronys-ipv6-socket)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [Proxmox VE](#proxmox-ve)
+ - [`sysctl` references](#sysctl-references)
+ - [`sysctl` configuration examples](#sysctl-configuration-examples)
+ - [SYN cookies](#syn-cookies)
+ - [ICMP](#icmp)
+ - [Other networking-related knowledge](#other-networking-related-knowledge)
+ - [Chrony](#chrony)
+- [Navigation](#navigation)
+
+## Harden your PVE's networking with a `sysctl` configuration
+
+You can harden your Proxmox VE node's network connections with a proper `sysctl` configuration. This will help you mitigate or nullify certain attacks, while also fitting your network configuration better to your needs.
## About `sysctl`
-The `sysctl` framework looks for system configuration files distributed on several locations of your system, and will read them in a concrete order. Inside the folders, `sysctl` will read the files in lexicographical order.
+The `sysctl` framework looks for system configuration files distributed across a couple of locations in your system, and will parse them at boot time in a specific order. Inside the folders, `sysctl` will read the files in lexicographical order.
-In your PVE system you'll find the following files.
+In your PVE system you'll find the following files:
+
+1. `/etc/sysctl.d/README.sysctl`\
+ This is **not** a file `sysctl` will read.
+
+2. `/usr/lib/sysctl.d/10-coredump-debian.conf`
+
+3. `/usr/lib/sysctl.d/10-pve.conf`
-1. `/etc/sysctl.d/99-sysctl.conf` : symlink to `/etc/sysctl.conf`
-2. `/etc/sysctl.d/pve.conf`
-3. `/etc/sysctl.d/README.sysctl` : this is **not** a file `sysctl` will read.
4. `/usr/lib/sysctl.d/10-pve-ct-inotify-limits.conf`
-5. `/usr/lib/sysctl.d/50-pid-max.conf`
-6. `/usr/lib/sysctl.d/protect-links.conf`
+
+5. `/usr/lib/sysctl.d/50-default.conf`
+
+6. `/usr/lib/sysctl.d/50-pid-max.conf`
+
7. `/usr/lib/sysctl.d/pve-firewall.conf`
-8. `/lib/sysctl.d/` : same as `/usr/lib/sysctl.d/`, since `/lib` is just a symlink to `/usr/lib/`
-9. `/etc/sysctl.conf`
-See how Proxmox VE already has its own files, the ones with the `pve` string in their names. Also notice that the `sysctl.conf` file will be read **twice**: first as `99-sysctl.conf`, and last as `sysctl.conf`.
+See how Proxmox VE already has its own files, the ones with the `pve` string in their names.
-> **BEWARE!**
-> All configuration files are sorted (after being loaded) in memory by their filename in lexicographic order, **regardless of the directories they're in**. If multiple files specify the same option, the entry in the file with the **lexicographically last name** will take precedence. Thus, the configuration in a certain file may either be replaced completely (by placing a file with the same name in a directory with higher priority), or individual settings might be changed (by specifying additional settings in a file with a different name that is ordered later).
+On the other hand, there is the directory `/lib/sysctl.d/`. This path is the same `/usr/lib/sysctl.d/` directory, since `/lib` is just a symlink to `/usr/lib/`.
-So, the previous file list would be applied by **sysctl** in the following _lexicographical_ order.
+> [!IMPORTANT]
+> **The `sysctl` configuration files get sorted in memory by their names**\
+> All configuration files are sorted (after being loaded) in memory by their filename in lexicographic order, **regardless of the directories they're in**.
+>
+> If multiple files specify the same option, the entry in the file with the **lexicographically last name** will take precedence. Thus, the configuration in a certain file may either be replaced completely (by placing a file with the same name in a directory with higher priority), or individual settings might be changed (by specifying additional settings in a file with a different name that is ordered later).
-1. `10-pve-ct-inotify-limits.conf`
-2. `50-pid-max.conf`
-3. `99-sysctl.conf`
-4. `protect-links.conf`
-5. `pve.conf`
+So, the previous file list would be applied by `sysctl` in the following _lexicographical_ order.
+
+1. `10-coredump-debian.conf`
+2. `10-pve.conf`
+3. `10-pve-ct-inotify-limits.conf`
+4. `50-default.conf`
+5. `50-pid-max.conf`
6. `pve-firewall.conf`
-7. `sysctl.conf`
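+
+If you ever want to re-apply every one of those files in that documented order without rebooting, the `--system` flag of the `sysctl` command does precisely that (the exact list of files it prints will depend on your particular setup):
+
+~~~sh
+$ sudo sysctl --system
+~~~
+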
Finally, to see the current value of a `sysctl` parameter, you can use directly the `sysctl` command with any full parameter name (autocomplete works for those parameter names).
-~~~bash
+~~~sh
$ sudo sysctl net.core.wmem_default
net.core.wmem_default = 212992
~~~
On the other hand, you can also list the full list of parameters with their current values.
-~~~bash
+~~~sh
$ sudo sysctl -a
abi.vsyscall32 = 1
debug.exception-trace = 1
@@ -63,62 +92,71 @@ dev.cdrom.info = Can open tray:
Since there are a lot of parameters, you'd prefer to pipe this command to `less` so you can revise them in a more comfortable manner.
-~~~bash
+~~~sh
$ sudo sysctl -a | less
~~~
-### _Some `sysctl` values are managed by the Proxmox VE firewall_
+### Some `sysctl` values are managed by the Proxmox VE firewall
+
+Be aware that the Proxmox VE firewall handles a bunch of sysctl `net` parameters. The ones listed in the Proxmox VE Firewall official documentation, [in the firewall's **Host Specific Configuration** segment under the **Configuration Files** section](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pve_firewall_host_specific_configuration), are the following ones.
-Be aware that the Proxmox VE firewall handles a bunch of sysctl `net` parameters. The ones listed in the Proxmox VE Firewall official documentation, [in the firewall's **Host Specific Configuration** segment under the **Configuration Files** section](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_configuration_files), are the following ones.
+- `nf_conntrack_allow_invalid`\
+  To allow packets with the state INVALID on the connection tracking (conntrack). I haven't found any other reference to this parameter beyond the Proxmox VE-related documentation, so maybe it's not a `sysctl` value, although it certainly looks like it.
-- `nf_conntrack_allow_invalid`: to allow packets with the state INVALID on the connection tracking (conntrack). I haven't found any other reference to this parameter beyond the Proxmox VE-related documentation, so maybe its not a `sysctl` value, although it certainly looks like it.
+- `nf_conntrack_helpers`\
+ For enabling [conntrack helpers](https://wiki.nftables.org/wiki-nftables/index.php/Conntrack_helpers) for specific protocols. Like the `nf_conntrack_allow_invalid` parameter, it appears to be a `sysctl` parameter but it is not mentioned in the [official Kernel networking documentation](https://www.kernel.org/doc/html/latest/networking/nf_conntrack-sysctl.html).
-- `net.netfilter.nf_conntrack_max`: the maximum number of allowed connection tracking entries. This value can be changed directly in the Proxmox VE web console.
+- `net.netfilter.nf_conntrack_max`\
+ The maximum number of allowed connection tracking entries. This value can be changed directly in the Proxmox VE web console.
-- `net.netfilter.nf_conntrack_tcp_timeout_established`: just a timeout in seconds for established connections. This value can be changed directly in the Proxmox VE web console. Shouldn't be shorter than the `net.ipv4.tcp_keepalive_time` value.
+- `net.netfilter.nf_conntrack_tcp_timeout_established`\
+ Just a timeout in seconds for established connections. This value can be changed directly in the Proxmox VE web console. Shouldn't be shorter than the `net.ipv4.tcp_keepalive_time` value.
-- `net.netfilter.nf_conntrack_tcp_timeout_syn_recv`: another timeout in seconds, although for this one I haven't found a proper definition that explains its meaning.
+- `net.netfilter.nf_conntrack_tcp_timeout_syn_recv`\
+ Another timeout in seconds, although for this one I haven't found a proper definition that explains its meaning.
-The parameters mentioned above are just a few of many related to the netfilter conntrack system. Read a more complete list [in this page](https://www.kernel.org/doc/html/latest/networking/nf_conntrack-sysctl.html). On the other hand, the two parameters that can be managed from the web console are found at the pve node level, in the firewall option's screen.
+The parameters mentioned above are just a few of many related to the netfilter conntrack system. [Find a more complete list on this page](https://www.kernel.org/doc/html/latest/networking/nf_conntrack-sysctl.html). On the other hand, the netfilter parameters that can be managed from the web console are found at the `pve` node level, in the firewall options page:
-
+
-It's not documented, but at least two other values are also affected by the Proxmox VE firewall (which, remember, it's just the legacy `iptables` system).
+It is not documented, but at least two other values can be affected by the Proxmox VE firewall.
- `net.bridge.bridge-nf-call-ip6tables`
- `net.bridge.bridge-nf-call-iptables`
-When the firewall gets fully enabled, those values are set to 1, overriding the 0 value they have in the `/etc/sysctl.d/pve.conf` file. This means that netfilter becomes enabled on the bridges you run on your Proxmox VE host.
+When the firewall gets fully enabled, those values are set to `1`, overriding the `0` value they have in the `/usr/lib/sysctl.d/10-pve.conf` file. This means that netfilter becomes enabled on the bridges you run on your Proxmox VE host.
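+
+A quick way of checking those two values at any moment is querying them directly with `sysctl`. Assuming the bridge netfilter module is loaded, as it normally is on a Proxmox VE host, at this point you should still see the `0` defaults set by `10-pve.conf`:
+
+~~~sh
+$ sudo sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables
+net.bridge.bridge-nf-call-iptables = 0
+net.bridge.bridge-nf-call-ip6tables = 0
+~~~
+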
-On the other hand, know that all the sysctl parameters are found under the `/proc/sys` folder as independent files containing just their values. So, to see the `net.netfilter` values, you should list the contents of the `/proc/sys/net/netfilter` folder and you'll be met by a little surprise.
+On the other hand, know that all the `sysctl` parameters are found under the `/proc/sys` folder as independent files containing just their values. So, to see the `net.netfilter` values, you should list the contents of the `/proc/sys/net/netfilter` folder and you'll be met by a little surprise.
-~~~bash
+~~~sh
$ ls -al /proc/sys/net/netfilter
total 0
-dr-xr-xr-x 1 root root 0 Nov 11 10:44 .
-dr-xr-xr-x 1 root root 0 Nov 11 10:27 ..
-dr-xr-xr-x 1 root root 0 Nov 11 10:44 nf_log
--rw-r--r-- 1 root root 0 Nov 11 10:44 nf_log_all_netns
+dr-xr-xr-x 1 root root 0 Aug 29 12:03 .
+dr-xr-xr-x 1 root root 0 Aug 29 11:10 ..
+-rw-r--r-- 1 root root 0 Aug 29 12:03 nf_hooks_lwtunnel
+dr-xr-xr-x 1 root root 0 Aug 29 12:03 nf_log
+-rw-r--r-- 1 root root 0 Aug 29 12:03 nf_log_all_netns
~~~
-None of the parameters controlled by Proxmox VE is present there, nor any of the ones you can see [here](https://www.kernel.org/doc/html/latest/networking/nf_conntrack-sysctl.html). This only means that the netfilter system is not currently active in the system. Only after you enable the firewall at the datacenter level (something you'll do in the [G014 guide](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md)), you'll see this folder filled with new `nf_conntrack_` files.
+None of the parameters controlled by Proxmox VE are present there, nor [any of the ones you can see here](https://www.kernel.org/doc/html/latest/networking/nf_conntrack-sysctl.html). This only means that the netfilter system is not currently active on the host. Only after you enable the firewall at the datacenter level (something you'll do in the [G014 chapter](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md)) will you see this folder filled with new `nf_conntrack_` files.
## TCP/IP stack hardening with `sysctl`
To avoid messing with the `.conf` files already present, let's make a new one filled with parameters just for hardening the TCP/IP stack of your system.
-> **BEWARE!**
+> [!IMPORTANT]
+> **Give your sysctl `.conf` files unique names**\
> The `.conf` files can have any name but, once a file of a given filename is loaded, `sysctl` **will ignore** any other file of the same name in subsequent directories.
1. Open a shell as `mgrsys` and `cd` to `/etc/sysctl.d/`.
- ~~~bash
+ ~~~sh
$ cd /etc/sysctl.d
~~~
2. Create the new configuration file as `80_tcp_hardening.conf`.
- ~~~bash
+ ~~~sh
$ sudo touch 80_tcp_hardening.conf
~~~
@@ -190,44 +228,45 @@ To avoid messing with the `.conf` files already present, let's make a new one fi
4. Save the `80_tcp_hardening.conf` file, and apply the changes in your system.
- ~~~bash
+ ~~~sh
$ sudo sysctl -p /etc/sysctl.d/80_tcp_hardening.conf
~~~
- The command's output will list the all the parameters with their values as it has applied them to the system.
+ The command will output a list of all the parameters it has read and applied to the system.
- > **BEWARE!**
- > When executing `sysctl -p` without specifying any file, the command will load only the values found in the `/etc/sysctl.conf` file, and won't read anything inside the `/etc/sysctl.d` folder.
+ > [!NOTE]
+ > **Do not forget to specify the file to read to the `sysctl -p` command**\
+ > When executing `sysctl -p` without specifying any file, the command will load only the values found in the `/etc/sysctl.conf` file (if it exists), and won't read anything inside the `/etc/sysctl.d` folder.
5. Reboot your system.
- ~~~bash
+ ~~~sh
$ sudo reboot
~~~
6. The specified configuration in the previous step **3** also disables the IPv6 protocol in your system. Verify that you don't see sockets listening in `[::1]` (the IPv6 version of `localhost`) addresses with the `ss` command.
- ~~~bash
+ ~~~sh
$ sudo ss -atlnup
- Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
- udp UNCONN 0 0 127.0.0.1:323 0.0.0.0:* users:(("chronyd",pid=693,fd=5))
- udp UNCONN 0 0 [::1]:323 [::]:* users:(("chronyd",pid=693,fd=6))
- tcp LISTEN 0 4096 127.0.0.1:85 0.0.0.0:* users:(("pvedaemon worke",pid=923,fd=6),("pvedaemon worke",pid=922,fd=6),("pvedaemon worke",pid=921,fd=6),("pvedaemon",pid=920,fd=6))
- tcp LISTEN 0 128 192.168.1.107:22 0.0.0.0:* users:(("sshd",pid=677,fd=3))
- tcp LISTEN 0 100 127.0.0.1:25 0.0.0.0:* users:(("master",pid=880,fd=13))
- tcp LISTEN 0 16 127.0.0.1:3493 0.0.0.0:* users:(("upsd",pid=781,fd=4))
- tcp LISTEN 0 4096 *:8006 *:* users:(("pveproxy worker",pid=932,fd=6),("pveproxy worker",pid=931,fd=6),("pveproxy worker",pid=930,fd=6),("pveproxy",pid=929,fd=6))
+ Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
+ udp UNCONN 0 0 127.0.0.1:323 0.0.0.0:* users:(("chronyd",pid=801,fd=5))
+ udp UNCONN 0 0 [::1]:323 [::]:* users:(("chronyd",pid=801,fd=6))
+ tcp LISTEN 0 4096 127.0.0.1:85 0.0.0.0:* users:(("pvedaemon worke",pid=1161,fd=6),("pvedaemon worke",pid=1160,fd=6),("pvedaemon worke",pid=1158,fd=6),("pvedaemon",pid=1157,fd=6))
+ tcp LISTEN 0 100 127.0.0.1:25 0.0.0.0:* users:(("master",pid=1111,fd=13))
+ tcp LISTEN 0 128 10.1.0.1:22 0.0.0.0:* users:(("sshd",pid=948,fd=6))
+ tcp LISTEN 0 16 127.0.0.1:3493 0.0.0.0:* users:(("upsd",pid=1119,fd=4))
+ tcp LISTEN 0 4096 10.1.0.1:8006 0.0.0.0:* users:(("pveproxy worker",pid=1176,fd=6),("pveproxy worker",pid=1175,fd=6),("pveproxy worker",pid=1174,fd=6),("pveproxy",pid=1173,fd=6))
~~~
- In the output above you can see that only one IPv6 socket remains, one opened by a certain `chronyd` service. Check the next subsection to learn how to disable that particular socket.
+ In the output above you can see that only one IPv6 socket remains, opened by a certain `chronyd` service. Check the next subsection to learn how to disable that particular socket.
-### _Disabling Chrony's IPv6 socket_
+### Disabling Chrony's IPv6 socket
Chrony is a daemon that keeps your system's clock synchronized with an external time server through the NTP (Network Time Protocol) protocol. It can run as time server, but in this setup it's only working as client. To disable its IPv6 socket, you'll need to modify slightly one of its configuration files.
1. Go to `/etc/default` and make a backup of the `chrony` file.
- ~~~bash
+ ~~~sh
$ cd /etc/default
$ sudo cp chrony chrony.orig
~~~
@@ -241,20 +280,20 @@ Chrony is a daemon that keeps your system's clock synchronized with an external
3. Restart the Chrony service.
- ~~~bash
+ ~~~sh
$ sudo systemctl restart chrony.service
~~~
4. Verify that the Chrony daemon (`chronyd`) only has the IPv4 socket open.
- ~~~bash
+ ~~~sh
$ sudo ss -atlnup | grep chronyd
- udp UNCONN 0 0 127.0.0.1:323 0.0.0.0:* users:(("chronyd",pid=1591,fd=5))
+ udp UNCONN 0 0 127.0.0.1:323 0.0.0.0:* users:(("chronyd",pid=9379,fd=5))
~~~
## Relevant system paths
-### _Directories_
+### Directories
- `/etc`
- `/etc/default`
@@ -264,43 +303,41 @@ Chrony is a daemon that keeps your system's clock synchronized with an external
- `/proc/sys`
- `/proc/sys/net/netfilter`
-### _Files_
+### Files
- `/etc/default/chrony`
- `/etc/default/chrony.orig`
-- `/etc/sysctl.conf`
- `/etc/sysctl.d/80_tcp_hardening.conf`
-- `/etc/sysctl.d/99-sysctl.conf`
-- `/etc/sysctl.d/pve.conf`
- `/etc/sysctl.d/README.sysctl`
+- `/usr/lib/sysctl.d/10-coredump-debian.conf`
+- `/usr/lib/sysctl.d/10-pve.conf`
- `/usr/lib/sysctl.d/10-pve-ct-inotify-limits.conf`
+- `/usr/lib/sysctl.d/50-default.conf`
- `/usr/lib/sysctl.d/50-pid-max.conf`
-- `/usr/lib/sysctl.d/protect-links.conf`
- `/usr/lib/sysctl.d/pve-firewall.conf`
## References
-### _Proxmox VE_
+### [Proxmox VE](https://pve.proxmox.com/pve-docs/)
-- [Proxmox VE firewall](https://pve.deimos.cloud:8006/pve-docs/chapter-pve-firewall.html)
+- [Proxmox VE firewall](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pve_firewall)
+ - [Configuration files. Host Specific Configuration](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pve_firewall_host_specific_configuration)
-### _`sysctl` references_
+### `sysctl` references
- [`sysctl` ipv4 parameters](https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt)
- [Netfilter Conntrack `sysctl` parameters](https://www.kernel.org/doc/html/latest/networking/nf_conntrack-sysctl.html)
-### _`sysctl` configuration examples_
+### `sysctl` configuration examples
-- [Archlinux wiki: `sysctl`](https://wiki.archlinux.org/index.php/Sysctl)
-- [Linux sysctl Tuning](https://community.mellanox.com/s/article/linux-sysctl-tuning)
+- [Archlinux. ArchWiki. sysctl](https://wiki.archlinux.org/index.php/Sysctl)
- [Most popular speedup `sysctl` options for Proxmox](https://gist.github.com/sergey-dryabzhinsky/bcc1a15cb7d06f3d4606823fcc834824)
- [Test environment - Virtualized Proxmox on esxi - network problem](https://forum.proxmox.com/threads/test-environment-virtualized-proxmox-on-esxi-network-problem.37612/)
- [Ethtool & sysctl.conf hardening per Cryptostorm](https://tails-dev.boum.narkive.com/PdkdEag9/ethtool-sysctl-conf-hardening-per-cryptostorm)
- [Tuning nf_conntrack](https://ixnfo.com/en/tuning-nf_conntrack.html)
-- [nf_conntrack: table full, dropping packet](https://newbedev.com/nf-conntrack-table-full-dropping-packet)
- [Linux kernel tuning settings for large number of concurrent clients](https://gist.github.com/kfox/1942782)
-### _SYN cookies_
+### SYN cookies
- [SYN cookies on Wikipedia](https://en.wikipedia.org/wiki/SYN_cookies)
- [SYN cookies by their inventor, Daniel J. Bernstein](https://cr.yp.to/syncookies.html)
@@ -310,22 +347,23 @@ Chrony is a daemon that keeps your system's clock synchronized with an external
- [How TCP backlog works in Linux](http://veithen.io/2014/01/01/how-tcp-backlog-works-in-linux.html)
- [Sane value for net.ipv4.tcp_max_syn_backlog in sysctl.conf](https://serverfault.com/questions/875035/sane-value-for-net-ipv4-tcp-max-syn-backlog-in-sysctl-conf)
-### _ICMP_
+### ICMP
- [sysctl.d: switch net.ipv4.conf.all.rp_filter from 1 to 2](https://github.com/systemd/systemd/pull/10971)
- [What is ICMP broadcast good for?](https://superuser.com/questions/306065/what-is-icmp-broadcast-good-for)
- [What are ICMP redirects and should they be blocked?](https://askubuntu.com/questions/118273/what-are-icmp-redirects-and-should-they-be-blocked)
- [Ensure broadcast ICMP requests are ignored](https://secscan.acron.pl/centos7/3/2/5)
-### _Other_
+### Other networking-related knowledge
+- [nftables. Wiki. Connection Tracking System](https://wiki.nftables.org/wiki-nftables/index.php/Connection_Tracking_System)
- [DSA-4272-1 linux -- security update](https://www.debian.org/security/2018/dsa-4272)
- [`ipfrag_high_thresh` on sysctl explorer](https://sysctl-explorer.net/net/ipv4/ipfrag_high_thresh/)
- [When to turn TCP SACK off?](https://serverfault.com/questions/10955/when-to-turn-tcp-sack-off)
- [Improving TCP performance over a gigabit network with lots of connections and high traffic of small packets](https://serverfault.com/questions/357799/improving-tcp-performance-over-a-gigabit-network-with-lots-of-connections-and-hi)
- [SegmentSmack and FragmentSmack: IP fragments and TCP segments with random offsets may cause a remote denial of service (CVE-2018-5390, CVE-2018-5391)](https://access.redhat.com/articles/3553061)
-### _Chrony_
+### Chrony
- [chronyd on debian could not open IPv6 NTP socket](https://serverfault.com/questions/992844/chronyd-on-debian-could-not-open-ipv6-ntp-socket)
- [Steps to configure Chrony as NTP Server & Client (CentOS/RHEL 8)](https://www.golinuxcloud.com/configure-chrony-ntp-server-client-force-sync/)
diff --git a/G013 - Host hardening 07 ~ Mitigating CPU vulnerabilities.md b/G013 - Host hardening 07 ~ Mitigating CPU vulnerabilities.md
index e02b4d6..baad476 100644
--- a/G013 - Host hardening 07 ~ Mitigating CPU vulnerabilities.md
+++ b/G013 - Host hardening 07 ~ Mitigating CPU vulnerabilities.md
@@ -1,93 +1,104 @@
# G013 - Host hardening 07 ~ Mitigating CPU vulnerabilities
-CPUs also come with bugs and, in some cases, can become security vulnerabilities. At the time of writing this, the most famous cases of such bugs are the **meltdown** and **spectre** vulnerabilities.
+- [CPUs also have security vulnerabilities](#cpus-also-have-security-vulnerabilities)
+- [Discovering your CPU's vulnerabilities](#discovering-your-cpus-vulnerabilities)
+- [Your Proxmox VE system will already have the correct microcode package applied](#your-proxmox-ve-system-will-already-have-the-correct-microcode-package-applied)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [About CPU vulnerabilities](#about-cpu-vulnerabilities)
+- [Navigation](#navigation)
-## Checking out your CPU's vulnerabilities
+## CPUs also have security vulnerabilities
-To check out what known vulnerabilities your CPU has, perform these steps.
+CPUs also come with bugs and, in some cases, they can become security vulnerabilities. At the time of writing this, the most famous cases of such bugs (still) are the **meltdown** and **spectre** vulnerabilities.
-1. Open a shell as your administrator user, and execute the following.
+## Discovering your CPU's vulnerabilities
- ~~~bash
- $ cat /proc/cpuinfo | grep bugs
- ~~~
+To check out what known vulnerabilities your CPU has, perform these steps:
-2. The output will be one line per core on your CPU. So, in an old four single-threaded cores Intel CPU like mine it looks like below.
+1. Open a shell as your administrator user, and execute the following:
- ~~~bash
- bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only
- bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only
- bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only
- bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only
+ ~~~sh
+ $ cat /proc/cpuinfo | grep bugs
~~~
-As you may expect, the list of vulnerabilities will change depending on the CPU.
-
-## Applying the correct microcode package
-
-To mitigate these bugs, you can install the proper microcode `apt` package for your CPU: the `intel-microcode` or the `amd-microcode` one. But to do so, first you need to enable the proper `apt` sources so those packages can be downloaded in your system.
+2. The output will be one line per core on your CPU. So, in the four-core processor of this guide's reference hardware it looks like the output below:
-1. Log in as `mgrsys`, then `cd` to `/etc/apt/sources.list.d`.
-
- ~~~bash
- $ cd /etc/apt/sources.list.d
+ ~~~sh
+ bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only mmio_unknown
+ bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only mmio_unknown
+ bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only mmio_unknown
+ bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only mmio_unknown
~~~
-2. Create a new file called `debian-nonfree.list`.
+As you can imagine, the list of vulnerabilities will change depending on the CPU inspected.
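+
+Another place to look, if your kernel exposes it, is the `/sys/devices/system/cpu/vulnerabilities/` directory, which reports the mitigation status for each known vulnerability (the entries and messages you get will depend on your CPU and kernel version):
+
+~~~sh
+$ grep . /sys/devices/system/cpu/vulnerabilities/*
+~~~
+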
- ~~~bash
- $ sudo touch debian-nonfree.list
- ~~~
+## Your Proxmox VE system will already have the correct microcode package applied
-3. Edit the `debian-nonfree.list` file, filling it with the lines below.
+To mitigate these bugs, you need to install the proper microcode `apt` package for your CPU: either `intel-microcode` or `amd64-microcode`. In my case, the Proxmox VE installation process had already installed the correct package (`intel-microcode`) in my system. I discovered this by first checking which `apt` sources were configured in the `/etc/apt/sources.list.d/debian.sources` file:
- ~~~bash
- deb http://deb.debian.org/debian bullseye non-free
- deb-src http://deb.debian.org/debian bullseye non-free
+~~~properties
+Types: deb
+URIs: http://deb.debian.org/debian/
+Suites: trixie trixie-updates
+Components: main contrib non-free-firmware
+Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
- deb http://deb.debian.org/debian-security/ bullseye-security non-free
- deb-src http://deb.debian.org/debian-security/ bullseye-security non-free
+Types: deb
+URIs: http://security.debian.org/debian-security/
+Suites: trixie-security
+Components: main contrib non-free-firmware
+Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
+~~~
- deb http://deb.debian.org/debian bullseye-updates non-free
- deb-src http://deb.debian.org/debian bullseye-updates non-free
- ~~~
+Notice that both sources include the `non-free-firmware` component, which is the one the microcode packages belong to. Then, I tried to install the `intel-microcode` package in my system with `apt`:
- > **BEWARE!**
- > This sources list is only for Debian 11 Bullseye!
+~~~sh
+$ sudo apt install -y intel-microcode
+intel-microcode is already the newest version (3.20250512.1).
+Summary:
+ Upgrading: 0, Installing: 0, Removing: 0, Not Upgrading: 3
+~~~
-4. Save the file, update `apt` and then install the package that suits your CPU. In my case, I'll apply the `intel-microcode` package.
+The `apt` command reported that the package was already installed in the system, and at its newest version. Therefore, you can expect your Proxmox VE setup to have the correct microcode package already applied. If not, first ensure that your `/etc/apt/sources.list.d/debian.sources` file looks like the one shown before, then do the following:
- ~~~bash
+1. Make apt update its references, then install the correct microcode package for your system:
+
+ ~~~sh
$ sudo apt update
$ sudo apt install -y intel-microcode
~~~
-5. After the package's installation is done, reboot your system.
+2. After the package's installation is done, reboot your system:
- ~~~bash
+ ~~~sh
$ sudo reboot
~~~
-> **BEWARE!**
-> The microcode package can affect the **performance** of your CPU. Also, the microcode applied may just mitigate rather than completely fix the vulnerabilities on your CPU.
+> [!WARNING]
+> **The microcode package can affect your CPU's performance**\
+> Furthermore, the microcode applied may just mitigate rather than completely fix the vulnerabilities on your CPU.
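+
+After the reboot, if you want to double-check which microcode revision your CPU is actually running, you can read it from `/proc/cpuinfo` (the revision number below is just a placeholder; yours will differ):
+
+~~~sh
+$ grep -m1 microcode /proc/cpuinfo
+microcode       : 0xa0b
+~~~
+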
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/apt/sources.list.d`
- `/proc`
-### _Files_
+### Files
-- `/etc/apt/sources.list.d/debian-nonfree.list`
+- `/etc/apt/sources.list.d/debian.sources`
- `/proc/cpuinfo`
## References
+### About CPU vulnerabilities
+
- [Meltdown and Spectre](https://meltdownattack.com/)
-- [Microcode on Debian Wiki](https://wiki.debian.org/Microcode)
-- [Debian Wiki. SourcesList](https://wiki.debian.org/SourcesList)
+- [Debian Wiki. Microcode](https://wiki.debian.org/Microcode)
## Navigation
diff --git a/G014 - Host hardening 08 ~ Firewalling.md b/G014 - Host hardening 08 ~ Firewalling.md
index 99e681d..b6bdc34 100644
--- a/G014 - Host hardening 08 ~ Firewalling.md
+++ b/G014 - Host hardening 08 ~ Firewalling.md
@@ -1,55 +1,101 @@
# G014 - Host hardening 08 ~ Firewalling
-One of the strongest hardening measures you can apply is a proper firewall, and Proxmox VE comes with one. Needless to say, you must enable and configure this firewall to secure your system.
-
-## Proxmox VE firewall uses `iptables`
+- [Enabling your PVE's firewall is a must](#enabling-your-pves-firewall-is-a-must)
+- [Proxmox VE firewall uses iptables](#proxmox-ve-firewall-uses-iptables)
+ - [iptables is a legacy package](#iptables-is-a-legacy-package)
+- [Zones in the Proxmox VE firewall](#zones-in-the-proxmox-ve-firewall)
+- [Situation at this point](#situation-at-this-point)
+- [Enabling the firewall at the Datacenter tier](#enabling-the-firewall-at-the-datacenter-tier)
+ - [Netfilter conntrack sysctl parameters](#netfilter-conntrack-sysctl-parameters)
+- [Firewalling with ebtables](#firewalling-with-ebtables)
+ - [Setting up ebtables](#setting-up-ebtables)
+ - [Example of ebtables usage](#example-of-ebtables-usage)
+- [Firewall fine tuning](#firewall-fine-tuning)
+ - [Enabling TCP SYN flood protection](#enabling-tcp-syn-flood-protection)
+- [Firewall logging](#firewall-logging)
+ - [Understanding the firewall log](#understanding-the-firewall-log)
+- [Connection tracking tool](#connection-tracking-tool)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [Proxmox VE](#proxmox-ve)
+ - [Ethernet Bridge firewall ebtables](#ethernet-bridge-firewall-ebtables)
+ - [Network auditing on Linux](#network-auditing-on-linux)
+ - [Network security concepts](#network-security-concepts)
+ - [Networking concepts](#networking-concepts)
+ - [conntrack command](#conntrack-command)
+- [Navigation](#navigation)
+
+## Enabling your PVE's firewall is a must
+
+One of the strongest hardening measures you can apply in any computing system is setting up a proper firewall, and Proxmox VE comes with one integrated. Needless to say, you must enable and configure this firewall to secure your virtualization server.
+
+## Proxmox VE firewall uses iptables
+
+The Proxmox VE firewall is based on iptables, meaning that any rules defined within the PVE firewall will be enforced by the underlying iptables firewall present in your standalone node. In a cluster, those rules are spread by the PVE firewall daemon across all the nodes.
+
+### iptables is a legacy package
+
+iptables is a legacy package that has been replaced by [nftables](https://wiki.nftables.org/wiki-nftables/index.php/Main_Page) (where `nf` stands for _Netfilter_), but Proxmox VE 9.0 still uses iptables. You can verify this by printing the iptables version:
+
+~~~sh
+$ sudo iptables -V
+iptables v1.8.11 (legacy)
+~~~
-The Proxmox VE firewall is based on `iptables`, meaning that any rules defined within the PVE firewall will be enforced by the underlying `iptables` firewall present in your standalone node. In a cluster, those rules are spread by the PVE firewall daemon among all the nodes.
+> [!NOTE]
+> **You can enable in your Proxmox VE system a nftables-based version of its firewall**\
+> At the time of writing this, [the new nftables-based `proxmox-firewall` service is still in the _tech-preview_ stage](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pve_firewall_nft). Therefore, this guide sticks to the legacy iptables-based firewall, which is the one that comes enabled by default in a Proxmox VE 9.0 server.
+>
+> If you want to try using the nftables firewall, know that the configuration explained here is compatible with the new firewall since it uses the same files and configuration format.
-### _`iptables` is a **legacy** package_
+> [!IMPORTANT]
+> **Fail2Ban uses nftables!**\
+> Remember that the Fail2Ban service you enabled [back in chapter **G010** already uses nftables to ban IPs](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md#fail2ban-uses-nftables-to-enforce-the-bans). Both iptables and nftables can coexist, but you must remember which one is used to ban suspicious IPs (nftables) and which one is used by Proxmox VE as its main firewall (iptables).
-The `iptables` is a legacy package that has been replaced by `nftables` but, probably due to incompatibilities, Proxmox VE 7.0-x is still using this legacy `iptables` package. You can verify this by printing the iptables version.
+> [!NOTE]
+> **Be careful not to confuse nftables commands with iptables ones**\
+> There are several iptables commands available in the system, but some of them are meant to be used with the nftables firewall. So, when you specifically execute iptables commands, avoid using the ones that have the `-nft` string in their names (unless you have switched to the new nftables firewall).
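+
+To get an idea of which of those command variants exist on your host, you can just list them (the exact set of binaries may vary between Proxmox VE versions):
+
+~~~sh
+$ ls /usr/sbin | grep -E 'iptables|ebtables'
+~~~
+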
-~~~bash
-$ sudo iptables -V
-iptables v1.8.7 (legacy)
-~~~
+## Zones in the Proxmox VE firewall
-> **BEWARE!**
-> There are several `iptables` commands available in the system, but some of them are meant to be used with the nftables firewall. So, when you execute `iptables` commands, avoid using the ones that have the `nft` string in their names.
+The [Proxmox VE documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_directions_amp_zones) indicates that the firewall groups the network in three logical zones:
-## Zones in the Proxmox VE firewall
+- **Host**\
+ Traffic from/to a cluster node (a host).
-The Proxmox VE documentation indicates that the firewall groups the network in two logical zones:
+- **VM**\
+ Traffic from/to a specific VM or container.
-- **Host**: Traffic from/to a cluster node.
-- **VM**: Traffic from/to a specific VM.
+- **VNet**\
+ Traffic passing through a SDN VNet, either from guest to guest or from host to guest and vice-versa.
-But the same documentation also mentions three distinct tiers, or levels, that are contemplated by the PVE firewall: datacenter or cluster, nodes and guests or virtual machines.
+But the same documentation also mentions three distinct tiers, or levels, that are contemplated by the PVE firewall: datacenter (the cluster), hosts (the cluster nodes) and guests (the virtual machines or containers).
-It's important of being always aware that these tiers or zones are kind of independent from each other. Essentially, the firewall has to be enabled on each level individually, and the rules applied at the datacenter level won't be applied automatically in cascade to the lower tiers, you'll have to enable them specifically for each level (and for each virtual NIC on each guest in the case of VMs and containers).
+Always be aware that these tiers or zones are largely independent from each other. Essentially, the firewall has to be enabled on each level individually, and the rules applied at the datacenter level won't cascade automatically down to the lower tiers. You will have to enable each firewall option specifically for each level, and for each virtual NIC on each guest in the case of VMs and containers.
-Also, be aware that the PVE firewall offers different options for each tier.
+Also, know that the PVE firewall offers different options for each tier.
## Situation at this point
-By enabling `fail2ban` in the previous [**G010** guide](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md), you've also enabled the `iptables` firewall at the **node level**. You can verify this in the web console at the `pve` node level, by going to the `Firewall > Options` of your standalone node.
+You will find the Proxmox VE firewall feature already enabled by default at the node level of your setup. You can verify this in the web console at the `pve` node level, by going to the `Firewall > Options` of your standalone node:
-
+
-But, on the other hand, the PVE firewall it's **NOT** running at the `Datacenter` tier.
+However, the PVE firewall **is NOT running at the `Datacenter` tier**:
-
+
-The Proxmox VE documentation indicates that the firewall comes completely disabled, at all levels we should understand, after the installation. That's why you find the `Firewall` disabled at the `Datacenter` level.
+The Proxmox VE documentation indicates that the firewall comes disabled "cluster-wide" after the installation. That's why you find the `Firewall` disabled at the `Datacenter` level.
-Also, from the PVE web console's point of view, the firewall doesn't seem to have rules whatsoever, something you can check when you go directly into the `Firewall` screen of either the `Datacenter` or the node tier.
+Furthermore, from the PVE web console's point of view, the firewall does not seem to have any rules whatsoever. You can see this in the `Firewall` page of either the `Datacenter` or the node tier.
-
+
-And this is mostly true. If you open a shell terminal in your node as your `mgrsys` user, you can check the rules actually enabled in the `iptables` firewall.
+The _No firewall rule configured here._ line highlighted in the snapshot above is mostly correct. If you open a shell terminal in your node as your `mgrsys` user, you can check the rules actually enabled in the iptables firewall of your Proxmox VE server:
-~~~bash
+~~~sh
$ sudo iptables -L -n
Chain INPUT (policy ACCEPT)
target prot opt source destination
@@ -61,59 +107,66 @@ Chain OUTPUT (policy ACCEPT)
target prot opt source destination
~~~
-There are no rules per se, just empty chains of rules, but you can see how each main `Chain` has a policy already established: all chains are **accepting everything**, with the default `policy ACCEPT`. This means that your firewall is completely open, something we don't want at all.
+There are no rules per se, just empty _chains of rules_. Still, you can see how each `Chain` has a policy already established: all chains are **accepting everything** with the `ACCEPT` policy. Your firewall is completely open, something you do not want at all.
-Now, let's revise which are the ports currently open in your system.
+Next, let's review which ports are currently open in your PVE system:
-~~~bash
+~~~sh
$ sudo ss -atlnup
-Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
-udp UNCONN 0 0 127.0.0.1:323 0.0.0.0:* users:(("chronyd",pid=691,fd=5))
-tcp LISTEN 0 16 127.0.0.1:3493 0.0.0.0:* users:(("upsd",pid=775,fd=4))
-tcp LISTEN 0 4096 127.0.0.1:85 0.0.0.0:* users:(("pvedaemon worke",pid=925,fd=6),("pvedaemon worke",pid=924,fd=6),("pvedaemon worke",pid=923,fd=6),("pvedaemon",pid=922,fd=6))
-tcp LISTEN 0 128 192.168.1.107:22 0.0.0.0:* users:(("sshd",pid=677,fd=3))
-tcp LISTEN 0 100 127.0.0.1:25 0.0.0.0:* users:(("master",pid=883,fd=13))
-tcp LISTEN 0 4096 *:8006 *:* users:(("pveproxy worker",pid=935,fd=6),("pveproxy worker",pid=934,fd=6),("pveproxy worker",pid=933,fd=6),("pveproxy",pid=932,fd=6))
+Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
+udp UNCONN 0 0 127.0.0.1:323 0.0.0.0:* users:(("chronyd",pid=819,fd=5))
+tcp LISTEN 0 100 127.0.0.1:25 0.0.0.0:* users:(("master",pid=1101,fd=13))
+tcp LISTEN 0 4096 127.0.0.1:85 0.0.0.0:* users:(("pvedaemon worke",pid=1151,fd=6),("pvedaemon worke",pid=1150,fd=6),("pvedaemon worke",pid=1148,fd=6),("pvedaemon",pid=1147,fd=6))
+tcp LISTEN 0 4096 10.1.0.1:8006 0.0.0.0:* users:(("pveproxy worker",pid=1796,fd=6),("pveproxy worker",pid=1795,fd=6),("pveproxy worker",pid=1794,fd=6),("pveproxy",pid=1161,fd=6))
+tcp LISTEN 0 128 10.1.0.1:22 0.0.0.0:* users:(("sshd",pid=942,fd=6))
+tcp LISTEN 0 16 127.0.0.1:3493 0.0.0.0:* users:(("upsd",pid=1109,fd=4))
~~~
-If you've followed the guides, you should get a list of listening sockets like the output above. Read what they are below.
+If you've followed this guide closely up to this chapter, you should get a list of listening sockets like the output above (although not necessarily in the same order):
-- Service `chronyd` listening on localhost (`127.0.0.1`) interface at port `323`: daemon for synchronizing the system's clock with an external time server (already seen back in the [**G012** guide](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md)).
+- **Service `chronyd` listening on localhost (`127.0.0.1`) interface at port `323`**\
+ Daemon for synchronizing the system's clock with an external time server (already seen back in the [**G012** chapter](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#disabling-chronys-ipv6-socket)).
-- Service `upsd` listening on localhost (`127.0.0.1`) interface at port `3493`: NUT service for monitoring your UPS.
+- **Service `master` listening on localhost (`127.0.0.1`) interface at port `25`**\
+ Postmaster mail service providing mailing within the system itself.
-- Service `pvedaemon` listening on localhost (`127.0.0.1`) interface at port `85`: exposes the whole Proxmox VE API.
+- **Service `pvedaemon` listening on localhost (`127.0.0.1`) interface at port `85`**\
+ Exposes the whole Proxmox VE API.
-- Service `sshd` listening on the host real network Ethernet interface (`192.168.1.107`) at port `22`: ssh daemon service.
+- **Service `pveproxy` listening on the host's network Ethernet interface (`10.1.0.1`) at port `8006`**\
+ Proxmox VE proxy that gives access to the web console and also exposes the Proxmox VE API.
-- Service `master` listening on localhost (`127.0.0.1`) interface at port `25`: postmaster mail service providing mailing within the system itself.
+- **Service `sshd` listening on the host's network Ethernet interface (`10.1.0.1`) at port `22`**\
+ SSH daemon service.
-- Service `pveproxy` listening on all interfaces (`0.0.0.0`) at port `8006`: Proxmox VE proxy that gives access to the web console and also exposes the Proxmox VE API.
+- **Service `upsd` listening on localhost (`127.0.0.1`) interface at port `3493`**\
+ NUT service for monitoring your UPS.
-## Enabling the firewall at the `Datacenter` tier
+## Enabling the firewall at the Datacenter tier
Just by enabling the PVE firewall at the `Datacenter` tier you'll get a much stronger set of rules enforced in your firewall. But, before you do this...
-> **BEWARE!**
-> **After enabling the firewall at the `Datacenter` tier, the platform will block incoming traffic from all hosts towards your datacenter, except the traffic coming from your LAN towards the 8006 (web console) and 22 (ssh) ports**.
+> [!WARNING]
+> **Read this warning before enabling the firewall at the `Datacenter` tier**\
+> After enabling the firewall at the `Datacenter` tier, your Proxmox VE platform will block incoming traffic from all hosts towards your datacenter, except the traffic coming from your LAN towards the 8006 (web console) and 22 (ssh) ports.
+>
+> If you plan to access your PVE platform **from IPs outside your LAN**, you'll need to **first add** the rules in the PVE firewall allowing such access. But I won't cover this here, since I'm just assuming a "pure" LAN scenario (meaning a Proxmox VE server **not accessible** from the internet) in this guide.
-This means that, if you plan to access your PVE platform from IPs outside your LAN, you'll need to add **first** the rules in the PVE firewall to allow such access. But I won't cover this here, since I'm just assuming a "pure" LAN, or a Proxmox VE server **not accessible** from internet, scenario in this guide series.
+Assuming you are accessing your PVE system from another computer in the same LAN, go to the `Datacenter > Firewall > Options` screen in the web console and select the `Firewall` parameter.
-Assuming you're accessing your PVE system from another computer in the same LAN, go to the `Datacenter > Firewall > Options` screen in the web console and select the `Firewall` parameter.
-
-
+
Click on `Edit` and mark the `Firewall` checkbox presented to enable the firewall at the `Datacenter` tier.
-
+
-Hit `OK` and you'll see the `Firewall` parameter has a `Yes` value now.
+Hit `OK` and you'll see how the `Firewall` parameter has changed to a `Yes` value.
-
+
-The change will be applied immediately, but you won't see any rules at all in the `Firewall` screens, neither at the `Datacenter` nor at the `pve` node level. But the `iptables` rule set will change a lot, something you can verify with the `iptables` command.
+The change will be applied immediately, but you won't see any rules at all in your Proxmox VE server's `Firewall` pages, neither at the `Datacenter` nor at the `pve` node level. What will change a lot is the iptables ruleset, something you can verify with the `iptables` command:
-~~~bash
+~~~sh
$ sudo iptables -L -n
Chain INPUT (policy ACCEPT)
target prot opt source destination
@@ -157,65 +210,184 @@ ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 ctstate RELATED,ES
PVEFW-FWBR-IN all -- 0.0.0.0/0 0.0.0.0/0 PHYSDEV match --physdev-in fwln+ --physdev-is-bridged
PVEFW-FWBR-OUT all -- 0.0.0.0/0 0.0.0.0/0 PHYSDEV match --physdev-out fwln+ --physdev-is-bridged
all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:qnNexOcGa+y+jebd4dAUqFSp5nw */
-...
+
+Chain PVEFW-FWBR-IN (1 references)
+target prot opt source destination
+PVEFW-smurfs all -- 0.0.0.0/0 0.0.0.0/0 ctstate INVALID,NEW
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:Ijl7/xz0DD7LF91MlLCz0ybZBE0 */
+
+Chain PVEFW-FWBR-OUT (1 references)
+target prot opt source destination
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:2jmj7l5rSw0yVb/vlWAYkK/YBwk */
+
+Chain PVEFW-HOST-IN (1 references)
+target prot opt source destination
+ACCEPT all -- 0.0.0.0/0 0.0.0.0/0
+DROP all -- 0.0.0.0/0 0.0.0.0/0 ctstate INVALID
+ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 ctstate RELATED,ESTABLISHED
+PVEFW-smurfs all -- 0.0.0.0/0 0.0.0.0/0 ctstate INVALID,NEW
+RETURN 2 -- 0.0.0.0/0 0.0.0.0/0
+RETURN tcp -- 0.0.0.0/0 0.0.0.0/0 match-set PVEFW-0-management-v4 src tcp dpt:8006
+RETURN tcp -- 0.0.0.0/0 0.0.0.0/0 match-set PVEFW-0-management-v4 src tcp dpts:5900:5999
+RETURN tcp -- 0.0.0.0/0 0.0.0.0/0 match-set PVEFW-0-management-v4 src tcp dpt:3128
+RETURN tcp -- 0.0.0.0/0 0.0.0.0/0 match-set PVEFW-0-management-v4 src tcp dpt:22
+RETURN tcp -- 0.0.0.0/0 0.0.0.0/0 match-set PVEFW-0-management-v4 src tcp dpts:60000:60050
+PVEFW-Drop all -- 0.0.0.0/0 0.0.0.0/0
+DROP all -- 0.0.0.0/0 0.0.0.0/0
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:cucENGwfi+iPw5e/BS7lH7zNnU8 */
+
+Chain PVEFW-HOST-OUT (1 references)
+target prot opt source destination
+ACCEPT all -- 0.0.0.0/0 0.0.0.0/0
+DROP all -- 0.0.0.0/0 0.0.0.0/0 ctstate INVALID
+ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 ctstate RELATED,ESTABLISHED
+RETURN 2 -- 0.0.0.0/0 0.0.0.0/0
+RETURN tcp -- 0.0.0.0/0 10.0.0.0/8 tcp dpt:8006
+RETURN tcp -- 0.0.0.0/0 10.0.0.0/8 tcp dpt:22
+RETURN tcp -- 0.0.0.0/0 10.0.0.0/8 tcp dpts:5900:5999
+RETURN tcp -- 0.0.0.0/0 10.0.0.0/8 tcp dpt:3128
+RETURN all -- 0.0.0.0/0 0.0.0.0/0
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:ayfLu3G+p/8c4SeNBnCZRkwon94 */
+
+Chain PVEFW-INPUT (1 references)
+target prot opt source destination
+PVEFW-HOST-IN all -- 0.0.0.0/0 0.0.0.0/0
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:+5iMmLaxKXynOB/+5xibfx7WhFk */
+
+Chain PVEFW-OUTPUT (1 references)
+target prot opt source destination
+PVEFW-HOST-OUT all -- 0.0.0.0/0 0.0.0.0/0
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:LjHoZeSSiWAG3+2ZAyL/xuEehd0 */
+
+Chain PVEFW-Reject (0 references)
+target prot opt source destination
+PVEFW-DropBroadcast all -- 0.0.0.0/0 0.0.0.0/0
+ACCEPT icmp -- 0.0.0.0/0 0.0.0.0/0 icmptype 3 code 4
+ACCEPT icmp -- 0.0.0.0/0 0.0.0.0/0 icmptype 11
+DROP all -- 0.0.0.0/0 0.0.0.0/0 ctstate INVALID
+PVEFW-reject udp -- 0.0.0.0/0 0.0.0.0/0 multiport dports 135,445
+PVEFW-reject udp -- 0.0.0.0/0 0.0.0.0/0 udp dpts:137:139
+PVEFW-reject udp -- 0.0.0.0/0 0.0.0.0/0 udp spt:137 dpts:1024:65535
+PVEFW-reject tcp -- 0.0.0.0/0 0.0.0.0/0 multiport dports 135,139,445
+DROP udp -- 0.0.0.0/0 0.0.0.0/0 udp dpt:1900
+DROP tcp -- 0.0.0.0/0 0.0.0.0/0 tcp flags:!0x17/0x02
+DROP udp -- 0.0.0.0/0 0.0.0.0/0 udp spt:53
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:h3DyALVslgH5hutETfixGP08w7c */
+
+Chain PVEFW-SET-ACCEPT-MARK (0 references)
+target prot opt source destination
+MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x80000000
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:Hg/OIgIwJChBUcWU8Xnjhdd2jUY */
+
+Chain PVEFW-logflags (5 references)
+target prot opt source destination
+DROP all -- 0.0.0.0/0 0.0.0.0/0
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:MN4PH1oPZeABMuWr64RrygPfW7A */
+
+Chain PVEFW-reject (4 references)
+target prot opt source destination
+DROP all -- 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type BROADCAST
+DROP all -- 224.0.0.0/4 0.0.0.0/0
+DROP icmp -- 0.0.0.0/0 0.0.0.0/0
+REJECT tcp -- 0.0.0.0/0 0.0.0.0/0 reject-with tcp-reset
+REJECT udp -- 0.0.0.0/0 0.0.0.0/0 reject-with icmp-port-unreachable
+REJECT icmp -- 0.0.0.0/0 0.0.0.0/0 reject-with icmp-host-unreachable
+REJECT all -- 0.0.0.0/0 0.0.0.0/0 reject-with icmp-host-prohibited
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:Jlkrtle1mDdtxDeI9QaDSL++Npc */
+
+Chain PVEFW-smurflog (2 references)
+target prot opt source destination
+DROP all -- 0.0.0.0/0 0.0.0.0/0
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:2gfT1VMkfr0JL6OccRXTGXo+1qk */
+
+Chain PVEFW-smurfs (2 references)
+target prot opt source destination
+RETURN all -- 0.0.0.0 0.0.0.0/0
+PVEFW-smurflog all -- 0.0.0.0/0 0.0.0.0/0 [goto] ADDRTYPE match src-type BROADCAST
+PVEFW-smurflog all -- 224.0.0.0/4 0.0.0.0/0 [goto]
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:HssVe5QCBXd5mc9kC88749+7fag */
+
+Chain PVEFW-tcpflags (0 references)
+target prot opt source destination
+PVEFW-logflags tcp -- 0.0.0.0/0 0.0.0.0/0 [goto] tcp flags:0x3F/0x29
+PVEFW-logflags tcp -- 0.0.0.0/0 0.0.0.0/0 [goto] tcp flags:0x3F/0x00
+PVEFW-logflags tcp -- 0.0.0.0/0 0.0.0.0/0 [goto] tcp flags:0x06/0x06
+PVEFW-logflags tcp -- 0.0.0.0/0 0.0.0.0/0 [goto] tcp flags:0x03/0x03
+PVEFW-logflags tcp -- 0.0.0.0/0 0.0.0.0/0 [goto] tcp spt:0 flags:0x17/0x02
+ all -- 0.0.0.0/0 0.0.0.0/0 /* PVESIG:CMFojwNPqllyqD67NeI5m+bP5mo */
~~~
-I've omitted most of the output since it's quite long, but still you can see that quite a lot of rules have been added just by enabling the firewall at the `Datacenter` tier.
+As you can see in the long output above, quite a lot of rules have been added just by enabling the firewall at the `Datacenter` tier. And, if you look closely, you will notice that most of these rules are applied to any source or destination IP. Only a few, defined in the `PVEFW-HOST-OUT` chain, are particular to your local network.
+
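+If you want to examine just one of those chains instead of dumping the whole ruleset, you can pass its name to the same `iptables` command, for instance:
+
+~~~sh
+$ sudo iptables -L PVEFW-HOST-IN -n
+~~~
+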
+The Proxmox VE firewall's `.fw` configuration files can be found at the following locations:
-The Proxmox VE firewall's configuration files can be found at the following locations.
+- `/etc/pve/firewall/cluster.fw`\
+  For the datacenter, or cluster-wide, tier.
-- `/etc/pve/firewall/cluster.fw` : for datacenter or cluster wide tier.
-- `/etc/pve/firewall/.fw` : for each virtual machine or container, identified by their ``, present in the system.
-- `/etc/pve/nodes//host.fw` : for each node (identified by their ``) present in the datacenter.
+- `/etc/pve/firewall/<VMID>.fw`\
+  For each virtual machine or container, identified by their `<VMID>`, present in the system.
-### _Netfilter conntrack `sysctl` parameters_
+- `/etc/pve/nodes/<nodename>/host.fw`\
+  For each node, identified by their `<nodename>`, present in the datacenter.
-I hinted you earlier in the [G012 guide](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md), how by enabling the firewall, new `sysctl` parameters would become available in your Proxmox VE host. Those new parameters are all the files put in the `/proc/sys/net/netfilter` folder, except the `nf_log` ones which were already present initially. All of them are related to the netfilter conntrack system used to track network connections on the system. Also, a new `/proc/sys/net/nf_conntrack_max` file is created, which is a duplicate of the `/proc/sys/net/netfilter/nf_conntrack_max` file, maybe as a management convenience for Proxmox VE.
+- `/etc/pve/sdn/firewall/<vnet>.fw`\
+  For each VNet configured in the system, identified by their `<vnet>`.
-But not only new parameters are enabled, some already existing ones are changed. In particular the `net.bridge.bridge-nf-call-ip6tables` and `net.bridge.bridge-nf-call-iptables` are set to `1`, enabling filtering on the bridges existing on this host. In fact, at this point your system already has one Linux bridge enabled, which you can find in the `pve` node level at the `System > Network` view.
+These files only appear in your system when you change the configuration they belong to. For instance, at this point, you will only find the `/etc/pve/firewall/cluster.fw` file under its expected path.
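+
+If you are curious about its contents, a freshly enabled datacenter firewall typically produces a minimal `cluster.fw` holding little more than the option you just toggled in the web console, something along these lines (your file may contain additional sections if you have changed other options):
+
+~~~sh
+$ sudo cat /etc/pve/firewall/cluster.fw
+[OPTIONS]
+enable: 1
+~~~
+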
-
+### Netfilter conntrack sysctl parameters
-In the capture above, you can see how this Linux bridge uses the real Ethernet network card `enp2s0` as port to have access to the network. This bridge is necessary for later being able to provide network connectivity to the VMs you'll create in later guides. But keep on reading this guide to see how to apply firewalling at the bridge level with **ebtables**.
+I hinted back in the [**G012** chapter](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md#some-sysctl-values-are-managed-by-the-proxmox-ve-firewall) that, by enabling the firewall, new `sysctl` parameters would become available in your Proxmox VE host. Those new parameters are all the files placed in the `/proc/sys/net/netfilter` folder, except the `nf_log` ones which were already present initially. All of them are related to the netfilter conntrack system used to track network connections on the system. A new `/proc/sys/net/nf_conntrack_max` file is also created as a duplicate of the `/proc/sys/net/netfilter/nf_conntrack_max` file, maybe as a management convenience for Proxmox VE.
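+
+Once the firewall is enabled, you can query any of those new conntrack parameters like any other `sysctl` value. For example, for the maximum number of tracked connections (the figure below is a common default; yours depends on what is configured in the web console):
+
+~~~sh
+$ sudo sysctl net.netfilter.nf_conntrack_max
+net.netfilter.nf_conntrack_max = 262144
+~~~
+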
-## Firewalling with `ebtables`
+Not only are new parameters enabled, some already existing ones get changed. In particular, `net.bridge.bridge-nf-call-ip6tables` and `net.bridge.bridge-nf-call-iptables` are set to `1`, enabling filtering on the bridges existing on the host. In fact, at this point your system already has one Linux bridge enabled, which you can find at the `pve` node level in the `System > Network` view:
-When you enabled the firewall at the datacenter level, you might have noticed an option in that page called `ebtables` which was already enabled.
+
-
+In the capture above, you can see how this Linux bridge uses the host's Ethernet network card `enp3s0` as its port to access the network. This bridge is necessary to provide network connectivity to the VMs you'll create in later chapters (you can also verify its presence from the shell, as shown below). Keep on reading this chapter to see how to apply firewalling at the bridge level with **ebtables**.
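+
+For that shell verification, the `ip` command can list the Linux bridges present on your host (the bridge names and their details will depend on your own setup):
+
+~~~sh
+$ ip -br link show type bridge
+~~~
+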
-This option refers to the `ebtables` program that works as a firewall for bridges, like the one you have in your PVE virtual network. This firewall is mainly for filtering packets at the network's link layer, in which MACs (not IPs) and VLAN tags are what matter to route packets. On the other hand, `ebtables` is also capable of some packet filtering on upper network layers. The problem is that Proxmox VE doesn't have a page where you can manage `ebtables` rules, forcing you to handle them with the corresponding `ebtables` command.
+## Firewalling with ebtables
-> **BEWARE!**
-> As it happens with `iptables`, `ebtables` is also a legacy package that has an alternative version meant for nftables. So, only use the `ebtables` commands that don't include the `nft` string in their names.
+When you enabled the firewall at the datacenter level, you may have noticed that the `ebtables` option was already enabled:
-### _Setting up `ebtables`_
+
-The `ebtables` command handles rules but is unable to make them persist, meaning that any rules you might add to the ebtables won't survive a reboot. So you need a way to make these rules persistent in your system. To achieve this, do the following steps.
+This option refers to the ebtables program that works as a firewall for bridges, like the one you have in your PVE virtual network. This firewall is mainly for filtering packets at the network's link layer, in which MACs (not IPs) and VLAN tags are what matter to route packets. On the other hand, ebtables is also capable of some packet filtering on upper network layers. The problem is that Proxmox VE does not have a page where you can manage ebtables rules, forcing you to handle them with the corresponding `ebtables` shell command.
-1. Open a shell as `mgrsys` on your Proxmox VE host, then install the package `netfilter-persistent`.
+> [!WARNING]
+> **The ebtables firewall is a legacy program**\
+> As it happens with iptables, ebtables is also a legacy package that has an alternative version meant for nftables. Therefore, in a Proxmox VE setup based on this guide, only use the ebtables commands that do not include the `-nft` string in their names.
- ~~~bash
+### Setting up ebtables
+
+The `ebtables` command handles rules but is unable to make them persist, meaning that any rules you add with ebtables won't survive a reboot. Still, there is a way to persist these rules in your system:
+
+> [!WARNING]
+> **This configuration may not work with the nftables version of ebtables**\
+> Keep this in mind if you want to try the nftables-based firewall of Proxmox VE.
+
+1. Open a shell as `mgrsys` on your Proxmox VE host, then install the package `netfilter-persistent`:
+
+ ~~~sh
$ sudo apt install netfilter-persistent
~~~
- This package provides a "loader for netfilter configuration" and, with the proper extension (a shell script), it can handle `ebtables` configuration too.
+ This package provides a "_loader for netfilter configuration_" and, with the proper extension (a shell script), it can handle ebtables configuration too.
-2. Create a new empty file at `/usr/share/netfilter-persistent/plugins.d/35-ebtables`.
+2. Create a new empty file at `/usr/share/netfilter-persistent/plugins.d/35-ebtables`:
- ~~~bash
+ ~~~sh
$ sudo touch /usr/share/netfilter-persistent/plugins.d/35-ebtables
~~~
Then make it executable.
- ~~~bash
+ ~~~sh
$ sudo chmod 544 /usr/share/netfilter-persistent/plugins.d/35-ebtables
~~~
-3. Copy in the `/usr/share/netfilter-persistent/plugins.d/35-ebtables` file the whole shell script below.
+3. Copy in the `/usr/share/netfilter-persistent/plugins.d/35-ebtables` file the whole shell script below:
- ~~~bash
+ ~~~sh
#!/bin/sh
# This file is part of netfilter-persistent
@@ -305,72 +477,77 @@ The `ebtables` command handles rules but is unable to make them persist, meaning
exit $rc
~~~
- This script (found attached to [this old Debian Bug thread](https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=697088)) is the extension that allows `netfilter-persistent` to truly persist the `ebtables` rules in your system.
+ This script (found attached to [this old Debian Bug thread](https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=697088)) is the extension that allows `netfilter-persistent` to truly persist the ebtables rules in your system.
-4. Create the empty folder `/etc/ebtables`.
+4. Create the empty folder `/etc/ebtables`:
- ~~~bash
+ ~~~sh
$ sudo mkdir /etc/ebtables
~~~
- This is the folder where the previous `35-ebtables` shell script will persist the `ebtables` rules, in several `rules` files.
+ This is the folder where the previous `35-ebtables` shell script will persist the ebtables rules, in several `rules` files.
-5. Make the script save the current `ebtables` rules to check if it works.
+5. Make the script save the current ebtables rules to check if it works:
- ~~~bash
+ ~~~sh
$ sudo /usr/share/netfilter-persistent/plugins.d/35-ebtables save
~~~
Then list the contents of `/etc/ebtables` and see if the `rules` files are there.
- ~~~bash
+ ~~~sh
$ ls /etc/ebtables/
rules.broute rules.filter rules.nat
~~~
- You should see three different rule files, as in the output above. The files are in binary format, so don't try to open them with an editor. Each file corresponds to one of the tables `ebtables` uses to separate its functionality into different sets of rules. The `filter` table is the default one on which the `ebtables` command works.
+ You should see three different rule files, as in the output above.
+
+ > [!NOTE]
+ > **Do not open the ebtables rule files with a text editor**\
+ > The files are in binary format. Do not manipulate them in any way.
+
+ Each file corresponds to one of the tables ebtables uses to separate its functionality into different sets of rules. The `filter` table is the default one on which the ebtables command works.
+
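+Once everything is in place, you can double-check that `netfilter-persistent` sees your new plugin by listing its plugins directory (the exact set of plugins you'll find there depends on which related packages are installed on your host):
+
+~~~sh
+# The 35-ebtables script should appear here, next to any other plugins.
+$ ls /usr/share/netfilter-persistent/plugins.d/
+~~~
+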
+### Example of ebtables usage
-### _Example of `ebtables` usage_
+Next, I'll give you an example of when and how to use ebtables, based on my own experience.
-Next, I'll give you an example about when and how to use `ebtables` based on my own experience.
+> [!NOTE]
+> **The example shown here is based on the original hardware setup I used for the first version of this guide**\
+> Therefore, you will notice small differences with the virtualized hardware I'm using in this newer version of the guide. Even so, the commands used in this example remain valid.
-While I was working on this guide, I detected that incoming (`RX`) packets were being dropped only by the `vmbr0` bridge for some unknown reason. I noticed this in the output of the following `ip` command.
+While I was working on the first version of this guide, I detected that incoming (`RX`) packets were being dropped only by the `vmbr0` bridge for some unknown reason. I noticed this in the output of the following `ip` command:
-~~~bash
+~~~sh
$ ip -s link show
1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
- RX: bytes packets errors dropped missed mcast
- 62755 932 0 0 0 0
- TX: bytes packets errors dropped carrier collsns
- 62755 932 0 0 0 0
-2: enp2s0: mtu 1500 qdisc pfifo_fast master vmbr0 state UP mode DEFAULT group default qlen 1000
- link/ether a8:ae:ed:27:c1:7d brd ff:ff:ff:ff:ff:ff
- RX: bytes packets errors dropped missed mcast
- 327191 3186 0 0 0 2279
- TX: bytes packets errors dropped carrier collsns
- 65623 639 0 0 0 0
-3: wlp3s0: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
- link/ether 64:39:ac:32:cb:23 brd ff:ff:ff:ff:ff:ff
- RX: bytes packets errors dropped missed mcast
- 0 0 0 0 0 0
- TX: bytes packets errors dropped carrier collsns
- 0 0 0 0 0 0
-4: vmbr0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
- link/ether a8:ae:ed:27:c1:7d brd ff:ff:ff:ff:ff:ff
- RX: bytes packets errors dropped missed mcast
- 280443 3186 0 1494 0 2279
- TX: bytes packets errors dropped carrier collsns
- 65677 640 0 0 0 0
+ RX: bytes packets errors dropped missed mcast
+ 213838 2817 0 0 0 0
+ TX: bytes packets errors dropped carrier collsns
+ 213838 2817 0 0 0 0
+2: enp3s0: mtu 1500 qdisc fq_codel master vmbr0 state UP mode DEFAULT group default qlen 1000
+ link/ether 87:ea:ab:02:15:a5 brd ff:ff:ff:ff:ff:ff
+ RX: bytes packets errors dropped missed mcast
+ 4365227 13252 0 448 0 4972
+ TX: bytes packets errors dropped carrier collsns
+ 3220375 5281 0 1 0 0
+ altname enx98eecb0305a3
+3: vmbr0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
+ link/ether 87:ea:ab:02:15:a5 brd ff:ff:ff:ff:ff:ff
+ RX: bytes packets errors dropped missed mcast
+ 4051731 12474 0 0 0 4524
+ TX: bytes packets errors dropped carrier collsns
+ 3220459 5283 0 0 0 0
~~~
-In the output above, you can see that only the `vmbr0` interface is reporting many `dropped` `RX` packets. As you can imagine, this was kind of bothersome, so I tracked them down. To do that there's a command called `tcpdump` that comes handy to see the packets themselves. The command captures the traffic going on in your system's network and prints a description of each packet it sees. I executed `tcpdump` as follows.
+In the output above, you can see that only the `vmbr0` interface is reporting many `dropped` `RX` packets. As you can imagine, this was kind of bothersome, so I tracked them down. To do that, there is a command called `tcpdump` that comes in handy to see the packets themselves. The command captures the traffic going on in your system's network and prints a description of each packet it sees. I executed `tcpdump` as follows:
-~~~bash
-$ tcpdump -vvv -leni vmbr0 > captured-packets.txt
+~~~sh
+$ sudo tcpdump -vvv -leni vmbr0 > captured-packets.txt
~~~
-Notice the following:
+Notice the following in the `tcpdump` command:
- `-vvv`: enables the command's maximum verbosity.
- `-leni`: shorthand for `-l` (line-buffered output), `-e` (print link-level headers), `-n` (don't resolve names) and `-i` (select the capture interface).
@@ -422,22 +599,25 @@ This command will run in the foreground for as long as you allow it till you sto
...
~~~
-From all the traffic I saw there, the thing that stood out were the packets with the `ethertype Unknown (0xfe68)` string on them. I investigated further, but the best guess I could find was that the routers or modems installed by internet service providers (ISPs) send packets like those for some reason every second or so. Knowing that much, those packets were just noise to me in my system's network, and I wanted to drop them silently before they ever reached my `vmbr0` bridge. To do this I just needed to add the proper rule in `ebtables`.
+From all the traffic I saw there, the thing that stood out were the packets with the `ethertype Unknown (0xfe68)` string on them. I investigated further, but the best guess I could find was that the routers or modems installed by internet service providers (ISPs) send packets like those for some reason every second or so. Knowing that much, those packets were just noise to me in my system's network, and I wanted to drop them silently before they ever reached my `vmbr0` bridge. To do this I just needed to add the proper rule in ebtables.
-~~~bash
+~~~sh
$ sudo ebtables -A INPUT -p fe68 -j DROP
~~~
-The command above means the following.
+The command above means the following:
- The rule table is omitted, meaning this rule will be added to the default `filter` table.
+
- `-A INPUT`: append rule to rule chain `INPUT`.
+
- `-p fe68`: packet protocol or ethertype `fe68`, which is the hexadecimal number returned by `tcpdump`.
+
- `-j DROP`: target of this rule, `DROP` in this case.
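+
+By the way, if you ever need to undo a rule like this one, deleting it with `-D` is symmetric to adding it with `-A` (shown here with the same example ethertype):
+
+~~~sh
+# Remove the previously added rule from the INPUT chain of the filter table.
+$ sudo ebtables -D INPUT -p fe68 -j DROP
+~~~
+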
-With the rule added, I checked how it looked in the `filter` table of `ebtables`.
+With the rule added, I checked how it looked in the `filter` table of ebtables:
-~~~bash
+~~~sh
$ sudo ebtables -L
Bridge table: filter
@@ -449,12 +629,6 @@ Bridge chain: FORWARD, entries: 1, policy: ACCEPT
Bridge chain: OUTPUT, entries: 0, policy: ACCEPT
-Bridge chain: BROUTING, entries: 0, policy: ACCEPT
-
-Bridge chain: POSTROUTING, entries: 0, policy: ACCEPT
-
-Bridge chain: PREROUTING, entries: 0, policy: ACCEPT
-
Bridge chain: PVEFW-FORWARD, entries: 3, policy: ACCEPT
-p IPv4 -j ACCEPT
-p IPv6 -j ACCEPT
@@ -463,24 +637,26 @@ Bridge chain: PVEFW-FORWARD, entries: 3, policy: ACCEPT
Bridge chain: PVEFW-FWBR-OUT, entries: 0, policy: ACCEPT
~~~
-Notice that:
+Notice in this output that:
- The `ebtables -L` command only shows the `filter` table. To specify which table to see, you have to use the `-t` option as in `ebtables -t filter -L`.
+
- My new rule appears in the bridge chain `INPUT`.
-- Proxmox VE also has its own rules in `ebtables`, see the `PVEFW-FORWARD` and `PVEFW-FWBR-OUT` rule chains.
-The final step was persisting the rules with the `netfilter-persistent` command.
+- Proxmox VE also has its own rules in ebtables, see the `PVEFW-FORWARD` and `PVEFW-FWBR-OUT` rule chains.
+
+The final step was persisting the rules with the `netfilter-persistent` command:
-~~~bash
+~~~sh
$ sudo netfilter-persistent save
run-parts: executing /usr/share/netfilter-persistent/plugins.d/35-ebtables save
~~~
-Notice, in the command's output, how it calls the `35-ebtables` shell script. Now, if you rebooted your Proxmox VE host, the `netfilter-persistent` command would restore the `ebtables` rules you've just persisted, also invoking the `35-ebtables` script for that. Look for this loading in the `/var/log/syslog` file, by searching the `netfilter-persistent` string.
+Notice, in the command's output, how it calls the `35-ebtables` shell script. Now, if you rebooted your Proxmox VE host, the `netfilter-persistent` service would restore the ebtables rules you've just persisted, also invoking the `35-ebtables` script for that. You can look for this loading in the Journal logging system (`journalctl` command) by searching for the `netfilter-persistent` string.
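+
+For instance, assuming the standard `netfilter-persistent.service` unit name, one way of verifying that the rules were reloaded in the current boot could be:
+
+~~~sh
+# Show what the netfilter-persistent service has done since the last boot.
+$ sudo journalctl -b -u netfilter-persistent
+~~~
+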
Thanks to this configuration, I don't have any more `dropped` packets of that unknown ethertype showing up on my `vmbr0` bridge.
-~~~bash
+~~~sh
$ ip -s link show vmbr0
4: vmbr0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
link/ether a8:ae:ed:27:c1:7d brd ff:ff:ff:ff:ff:ff
@@ -492,62 +668,91 @@ $ ip -s link show vmbr0
## Firewall fine tuning
-You now have the Proxmox VE firewall up and running with essentially default settings, but know that you can adjust it further. In particular, there are a number of options at the node level that you should have a look to. So go to the `Firewall > Options` view at your `pve` node level.
+Now you have your Proxmox VE firewall up and running with default settings, but you can adjust it further. In particular, there are a number of options at the node level you should have a look at. Browse to the `Firewall > Options` view at your `pve` node level:
+
+
+
+Since the official documentation is not very detailed about them, let me tell you a bit about these options:
+
+- `Firewall`\
+ Enabled by default. Enables the firewall at the `pve` node/host level.
+
+- `SMURFS filter`\
+ Enabled by default. Gives protection against SMURFS attacks, which are a form of distributed denial of service (DDoS) attacks "[that overloads network resources by broadcasting ICMP echo requests to devices across the network](https://www.fortinet.com/resources/cyberglossary/smurf-attack)".
+
+- `TCP flags filter`\
+ Disabled by default. Blocks illegal TCP flag combinations that could be found in packets.
+
+- `NDP`\
+ Enabled by default. NDP stands for _Neighbor Discovery Protocol_, and it's the IPv6 version of IPv4's ARP (_Address Resolution Protocol_). Since IPv6 is disabled in this guide series' setup, you could disable this option.
-
+- `nf_conntrack_max`\
+ `262144` by default. This value corresponds to both the `sysctl` parameters `net.netfilter.nf_conntrack_max` and `net.nf_conntrack_max`, meaning that by changing this value you'll change the two of them at the same time.
-Since the official documentation is not very detailed about them, let me tell you a bit about those options.
+ These two values limit how many simultaneous connections can be established with your host and be tracked in the corresponding connections table maintained by netfilter. When the maximum value is reached, your host will stop accepting new TCP connections silently. If you detect connectivity issues in your setup, one of the things you can try is making this value bigger.
-- `Firewall`: disabled by default. Enables the firewall at the `pve` node/host level.
+  This is one of the `sysctl` parameters enabled by the firewall once it became fully active, as I mentioned in [chapter **G012**](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md). You can compare the current number of tracked connections against this limit with the quick check shown right after this list.
-- `SMURFS filter`: enabled by default. Gives protection against SMURFS attacks, which are a form of distributed denial of service (DDoS) attacks "[that overloads network resources by broadcasting ICMP echo requests to devices across the network](https://www.fortinet.com/resources/cyberglossary/smurf-attack)".
+- `nf_conntrack_tcp_timeout_established`\
+  `432000` by default (in seconds). This value sets for how long established connections are considered to remain in that state before being discarded as too old. The default value is equivalent to 5 days, which may or may not be excessive for your needs or requirements. If you want to reduce this value, know that the Proxmox VE web console won't allow you to set a value lower than `7875` seconds, which is roughly equivalent to 2 hours and 11 minutes.
-- `TCP flags filter`: disabled by default. Blocks illegal TCP flag combinations that could be found in packets.
+- `log_level_in`\
+  `nolog` value by default. To enable the logging of the firewall activity for traffic INCOMING into your host, edit this value and choose `info`. This way, you'll be able to see things like dropped packets. There are other logging levels you can choose from, as shown in the capture below.
-- `NDP`: enabled by default. NDP stands for _Neighbor Discovery Protocol_, and it's the IPv6 version of IPv4's ARP (_Address Resolution Protocol_). Since IPv6 is disabled in this guide series' setup, you could disable this option.
+ 
-- `nf_conntrack_max`: `262144` by default. This value corresponds to both the `sysctl` parameters `net.netfilter.nf_conntrack_max` and `net.nf_conntrack_max`, meaning that by changing this value you'll change the two of them at the same time. These two values limit how many simultaneous connections can be established with your host and be tracked in the corresponding connections table maintained by netfilter. When the maximum value is reached, your host will stop accepting new TCP connections silently. If you detect connectivity issues in your setup, one of the things you can try is making this value bigger. This is one of the sysctl parameters enabled by the firewall when it got fully active, as I mentioned back in the [G012 guide](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md).
+- `log_level_out`\
+ `nolog` value by default. Change it to `info` to see the firewall activity with traffic OUTGOING from your host.
-- `nf_conntrack_tcp_timeout_established`: `432000` by default (in seconds). This value says for how long established connections can be considered to be in such state, before being discarded as too old. The default value is equivalent to 5 days, which may or may not be excessive for your needs or requirements. If you want to reduce this value, know that the Proxmox VE web console won't allow you to put a value lower than `7875` seconds, which is equivalent to roughly 2 hours and 11 minutes.
+- `log_level_forward`\
+ `nolog` value by default. Change it to `info` to see the firewall activity regarding the traffic FORWARDED towards your VMs.
-- `log_level_in`: `nolog` value by default. To enable the logging of the firewall activity with traffic INCOMING into your host, edit this value and choose `info`. This way, you'll manage to see things like dropped packets. There are other logging levels you can choose from, as shown in the capture below.
- 
+- `tcp_flags_log_level`\
+ `nolog` value by default. To see logs of firewall actions related to TCP flags filtering.
-- `log_level_out`: `nolog` value by default. Change it to `info` to see the firewall activity with traffic OUTGOING from your host.
+- `smurf_log_level`\
+  `nolog` value by default. To get logs of firewall actions regarding the SMURFS attack protection.
-- `tcp_flags_log_level`: `nolog` value by default. To see logs of firewall actions related to TCP flags filtering.
+- `nftables (tech preview)`\
+ Disabled by default. Enable this if you want to try the newer, **but still in tech preview state**, nftables-based firewall of Proxmox VE.
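+
+Regarding the `nf_conntrack_max` option, a quick way of getting a feel for how close your host is to that limit is comparing the current number of tracked connections against the configured maximum (just a read-only check, not a tuning recipe):
+
+~~~sh
+# Current tracked connections vs. the configured maximum.
+$ sudo sysctl net.netfilter.nf_conntrack_count net.netfilter.nf_conntrack_max
+~~~
+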
-- `smurf_log_level_in`: `nolog` value by default. To get logs of firewall action regarding SMURFS attacks protection.
+### Enabling TCP SYN flood protection
-### _Enabling TCP SYN flood protection_
+There are three extra options related to SYN flood protection that are mentioned only in the Proxmox VE firewall documentation, [in the Host Specific Configuration subsection](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pve_firewall_host_specific_configuration).
-There are three extra options related to SYN flood protection that are mentioned only in the Proxmox VE firewall documentation, [in the Host Specific Configuration subsection](https://pve.deimos.cloud:8006/pve-docs/chapter-pve-firewall.html#pve_firewall_host_specific_configuration).
+- `protection_synflood`\
+  Disabled (value `0`) by default. Enables the protection against TCP SYN flood attacks, which are another kind of DDoS that can saturate your host with new connection requests. This protection essentially controls how many connections can be requested by other IPs at once and per second.
-- `protection_synflood`: disabled (value `0`) by default. Enables the protection against TCP syn flood attacks, which is another kind of DDoS that essentially can saturate your host's with new connection requests. This protection essentially controls how many connections can be requested by other IPs at once and per second.
+- `protection_synflood_burst`\
+ `1000` by default. Puts a limit on how many new connections can be requested from other IPs to this host.
-- `protection_synflood_burst`: 1000 by default. Puts a limit on how many new connections can be requested from other IPs to this host.
+- `protection_synflood_rate`\
+  `200` by default. It's the maximum number of new connections that can be requested **per second** to this host from other IPs.
-- `protection_synflood_rate`: 200 by default. Its the maximum number of new connections that can be requested **per second** to this host from other IPs.
+These three parameters haven't been made directly available from the PVE web console. If you want to enable and configure this synflood protection, you need to enter the parameters directly in the corresponding `host.fw` file of your PVE node.
-These three values haven't been made directly available from the PVE web console so, if you want to enable this synflood protection and configure it, you'll need to put these parameters in the corresponding `host.fw` file of your PVE node.
+1. Since in this guide the Proxmox VE node is called `pve`, the full path of that file is `/etc/pve/nodes/pve/host.fw`. Open a shell with your `mgrsys` user, then edit the file:
-1. Since in this guide the Proxmox VE node is called `pve`, hence the full path of that file is `/etc/pve/nodes/pve/host.fw`. Open a shell with your `mgrsys` user, then edit the file.
+ > [!NOTE]
+ > **Remember, this file only appears if you change the node's firewall configuration**\
+ > Modify something to make Proxmox VE generate the `/etc/pve/nodes/pve/host.fw` file.
~~~properties
[OPTIONS]
+ nf_conntrack_tcp_timeout_established: 7875
+ ndp: 0
+ smurf_log_level: info
tcp_flags_log_level: info
log_level_out: info
log_level_in: info
- ndp: 0
- smurf_log_level: info
tcpflags: 1
- nf_conntrack_tcp_timeout_established: 7875
+ log_level_forward: info
~~~
Above you can see the configuration I set up in my host, which may differ from what you have chosen to configure in your setup.
-2. Below the last parameter you see in the `[OPTIONS]` section, append the synflood parameters.
+2. Below the last parameter you see in the `[OPTIONS]` section, append the synflood parameters:
~~~properties
[OPTIONS]
@@ -557,186 +762,440 @@ These three values haven't been made directly available from the PVE web console
protection_synflood_rate: 200
~~~
- Notice that, in my case, I chose to stick with the default values for `protection_synflood_burst` and `protection_synflood_rate`.
+   Note that, in my case, I chose to stick with the default values for `protection_synflood_burst` and `protection_synflood_rate`.
-3. The Proxmox VE documentation doesn't say if the changes in the `host.fw` file are automatically applied after saving them so, if you want to be sure, you may reboot your PVE node.
+3. The Proxmox VE documentation does not say if the changes in the `host.fw` file are automatically applied after saving them. Just reboot your PVE node to be sure.
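+
+If you want some reassurance beyond rebooting, the `pve-firewall` command line tool can at least tell you whether the firewall service is running and show you the ruleset it generates from your configuration files:
+
+~~~sh
+# Check the firewall service status.
+$ sudo pve-firewall status
+
+# Print the ruleset compiled from the current configuration files.
+$ sudo pve-firewall compile | less
+~~~
+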
## Firewall logging
-If you've configured your firewall as I've showed you before, now you have its log enabled at the node level. You can see it in the PVE web console, at the `Firewall > Log` view of your `pve` node.
+If you've configured your firewall as I've shown you before, you now have its log enabled at the node level. You can see it in the PVE web console, at the `Firewall > Log` view of your `pve` node:
-
+
This log is a text file that you can find in your PVE host at the path `/var/log/pve-firewall.log`, and is rotated daily.
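+
+Besides the web console view, you can also follow this log file live from a shell, which is handy while you are testing rules:
+
+~~~sh
+# Follow the firewall log as new lines get appended (Ctrl+C to stop).
+$ sudo tail -f /var/log/pve-firewall.log
+~~~
+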
-### _Understanding the firewall log_
+### Understanding the firewall log
-Let's imagine you just booted up your Proxmox VE system and then you connect through a SSH client, like PuTTY, to open a shell in your PVE host. With the configuration explained in this guide, you may see lines in your firewall log like the following ones.
+Let's imagine you just booted up your Proxmox VE system and then you try to ping your PVE host from another computer within the same LAN. With the configuration explained in this guide, you will see lines in your firewall log like the following ones:
~~~log
-0 5 - 16/Nov/2021:09:28:09 +0100 starting pvefw logger
-0 5 - 16/Nov/2021:09:28:12 +0100 received terminate request (signal)
-0 5 - 16/Nov/2021:09:28:12 +0100 stopping pvefw logger
-0 5 - 16/Nov/2021:09:28:12 +0100 starting pvefw logger
-0 6 PVEFW-HOST-IN 16/Nov/2021:10:17:00 +0100 policy DROP: IN=vmbr0 PHYSIN=enp2s0 MAC=ae:b7:cd:26:7f:d2:54:5e:96:4b:2c:cf:08:00 SRC=192.168.1.2 DST=192.168.1.107 LEN=60 TOS=0x00 PREC=0x00 TTL=128 ID=64406 PROTO=ICMP TYPE=8 CODE=0 ID=1 SEQ=1
-0 6 PVEFW-HOST-IN 16/Nov/2021:10:17:17 +0100 policy DROP: IN=vmbr0 PHYSIN=enp2s0 MAC=ae:b7:cd:26:7f:d2:54:5e:96:4b:2c:cf:08:00 SRC=192.168.1.2 DST=192.168.1.107 LEN=60 TOS=0x00 PREC=0x00 TTL=128 ID=64488 PROTO=ICMP TYPE=8 CODE=0 ID=1 SEQ=2
+0 5 - 30/Aug/2025:20:22:39 +0200 starting pvefw logger
+0 5 - 30/Aug/2025:20:22:52 +0200 received terminate request (signal)
+0 5 - 30/Aug/2025:20:22:52 +0200 stopping pvefw logger
+0 5 - 30/Aug/2025:20:22:52 +0200 starting pvefw logger
+0 6 PVEFW-HOST-IN 30/Aug/2025:20:33:05 +0200 policy DROP: IN=vmbr0 PHYSIN=enp3s0 MAC=87:ea:ab:02:15:a5:1a:e3:a5:13:f2:57:08:00 SRC=10.157.123.220 DST=10.1.0.1 LEN=60 TOS=0x00 PREC=0x00 TTL=128 ID=20335 PROTO=ICMP TYPE=8 CODE=0 ID=1 SEQ=1
+0 6 PVEFW-HOST-IN 30/Aug/2025:20:33:09 +0200 policy DROP: IN=vmbr0 PHYSIN=enp3s0 MAC=87:ea:ab:02:15:a5:1a:e3:a5:13:f2:57:08:00 SRC=10.157.123.220 DST=10.1.0.1 LEN=60 TOS=0x00 PREC=0x00 TTL=128 ID=20402 PROTO=ICMP TYPE=8 CODE=0 ID=1 SEQ=2
~~~
-The typical log line is formatted with the following schema:
+These and any other log lines are formatted with the following schema:
-~~~bash
+~~~sh
VMID LOGID CHAIN TIMESTAMP POLICY: PACKET_DETAILS
~~~
This schema is translated as follows:
-- `VMID`: identifies the virtual machine on which the firewall rule or policy is acting. For the Proxmox VE host itself this value is always `0`.
+- `VMID`\
+ Identifies the virtual machine on which the firewall rule or policy is acting. For the Proxmox VE host itself this value is always `0`.
-- `LOGID`: tells the logging level this log line has. Proxmox VE firewall has eight levels, check them out [in its official wiki](https://pve.proxmox.com/wiki/Firewall#_logging_of_firewall_rules).
+- `LOGID`\
+ Tells the logging level this log line has. Proxmox VE firewall has eight levels, check them out [in its official wiki](https://pve.proxmox.com/wiki/Firewall#_logging_of_firewall_rules).
-- `CHAIN`: the firewall rule chain's name in which the policy or rule that provoked the log line is set. For cases in which there's no related chain, a `-` is printed here.
+- `CHAIN`\
+ The firewall rule chain's name in which the policy or rule that provoked the log line is set. For cases in which there is no related chain, a `-` is printed here.
-- `TIMESTAMP`: a full timestamp that includes the UTC.
+- `TIMESTAMP`\
+  A full timestamp that includes the UTC offset.
-- `POLICY`: indicates what policy was applied in the rule that printed the log line.
+- `POLICY`\
+ Indicates what policy was applied in the rule that printed the log line.
-- `PACKET_DETAILS`: details from the affected packet, like source and destination IPs, protocol used, etc.
+- `PACKET_DETAILS`\
+ Details from the affected packet, like source and destination IPs, protocol used, etc.
-Knowing all that above, let's translate the previous logging example.
+Knowing all that above, let's translate the previous logging example:
- All the lines are relative to the Proxmox VE host, since all of them start with the `VMID` set as `0`.
-- The first four lines are **notices** just warning about the firewall logger activity:
- - Their `LOGID` is `5`, which corresponds to the **notice** logging level.
- - These log lines don't correspond to any rule chain of the firewall, so their `CHAIN` is just a `-`.
- - The `TIMESTAMP` is a full date plus the UTC reference as expected.
- - The rest of their content is just a string in which the POLICY part is just empty and the PACKET_DETAILS is used to print the notice's message.
-
-- The following two lines **inform** of two packets that have been dropped by the firewall.
- - Their `LOGID` is `6`, which corresponds to the **info** logging level.
- - The rule that has printed this log is set in the `PVEFW-HOST-IN` chain.
- - The `TIMESTAMP` is like in the previous notice log lines.
- - The `POLICY` string indicates the `DROP` action taken with the packets.
- - The `PACKET_DETAILS` is a list of details from the dropped packets.
- - `IN`: refers to the input network device through which the packet came, in this case the bridge `vmbr0`.
- - `PHYSIN`: is the _physical_ input network device through which the packet came. Here is the ethernet network card `enp2s0` of this host, which is the one used by the bridge `vmbr0` as networking port.
- - `MAC`: this is a rather odd value because is not just the mac of a particular network device, but the concatenation of the packet's source mac (`ae:b7:cd:26:7f:d2`) with the destination mac (`54:5e:96:4b:2c:cf`) and an extra suffix `08:00`.
- - `SRC`: IP of this packet's source.
- - `DST`: IP of this packet's destination.
- - `LEN`: packet's total length in bytes.
- - `TOS`: [Type Of Service](https://linuxreviews.org/Type_of_Service_(ToS)_and_DSCP_Values), meant for prioritization of packets within a network.
- - `PREC`: the precedence bits of the previous `TOS` field.
- - `TTL`: Time To Live, which is the number of hops this packet can make before being discarded.
- - `ID`: packet's datagram identifier.
- - `PROTO`: message protocol used in the packet, [ICMP](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol) in these two cases.
- - `TYPE`: is the ICMP type of [control message](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages). Here is `8` which is the echo request, used to ping.
- - `CODE`: parameter used in ICMP messages, some types have several codes but the 8 only has the `0` one.
- - `ID`: this second identifier is also an ICMP field.
- - `SEQ`: a sequence identifier set up too by the ICMP protocol.
-
-So, the conclusion you can get from those log lines is that this Proxmox VE setup is dropping incoming ping packets, something common as a network hardening method. Be aware that, in the `PACKET_DETAILS` section, you should expect seeing very different parameters depending on the protocol used on each logged packet. In the example above you've seen ICMP ping packets, but other times you might see other types of ICMP messages or, more commonly, TCP or UDP packets.
+- The first four lines are **notices** warning about the firewall logger activity:
+
+ - Their `LOGID` is `5`, which corresponds to the **notice** logging level.
+ - These log lines do not correspond to any rule chain of the firewall, so their `CHAIN` is just a `-`.
+ - The `TIMESTAMP` is a full date plus the UTC reference as expected.
+ - The rest of their content is just a string in which the `POLICY` part is just empty and the `PACKET_DETAILS` is used to print the notice's message.
+
+- The following two lines **inform** of two ICMP packets that have been dropped by the firewall:
+
+ - Their `LOGID` is `6`, which corresponds to the **info** logging level.
+ - The rule that has printed this log is set in the `PVEFW-HOST-IN` chain.
+ - The `TIMESTAMP` is like in the previous notice log lines.
+ - The `POLICY` string indicates the `DROP` action taken with the packets.
+ - The `PACKET_DETAILS` is a list of details from the dropped packets.
+
+ - `IN`\
+ Refers to the input network device through which the packet came, in this case the bridge `vmbr0`.
+
+ - `PHYSIN`\
+      It is the _physical_ input network device through which the packet came. Here it is the Ethernet network card `enp3s0` of this host, which is the one used by the bridge `vmbr0` as its networking port.
+
+ - `MAC`\
+      This is a rather odd value. It is not just the MAC of a particular network device, but the concatenation of the packet's source MAC (`87:ea:ab:02:15:a5`) with the destination MAC (`1a:e3:a5:13:f2:57`) and an extra suffix `08:00`, which is the EtherType for IPv4.
+
+ - `SRC`\
+ IP of this packet's source.
+
+ - `DST`\
+ IP of this packet's destination.
+
+ - `LEN`\
+ Packet's total length in bytes.
+
+ - `TOS`\
+ [Type Of Service](https://linuxreviews.org/Type_of_Service_(ToS)_and_DSCP_Values), meant for prioritization of packets within a network.
+
+ - `PREC`\
+ The precedence bits of the previous `TOS` field.
+
+ - `TTL`\
+ Time To Live, which is the number of hops this packet can make before being discarded.
+
+ - `ID`\
+ Packet's datagram identifier.
+
+ - `PROTO`\
+ Message protocol used in the packet, [ICMP](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol) in these two cases.
+
+ - `TYPE`\
+      It is the ICMP type of [control message](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages). Here it is `8`, which is the echo request used to ping.
+
+ - `CODE`\
+      Parameter used in ICMP messages; some types have several codes, but type `8` only has code `0`.
+
+ - `ID`\
+ This second identifier is also an ICMP field.
+
+ - `SEQ`\
+      A sequence identifier also set by the ICMP protocol.
+
+The conclusion you can get from the example log lines is that this Proxmox VE setup is dropping incoming ping packets, something common as a network hardening measure. Be aware that, in the `PACKET_DETAILS` section, you should expect to see very different parameters depending on the protocol used in each logged packet. In the example above you've seen ICMP ping packets, but other times you might see other types of ICMP messages or, more commonly, TCP or UDP packets.
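+
+As a side note, if you did want your host to answer pings coming from your own LAN, you could add an explicit rule to a `[RULES]` section of the node's `host.fw` file. The snippet below is only a sketch: it relies on the `Ping` macro shipped with the Proxmox VE firewall, and the `10.1.0.0/16` range stands for whatever network you actually trust in your setup:
+
+~~~properties
+[RULES]
+
+IN Ping(ACCEPT) -source 10.1.0.0/16 -log nolog
+~~~
+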
## Connection tracking tool
-In this guide and in the [previous G012 one](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md) I've mentioned you a few times the connection tracking system enabled by the firewall. This is popularly known as _conntrack_ and not only has its own particular `sysctl` parameters (all those with the `nf_conntrack_` prefix) but there's also a command meant for system administrators that allows them manage the in-kernel connection tracking state tables.
+In this chapter and in the [previous **G012** one](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md) I've mentioned a few times the connection tracking system enabled by the firewall. This is popularly known as _conntrack_ and, in addition to having its own particular `sysctl` parameters (identified by the `nf_conntrack_` prefix), it also comes with a command that lets system administrators manage the in-kernel connection tracking state tables:
-1. Install the `conntrack` command with `apt`.
+1. The `conntrack` command comes already installed in Proxmox VE 9.0, as `apt` itself reports:
- ~~~bash
+ ~~~sh
$ sudo apt install conntrack
+ conntrack is already the newest version (1:1.4.8-2).
+ Summary:
+ Upgrading: 0, Installing: 0, Removing: 0, Not Upgrading: 17
~~~
-2. Test the command by showing the currently tracked connections.
+2. Test the command by showing the currently tracked connections. Pipe it to the `less` command to be able to paginate through its long output:
- ~~~bash
- $ sudo conntrack -L
+ ~~~sh
+ $ sudo conntrack -L | less
~~~
- The `-L` option makes `conntrack` list the currently "established" or "expected" connections in your system.
-
- ~~~bash
- tcp 6 82 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39406 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39406 [ASSURED] mark=0 use=1
- tcp 6 85 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39410 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39410 [ASSURED] mark=0 use=1
- tcp 6 62 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39394 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39394 [ASSURED] mark=0 use=1
- tcp 6 95 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39414 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39414 [ASSURED] mark=0 use=1
- tcp 6 18 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39374 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39374 [ASSURED] mark=0 use=1
- tcp 6 51 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39390 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39390 [ASSURED] mark=0 use=1
- tcp 6 68 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39398 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39398 [ASSURED] mark=0 use=1
- tcp 6 60 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39388 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39388 [ASSURED] mark=0 use=1
- tcp 6 89 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39412 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39412 [ASSURED] mark=0 use=1
- tcp 6 98 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39416 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39416 [ASSURED] mark=0 use=1
- tcp 6 20 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39378 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39378 [ASSURED] mark=0 use=1
- tcp 6 55 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39392 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39392 [ASSURED] mark=0 use=1
- tcp 6 5 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39370 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39370 [ASSURED] mark=0 use=1
- tcp 6 105 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39420 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39420 [ASSURED] mark=0 use=1
- tcp 6 108 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39422 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39422 [ASSURED] mark=0 use=1
- tcp 6 25 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39380 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39380 [ASSURED] mark=0 use=1
- tcp 6 73 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39402 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39402 [ASSURED] mark=0 use=1
- tcp 6 4 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39366 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39366 [ASSURED] mark=0 use=2
- tcp 6 15 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39376 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39376 [ASSURED] mark=0 use=1
- tcp 6 87 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39408 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39408 [ASSURED] mark=0 use=1
- tcp 6 7874 ESTABLISHED src=127.0.0.1 dst=127.0.0.1 sport=39426 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39426 [ASSURED] mark=0 use=1
- tcp 6 69 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39400 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39400 [ASSURED] mark=0 use=1
- tcp 6 78 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39404 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39404 [ASSURED] mark=0 use=1
- tcp 6 7874 ESTABLISHED src=192.168.1.2 dst=192.168.1.107 sport=26945 dport=22 src=192.168.1.107 dst=192.168.1.2 sport=22 dport=26945 [ASSURED] mark=0 use=1
- tcp 6 7873 ESTABLISHED src=192.168.1.2 dst=192.168.1.107 sport=11140 dport=8006 src=192.168.1.107 dst=192.168.1.2 sport=8006 dport=11140 [ASSURED] mark=0 use=1
- tcp 6 42 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39386 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39386 [ASSURED] mark=0 use=1
- tcp 6 74 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39396 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39396 [ASSURED] mark=0 use=1
- tcp 6 7873 ESTABLISHED src=127.0.0.1 dst=127.0.0.1 sport=39428 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39428 [ASSURED] mark=0 use=1
- tcp 6 39 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39384 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39384 [ASSURED] mark=0 use=1
- tcp 6 113 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39424 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39424 [ASSURED] mark=0 use=1
- tcp 6 6 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39372 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39372 [ASSURED] mark=0 use=1
- tcp 6 27 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39382 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39382 [ASSURED] mark=0 use=1
- unknown 2 574 src=192.168.1.1 dst=224.0.0.1 [UNREPLIED] src=224.0.0.1 dst=192.168.1.1 mark=0 use=1
- tcp 6 102 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39418 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39418 [ASSURED] mark=0 use=1
- tcp 6 7870 ESTABLISHED src=127.0.0.1 dst=127.0.0.1 sport=53236 dport=3493 src=127.0.0.1 dst=127.0.0.1 sport=3493 dport=53236 [ASSURED] mark=0 use=1
- tcp 6 7874 ESTABLISHED src=192.168.1.2 dst=192.168.1.107 sport=11137 dport=8006 src=192.168.1.107 dst=192.168.1.2 sport=8006 dport=11137 [ASSURED] mark=0 use=1
- unknown 2 578 src=192.168.1.107 dst=224.0.0.106 [UNREPLIED] src=224.0.0.106 dst=192.168.1.107 mark=0 use=1
- conntrack v1.4.6 (conntrack-tools): 37 flow entries have been shown.
+ The `-L` option makes `conntrack` list the currently "established" or "expected" connections in your system:
+
+ ~~~sh
+ tcp 6 103 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58546 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58546 [ASSURED] mark=0 use=1
+ tcp 6 58 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61942 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61942 [ASSURED] mark=0 use=1
+ tcp 6 69 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61952 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61952 [ASSURED] mark=0 use=1
+ tcp 6 79 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58110 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58110 [ASSURED] mark=0 use=1
+ tcp 6 62 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61945 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61945 [ASSURED] mark=0 use=1
+ tcp 6 92 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35924 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35924 [ASSURED] mark=0 use=1
+ tcp 6 99 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61984 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61984 [ASSURED] mark=0 use=1
+ tcp 6 29 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56546 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56546 [ASSURED] mark=0 use=1
+ tcp 6 49 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61931 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61931 [ASSURED] mark=0 use=1
+ tcp 6 38 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=32788 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=32788 [ASSURED] mark=0 use=1
+ tcp 6 20 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45300 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45300 [ASSURED] mark=0 use=1
+ tcp 6 13 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46314 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46314 [ASSURED] mark=0 use=1
+ tcp 6 113 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37396 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37396 [ASSURED] mark=0 use=1
+ tcp 6 5 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=57050 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=57050 [ASSURED] mark=0 use=1
+ tcp 6 46 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61930 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61930 [ASSURED] mark=0 use=1
+ tcp 6 22 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61903 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61903 [ASSURED] mark=0 use=1
+ tcp 6 16 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46346 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46346 [ASSURED] mark=0 use=1
+ tcp 6 79 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61964 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61964 [ASSURED] mark=0 use=2
+ tcp 6 102 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58544 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58544 [ASSURED] mark=0 use=1
+ tcp 6 68 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36384 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36384 [ASSURED] mark=0 use=1
+ tcp 6 58 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=41962 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=41962 [ASSURED] mark=0 use=1
+ tcp 6 50 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61936 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61936 [ASSURED] mark=0 use=1
+ tcp 6 66 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61949 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61949 [ASSURED] mark=0 use=1
+ tcp 6 99 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58522 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58522 [ASSURED] mark=0 use=1
+ tcp 6 24 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61905 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61905 [ASSURED] mark=0 use=1
+ tcp 6 39 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=32804 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=32804 [ASSURED] mark=0 use=1
+ tcp 6 104 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61988 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61988 [ASSURED] mark=0 use=1
+ tcp 6 85 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58158 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58158 [ASSURED] mark=0 use=1
+ tcp 6 17 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61898 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61898 [ASSURED] mark=0 use=1
+ tcp 6 3 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61882 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61882 [ASSURED] mark=0 use=1
+ tcp 6 28 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61910 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61910 [ASSURED] mark=0 use=1
+ tcp 6 106 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61990 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61990 [ASSURED] mark=0 use=1
+ tcp 6 49 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39872 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39872 [ASSURED] mark=0 use=1
+ tcp 6 42 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=32834 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=32834 [ASSURED] mark=0 use=1
+ tcp 6 54 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39906 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39906 [ASSURED] mark=0 use=1
+ tcp 6 97 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61981 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61981 [ASSURED] mark=0 use=1
+ tcp 6 76 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36462 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36462 [ASSURED] mark=0 use=2
+ tcp 6 114 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37400 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37400 [ASSURED] mark=0 use=1
+ tcp 6 93 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61976 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61976 [ASSURED] mark=0 use=1
+ tcp 6 37 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61921 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61921 [ASSURED] mark=0 use=1
+ tcp 6 102 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61983 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61983 [ASSURED] mark=0 use=1
+ tcp 6 24 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45356 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45356 [ASSURED] mark=0 use=1
+ tcp 6 26 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61907 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61907 [ASSURED] mark=0 use=1
+ tcp 6 36 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61920 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61920 [ASSURED] mark=0 use=1
+ tcp 6 107 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61991 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61991 [ASSURED] mark=0 use=1
+ tcp 6 41 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=32824 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=32824 [ASSURED] mark=0 use=1
+ tcp 6 55 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39912 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39912 [ASSURED] mark=0 use=1
+ tcp 6 8 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46280 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46280 [ASSURED] mark=0 use=1
+ tcp 6 38 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61922 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61922 [ASSURED] mark=0 use=1
+ tcp 6 44 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=32852 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=32852 [ASSURED] mark=0 use=1
+ tcp 6 1 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=57024 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=57024 [ASSURED] mark=0 use=1
+ tcp 6 117 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37424 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37424 [ASSURED] mark=0 use=1
+ tcp 6 60 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=41990 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=41990 [ASSURED] mark=0 use=1
+ tcp 6 19 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61900 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61900 [ASSURED] mark=0 use=1
+ tcp 6 100 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58526 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58526 [ASSURED] mark=0 use=1
+ tcp 6 98 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35992 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35992 [ASSURED] mark=0 use=2
+ tcp 6 95 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61979 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61979 [ASSURED] mark=0 use=1
+ tcp 6 83 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58156 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58156 [ASSURED] mark=0 use=1
+ tcp 6 57 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61943 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61943 [ASSURED] mark=0 use=1
+ tcp 6 109 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37360 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37360 [ASSURED] mark=0 use=1
+ tcp 6 37 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56602 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56602 [ASSURED] mark=0 use=1
+ tcp 6 10 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61890 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61890 [ASSURED] mark=0 use=1
+ tcp 6 89 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61973 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61973 [ASSURED] mark=0 use=1
+ tcp 6 5 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61885 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61885 [ASSURED] mark=0 use=1
+ tcp 6 108 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37348 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37348 [ASSURED] mark=0 use=1
+ tcp 6 17 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46348 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46348 [ASSURED] mark=0 use=1
+ tcp 6 2 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=57030 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=57030 [ASSURED] mark=0 use=1
+ tcp 6 91 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35910 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35910 [ASSURED] mark=0 use=1
+ tcp 6 7 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=57060 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=57060 [ASSURED] mark=0 use=1
+ tcp 6 90 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61974 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61974 [ASSURED] mark=0 use=1
+ tcp 6 94 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35942 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35942 [ASSURED] mark=0 use=1
+ tcp 6 96 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35968 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35968 [ASSURED] mark=0 use=1
+ tcp 6 66 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=42030 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=42030 [ASSURED] mark=0 use=1
+ tcp 6 80 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61962 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61962 [ASSURED] mark=0 use=1
+ tcp 6 98 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61982 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61982 [ASSURED] mark=0 use=2
+ tcp 6 18 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45268 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45268 [ASSURED] mark=0 use=1
+ tcp 6 40 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61924 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61924 [ASSURED] mark=0 use=1
+ tcp 6 14 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61895 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61895 [ASSURED] mark=0 use=1
+ tcp 6 52 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61934 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61934 [ASSURED] mark=0 use=1
+ tcp 6 101 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61986 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61986 [ASSURED] mark=0 use=1
+ tcp 6 61 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=41996 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=41996 [ASSURED] mark=0 use=1
+ tcp 6 87 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61971 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61971 [ASSURED] mark=0 use=1
+ tcp 6 4 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61884 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61884 [ASSURED] mark=0 use=1
+ tcp 6 25 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61906 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61906 [ASSURED] mark=0 use=1
+ tcp 6 109 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61993 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61993 [ASSURED] mark=0 use=1
+ tcp 6 9 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46286 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46286 [ASSURED] mark=0 use=1
+ tcp 6 74 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61957 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61957 [ASSURED] mark=0 use=1
+ tcp 6 114 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=62002 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=62002 [ASSURED] mark=0 use=1
+ tcp 6 51 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61935 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61935 [ASSURED] mark=0 use=1
+ tcp 6 45 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=32854 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=32854 [ASSURED] mark=0 use=1
+ unknown 2 591 src=0.0.0.0 dst=224.0.0.1 [UNREPLIED] src=224.0.0.1 dst=0.0.0.0 mark=0 use=1
+ tcp 6 59 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=41976 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=41976 [ASSURED] mark=0 use=1
+ tcp 6 67 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61950 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61950 [ASSURED] mark=0 use=1
+ tcp 6 11 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46294 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46294 [ASSURED] mark=0 use=2
+ tcp 6 15 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61897 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61897 [ASSURED] mark=0 use=1
+ tcp 6 111 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37372 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37372 [ASSURED] mark=0 use=1
+ tcp 6 81 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58134 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58134 [ASSURED] mark=0 use=1
+ tcp 6 77 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61961 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61961 [ASSURED] mark=0 use=1
+ tcp 6 115 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61997 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61997 [ASSURED] mark=0 use=1
+ tcp 6 56 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39916 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39916 [ASSURED] mark=0 use=1
+ tcp 6 67 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=42032 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=42032 [ASSURED] mark=0 use=1
+ tcp 6 32 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61916 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61916 [ASSURED] mark=0 use=1
+ tcp 6 27 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45370 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45370 [ASSURED] mark=0 use=1
+ tcp 6 4 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=57040 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=57040 [ASSURED] mark=0 use=1
+ tcp 6 63 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61948 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61948 [ASSURED] mark=0 use=1
+ tcp 6 116 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=62003 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=62003 [ASSURED] mark=0 use=1
+ tcp 6 34 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56580 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56580 [ASSURED] mark=0 use=1
+ tcp 6 39 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61923 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61923 [ASSURED] mark=0 use=1
+ tcp 6 116 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37420 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37420 [ASSURED] mark=0 use=1
+ tcp 6 31 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56560 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56560 [ASSURED] mark=0 use=1
+ tcp 6 119 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=34464 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=34464 [ASSURED] mark=0 use=1
+ tcp 6 20 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61901 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61901 [ASSURED] mark=0 use=1
+ tcp 6 42 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61927 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61927 [ASSURED] mark=0 use=2
+ tcp 6 71 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61953 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61953 [ASSURED] mark=0 use=1
+ tcp 6 55 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61939 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61939 [ASSURED] mark=0 use=1
+ tcp 6 62 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=42010 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=42010 [ASSURED] mark=0 use=1
+ tcp 6 106 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58584 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58584 [ASSURED] mark=0 use=1
+ tcp 6 9 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61889 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61889 [ASSURED] mark=0 use=1
+ tcp 6 54 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61938 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61938 [ASSURED] mark=0 use=1
+ tcp 6 44 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61929 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61929 [ASSURED] mark=0 use=1
+ tcp 6 89 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35884 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35884 [ASSURED] mark=0 use=1
+ tcp 6 53 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39902 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39902 [ASSURED] mark=0 use=1
+ tcp 6 23 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45340 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45340 [ASSURED] mark=0 use=1
+ tcp 6 88 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58196 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58196 [ASSURED] mark=0 use=1
+ tcp 6 92 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61978 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61978 [ASSURED] mark=0 use=1
+ tcp 6 43 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=32842 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=32842 [ASSURED] mark=0 use=1
+ tcp 6 107 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58588 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58588 [ASSURED] mark=0 use=1
+ tcp 6 22 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45330 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45330 [ASSURED] mark=0 use=1
+ tcp 6 47 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=32870 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=32870 [ASSURED] mark=0 use=1
+ tcp 6 50 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39878 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39878 [ASSURED] mark=0 use=1
+ tcp 6 43 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61928 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61928 [ASSURED] mark=0 use=1
+ tcp 6 10 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46290 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46290 [ASSURED] mark=0 use=2
+ tcp 6 25 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45360 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45360 [ASSURED] mark=0 use=1
+ tcp 6 6 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61886 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61886 [ASSURED] mark=0 use=1
+ tcp 6 2 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61881 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61881 [ASSURED] mark=0 use=1
+ tcp 6 1 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61879 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61879 [ASSURED] mark=0 use=1
+ tcp 6 30 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61915 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61915 [ASSURED] mark=0 use=1
+ tcp 6 30 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56548 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56548 [ASSURED] mark=0 use=1
+ tcp 6 83 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61968 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61968 [ASSURED] mark=0 use=1
+ tcp 6 41 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61925 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61925 [ASSURED] mark=0 use=1
+ tcp 6 87 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58180 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58180 [ASSURED] mark=0 use=1
+ tcp 6 72 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61955 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61955 [ASSURED] mark=0 use=1
+ tcp 6 104 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58558 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58558 [ASSURED] mark=0 use=1
+ tcp 6 117 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=62005 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=62005 [ASSURED] mark=0 use=1
+ tcp 6 73 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36432 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36432 [ASSURED] mark=0 use=1
+ tcp 6 31 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61914 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61914 [ASSURED] mark=0 use=1
+ tcp 6 105 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61989 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61989 [ASSURED] mark=0 use=1
+ tcp 6 48 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61933 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61933 [ASSURED] mark=0 use=1
+ tcp 6 78 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36486 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36486 [ASSURED] mark=0 use=1
+ tcp 6 85 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61970 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61970 [ASSURED] mark=0 use=1
+ tcp 6 27 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61908 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61908 [ASSURED] mark=0 use=2
+ tcp 6 12 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46300 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46300 [ASSURED] mark=0 use=1
+ tcp 6 57 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39926 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39926 [ASSURED] mark=0 use=1
+ tcp 6 113 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61998 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61998 [ASSURED] mark=0 use=1
+ tcp 6 6 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=57056 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=57056 [ASSURED] mark=0 use=1
+ tcp 6 118 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=34454 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=34454 [ASSURED] mark=0 use=1
+ tcp 6 110 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61994 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61994 [ASSURED] mark=0 use=1
+ tcp 6 77 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36472 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36472 [ASSURED] mark=0 use=1
+ tcp 6 110 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37364 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37364 [ASSURED] mark=0 use=1
+ tcp 6 62 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61946 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61946 [ASSURED] mark=0 use=1
+ tcp 6 7871 ESTABLISHED src=127.0.0.1 dst=127.0.0.1 sport=43472 dport=3493 src=127.0.0.1 dst=127.0.0.1 sport=3493 dport=43472 [ASSURED] mark=0 use=1
+ tcp 6 16 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61896 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61896 [ASSURED] mark=0 use=1
+ tcp 6 56 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61941 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61941 [ASSURED] mark=0 use=1
+ tcp 6 23 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61904 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61904 [ASSURED] mark=0 use=1
+ tcp 6 8 CLOSE src=10.157.123.220 dst=10.1.0.1 sport=62004 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=62004 [ASSURED] mark=0 use=1
+ tcp 6 13 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61894 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61894 [ASSURED] mark=0 use=1
+ tcp 6 71 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36406 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36406 [ASSURED] mark=0 use=1
+ tcp 6 76 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61960 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61960 [ASSURED] mark=0 use=1
+ tcp 6 101 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58532 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58532 [ASSURED] mark=0 use=1
+ tcp 6 68 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61951 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61951 [ASSURED] mark=0 use=2
+ tcp 6 36 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56592 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56592 [ASSURED] mark=0 use=1
+ tcp 6 21 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61902 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61902 [ASSURED] mark=0 use=1
+ tcp 6 66 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61947 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61947 [ASSURED] mark=0 use=1
+ tcp 6 100 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61985 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61985 [ASSURED] mark=0 use=1
+ tcp 6 19 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45284 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45284 [ASSURED] mark=0 use=1
+ tcp 6 29 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61913 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61913 [ASSURED] mark=0 use=1
+ tcp 6 115 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37416 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37416 [ASSURED] mark=0 use=1
+ tcp 6 32 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56566 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56566 [ASSURED] mark=0 use=1
+ tcp 6 78 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61963 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61963 [ASSURED] mark=0 use=1
+ tcp 6 95 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35954 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35954 [ASSURED] mark=0 use=1
+ tcp 6 111 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61995 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61995 [ASSURED] mark=0 use=1
+ tcp 6 94 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61977 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61977 [ASSURED] mark=0 use=1
+ tcp 6 11 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61892 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61892 [ASSURED] mark=0 use=1
+ tcp 6 33 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56578 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56578 [ASSURED] mark=0 use=1
+ tcp 6 86 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61969 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61969 [ASSURED] mark=0 use=1
+ tcp 6 112 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=37384 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=37384 [ASSURED] mark=0 use=1
+ tcp 6 82 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61967 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61967 [ASSURED] mark=0 use=1
+ tcp 6 74 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36438 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36438 [ASSURED] mark=0 use=1
+ tcp 6 300 ESTABLISHED src=10.157.123.220 dst=10.1.0.1 sport=60507 dport=22 src=10.1.0.1 dst=10.157.123.220 sport=22 dport=60507 [ASSURED] mark=0 use=2
+ tcp 6 65 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=42016 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=42016 [ASSURED] mark=0 use=1
+ tcp 6 8 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61888 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61888 [ASSURED] mark=0 use=1
+ tcp 6 7 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61887 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61887 [ASSURED] mark=0 use=1
+ tcp 6 3 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=57038 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=57038 [ASSURED] mark=0 use=1
+ tcp 6 96 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61980 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61980 [ASSURED] mark=0 use=1
+ tcp 6 15 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46332 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46332 [ASSURED] mark=0 use=1
+ tcp 6 82 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58144 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58144 [ASSURED] mark=0 use=1
+ tcp 6 51 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39886 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39886 [ASSURED] mark=0 use=1
+ tcp 6 93 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35936 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35936 [ASSURED] mark=0 use=1
+ tcp 6 60 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61944 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61944 [ASSURED] mark=0 use=1
+ tcp 6 97 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35982 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35982 [ASSURED] mark=0 use=1
+ tcp 6 35 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61919 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61919 [ASSURED] mark=0 use=1
+ tcp 6 59 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61940 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61940 [ASSURED] mark=0 use=1
+ tcp 6 0 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61878 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61878 [ASSURED] mark=0 use=1
+ tcp 6 88 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61972 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61972 [ASSURED] mark=0 use=1
+ tcp 6 53 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61937 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61937 [ASSURED] mark=0 use=1
+ tcp 6 105 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58568 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58568 [ASSURED] mark=0 use=1
+ tcp 6 90 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=35898 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=35898 [ASSURED] mark=0 use=1
+ tcp 6 69 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36396 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36396 [ASSURED] mark=0 use=2
+ tcp 6 0 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=57016 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=57016 [ASSURED] mark=0 use=1
+ tcp 6 63 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=42014 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=42014 [ASSURED] mark=0 use=1
+ tcp 6 119 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=62006 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=62006 [ASSURED] mark=0 use=1
+ tcp 6 33 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61918 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61918 [ASSURED] mark=0 use=1
+ tcp 6 81 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61966 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61966 [ASSURED] mark=0 use=1
+ tcp 6 108 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61992 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61992 [ASSURED] mark=0 use=1
+ tcp 6 12 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61891 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61891 [ASSURED] mark=0 use=1
+ tcp 6 91 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61975 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61975 [ASSURED] mark=0 use=1
+ tcp 6 26 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45362 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45362 [ASSURED] mark=0 use=1
+ tcp 6 75 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61958 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61958 [ASSURED] mark=0 use=1
+ tcp 6 21 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=45314 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=45314 [ASSURED] mark=0 use=1
+ tcp 6 80 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58122 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58122 [ASSURED] mark=0 use=1
+ tcp 6 52 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39900 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39900 [ASSURED] mark=0 use=1
+ tcp 6 34 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61917 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61917 [ASSURED] mark=0 use=1
+ tcp 6 40 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=32820 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=32820 [ASSURED] mark=0 use=1
+ tcp 6 28 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56544 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56544 [ASSURED] mark=0 use=1
+ tcp 6 18 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61899 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61899 [ASSURED] mark=0 use=1
+ tcp 6 48 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=39866 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=39866 [ASSURED] mark=0 use=1
+ tcp 6 47 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61932 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61932 [ASSURED] mark=0 use=2
+ tcp 6 103 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61987 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61987 [ASSURED] mark=0 use=1
+ tcp 6 72 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36416 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36416 [ASSURED] mark=0 use=1
+ tcp 6 112 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61996 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61996 [ASSURED] mark=0 use=1
+ tcp 6 75 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=36454 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=36454 [ASSURED] mark=0 use=1
+ tcp 6 73 TIME_WAIT src=10.157.123.220 dst=10.1.0.1 sport=61954 dport=8006 src=10.1.0.1 dst=10.157.123.220 sport=8006 dport=61954 [ASSURED] mark=0 use=1
+ tcp 6 35 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=56584 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=56584 [ASSURED] mark=0 use=1
+ tcp 6 86 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=58172 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=58172 [ASSURED] mark=0 use=1
+ tcp 6 14 TIME_WAIT src=127.0.0.1 dst=127.0.0.1 sport=46322 dport=85 src=127.0.0.1 dst=127.0.0.1 sport=85 dport=46322 [ASSURED] mark=0 use=1
+ conntrack v1.4.8 (conntrack-tools): 235 flow entries have been shown.
~~~
-Know that the connection tracking subsystem uses four different internal tables: _conntrack_ (the default one for active connections), _expect_, _dying_ and _unconfirmed_. Check [the `conntrack` command's `man` page](https://manpages.debian.org/bullseye/conntrack/conntrack.8.en.html) to know more.
+Know that the connection tracking subsystem uses four different internal tables: _conntrack_ (the default one for active connections), _expect_, _dying_ and _unconfirmed_. Check [the `conntrack` command's `man` page](https://manpages.debian.org/trixie/conntrack/conntrack.8.en.html) to know more.
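+
+If you want to inspect any of those other tables, or just get a quick summary instead of a full listing, the same `conntrack` command can do it. A few illustrative invocations, all using standard options described in that man page:
+
+~~~sh
+# List the default conntrack table (equivalent to a plain `conntrack -L`).
+$ sudo conntrack -L conntrack
+# List the entries currently sitting in the dying table.
+$ sudo conntrack -L dying
+# Print only the number of currently tracked connections.
+$ sudo conntrack -C
+# Show the per-CPU statistics of the connection tracking subsystem.
+$ sudo conntrack -S
+~~~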
## Relevant system paths
-### _Directories_
+### Directories
+- `/etc/ebtables/`
- `/etc/pve/firewall`
-- `/etc/pve/nodes/`
+- `/etc/pve/nodes//`
+- `/etc/pve/sdn/firewall/`
+- `/usr/share/netfilter-persistent/`
+- `/usr/share/netfilter-persistent/plugins.d/`
+- `/var/log/`
-### _Files_
+### Files
+- `/etc/ebtables/rules.broute`
+- `/etc/ebtables/rules.filter`
+- `/etc/ebtables/rules.nat`
- `/etc/pve/firewall/.fw`
- `/etc/pve/firewall/cluster.fw`
- `/etc/pve/nodes//host.fw`
+- `/etc/pve/sdn/firewall/.fw`
+- `/usr/share/netfilter-persistent/plugins.d/35-ebtables`
+- `/var/log/pve-firewall.log`
## References
-### _Proxmox VE firewall_
+### [Proxmox VE](https://pve.proxmox.com/)
- [Proxmox VE admin guide. Firewall](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pve_firewall)
+ - [Directions & Zones. Zones](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#_directions_amp_zones)
+ - [nftables](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pve_firewall_nft)
- [Proxmox VE wiki. Firewall](https://pve.proxmox.com/wiki/Firewall)
+- [nftables HOWTO documentation](https://wiki.nftables.org/wiki-nftables/index.php/Main_Page)
+- [Why Configure the Firewall on Proxmox?. Defaults. Node level](https://homelab.casaursus.net/new_install-firewall/#node-level)
- [Postinstall Configuration of Proxmox VE 6.2](https://lowendspirit.com/postinstall-configuration-of-proxmox-ve-6-2)
- [Secure Proxmox Install – Sudo, Firewall with IPv6, and more – How to Configure from Start to Finish](https://www.kiloroot.com/secure-proxmox-install-sudo-firewall-with-ipv6-and-more-how-to-configure-from-start-to-finish/)
- [Hardening Proxmox VE management interface with 2FA, reverse proxy and Let's Encrypt](https://loicpefferkorn.net/2020/11/hardening-proxmox-ve-management-interface-with-2fa-reverse-proxy-and-lets-encrypt/)
+- [Proxmox Port Forwarding To VM | An Easy Way](https://bobcares.com/blog/proxmox-port-forwarding-to-vm/)
-### _Ethernet Bridge firewall `ebtables`_
+### Ethernet Bridge firewall ebtables
- [ebtables netfilter](https://ebtables.netfilter.org/)
- [Proxmox dropping packets](https://www.reddit.com/r/homelab/comments/inqncm/proxmox_dropping_packets/)
- [Tracking down dropped packets](https://blog.hambier.lu/post/tracking-dropped-packets)
- [iptables-persistent: also persist ebtables and arptables?](https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=697088)
-### _Network auditing on Linux_
+### Network auditing on Linux
- [How to show dropped packets per interface on Linux](https://www.cyberciti.biz/faq/linux-show-dropped-packets-per-interface-command/)
- [How to capture "dropped packets" in tcpdump](https://superuser.com/questions/1208783/how-to-capture-dropped-packets-in-tcpdump)
- [Dropped packets in all Linux and Unix](https://serverfault.com/questions/780195/dropped-packets-in-all-linux-and-unix)
- [Lots of dropped packages when tcpdumping on busy interface](https://serverfault.com/questions/421789/lots-of-dropped-packages-when-tcpdumping-on-busy-interface)
-### _Network security concepts_
+### Network security concepts
-- [Linux TCP/IP Tuning for Scalability](https://developer.akamai.com/blog/2012/09/27/linux-tcpip-tuning-scalability)
- [What Is a Smurf Attack?](https://www.fortinet.com/resources/cyberglossary/smurf-attack)
- [Smurf attack](https://en.wikipedia.org/wiki/Smurf_attack)
- [Tcpdump: Filter Packets with Tcp Flags](https://www.howtouselinux.com/post/tcpdump-capture-packets-with-tcp-flags)
@@ -745,19 +1204,18 @@ Know that the connection tracking subsystem uses four different internal tables:
- [Address Resolution Protocol (ARP)](https://en.wikipedia.org/wiki/Address_Resolution_Protocol)
- [Netfilter Conntrack Sysfs variables](https://www.kernel.org/doc/html/latest/networking/nf_conntrack-sysctl.html)
- [TCP SYN Flood](https://www.imperva.com/learn/ddos/syn-flood/)
-- [Firewall Log Messages What Do They Mean](https://www.halolinux.us/firewalls/firewall-log-messages-what-do-they-mean.html)
-### _Networking concepts_
+### Networking concepts
- [Internet Control Message Protocol](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol)
- [What is the Internet Control Message Protocol (ICMP)?](https://www.cloudflare.com/learning/ddos/glossary/internet-control-message-protocol-icmp/)
-- [TCP/IP packets](https://inc0x0.com/tcp-ip-packets-introduction/)
+- [TCP/IP packets – Introduction](https://inc0x0.com/tcp-ip-packets-introduction/)
- [Type of Service (ToS) and DSCP Values](https://linuxreviews.org/Type_of_Service_(ToS)_and_DSCP_Values)
-### _`conntrack` command_
+### conntrack command
-- [CONNTRACK(8)](https://manpages.debian.org/bullseye/conntrack/conntrack.8.en.html)
-- [Package: conntrack (1:1.4.6-2)](https://packages.debian.org/bullseye/conntrack)
+- [CONNTRACK(8)](https://manpages.debian.org/trixie/conntrack/conntrack.8.en.html)
+- [Package: conntrack (1:1.4.8-2 and others)](https://packages.debian.org/trixie/conntrack)
- [Matching connection tracking stateful metainformation](https://wiki.nftables.org/wiki-nftables/index.php/Matching_connection_tracking_stateful_metainformation)
## Navigation
diff --git a/G015 - Host optimization 01 ~ Adjustments through sysctl.md b/G015 - Host optimization 01 ~ Adjustments through sysctl.md
index d6e11df..37dc4f5 100644
--- a/G015 - Host optimization 01 ~ Adjustments through sysctl.md
+++ b/G015 - Host optimization 01 ~ Adjustments through sysctl.md
@@ -1,27 +1,53 @@
# G015 - Host optimization 01 ~ Adjustments through `sysctl`
-You can get performance improvements in your system just by setting some parameters in `sysctl` configuration files. Remember that you did something like this for hardening the TCP/IP stack in the [**G012** guide](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md).
-
-The changes explained in the following sections are focused on improving the performance of your system on different concerns. For the sake of clarity, each concern will have its own `sysctl` file with their own particular parameter set. This is meant to avoid the problem of having the same parameter defined twice on different configuration files, and worrying about in which order are being read (`sysctl` only keeps the last value read for each parameter).
-
-> **BEWARE!**
-> You should revise and adjust the values set in the following sections to suit your own system setup and presumed load.
-
-In this guide, you're going to create a bunch of sysctl configuration files that all have to be placed in the `/etc/sysctl.d`. So, `cd` to that path and start working on the sections below.
-
-~~~bash
+- [Tune your Proxmox VE system's `sysctl` files to improve performance](#tune-your-proxmox-ve-systems-sysctl-files-to-improve-performance)
+- [First, go to the `sysctl` directory](#first-go-to-the-sysctl-directory)
+- [Network optimizations](#network-optimizations)
+- [Memory optimizations](#memory-optimizations)
+- [Kernel optimizations](#kernel-optimizations)
+- [Reboot the system](#reboot-the-system)
+- [Final considerations](#final-considerations)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [`sysctl` variables](#sysctl-variables)
+ - [About `sysctl` in general](#about-sysctl-in-general)
+ - [About network optimizations](#about-network-optimizations)
+ - [About memory optimizations](#about-memory-optimizations)
+ - [Inotify system](#inotify-system)
+ - [About optimizing the kernel](#about-optimizing-the-kernel)
+- [Navigation](#navigation)
+
+## Tune your Proxmox VE system's `sysctl` files to improve performance
+
+You can get performance improvements in your Proxmox VE system just by setting some parameters in `sysctl` configuration files. Remember that you did something like this for hardening the TCP/IP stack back in the [**G012** chapter](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md).
+
+The changes explained in the following sections are focused on improving the performance of your system on different concerns. For the sake of clarity, each concern will have its own `sysctl` file with their own particular parameter set. This is to avoid two problems: having the same parameter defined twice on different configuration files, and worrying about the order in which the parameters are being read (`sysctl` only keeps the last value read for each parameter).
+
+> [!IMPORTANT]
+> **Do not apply this configuration blindly in your PVE system**\
+> Revise and adjust the values set in the following sections to suit your own system setup and presumed load.
+
+## First, go to the `sysctl` directory
+
+In this chapter you're going to create a bunch of `sysctl` configuration files that all have to be placed in the `/etc/sysctl.d` directory. So, first `cd` to that path:
+
+~~~sh
$ cd /etc/sysctl.d/
~~~
+From this directory, apply the configurations specified in the following sections.
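+
+The following sections apply each file individually with `sysctl -p`. If you ever need to reload all of them at once (for instance, after editing several files), you can ask `sysctl` to process every system configuration location, `/etc/sysctl.d/` included, in lexical order:
+
+~~~sh
+$ sudo sysctl --system
+~~~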
+
## Network optimizations
-1. Create a new empty file called `85_network_optimizations.conf`.
+1. Create a new empty file called `85_network_optimizations.conf`:
- ~~~bash
+ ~~~sh
$ sudo touch 85_network_optimizations.conf
~~~
-2. Edit the `85_network_optimizations.conf` file and input the following content.
+2. Edit the `85_network_optimizations.conf` file and set the following content:
~~~properties
## NETWORK optimizations
@@ -163,21 +189,21 @@ $ cd /etc/sysctl.d/
net.unix.max_dgram_qlen = 1024
~~~
-3. Save the `85_network_optimizations.conf` file and apply the changes.
+3. Save the `85_network_optimizations.conf` file and apply the changes:
- ~~~bash
+ ~~~sh
$ sudo sysctl -p 85_network_optimizations.conf
~~~
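+
+As a quick sanity check, you can query any of the parameters you just applied and confirm the value in effect. For instance, with the `net.unix.max_dgram_qlen` parameter shown at the end of the file above:
+
+~~~sh
+$ sysctl net.unix.max_dgram_qlen
+net.unix.max_dgram_qlen = 1024
+~~~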
## Memory optimizations
-1. Create a new empty file called `85_memory_optimizations.conf`.
+1. Create a new empty file called `85_memory_optimizations.conf`:
- ~~~bash
+ ~~~sh
$ sudo touch 85_memory_optimizations.conf
~~~
-2. Edit the `85_memory_optimizations.conf` file and input the following content.
+2. Edit the `85_memory_optimizations.conf` file and enter the following content:
~~~properties
## Memory optimizations
@@ -238,21 +264,21 @@ $ cd /etc/sysctl.d/
vm.nr_hugepages = 1
~~~
-3. Save the `85_memory_optimizations.conf` file and apply the changes.
+3. Save the `85_memory_optimizations.conf` file and apply the changes:
- ~~~bash
+ ~~~sh
$ sudo sysctl -p 85_memory_optimizations.conf
~~~
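+
+Since the file above reserves one explicit hugepage (`vm.nr_hugepages = 1`), you can also confirm that the kernel has actually allocated it (assuming it found enough contiguous free memory to do so):
+
+~~~sh
+$ grep HugePages_Total /proc/meminfo
+HugePages_Total:       1
+~~~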
## Kernel optimizations
-1. Create a new empty file called `85_kernel_optimizations.conf`.
+1. Create a new empty file called `85_kernel_optimizations.conf`:
- ~~~bash
+ ~~~sh
$ sudo touch 85_kernel_optimizations.conf
~~~
-2. Edit the `85_kernel_optimizations.conf` file and input the following content.
+2. Edit the `85_kernel_optimizations.conf` file and input the following content:
~~~properties
## Kernel optimizations
@@ -260,17 +286,11 @@ $ cd /etc/sysctl.d/
# Controls whether unprivileged users can load eBPF programs.
# For most scenarios this is recommended to be set as 1 (enabled).
# This is a kernel hardening concern rather than a optimization one, but
- # is left here since its just this value.
+ # is left here since its just this value.
kernel.unprivileged_bpf_disabled=1
# Process Scheduler related settings
#
- # Determines how long a migrated process has to be running before the kernel
- # will consider migrating it again to another core. So, a higher value makes
- # the kernel take longer before migrating again an already migrated process.
- # Value in MILLISECONDS.
- kernel.sched_migration_cost_ns = 5000000
- #
# This setting groups tasks by TTY, to improve perceived responsiveness on an
# interactive system. On a server with a long running forking daemon, this will
# tend to keep child processes from migrating away as soon as they should.
@@ -278,35 +298,35 @@ $ cd /etc/sysctl.d/
kernel.sched_autogroup_enabled = 0
~~~
-3. Save the `85_kernel_optimizations.conf` file and apply the changes.
+3. Save the `85_kernel_optimizations.conf` file and apply the changes:
- ~~~bash
+ ~~~sh
$ sudo sysctl -p 85_kernel_optimizations.conf
~~~
## Reboot the system
-Although you've applied the changes with the `sysctl -p` command, it'll be better to restart your server too.
+Although you've applied the changes with the `sysctl -p` command, it'll be better to restart your server too:
-~~~bash
+~~~sh
$ sudo reboot
~~~
-Then, open a new shell as your `mgrsys` user and check the log files (`syslog` in particular) under the `/var/log` directory to look for possible errors or warnings related to your changes.
+Then, open a new shell as your `mgrsys` user and check your system's journal (with the `journalctl` command), and also check the log files under the `/var/log` directory, to look for possible errors or warnings related to your changes.
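+
+For instance, to list only the messages with priority `warning` or more severe that have been recorded since this last boot:
+
+~~~sh
+$ sudo journalctl -b -p warning
+~~~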
## Final considerations
-All the values modified in the previous sections have to be measured and tested against the possibilities of your system and the real load it has. So expect to revise this configuration later to fit it better to your needs, and maybe even adjust some other `sysctl` parameters that haven't been shown in this guide.
+All the values modified in the previous sections have to be measured and tested against the capabilities of your system and the real load running on it. So expect to revise this configuration later to make it fit your needs better, and maybe even adjust some other `sysctl` parameters that haven't been shown in this guide.
On the other hand, notice how I avoided touching any `sysctl` configuration files already present in the system (like the ones related to the PVE platform). This guarantees that future updates can change them without complaining about being different as they expected them to be.
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/sysctl.d`
-### _Files_
+### Files
- `/etc/sysctl.d/85_kernel_optimizations.conf`
- `/etc/sysctl.d/85_memory_optimizations.conf`
@@ -314,19 +334,19 @@ On the other hand, notice how I avoided touching any `sysctl` configuration file
## References
-### _`sysctl` variables_
+### `sysctl` variables
- [Networking ip variables](https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt)
- [Virtual memory variables](https://www.kernel.org/doc/Documentation/sysctl/vm.txt)
- [Summary of hugetlbpage (huge pages) support in the Linux kernel](https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt)
-### _About `sysctl` in general_
+### About `sysctl` in general
- [Most popular speedup sysctl options for Proxmox, corrected for the 5.3.18-3-pve kernel](https://gist.github.com/sergey-dryabzhinsky/bcc1a15cb7d06f3d4606823fcc834824#gistcomment-3297285)
- [Sysctl on Archlinux wiki](https://wiki.archlinux.org/index.php/Sysctl)
-- [Linux Hardening: A 15-Step Checklist For A Secure Linux Server](https://www.pluralsight.com/blog/it-ops/linux-hardening-secure-server-checklist)
+- [Linux Hardening. Secure your Linux Distro in 15 Steps](https://www.pluralsight.com/resources/blog/tech-operations/linux-hardening-secure-server-checklist)
-### _Network optimizations_
+### About network optimizations
- [Netfilter Conntrack Sysfs variables](https://www.kernel.org/doc/html/latest/networking/nf_conntrack-sysctl.html)
- [How long does conntrack remember a connection?](https://unix.stackexchange.com/questions/524295/how-long-does-conntrack-remember-a-connection)
@@ -339,27 +359,26 @@ On the other hand, notice how I avoided touching any `sysctl` configuration file
- [Linux Increase TCP Port Range with net.ipv4.ip_local_port_range Kernel Parameter](https://www.cyberciti.biz/tips/linux-increase-outgoing-network-sockets-range.html)
- [Linux increase ip_local_port_range TCP port range](https://ma.ttias.be/linux-increase-ip_local_port_range-tcp-port-range/)
- [Neighbour Table Overflow – sysctl.Conf Tuning](https://www.serveradminblog.com/2011/02/neighbour-table-overflow-sysctl-conf-tunning/)
-- [tcp_slow_start_after_idle tcp_no_metrics_save performance](https://github.com/ton31337/tools/wiki/tcp_slow_start_after_idle---tcp_no_metrics_save-performance)
- [Overflow in datagram type sockets](https://www.toptip.ca/2013/02/overflow-in-datagram-type-sockets.html)
-### _Memory optimizations_
+### About memory optimizations
- [Understanding vm.swappiness](https://linuxhint.com/understanding_vm_swappiness/)
- [How does vm.overcommit_memory work?](https://serverfault.com/questions/606185/how-does-vm-overcommit-memory-work)
- [Memory Overcommit Settings](https://iainvlinux.wordpress.com/2014/02/16/memory-overcommit-settings/)
- [Reducing inode and dentry caches to keep OOM killer at bay](https://major.io/2008/12/03/reducing-inode-and-dentry-caches-to-keep-oom-killer-at-bay/)
- [Better Linux Disk Caching & Performance with vm.dirty_ratio & vm.dirty_background_ratio](https://lonesysadmin.net/2013/12/22/better-linux-disk-caching-performance-vm-dirty_ratio/)
-- [Hugepages on Debian wiki](https://wiki.debian.org/Hugepages)
+- [Debian. Wiki. Hugepages](https://wiki.debian.org/Hugepages)
- [Linux HugePages](https://www.educba.com/linux-hugepages/)
- [Hugepages and Multiple VMs](https://forum.proxmox.com/threads/hugepages-and-multiple-vms.34075/)
-### _Inotify system_
+### Inotify system
- [What is a reasonable amount of inotify watches with Linux?](https://stackoverflow.com/questions/535768/what-is-a-reasonable-amount-of-inotify-watches-with-linux)
- [Increasing the amount of inotify watchers](https://gist.github.com/ntamvl/7c41acee650d376863fd940b99da836f)
- [Ubuntu Increase Inotify Watcher (File Watch Limit)](https://dev.to/rubiin/ubuntu-increase-inotify-watcher-file-watch-limit-kf4)
-### _Kernel optimizations_
+### About optimizing the kernel
- [Unable to run bpf program as non root](https://stackoverflow.com/questions/65949586/unable-to-run-bpf-program-as-non-root)
- [Disable unprivileged BPF](https://gitlab.tails.boum.org/tails/tails/-/issues/11827)
@@ -367,4 +386,4 @@ On the other hand, notice how I avoided touching any `sysctl` configuration file
## Navigation
-[<< Previous (**G014. Host hardening 08**)](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G016. Host optimization 02**) >>](G016%20-%20Host%20optimization%2002%20~%20Disabling%20transparent%20hugepages.md)
+[<< Previous (**G014. Host hardening 08**)](G014%20-%20Host%20hardening%2008%20~%20Firewalling.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G016. Host optimization 02**) >>](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md)
diff --git a/G016 - Host optimization 02 ~ Disabling the transparent hugepages.md b/G016 - Host optimization 02 ~ Disabling the transparent hugepages.md
new file mode 100644
index 0000000..7d23628
--- /dev/null
+++ b/G016 - Host optimization 02 ~ Disabling the transparent hugepages.md
@@ -0,0 +1,119 @@
+# G016 - Host optimization 02 ~ Disabling the transparent hugepages
+
+- [Understanding the transparent hugepages](#understanding-the-transparent-hugepages)
+- [Status of the transparent hugepages in your host](#status-of-the-transparent-hugepages-in-your-host)
+- [Disabling the transparent hugepages](#disabling-the-transparent-hugepages)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [About transparent hugepages](#about-transparent-hugepages)
+ - [About exclusive hugepages](#about-exclusive-hugepages)
+- [Navigation](#navigation)
+
+## Understanding the transparent hugepages
+
+**Transparent hugepages** is a Linux kernel feature intended to improve performance by making more efficient use of your processor’s memory-mapping hardware. This feature gives some applications a small performance boost, but can cause significant latency issues (to database engines for instance) or even apparent memory leaks at worst.
+
+> [!NOTE]
+> **Do not confuse transparent hugepages with explicit hugepages**\
+> While transparent hugepages are dynamically reassigned by the kernel to be reused by any compatible application, explicit hugepages are reserved for specific services and remain inaccessible to other workloads in the system.
+
+In this chapter I'll show you how to disable the transparent hugepages since, as far as I've read:
+
+- There is no recommended setup for Proxmox VE about this feature.
+- It may not be worth having it enabled in a small server setup (especially on the RAM side) like the one used in this guide.
+- It can cause serious latency issues in a system when it kicks in to compact fragmented RAM.
+
+> [!IMPORTANT]
+> **Do not dismiss using transparent hugepages forever!**\
+> Remember that some applications may benefit from this feature, although they have to be built for it specifically.
+>
+> Therefore, research the [references found at the end of this chapter](#references) and carefully evaluate if your system's workload can benefit from having transparent hugepages enabled.
+
+## Status of the transparent hugepages in your host
+
+To check out the current status of the transparent hugepages in your standalone PVE node, log as your `mgrsys` user in a shell and execute:
+
+~~~sh
+$ cat /sys/kernel/mm/transparent_hugepage/enabled
+always [madvise] never
+~~~
+
+The highlighted value is `madvise`, which means that transparent hugepages are enabled, but only for applications that request them specifically.
+
+There is another parameter that hints at the usage of transparent hugepages, `AnonHugePages`. To see its current value, execute the following command:
+
+~~~sh
+$ grep AnonHuge /proc/meminfo
+AnonHugePages: 0 kB
+~~~
+
+At this point, no application has made use of transparent hugepages, so the value of `AnonHugePages` is 0 KiB.
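+
+Later on, when your VMs and other services are running, you can also estimate how much transparent hugepage memory a particular process is using by adding up the `AnonHugePages` fields of its memory mappings. A quick sketch, where `<PID>` is a placeholder for the process ID you want to inspect:
+
+~~~sh
+$ sudo awk '/AnonHugePages/ { sum += $2 } END { print sum " kB" }' /proc/<PID>/smaps
+~~~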
+
+## Disabling the transparent hugepages
+
+To switch the status of the transparent hugepages from `madvise` to `never`, you must modify the configuration of your Debian system's GRUB bootloader.
+
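+> [!NOTE]
+> **You can test the effect at runtime first**\
+> If you prefer to see how your system behaves without transparent hugepages before making the change permanent, you can switch the value on the fly. This runtime change is temporary and reverts on the next reboot:
+>
+> ~~~sh
+> $ echo never | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
+> never
+> ~~~
+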
+1. Open a shell as `mgrsys`, `cd` to `/etc/default/` and make a backup of the original `grub` file:
+
+ ~~~sh
+ $ cd /etc/default/
+ $ sudo cp grub grub.orig
+ ~~~
+
+2. Edit the `grub` file, modifying the `GRUB_CMDLINE_LINUX=""` line as follows:
+
+ ~~~properties
+ GRUB_CMDLINE_LINUX="transparent_hugepage=never"
+ ~~~
+
+3. Update the GRUB configuration and reboot the system:
+
+ ~~~sh
+ $ sudo update-grub
+ $ sudo reboot
+ ~~~
+
+4. Log again as `mgrsys` and check the current status of the transparent hugepages:
+
+ ~~~sh
+ $ cat /sys/kernel/mm/transparent_hugepage/enabled
+ always madvise [never]
+ ~~~
+
+ The highlighted status should be `never` now, as shown in the snippet above.
+
+## Relevant system paths
+
+### Directories
+
+- `/etc/default`
+
+### Files
+
+- `/etc/default/grub`
+- `/etc/default/grub.orig`
+- `/proc/meminfo`
+- `/sys/kernel/mm/transparent_hugepage/enabled`
+
+## References
+
+### About transparent hugepages
+
+- [The Linux Kernel Archives. Transparent Hugepage Support](https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html)
+- [Google Groups. mechanical-sympathy. failing to understand the issues with transparent huge paging](https://groups.google.com/g/mechanical-sympathy/c/sljzehnCNZU)
+- [The mole is digging. Transparent Hugepages: measuring the performance impact](https://alexandrnikitin.github.io/blog/transparent-hugepages-measuring-the-performance-impact/)
+- [Proxmox. Forums. How should Transparent Hugepages be configured?](https://forum.proxmox.com/threads/how-should-transparent-hugepages-be-configured.132611/)
+- [GoLinuxHub. How to enable or disable transparent (THP) and explicit (nr_hugepages) hugepage and check the status in Linux with examples (explained in detail)](https://www.golinuxhub.com/2018/08/enable-or-disable-transparent-anon-hugepage-thp-check-status-examples-linux/)
+- [Blog for Database and System Administrators. Debian 10: How to disable transparent hugepages](https://dbsysupgrade.com/debian-10-how-to-disable-transparent-hugepages/)
+- [StackExchange. Unix & Linux. Which distributions enable transparent huge pages “for all applications”?](https://unix.stackexchange.com/questions/495816/which-distributions-enable-transparent-huge-pages-for-all-applications)
+
+### About exclusive hugepages
+
+- [Proxmox HugePages for VMs](https://dev.to/sergelogvinov/proxmox-hugepages-for-vms-1fh3)
+- [Enabling Hugepages in Proxmox](https://docs.renderex.ae/posts/Enabling-hugepages/)
+
+## Navigation
+
+[<< Previous (**G015. Host optimization 01**)](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G017. Virtual Networking**) >>](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md)
diff --git a/G016 - Host optimization 02 ~ Disabling transparent hugepages.md b/G016 - Host optimization 02 ~ Disabling transparent hugepages.md
deleted file mode 100644
index 42f84b2..0000000
--- a/G016 - Host optimization 02 ~ Disabling transparent hugepages.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# G016 - Host optimization 02 ~ Disabling transparent hugepages
-
-**Transparent Hugepages** is a Linux kernel feature intended to improve performance by making more efficient use of your processor’s memory-mapping hardware. It is enabled (“enabled=always”) by default in most Linux distributions.
-
-Transparent Hugepages gives some applications a small performance improvement (~ 10% at best, 0-3% more typically), but can cause significant performance problems (to database engines for instance) or even apparent memory leaks at worst.
-
-## Status of transparent hugepages in your host
-
-To check out the current status of the transparent hugepages in your standalone PVE node, log as your `mgrsys` user in a shell and execute.
-
-~~~bash
-$ cat /sys/kernel/mm/transparent_hugepage/enabled
-always [madvise] never
-~~~
-
-The highlighted value is `madvise`, which means that transparent hugepages are enabled but not system wide. It's a feature available only for applications that request it specifically.
-
-There's other parameter that hints about the usage of transparent hugepages, `AnonHugePages`. To see its current value, execute the following command.
-
-~~~bash
-$ grep AnonHuge /proc/meminfo
-AnonHugePages: 0 kB
-~~~
-
-At this point, no application has made use of transparent hugepages, so the value of `AnonHugePages` is 0 KiB.
-
-## Disabling the transparent hugepages
-
-To switch the status of the transparent hugepages from `madvise` to `never`, you'll have to modify the configuration of your Debian's Grub boot system.
-
-1. Open a shell as `mgrsys`, `cd` to `/etc/default/` and make a backup of the original `grub` file.
-
- ~~~bash
- $ cd /etc/default/
- $ sudo cp grub grub.orig
- ~~~
-
-2. Edit the `grub` file, modifying the `GRUB_CMDLINE_LINUX=""` line as follows.
-
- ~~~properties
- GRUB_CMDLINE_LINUX="transparent_hugepage=never"
- ~~~
-
-3. Update the grub and reboot the system.
-
- ~~~bash
- $ sudo update-grub
- $ sudo reboot
- ~~~
-
-4. Log again as `mgrsys` and check the current status of the transparent hugepages.
-
- ~~~bash
- $ cat /sys/kernel/mm/transparent_hugepage/enabled
- always madvise [never]
- ~~~
-
-## Relevant system paths
-
-### _Directories_
-
-- `/etc/default`
-
-### _Files_
-
-- `/etc/default/grub`
-- `/etc/default/grub.orig`
-- `/proc/meminfo`
-- `/sys/kernel/mm/transparent_hugepage/enabled`
-
-## References
-
-- [Transparent Hugepage Support](https://www.kernel.org/doc/Documentation/vm/transhuge.txt)
-- [Debian 10: How to disable transparent hugepages](https://dbsysupgrade.com/debian-10-how-to-disable-transparent-hugepages/)
-- [Disabling transparent hugepages on Ubuntu/Debian](https://lxadm.com/Disabling_transparent_hugepages_on_Ubuntu/Debian)
-- [Which distributions enable transparent huge pages “for all applications”?](https://unix.stackexchange.com/questions/495816/which-distributions-enable-transparent-huge-pages-for-all-applications)
-
-## Navigation
-
-[<< Previous (**G015. Host optimization 01**)](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G017. Virtual Networking**) >>](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md)
diff --git a/G017 - Virtual Networking ~ Network configuration.md b/G017 - Virtual Networking ~ Network configuration.md
index c80a774..c663b1e 100644
--- a/G017 - Virtual Networking ~ Network configuration.md
+++ b/G017 - Virtual Networking ~ Network configuration.md
@@ -1,99 +1,127 @@
# G017 - Virtual Networking ~ Network configuration
-In the upcoming guides, I'll show you how to setup a small Kubernetes cluster run with virtual machines, and those VMs will need networking among each other and also with your LAN. Therefore, you need revise the virtual network setup you have and make it fit for the needs you'll face later.
+- [Preparing your virtual network for Kubernetes](#preparing-your-virtual-network-for-kubernetes)
+- [Current virtual network setup](#current-virtual-network-setup)
+- [Target network scenario](#target-network-scenario)
+- [Creating an isolated Linux bridge](#creating-an-isolated-linux-bridge)
+- [Bridges management](#bridges-management)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [Proxmox VE](#proxmox-ve)
+ - [Linux and virtual networking](#linux-and-virtual-networking)
+- [Navigation](#navigation)
+
+## Preparing your virtual network for Kubernetes
+
+In the upcoming chapters, I'll show you how to set up a small Kubernetes cluster run on virtual machines. Those VMs will need networking among each other and also with your LAN. Therefore, you need to revise the virtual network setup you have and make it fit for the needs you will face later.
## Current virtual network setup
-The setup of your Proxmox VE standalone system is kept at the node level. To see it, you need to get into your PVE web console and browse to the `System > Network` view of your `pve` node.
+The network setup of your Proxmox VE standalone system is kept at its node level. To see it, you need to get into your PVE web console and browse to the `System > Network` view of your `pve` node.
-
+
-In the capture above you can see the setup on my own Proxmox VE host, which has three network interfaces:
+In the capture above you can see the setup on my own Proxmox VE host, which has these network interfaces:
-- `enp2s0` : is my host's real Ethernet NIC.
+- `enp3s0`\
+ Is my host's real Ethernet NIC.
-- `vmbr0` : is the Linux bridge generated in the installation of Proxmox VE. It holds the IP of this host, and "owns" the `enp2s0` NIC. If you remember, all this was set up [back in the Proxmox VE installation](G002%20-%20Proxmox%20VE%20installation.md).
+- `vmbr0`\
+ Is the Linux bridge generated in the installation of Proxmox VE. It holds the IP of this host, and "owns" the `enp3s0` NIC. If you remember, all this was set up [back in the Proxmox VE installation](G002%20-%20Proxmox%20VE%20installation.md).
-- `wlp3s0` : is the host's real WIFI NIC. Notice how Proxmox VE doesn't know it's `Type` and that it's not `Active`.
+Your system should have at least one `en*` device and the `vmbr0` Linux bridge.
-At least, your system should have one `en*` device and the `vmbr0` Linux bridge.
+Be aware that any changes you make in this page will be saved in the `/etc/network/interfaces` file of your PVE host. Open a shell as `mgrsys` and then make a backup of that file before you start changing your PVE network:
-Another thing you must be aware of is that any changes you make in this page will be saved in the `/etc/network/interfaces` file of your PVE host. So, open a shell as `mgrsys` and then make a backup of that file before you start changing things here.
-
-~~~bash
+~~~sh
$ sudo cp /etc/network/interfaces /etc/network/interfaces.orig
~~~
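+
+For reference, the `vmbr0` bridge you already have corresponds to a stanza in that file roughly like the sketch below. The IP addresses are just placeholders here; yours are the ones you chose during the Proxmox VE installation:
+
+~~~properties
+iface enp3s0 inet manual
+
+auto vmbr0
+iface vmbr0 inet static
+    address 192.168.1.10/24
+    gateway 192.168.1.1
+    bridge-ports enp3s0
+    bridge-stp off
+    bridge-fd 0
+~~~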
## Target network scenario
-The idea is to create a small cluster with a few virtual machines in which each VM will have two network cards. Why two network cards? To separate the internal communications that a Kubernetes cluster has between its nodes from the traffic between the cluster and the external or LAN network.
+The idea is to create a small Kubernetes cluster running on a few virtual machines. Each of those VMs will have two network cards. Why two network cards? To separate the internal communications that a Kubernetes cluster has between its nodes from the traffic between the cluster and the external or LAN network.
-So, the VMs will have access to the external network and be reachable through one NIC, and communicate only with each other for cluster-related tasks through the other NIC. This is achieved simply by setting up the NICs on different IP subnets but, to guarantee true isolation for the internal-cluster-communication NICs you can emulate how it would be done if you were using real hardware: by setting up another Linux bridge not connected to the external network and connecting the internal-cluster-communication NICs to it.
+The VMs will have access to the external or LAN network and be reachable through one NIC, and communicate only with each other for cluster-related tasks through the other NIC. This is achieved simply by setting up the NICs on different IP subnets. But, to guarantee true isolation for the internal-cluster-communication NICs, you can emulate what you would do with real hardware: set up another Linux bridge that is not connected to the external network, and connect the internal-cluster-communication NICs to it.
## Creating an isolated Linux bridge
-Creating a new and isolated Linux bridge in your Proxmox VE system is rather simple through the web console.
+Creating a new and isolated Linux bridge in your Proxmox VE system is rather simple through the web console:
-1. Browse to the `System > Network` of your `pve` node. Then click on the `Create` button to unfold a list of options.
+1. Browse to the `System > Network` of your `pve` node. Then click on the `Create` button to unfold a list of options:
- 
+ 
Notice that there are two options groups in the unfolded list:
- - `Linux` options: networking technology included in the Linux kernel, meaning that it's already available in your system.
+ - **`Linux` options**\
+ Networking technology included in the Linux kernel, meaning that it's already available in your PVE system.
+
+ - **`OVS` options**\
+ Relative to **Open vSwitch** technology. Since it's not installed in your system, these options won't work in your setup.
- - `OVS` options: relative to **Open vSwitch** technology. Since it's not installed in your system, these options won't work in your setup. If you want to know how to enable the OVS technology in your system, check the [**G910** appendix guide](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md).
+ > [!NOTE]
+ > If you want to know how to enable the OVS technology in your system, check the [**G910** appendix chapter](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md).
-2. Click on the `Linux Bridge` option and you'll meet the `Create: Linux Bridge` form window.
+2. Click on the `Linux Bridge` option and you'll meet the `Create: Linux Bridge` form window:
- 
+ 
The default values are just fine:
- - `Name`: you could put a different name if you wanted, but since Proxmox VE follows a naming convention for device names like these, it's better to leave the default one to avoid potential issues.
+ - `Name`\
+     You could put a different name if you wanted to, but since Proxmox VE follows a naming convention for device names like these, it is better to leave the default one to avoid potential issues.
- - `IPv4/CIDR` and `Gateway (IPv4)`: left empty because you don't really need an IP assigned to a bridge for it to do its job at the MAC level.
+ - `IPv4/CIDR` and `Gateway (IPv4)`\
+ Left empty because you don't really need an IP assigned to a bridge for it to do its job at the MAC level.
- - `IPv6/CIDR` and `Gateway (IPv6)`: the same reason as with the IPv4 values, plus you're not using IPv6 in the setup explained in this guide series.
+ - `IPv6/CIDR` and `Gateway (IPv6)`\
+ Also left empty for the same reason as with the IPv4 values, plus you're not even using IPv6 in the setup explained in this guide.
- - `Autostart`: you want this bridge to be always available when the system boots up.
+ - `Autostart`\
+ You want this bridge to be always available when the system boots up.
- - `VLAN aware`: for the scenario contemplated in this guide series, there's no need for you to use VLANs at all. In fact, the other `vmbr0` bridge doesn't have this option enabled either.
+ - `VLAN aware`\
+ For the scenario contemplated in this guide series, there is no need for you to use VLANs at all. In fact, the existing `vmbr0` bridge does not have this option enabled either.
- - `Bridge ports`: here will be listed all the interfaces connected to this bridge. Right now this list has to be left empty in this bridge. Notice that, in the `vmbr0` bridge, the `enp2s0` appears here.
+ - `Bridge ports`\
+     This field lists all the interfaces connected to the bridge. For this new bridge, it has to be left empty. Notice that, in the `vmbr0` bridge, the `enp3s0` interface appears listed in this field.
- - `Comment`: Here you could put a string like `K3s cluster inner networking` (**K3s** will be the Kubernetes distribution used to set up the cluster).
+ - `Comment`\
+ Here you could enter a string like `K3s cluster inner networking` (**K3s** will be the Kubernetes distribution used to set up the cluster later).
-3. Click on `Create` and you'll see your new `vmbr1` Linux bridge added to the list of network devices.
+3. Click on `Create` and you'll see your new `vmbr1` Linux bridge added to the list of network devices:
- 
+ 
- You'll notice two things:
+ You'll see that:
- The `Apply Configuration` button has been enabled.
- - A log console has appeared right below the network devices list, showing you the "pending changes" that you have to apply.
+ - Your new Linux bridge has been added to the network list, **but is not active**.
+ - A log console has appeared right below the network devices list, showing you the "pending changes" you have to apply.
-4. Press on the `Apply Configuration` button to make the underlying `ifupdown2` commands apply the changes. This action demands confirmation by a window as the one shown below.
+4. Press on the `Apply Configuration` button to make the underlying `ifupdown2` commands apply the changes. This action demands confirmation in the window shown below:
- 
+ 
- Press on `Yes`, and you'll see a small progress window that should finish rather fast.
+ Press on `Yes`, and you'll see a small progress window that should finish rather fast:
- 
+ 
-5. The Network page will refresh automatically and you'll see your new vmbr1 Linux bridge active in the devices list.
+5. The `Network` page will refresh automatically and you'll see your new `vmbr1` Linux bridge active in the devices list:
- 
+ 
-6. You can also check out the changes applied at the `/etc/network/interfaces` configuration file of your PVE host. So, open a shell as mgrsys and open the file with `less`.
+6. You can also check out the changes applied to the `/etc/network/interfaces` configuration file of your PVE host. Open a shell as `mgrsys` and view the file with `less`:
- ~~~bash
+ ~~~sh
$ less /etc/network/interfaces
~~~
- The file should look like the following at this point.
+   The file should now look like this:
- ~~~bash
+ ~~~sh
# network interface settings; autogenerated
# Please do NOT modify this file directly, unless you know what
# you're doing.
@@ -108,97 +136,88 @@ Creating a new and isolated Linux bridge in your Proxmox VE system is rather sim
auto lo
iface lo inet loopback
- iface enp2s0 inet manual
+ iface enp3s0 inet manual
auto vmbr0
iface vmbr0 inet static
- address 192.168.1.107/24
- gateway 192.168.1.1
- bridge-ports enp2s0
+ address 10.1.0.1/8
+ gateway 10.0.0.1
+ bridge-ports enp3s0
bridge-stp off
bridge-fd 0
- iface wlp3s0 inet manual
-
auto vmbr1
iface vmbr1 inet manual
bridge-ports none
bridge-stp off
bridge-fd 0
#K3s cluster inner networking
+
+ source /etc/network/interfaces.d/*
~~~
- You'll find your new `vmbr1` bridge added at the bottom of this `interfaces` file, with a set of `bridge-` options similar to the original `vmbr0` bridge.
+ You will find your new `vmbr1` bridge appended to this `interfaces` file with a set of `bridge-` options similar to the original `vmbr0` bridge.
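+
+If you ever adjust this `interfaces` file by hand instead of using the web console, remember that Proxmox VE applies the changes through `ifupdown2`. A minimal sketch of reloading the configuration from a shell (assuming you're logged in as `mgrsys`):
+
+~~~sh
+$ sudo ifreload -a      # ifupdown2: reapply /etc/network/interfaces without rebooting
+$ ip link show vmbr1    # confirm afterwards that the bridge is present and up
+~~~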
## Bridges management
-You can handle your bridges through your Proxmox VE web console, but that's a rather limited tool for solving more complex situations. So, be aware that:
+You can handle your bridges through your Proxmox VE web console, but that is a rather limited tool for solving more complex situations:
-- You can always work directly with the `/etc/network/interfaces` to adjust the parameters of your bridges (and any other network interface running in your system).
+- You can use the `ip` command to handle the bridges like any other network device. For instance, you can compare the traffic statistics of your new `vmbr1` bridge with the ones from `vmbr0`.
-- You can use the `ip` command to handle the bridges as with any other network device. For instance, you can compare the traffic statistics of your new `vmbr1` bridge with the ones from `vmbr0`.
-
- ~~~bash
+ ~~~sh
$ ip -s link show vmbr0
- 4: vmbr0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
- link/ether b8:ae:ed:27:d1:7f brd ff:ff:ff:ff:ff:ff
- RX: bytes packets errors dropped missed mcast
- 13175973 48855 0 0 0 23611
- TX: bytes packets errors dropped carrier collsns
- 11374278 17226 0 0 0 0
+ 3: vmbr0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
+ link/ether 98:ee:cb:03:05:a3 brd ff:ff:ff:ff:ff:ff
+ RX: bytes packets errors dropped missed mcast
+ 3682639 12855 0 0 0 5660
+ TX: bytes packets errors dropped carrier collsns
+ 3750287 4968 0 0 0 0
$ ip -s link show vmbr1
- 6: vmbr1: mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default qlen 1000
- link/ether b2:6a:1c:f9:50:fa brd ff:ff:ff:ff:ff:ff
- RX: bytes packets errors dropped missed mcast
- 0 0 0 0 0 0
- TX: bytes packets errors dropped carrier collsns
- 0 0 0 0 0 0
+ 4: vmbr1: mtu 1500 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
+ link/ether 32:06:ef:79:b5:9d brd ff:ff:ff:ff:ff:ff
+ RX: bytes packets errors dropped missed mcast
+ 0 0 0 0 0 0
+ TX: bytes packets errors dropped carrier collsns
+ 0 0 0 0 0 0
~~~
- Notice how, at this point, the `vmbr1` bridge has no traffic whatsoever while `vmbr0` has some going through it.
-
-- There's a command with specific functionality meant for managing bridges, called `bridge`. It's installed in your Proxmox VE system, so you can use it right away. For instance, you could get the details of the links used with your bridges.
-
- ~~~bash
- $ sudo bridge -d link show
- 2: enp2s0: mtu 1500 master vmbr0 state forwarding priority 32 cost 100
- hairpin off guard off root_block off fastleave off learning on flood on mcast_flood on mcast_to_unicast off neigh_suppress off vlan_tunnel off isolated off
- 4: vmbr0: mtu 1500 master vmbr0
- 6: vmbr1: mtu 1500 master vmbr1
- ~~~
+  See how, at this point, the `vmbr1` bridge has no traffic whatsoever while `vmbr0` already has some traffic going through it.
- Notice that `bridge` requires `sudo` to be executed. To understand this command, beyond reading its `man` page, you'll also need to study the particularities of bridges in general. Please take a look to the References I've linked at the end of this guide.
+- There is also a command specifically meant for managing bridges, called `bridge`. It's installed in your Proxmox VE system, so you can use it right away. Just be aware that **the `bridge` command must be executed with `sudo`**.
-- In old documentation you'll see that they use a `brctl` command. This was the old way of managing bridges, but has been deprecated and replaced by `bridge`. You'll find the `brctl` command available in your Proxmox VE system, but try not to use it and stick to the `bridge` one instead, mainly to avoid unexpected issues due to incompatibilities between the old way of configuring bridges and the new one.
+ > [!NOTE]
+  > **To understand the `bridge` command, you also need to study the particularities of bridges in general**\
+  > Please take a look at the [references linked at the end of this chapter](#references).
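+
+  As a quick example of what you can do with it (the interface names and numbering will differ in your system), you could inspect the ports attached to your bridges and the MAC addresses learned on them:
+
+  ~~~sh
+  $ sudo bridge -d link show       # detailed view of every port attached to your bridges
+  $ sudo bridge fdb show br vmbr0  # MAC addresses currently known on the vmbr0 bridge
+  ~~~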
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/network`
-### _Files_
+### Files
- `/etc/network/interfaces`
- `/etc/network/interfaces.orig`
## References
-### _Proxmox VE networking_
+### [Proxmox VE](https://pve.proxmox.com/)
-- [Proxmox VE documentation. Network Configuration](https://pve.deimos.cloud:8006/pve-docs/chapter-sysadmin.html#sysadmin_network_configuration)
-- [Proxmox VE Wiki. Network Configuration](https://pve.proxmox.com/wiki/Network_Configuration)
+- [Host System Administration. Network Configuration](https://pve.proxmox.com/pve-docs/chapter-sysadmin.html#sysadmin_network_configuration)
+- [Wiki. Network Configuration](https://pve.proxmox.com/wiki/Network_Configuration)
-### _Linux and virtual networking_
+### Linux and virtual networking
+- [Linux Expert. Deep Guide to Bridge Command Line in Linux](https://www.linuxoperatingsystem.net/deep-guide-bridge-command-line-in-linux/)
- [Fun with veth devices, Linux virtual bridges, KVM, VMware – attach the host and connect bridges via veth](https://linux-blog.anracom.com/tag/linux-bridge-linking/)
-- [Linux Bridge - Part 1](https://hechao.li/2017/12/13/linux-bridge-part1/)
- [KVM networking](https://www.linux-kvm.org/page/Networking)
- [How to setup and configure network bridge on Debian Linux](https://www.cyberciti.biz/faq/how-to-configuring-bridging-in-debian-linux/)
-- [Mini Container Series Part 5](https://hechao.li/2020/07/01/Mini-Container-Series-Part-5-Network-Isolation/)
+- [Hechao's Blog. Linux Bridge - Part 1](https://hechao.li/posts/linux-bridge-part1/)
+- [Hechao's Blog. Mini Container Series Part 5](https://hechao.li/posts/Mini-Container-Series-Part-5-Network-Isolation/)
- [Linux: bridges, VLANs and RSTP](https://serverfault.com/questions/824621/linux-bridges-vlans-and-rstp)
- [Bridging Ethernet Connections (as of Ubuntu 16.04)](https://help.ubuntu.com/community/NetworkConnectionBridge)
## Navigation
-[<< Previous (**G016. Host optimization 02**)](G016%20-%20Host%20optimization%2002%20~%20Disabling%20transparent%20hugepages.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G018. K3s cluster setup 01**) >>](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md)
+[<< Previous (**G016. Host optimization 02**)](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G018. K3s cluster setup 01**) >>](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md)
diff --git a/G018 - K3s cluster setup 01 ~ Requirements and arrangement.md b/G018 - K3s cluster setup 01 ~ Requirements and arrangement.md
index 10fb07c..da5b331 100644
--- a/G018 - K3s cluster setup 01 ~ Requirements and arrangement.md
+++ b/G018 - K3s cluster setup 01 ~ Requirements and arrangement.md
@@ -1,93 +1,115 @@
# G018 - K3s cluster setup 01 ~ Requirements and arrangement
-With your Proxmox VE standalone node ready, you can start building your private cloud of services. The idea is to setup first a Kubernetes K3s cluster running on KVM virtual machines. Then, you would deploy the apps and services you want in that cluster.
+- [Gearing up for your K3s cluster](#gearing-up-for-your-k3s-cluster)
+- [Requirements for the K3s cluster and the services to deploy in it](#requirements-for-the-k3s-cluster-and-the-services-to-deploy-in-it)
+ - [Rancher K3s Kubernetes cluster](#rancher-k3s-kubernetes-cluster)
+ - [Nextcloud](#nextcloud)
+ - [Gitea](#gitea)
+ - [Kubernetes cluster monitoring stack](#kubernetes-cluster-monitoring-stack)
+ - [Prometheus](#prometheus)
+ - [Grafana](#grafana)
+- [Arrangement of VMs and services](#arrangement-of-vms-and-services)
+- [References](#references)
+ - [About Kubernetes](#about-kubernetes)
+ - [About Rancher K3s](#about-rancher-k3s)
+ - [About Nextcloud](#about-nextcloud)
+ - [About Gitea](#about-gitea)
+ - [About Prometheus](#about-prometheus)
+ - [About Grafana](#about-grafana)
+- [Navigation](#navigation)
+
+## Gearing up for your K3s cluster
+
+With your Proxmox VE standalone node ready, you can start building your private cloud of services. The idea is to first set up a Kubernetes K3s cluster running on KVM virtual machines. Then, you would deploy the apps and services you want in that Kubernetes cluster.
## Requirements for the K3s cluster and the services to deploy in it
-Let's go over the list of services I'm aiming to run in the K3s cluster, and check their requirements. This is necessary to plan in advance how to distribute the hardware resources available in the Proxmox VE server among the virtual machines that will act as nodes of the K3s cluster.
+Let's go over the list of services this guide aims to run in the K3s cluster, and check their requirements. This is necessary to plan in advance how to distribute the hardware resources available in the Proxmox VE server among the virtual machines that will act as nodes of the K3s cluster.
-### _Rancher K3s Kubernetes cluster_
+### Rancher K3s Kubernetes cluster
-[Kubernetes](https://kubernetes.io/), also known as K8s, is an open-source platform for automating deployment, scaling, and management of containerized applications. It can be run as one single node, but to have a more realistic K8s experience it's better to run a cluster of, at least, **three** VMs.
+[Kubernetes](https://kubernetes.io/), also known as K8s, is an open-source platform for automating deployment, scaling, and management of containerized applications. It can be run as one single node, but to have a more realistic K8s experience it's better to run a cluster of, at least, **three VMs**.
-Since the hardware I'm using in this guide series is rather limited, instead of using the official K8s binaries, I'll use the [Rancher K3s Kubernetes distribution](https://k3s.io/). It's a Kubernetes distribution designed for resource-constrained ("edge") environments, it's compact and already comes with all the necessary addons to start running right away. It's [minimum hardware requirements](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/#hardware) are the following.
+Since the virtual hardware I'm using in this guide series is rather limited, instead of using the official K8s binaries, I'll use the [Rancher K3s Kubernetes distribution](https://k3s.io/). It's a Kubernetes distribution originally designed for resource-constrained ("edge") environments. It is compact, lightweight and already comes with the minimum necessary addons to start running right away. Its [minimum hardware requirements](https://docs.k3s.io/installation/requirements#hardware) are the following:
-- RAM: 512 MiB.
-- 1 CPU.
+| Node | CPU | RAM |
+|:------:|:-------:|:------:|
+| Server | 2 cores | 2 GB |
+| Agent | 1 core | 512 MB |
-### _Nextcloud_
+### Nextcloud
-[**Nextcloud**](https://nextcloud.com/) is a software for file syncing and sharing, so it's main requirement will always be storage room for saving data. Still, it has some [recommended system requirements](https://docs.nextcloud.com/server/21/admin_manual/installation/system_requirements.html) to work properly.
+[Nextcloud](https://nextcloud.com/) is, above all, a file syncing and sharing solution, so its main requirement will always be storage room for saving data. Still, it has some [recommended system requirements](https://docs.nextcloud.com/server/latest/admin_manual/installation/system_requirements.html) to work properly:
-- Database: MySQL 8.0+ or MariaDB 10.2+.
+- Database: MySQL 8.4 or MariaDB 10.11.
- Web server: Apache 2.4 with mod_php or php-fpm.
-- PHP Runtime: 8.0.
-- RAM: 512 MiB.
+- PHP Runtime: 8.3.
+- RAM: 512 MiB per process.
-### _Gitea_
+### Gitea
-[**Gitea**](https://gitea.io/) is a lightweight self-hosted git service, so its main requirement will be storage space.
+[Gitea](https://gitea.io/) is a lightweight self-hosted git service, so its main requirement will be storage space.
-- Database: MySQL (>= 5.7), PostgreSQL (>= 10), SQLite3.
-- Git version >= 1.8.3.
+- Database: PostgreSQL (>= 12), MySQL (>= 8.0), MariaDB (>= 10.4), SQLite (builtin), and MSSQL (>= 2012 SP4).
+- Git version >= 2.0.
- A functioning SSH server to make connections through SSH rather than HTTPS.
- In the official Gitea docs there's no minimum or recommended hardware requirements specified.
-### _Kubernetes cluster monitoring stack_
+### Kubernetes cluster monitoring stack
-For monitoring the K3s Kubernetes cluster, you'll install a stack which includes **Prometheus** and **Grafana**, among other monitoring modules.
+For monitoring the K3s Kubernetes cluster, you will install a stack which includes **Prometheus** and **Grafana**, among other monitoring modules.
-#### **Prometheus**
+#### Prometheus
-[**Prometheus**](https://prometheus.io/) is a popular open-source systems monitoring and alerting toolkit. There aren't minimal or recommended requirementes for Prometheus, since it completely depends on how many systems Prometheus will monitor. Still, it'll need storage for saving metrics.
+[Prometheus](https://prometheus.io/) is a popular open-source systems monitoring and alerting toolkit. There are no minimum or recommended requirements for Prometheus, since its resource needs completely depend on how many systems it will monitor. Still, it'll need storage for saving metrics.
-#### **Grafana**
+#### Grafana
-[**Grafana**](https://grafana.com/) is an open source visualization and analytics platform that is commonly used to visualize Prometheus data. Grafana provides out-of-the-box support for Prometheus, so it only makes sense to use these two tools together. The minimum hardware requirements for Grafana are the ones next.
+[Grafana](https://grafana.com/) is an open source visualization and analytics platform that is commonly used to visualize Prometheus data. Grafana provides out-of-the-box support for Prometheus, so it only makes sense to use these two tools together. The [minimum hardware requirements for Grafana](https://grafana.com/docs/grafana/latest/setup-grafana/installation/#hardware-recommendations) are:
-- Database: MySQL, PostgreSQL, SQLite.
-- RAM: 255 MiB.
+- Database: SQLite 3, MySQL 8.0+, PostgreSQL 12+.
+- RAM: 512 MiB.
- CPU: 1 core.
## Arrangement of VMs and services
-Now that we have a rough idea about what each software requires, it's time to stablish a proper arrangement for them. So, in my four-single-threaded cores CPU and 8 GiB hardware, I'll go with three VMs with the hardware configuration listed next:
+Now that you have a rough idea about what each piece of software requires, it's time to establish a proper arrangement for them. So, with my virtual hardware of a four-core (single-threaded) CPU and 8 GiB of RAM, I'll go with three VMs with the hardware configuration listed next:
-- **One** VM with 2 vCPU and 1.5 GiB of RAM. This will become the K3s **server** (_master_) node of the Kubernetes cluster.
+- **One VM with 2 vCPU and 2 GiB of RAM**\
+ This will become the K3s **server** (_master_) node of the Kubernetes cluster.
-- **Two** VMs with 3 vCPU and 2 GiB of RAM. These will be K3s **agent** (_worker_) nodes where most of the Kubernetes pods will run.
+- **Two VMs with 3 vCPU and 2 GiB of RAM**\
+ These will be K3s **agent** (_worker_) nodes where most of the Kubernetes pods will run.
-If your hardware setup has more RAM and cores than mine, you can consider either putting more VMs in your system or just assigning them more RAM and vCPUs. Also, since all your VMs will run on the same host, Proxmox VE will be able to use [**KSM** for a more efficient and dynamic shared use of RAM among them](https://pve.proxmox.com/wiki/Dynamic_Memory_Management).
+If your hardware setup has more RAM and cores than the one used in this guide, you can consider either putting more VMs in your system or just assigning them more RAM and vCPUs. Also, since all your VMs will run on the same host, Proxmox VE will be able to use [**KSM and Auto-Ballooning** for a more efficient and dynamic shared use of RAM among them](https://pve.proxmox.com/wiki/Dynamic_Memory_Management).
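+
+If you want to verify later that KSM is actually deduplicating memory once all the VMs are running, you can check its counters from a shell on the PVE host. This is just an optional sanity check, and the exact figures will obviously differ in your system:
+
+~~~sh
+$ cat /sys/kernel/mm/ksm/pages_sharing   # a value greater than 0 means KSM is merging pages
+$ systemctl status ksmtuned              # the daemon Proxmox VE uses to tune KSM activity
+~~~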
## References
-### _Kubernetes_
+### About [Kubernetes](https://kubernetes.io/)
-- [Kubernetes](https://kubernetes.io/)
+- [Getting Started. Production Environment](https://kubernetes.io/docs/setup/production-environment/)
-### _Rancher K3s_
+### About [Rancher K3s](https://k3s.io/)
-- [Rancher K3s Kubernetes distribution](https://k3s.io/)
+- [Docs. Installation. Requirements](https://docs.k3s.io/installation/requirements#hardware)
-### _Nextcloud_
+### About [Nextcloud](https://nextcloud.com/)
-- [Nextcloud](https://nextcloud.com/)
-- [Nextcloud system requirements](https://docs.nextcloud.com/server/21/admin_manual/installation/system_requirements.html)
+- [Nextcloud system requirements](https://docs.nextcloud.com/server/latest/admin_manual/installation/system_requirements.html)
-### _Gitea_
+### About [Gitea](https://gitea.io/)
-- [Gitea](https://gitea.io/)
- [Gitea. Docs. Installation](https://docs.gitea.com/category/installation)
-### _Prometheus_
+### About [Prometheus](https://prometheus.io/)
-- [Prometheus](https://prometheus.io/)
- [Prometheus Docs - Overview](https://prometheus.io/docs/introduction/overview/)
- [How much RAM does Prometheus 2.x need for cardinality and ingestion?](https://www.robustperception.io/how-much-ram-does-prometheus-2-x-need-for-cardinality-and-ingestion)
-### _Grafana_
+### About [Grafana](https://grafana.com/)
-- [Grafana](https://grafana.com/)
+- [Grafana documentation. Set up. Install Grafana. Hardware recommendations](https://grafana.com/docs/grafana/latest/setup-grafana/installation/#hardware-recommendations)
+- [Grafana documentation. Set up. Install Grafana. Supported databases](https://grafana.com/docs/grafana/latest/setup-grafana/installation/#supported-databases)
## Navigation
diff --git a/G019 - K3s cluster setup 02 ~ Storage setup.md b/G019 - K3s cluster setup 02 ~ Storage setup.md
index 6a3cbfb..57f1682 100644
--- a/G019 - K3s cluster setup 02 ~ Storage setup.md
+++ b/G019 - K3s cluster setup 02 ~ Storage setup.md
@@ -1,6 +1,26 @@
# G019 - K3s cluster setup 02 ~ Storage setup
-Before you can start creating VMs or containers in your standalone PVE node, there's something still pending to do: organizing the current free storage space you have available in your node. And what are the data elements you must have in mind?
+- [Identifying your storage needs and current setup](#identifying-your-storage-needs-and-current-setup)
+- [Storage organization model](#storage-organization-model)
+- [Creating the logical volumes (LVs)](#creating-the-logical-volumes-lvs)
+- [Enabling the LVs for Proxmox VE](#enabling-the-lvs-for-proxmox-ve)
+ - [Formatting and mounting of LVs](#formatting-and-mounting-of-lvs)
+ - [Enabling directories within Proxmox VE](#enabling-directories-within-proxmox-ve)
+ - [Setting up the directories](#setting-up-the-directories)
+ - [Disabling the `local` directory](#disabling-the-local-directory)
+ - [Enabling the thinpools within Proxmox VE](#enabling-the-thinpools-within-proxmox-ve)
+- [Configuration file](#configuration-file)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [About Logical Volume Manager (LVM)](#about-logical-volume-manager-lvm)
+ - [About Proxmox VE storage configuration](#about-proxmox-ve-storage-configuration)
+- [Navigation](#navigation)
+
+## Identifying your storage needs and current setup
+
+Before you can start creating VMs or containers in your standalone PVE node, there's still one thing pending to do: organizing the free storage space currently available in your node. The data elements you must keep in mind in this organization are:
- ISO images, container templates and snippets.
- VMs and container disks images.
@@ -8,146 +28,159 @@ Before you can start creating VMs or containers in your standalone PVE node, the
- Backup of vzdumps and snapshots of VMs and containers.
- Backups of data stored or generated by apps and services.
-On the other hand, there's the particular basic LVM storage arrangement you already set up in the [**G005** guide](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md).
+On the other hand, there is the particular basic LVM storage arrangement you already set up in the [**G005** chapter](G005%20-%20Host%20configuration%2003%20~%20LVM%20storage.md).
- One partitioned LVM VG group for the PVE system itself, in the internal SSD drive, called `pve`.
- An empty LVM VG group, also in the internal SSD drive, called `ssdint`.
- An empty LVM VG group, in the internal HDD drive, called `hddint`.
- An empty LVM VG group, in the external USB HDD drive, called `hddusb`.
-Next, I'll show how a way to organize the data elements among the empty LVM VG groups available.
+In this chapter I'll show you how to organize the data elements among the empty LVM volume groups available.
## Storage organization model
-First, you need to figure out how you want to distribute the data elements in your available storage. Let's go through this element by element.
+First, you need to figure out how you want to distribute the data elements in your available storage. Let's make an element-by-element analysis:
-- **OSes ISO images, container templates and snippets**:
- These could be stored in the `local` storage already available in the `pve` group, but it's better to keep the Proxmox VE system as isolated as possible from anything else. Hence, you'll create a new small LV within the `hddint` VG just to store ISOs, container templates and snippets.
+- **OSes ISO images, container templates and snippets**\
+ These could be stored in the `local` storage already available in the `pve` group, but it is better to keep the Proxmox VE system as isolated as possible from anything else. Hence, you'll create a new small LV within the `hddint` VG just to store ISOs, container templates and snippets.
-- **VMs and container disks images**:
- To store the disks images in Proxmox VE, you need to create a new LVM-thin (or thinpool) storage within the `ssdint` VG. This way you'll get the best performance possible for the VMs and containers by making them run on the ssd drive.
+- **VMs and container disk images**\
+  To store the disk images in Proxmox VE, you need to create a new LVM-thin (or thinpool) storage within the `ssdint` VG. This way you'll get the best performance possible for the VMs and containers by making them run on the SSD drive.
-- **Data generated by apps or services**:
- This data is mainly the information generated or just stored by the services running in this setup. For these you'll use two different thinpools, the one already mentioned above within the `ssdint` VG, and other that you must create within the `hddint` VG.
+- **Data generated by apps or services**\
+ This data is mainly the information generated or just stored by the services running in this setup. For these you will use two different thinpools:
-- **Backups and snapshots of VMs and containers**:
- The proper thing to do is not to keep the backups inside the host itself. So, you'll create a LV within the `hddusb` VG to store the VMs and containers' backups and snapshots in your external usb drive.
+ - The one already mentioned in the previous point for disk images within the `ssdint` VG.
+ - Another one which you must create within the `hddint` VG.
-- **Backups of data generated by apps and services**:
- In a similar fashion to the backups of VMs and containers, you'll create a thinpool also in the `hddusb` to store backups of data.
+- **Backups and snapshots of VMs and containers**\
+  The proper thing to do is not to keep the backups inside the host itself. So, you'll create an LV within the `hddusb` VG to store the VMs and containers' backups and snapshots in your external USB drive.
+
+- **Backups of data generated by apps and services**\
+  In a similar fashion to the backups of VMs and containers, you'll also create a thinpool in the `hddusb` VG to store backups of data.
## Creating the logical volumes (LVs)
-Now that you know how to organize the available free storage in your setup, you can start creating the logical volumes you require.
+After deciding how to organize the available free storage in your setup, you can start by creating the logical volumes you require.
-1. Log in with `mgrsys` and check with `vgs` how much space available you have on each volume group.
+1. Log in with `mgrsys` and check with `vgs` how much space available you have on each volume group:
- ~~~bash
+ ~~~sh
$ sudo vgs
VG #PV #LV #SN Attr VSize VFree
hddint 1 0 0 wz--n- <930.51g <930.51g
hddusb 1 0 0 wz--n- <1.82t <1.82t
- pve 1 2 0 wz--n- <49.50g 0
- ssdint 1 0 0 wz--n- <881.51g <881.51g
+ pve 1 2 0 wz--n- <62.00g 0
+ ssdint 1 0 0 wz--n- <868.51g <868.51g
~~~
2. Being aware of the storage available, now you create all the LVs you need with `lvcreate`.
- ~~~bash
- $ sudo lvcreate --type thin-pool -L 881g -n ssd_disks ssdint
+ ~~~sh
+ $ sudo lvcreate --type thin-pool -L 867g -n ssd_disks ssdint
$ sudo lvcreate -L 60g -n hdd_templates hddint
- $ sudo lvcreate --type thin-pool -L 870g -n hdd_data hddint
- $ sudo lvcreate -L 520g -n hddusb_bkpvzdumps hddusb
- $ sudo lvcreate --type thin-pool -L 1340g -n hddusb_bkpdata hddusb
+ $ sudo lvcreate --type thin-pool -L 869g -n hdd_data hddint
+ $ sudo lvcreate -L 560g -n hddusb_bkpvzdumps hddusb
+ $ sudo lvcreate --type thin-pool -L 1300g -n hddusb_bkpdata hddusb
+ ~~~
+
+ The `lvcreate` commands for creating the `thin-pools` will print the following warnings:
+
+ ~~~sh
+ WARNING: Pool zeroing and 512.00 KiB large chunk size slows down thin provisioning.
+ WARNING: Consider disabling zeroing (-Zn) or using smaller chunk size (<512.00 KiB).
~~~
- > **BEWARE!**
- > The LVs haven't taken up the whole space on each drive. You must leave some room available in case any of the thinpools' metadata need to grow.
+   The chunk size affects the size of the metadata pool used to manage the thinly provisioned volumes. It is also relevant from a performance point of view if those volumes are going to be provisioned at a high rate, as can happen in a real production environment. Since the homelab setup of this guide is not meant for such a demanding scenario, you can just ignore these `lvcreate` warnings (see the example after the note below if you prefer to silence them).
-3. With `lsblk` you can verify that you've got the storage structure you want.
+ > [!IMPORTANT]
+ > **The LVs must not eat up the whole available space on each drive**\
+ > You must leave some room available in case any of the thinpools' metadata needs to grow.
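+
+   If you did prefer to act on those warnings, `lvcreate` accepts flags matching both suggestions. The following line is only a reference sketch with an arbitrary chunk size; this guide keeps the default values:
+
+   ~~~sh
+   $ sudo lvcreate --type thin-pool -Zn --chunksize 256k -L 867g -n ssd_disks ssdint
+   ~~~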
- ~~~bash
+3. With `lsblk` you can verify that you've got the storage structure you want:
+
+ ~~~sh
$ lsblk
- NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+ NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 931.5G 0 disk
├─sda1 8:1 0 1007K 0 part
- ├─sda2 8:2 0 512M 0 part /boot/efi
- ├─sda3 8:3 0 49.5G 0 part
- │ ├─pve-swap 253:0 0 12G 0 lvm [SWAP]
- │ └─pve-root 253:1 0 37.5G 0 lvm /
- └─sda4 8:4 0 881.5G 0 part
- ├─ssdint-ssd_disks_tmeta 253:2 0 112M 0 lvm
- │ └─ssdint-ssd_disks 253:4 0 881G 0 lvm
- └─ssdint-ssd_disks_tdata 253:3 0 881G 0 lvm
- └─ssdint-ssd_disks 253:4 0 881G 0 lvm
+ ├─sda2 8:2 0 1G 0 part
+ ├─sda3 8:3 0 62G 0 part
+ │ ├─pve-swap 252:0 0 12G 0 lvm [SWAP]
+ │ └─pve-root 252:1 0 50G 0 lvm /
+ └─sda4 8:4 0 868.5G 0 part
+ ├─ssdint-ssd_disks_tmeta 252:2 0 112M 0 lvm
+ │ └─ssdint-ssd_disks 252:4 0 867G 0 lvm
+ └─ssdint-ssd_disks_tdata 252:3 0 867G 0 lvm
+ └─ssdint-ssd_disks 252:4 0 867G 0 lvm
sdb 8:16 0 931.5G 0 disk
└─sdb1 8:17 0 931.5G 0 part
- ├─hddint-hdd_templates 253:5 0 60G 0 lvm
- ├─hddint-hdd_data_tmeta 253:6 0 112M 0 lvm
- │ └─hddint-hdd_data 253:8 0 870G 0 lvm
- └─hddint-hdd_data_tdata 253:7 0 870G 0 lvm
- └─hddint-hdd_data 253:8 0 870G 0 lvm
+ ├─hddint-hdd_templates 252:5 0 60G 0 lvm
+ ├─hddint-hdd_data_tmeta 252:6 0 112M 0 lvm
+ │ └─hddint-hdd_data 252:8 0 869G 0 lvm
+ └─hddint-hdd_data_tdata 252:7 0 869G 0 lvm
+ └─hddint-hdd_data 252:8 0 869G 0 lvm
sdc 8:32 0 1.8T 0 disk
└─sdc1 8:33 0 1.8T 0 part
- ├─hddusb-hddusb_bkpvzdumps 253:9 0 520G 0 lvm
- ├─hddusb-hddusb_bkpdata_tmeta 253:10 0 84M 0 lvm
- │ └─hddusb-hddusb_bkpdata 253:12 0 1.3T 0 lvm
- └─hddusb-hddusb_bkpdata_tdata 253:11 0 1.3T 0 lvm
- └─hddusb-hddusb_bkpdata 253:12 0 1.3T 0 lvm
+ ├─hddusb-hddusb_bkpvzdumps 252:9 0 560G 0 lvm
+ ├─hddusb-hddusb_bkpdata_tmeta 252:10 0 84M 0 lvm
+ │ └─hddusb-hddusb_bkpdata 252:12 0 1.3T 0 lvm
+ └─hddusb-hddusb_bkpdata_tdata 252:11 0 1.3T 0 lvm
+ └─hddusb-hddusb_bkpdata 252:12 0 1.3T 0 lvm
~~~
- You can also use the `vgs` command to see the current volumes within the VGs.
+ You can also use the `vgs` command to see the status of your current volumes within the VGs.
- ~~~bash
+ ~~~sh
$ sudo vgs -o +lv_size,lv_name
VG #PV #LV #SN Attr VSize VFree LSize LV
- hddint 1 2 0 wz--n- <930.51g 296.00m 60.00g hdd_templates
- hddint 1 2 0 wz--n- <930.51g 296.00m 870.00g hdd_data
- hddusb 1 2 0 wz--n- <1.82t 868.00m 520.00g hddusb_bkpvzdumps
- hddusb 1 2 0 wz--n- <1.82t 868.00m <1.31t hddusb_bkpdata
- pve 1 2 0 wz--n- <49.50g 0 12.00g swap
- pve 1 2 0 wz--n- <49.50g 0 <37.50g root
- ssdint 1 1 0 wz--n- <881.51g 296.00m 881.00g ssd_disks
+ hddint 1 2 0 wz--n- <930.51g <1.29g 869.00g hdd_data
+ hddint 1 2 0 wz--n- <930.51g <1.29g 60.00g hdd_templates
+ hddusb 1 2 0 wz--n- <1.82t 868.00m <1.27t hddusb_bkpdata
+ hddusb 1 2 0 wz--n- <1.82t 868.00m 560.00g hddusb_bkpvzdumps
+ pve 1 2 0 wz--n- <62.00g 0 12.00g swap
+ pve 1 2 0 wz--n- <62.00g 0 <50.00g root
+ ssdint 1 1 0 wz--n- <868.51g <1.29g 867.00g ssd_disks
~~~
-4. At this point, the PVE web console will already show your newly created LVM-thin thinpools. Find them at your `pve` node level, in the `Disks > LVM-Thin` screen.
+4. At this point, the PVE web console will already show your newly created LVM-thin thinpools. Find them at your `pve` node level, in the `Disks > LVM-Thin` screen:
- 
+ 
## Enabling the LVs for Proxmox VE
Before you enable the new LV volumes and thinpools in Proxmox VE, there are a few more things to do yet.
-### _Formatting and mounting of LVs_
+### Formatting and mounting of LVs
-The new LVs are virtual partitions that still doesn't have a defined filesystem. So you need to format them to have one, `ext4` in your case.
+The new LVs are virtual partitions that still do not have a defined filesystem. You need to format them to have one, `ext4` in your case.
-> **BEWARE!**
-> Next you'll format and mount just the new LVs, NOT the new thinpools!
+> [!WARNING]
+> Next you'll format and mount just the new LVs, **NOT the new thinpools**!
-1. Before you format the new LVs, you need to see their `/dev/mapper/` paths with `fdisk`.
+1. Before you format the new LVs, you need to see their `/dev/mapper/` paths with `fdisk`:
- ~~~bash
+ ~~~sh
$ sudo fdisk -l | grep /dev/mapper
Disk /dev/mapper/pve-swap: 12 GiB, 12884901888 bytes, 25165824 sectors
- Disk /dev/mapper/pve-root: 37.5 GiB, 40261124096 bytes, 78635008 sectors
+ Disk /dev/mapper/pve-root: 50 GiB, 53682896896 bytes, 104849408 sectors
Disk /dev/mapper/hddint-hdd_templates: 60 GiB, 64424509440 bytes, 125829120 sectors
- Disk /dev/mapper/hddusb-hddusb_bkpvzdumps: 520 GiB, 558345748480 bytes, 1090519040 sectors
+ Disk /dev/mapper/hddusb-hddusb_bkpvzdumps: 560 GiB, 601295421440 bytes, 1174405120 sectors
~~~
-2. After discovering their paths, you can use the `mkfs.ext4` command to format the LVs.
+2. After discovering their paths, you can use the `mkfs.ext4` command to format the LVs:
- ~~~bash
+ ~~~sh
$ sudo mkfs.ext4 /dev/mapper/hddint-hdd_templates
$ sudo mkfs.ext4 /dev/mapper/hddusb-hddusb_bkpvzdumps
~~~
- Each `mkfs.ext4` command will print an output as below.
+   Each `mkfs.ext4` command will print something like the following (the filesystem UUID will differ):
- ~~~bash
- mke2fs 1.46.2 (28-Feb-2021)
+ ~~~sh
+ mke2fs 1.47.2 (1-Jan-2025)
Creating filesystem with 15728640 4k blocks and 3932160 inodes
- Filesystem UUID: c8a71921-3368-4fb5-95fb-12c077a64962
+ Filesystem UUID: 1fbdc885-c059-46d6-abae-1eaefc3430c7
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000, 7962624, 11239424
@@ -158,222 +191,248 @@ The new LVs are virtual partitions that still doesn't have a defined filesystem.
Writing superblocks and filesystem accounting information: done
~~~
-3. Before you can mount these LVs like any other partition, you need to create their corresponding mount points. This means you have to create a directory for each LV.
+3. Before you can mount these LVs like any other partition, you need to create their corresponding mount points. This means you have to create a directory for each LV:
- ~~~bash
+ ~~~sh
$ sudo mkdir -p /mnt/{hdd_templates,hddusb_bkpvzdumps}
~~~
- To check out fast that the folder structure is correct, you can use the `tree` command.
+   To quickly check that the folder structure is correct, you can use the `tree` command:
- ~~~bash
- $ tree -F /mnt/
+ ~~~sh
+ $ tree -F /mnt
/mnt/
├── hdd_templates/
- ├── hddusb_bkpvzdumps/
- └── hostrun/
+ └── hddusb_bkpvzdumps/
3 directories, 0 files
~~~
- Don't mind the `hostrun` folder, it was already present at `mnt`. Just check if the folder structure is as expected.
-
-4. Now you can mount the LVs on their mount points. To do that, use the `mount` command.
+4. Now you can mount the LVs on their mount points. To do that, use the `mount` command:
- ~~~bash
+ ~~~sh
$ sudo mount /dev/mapper/hddint-hdd_templates /mnt/hdd_templates
$ sudo mount /dev/mapper/hddusb-hddusb_bkpvzdumps /mnt/hddusb_bkpvzdumps
~~~
The `mount` command won't output anything if it executes correctly.
- To verify that you see the LVs as mounted filesystems, use `df`.
+ To verify that you see the LVs as mounted filesystems, use `df`:
- ~~~bash
+ ~~~sh
$ df -h
Filesystem Size Used Avail Use% Mounted on
udev 3.8G 0 3.8G 0% /dev
- tmpfs 785M 1.1M 784M 1% /run
- /dev/mapper/pve-root 38G 3.9G 32G 11% /
- tmpfs 3.9G 31M 3.9G 1% /dev/shm
+ tmpfs 783M 1.4M 782M 1% /run
+ /dev/mapper/pve-root 50G 3.6G 44G 8% /
+ tmpfs 3.9G 34M 3.8G 1% /dev/shm
+ efivarfs 128K 101K 23K 82% /sys/firmware/efi/efivars
tmpfs 5.0M 0 5.0M 0% /run/lock
- /dev/sda2 511M 328K 511M 1% /boot/efi
+ tmpfs 1.0M 0 1.0M 0% /run/credentials/systemd-journald.service
+ tmpfs 3.9G 0 3.9G 0% /tmp
/dev/fuse 128M 16K 128M 1% /etc/pve
- tmpfs 785M 0 785M 0% /run/user/1000
- /dev/mapper/hddint-hdd_templates 59G 24K 56G 1% /mnt/hdd_templates
- /dev/mapper/hddusb-hddusb_bkpvzdumps 511G 28K 485G 1% /mnt/hddusb_bkpvzdumps
+ tmpfs 1.0M 0 1.0M 0% /run/credentials/getty@tty1.service
+ tmpfs 783M 4.0K 783M 1% /run/user/1000
+ /dev/mapper/hddint-hdd_templates 59G 2.1M 56G 1% /mnt/hdd_templates
+ /dev/mapper/hddusb-hddusb_bkpvzdumps 551G 2.1M 523G 1% /mnt/hddusb_bkpvzdumps
~~~
- You can see your newly mounted filesystems at the end of the list.
+ You can see your newly mounted filesystems at the bottom of the list.
-5. To make the previous mounting permanent, you need to edit the `/etc/fstab` file. First make a backup of it.
+5. To make the previous mounting permanent, you need to edit the `/etc/fstab` file. First make a backup of it:
- ~~~bash
- $ cd /etc
- $ sudo cp fstab fstab.orig
+ ~~~sh
+ $ sudo cp /etc/fstab /etc/fstab.orig
~~~
- Then **append** the following lines to the `fstab` file.
+ Then **append** the following lines to the `fstab` file:
- ~~~bash
+ ~~~sh
/dev/mapper/hddint-hdd_templates /mnt/hdd_templates ext4 defaults,nofail 0 0
/dev/mapper/hddusb-hddusb_bkpvzdumps /mnt/hddusb_bkpvzdumps ext4 defaults,nofail 0 0
~~~
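+
+   Optionally, before rebooting, you can make a quick sanity check of the new `fstab` entries from the shell. This step isn't required; it's just a way to catch typos early:
+
+   ~~~sh
+   $ sudo findmnt --verify   # parses /etc/fstab and reports any problem it finds
+   $ sudo mount -a           # tries to mount everything listed in fstab; already-mounted entries are skipped
+   ~~~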
-6. To verify that the mounting is truly working permanently, reboot your PVE system.
+6. To verify that the mounting is truly working permanently, reboot your PVE system:
- ~~~bash
+ ~~~sh
$ sudo reboot
~~~
-7. Finally, verify with `df` that the mounting is still working.
+7. After the reboot, verify with `df` that the mounting is still working:
- ~~~bash
+ ~~~sh
$ df -h
Filesystem Size Used Avail Use% Mounted on
udev 3.8G 0 3.8G 0% /dev
- tmpfs 785M 1.1M 784M 1% /run
- /dev/mapper/pve-root 38G 3.9G 32G 11% /
- tmpfs 3.9G 13M 3.9G 1% /dev/shm
+ tmpfs 783M 1.2M 782M 1% /run
+ /dev/mapper/pve-root 50G 3.5G 44G 8% /
+ tmpfs 3.9G 16M 3.9G 1% /dev/shm
+ efivarfs 128K 102K 22K 83% /sys/firmware/efi/efivars
tmpfs 5.0M 0 5.0M 0% /run/lock
- /dev/mapper/hddusb-hddusb_bkpvzdumps 511G 28K 485G 1% /mnt/hddusb_bkpvzdumps
- /dev/mapper/hddint-hdd_templates 59G 24K 56G 1% /mnt/hdd_templates
- /dev/sda2 511M 328K 511M 1% /boot/efi
+ tmpfs 1.0M 0 1.0M 0% /run/credentials/systemd-journald.service
+ tmpfs 3.9G 0 3.9G 0% /tmp
+ /dev/mapper/hddusb-hddusb_bkpvzdumps 551G 2.1M 523G 1% /mnt/hddusb_bkpvzdumps
+ /dev/mapper/hddint-hdd_templates 59G 2.1M 56G 1% /mnt/hdd_templates
/dev/fuse 128M 16K 128M 1% /etc/pve
- tmpfs 785M 0 785M 0% /run/user/1000
+ tmpfs 1.0M 0 1.0M 0% /run/credentials/getty@tty1.service
+ tmpfs 783M 4.0K 783M 1% /run/user/1000
~~~
- Now the lines for your new LVs filesystems are shown in a different order because they've been mounted by the system.
+   The lines for your new LVs' filesystems are shown in a different order now because they have been mounted by the system at boot time.
-### _Enabling directories within Proxmox VE_
+### Enabling directories within Proxmox VE
-Each storage type supported by Proxmox VE can store only a limited range of content types. In particular, to enable Proxmox VE to make backups of VMs or containers, or to store ISO images, the only option we have for the limited setup used in this guide series is to use **directories**.
+Each storage type supported by Proxmox VE can store only a limited range of content types. In particular, to enable Proxmox VE to make backups of VMs or containers, or to store ISO images, the only option available for the limited setup used in this guide is to use **directories**.
A directory is just that, a path currently existing in your filesystem. In your standalone PVE node you already have one enabled, which you can see in the `Datacenter > Storage` section.
-
+
In this snapshot you can see the `local` directory highlighted. This directory is, in fact, the `root` directory of your Proxmox VE installation. It comes configured to support only three content types, although Proxmox VE can store more content types in a directory.
-#### **Setting up the directories**
+#### Setting up the directories
Now you'll enable as directories the two LVs you've just created and mounted before:
- The `hdd_templates` LV will hold ISO images, container templates and snippets.
- The `hddusb_bkpvzdumps` LV will store virtual machine dumps (`VZDump`).
-1. Get into the web console, open the `Datacenter > Storage` page and click on the `Add` button.
+1. Get into the web console, open the `Datacenter > Storage` page and click on the `Add` button:
- 
+ 
- You'll get the whole list of storage types supported by Proxmox VE, although in our small setup we're limited to use just the first three.
+   You will get the whole list of storage types supported by Proxmox VE, although in our small setup we're limited to using just the first four.
- > **NOTE**
- > Technically, you could also use NFS but, since its not in the scope of this guide series, I won't consider it an option for our build.
+ > [!NOTE]
+   > **This guide does not consider NFS as a storage option**\
+   > Technically, you could also use NFS but, since it's out of this guide's scope, I won't consider it an option for this homelab build.
-2. Click on `Directory` and you'll get to the window below.
+2. Click on `Directory` and you'll get to the window below:
- 
+ 
By default, it opens at its `General` tab, which has the following parameters:
- - `ID`: this is the name for the storage, to identify it within Proxmox VE.
- - `Directory`: the directory's path you want to enable here.
- - `Content`: this is a multichoice list in which you choose the content types you want to support on this storage.
- - `Nodes`: in a PVE cluster, this allows you to restrict on which nodes you want to have this storage available.
- - `Enable`: to enable or disable this storage.
- - `Shared`: in a PVE cluster, this allows to mark a storage as shared among the nodes.
- > **BEWARE!**
- > Not all storage types support this option (like `LVM-Thin`, for instance).
+ - `ID`\
+ This is the name for the storage, to identify it within Proxmox VE.
+
+ - `Directory`\
+ The directory's path you want to enable here.
+
+ - `Content`\
+ This is a multichoice list in which you choose the content types you want to support in the directory.
- The `Backup Retention` tab looks like below.
+ - `Nodes`\
+ In a PVE cluster, this allows you to restrict on which nodes you want to have this storage available.
- 
+ - `Enable`\
+     Enables or disables this storage; it comes enabled by default.
- Here you can configure your backup retention policy applied in the directory. By default, the `Keep all backups` comes already checked, but you can uncheck it to define a concrete prune policy to clear old backups stored in this storage: you may just keep a number of recent backups with `Keep Last`, from restricted periods of time with the rest of `Keep` parameters, or define a more complex combination with all those parameters.
+ - `Shared`\
+     In a PVE cluster, this allows you to indicate whether a storage is already shared among the nodes.
- > **BEWARE!**
- > In the official documentation, it's not clear if `Backup Retention` policies apply to all content types or just to some of them. In this guide I'll assume the former.
+ > [!IMPORTANT]
+ > **Not all storage types support this option**\
+ > Like `LVM-Thin`, for instance.
- You may also have noticed the `Advanced` checkbox in this window.
+ The `Backup Retention` tab looks like below:
- 
+ 
- This option allows you to specify how you want space preallocation to work with this storage unit. Since at the time of writing this there are no details whatsoever about this option in the official Proxmox VE documentation, I can't tell you more about it. If you know this, just choose the option you prefer, otherwise I'd stick with `Default`.
+   Here you can configure the backup retention policy you want to apply within the directory. By default, the `Keep all backups` option comes already checked, but you can uncheck it to define a concrete prune policy to clear old backups stored in this storage. You may keep only a number of recent backups with `Keep Last`, keep backups from specific periods of time with the rest of the `Keep` parameters, or define a more complex combination of all those parameters.
-3. Let's enable the directory for the VZDumps (VMs backups).
+   The `Maximum Protected` field indicates the [maximum number of protected backups per guest (VM or container) allowed on the storage](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#vzdump_protection). Protected backups are those that cannot be pruned from the storage by the backup retention policy.
- 
+ Finally, if you enable the `Advanced` checkbox, you will get some extra options under the `General` tab:
+
+ 
+
+   The `Preallocation` option allows you to specify which mode to use for space preallocation in this storage unit. It seems to affect only _raw_ and _qcow2_ images on file-based storages like the `directory` one, so just leave it as `Default` in your homelab. The other option, `Allow Snapshots as Volume-Chain`, is meant to "_Enable support for creating storage-vendor agnostic snapshot through volume backing-chains_". In Proxmox VE 9.0 it is still a preview feature, so I'd recommend avoiding it unless you already know what you are dealing with.
+
+3. Let's enable the directory for the VMs backups:
+
+ 
Above, you can see that:
- - The `ID` is just a string, but it should be as descriptive as possible.
+ - The `ID` is just a string, `hddusb_bkpvzdumps` in this case, but it should be as descriptive as possible.
- - In `Directory` goes the **absolute** path of the folder already present in your PVE node.
+ - In `Directory` goes the **absolute** path of the folder already present in your PVE node, which here is `/mnt/hddusb_bkpvzdumps`.
- - In `Content` there's only one content type selected, the one related to VM dumps, while the `Disk image` that was marked by default has been unselected.
+ - In `Content` there is only one content type selected, the one related to backups, while the `Disk image` that was marked by default has been unselected.
+
+ > [!NOTE]
+ > **Proxmox VE backups are VZ dumps**\
+     > Although the list of content types no longer specifies it (as it did in previous major Proxmox VE versions), the `Backup` content type refers to VZ dumps.
- No other option has been touched, not even in the `Backup Retention` tab.
-4. Click on `Add` and, after an instant, you should see your directory added to the list of available storages at your `Datacenter` level.
+4. Click on `Add` and, after a moment of processing, you should see your directory added to the list of available storages at your `Datacenter` level:
- 
+ 
5. Like you've just done for the `vzdumps` directory, do likewise for the `templates` directory:
- ID: `hdd_templates`, Directory: `/mnt/hdd_templates`, Content: `ISO image, Container template, Snippets`.
-6. After enabling both of them, your `Datacenter`'s storage list should look like below.
+6. After enabling both of them, your `Datacenter`'s storage list should look like below:
+
+ 
- 
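+
+For reference, the same two directory storages could also have been defined from the command line with the `pvesm` tool. The following lines are only a rough equivalent of what you just did; you don't need to run them if you already added the directories through the web console:
+
+~~~sh
+$ sudo pvesm add dir hddusb_bkpvzdumps --path /mnt/hddusb_bkpvzdumps --content backup
+$ sudo pvesm add dir hdd_templates --path /mnt/hdd_templates --content iso,vztmpl,snippets
+~~~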
+#### Disabling the `local` directory
-#### **Disabling the `local` directory**
+As you have already seen, Proxmox VE comes with one directory storage enabled by default, the `local` one. You can disable it as storage since:
-As you've already seen, Proxmox VE comes with one directory storage enabled by default, the `local` one. Since this is the `root` directory of your system, and with the new directory layout you cover the same things as with this one, you can disable it as storage.
+- This is the `root` directory of your system.
+- Your new directory layout covers the same content types as this one.
-1. Open the PVE web console and go to the `Datacenter > Storage` screen. There, choose the `local` directory and press on `Edit`.
+1. Open the PVE web console and go to the `Datacenter > Storage` screen. There, choose the `local` directory and press on `Edit`:
- 
+ 
- Also, notice how the `local` storage appears under your PVE node in the `Server View` tree (which you have to unfold first), at the page's left.
+   Also, notice how the `local` storage appears under your PVE node in the `Server View` tree (which you may have to unfold first), on the left side of the page.
-2. On the `Edit` window, just uncheck the `Enable` option and then click `OK`.
+2. On the `Edit` window, just uncheck the `Enable` option and then click `OK`:
- 
+ 
- Also, you could reduce the number of content types it supports, but you cannot leave the Content box empty. You must leave at least one type selected there.
+ Also, you could reduce the number of content types it supports, but you cannot leave the `Content` box empty. You must leave at least one type selected there.
-3. Now the `local` directory will show up with the `Enabled` column set as `No`.
+3. Now the `local` directory will show up with the `Enabled` column set as `No`:
- 
+ 
- Notice that the `local` storage is not present anymore at the tree list on the left.
+ Also notice that the `local` storage is not present anymore at the tree list on the left.
-> **BEWARE!**
-> The PVE web console won't allow you to `Remove` the `local` directory storage. If you try that, PVE will just reenable the storage and change its supported content types enabling **all of them**.
+> [!WARNING]
+> **The PVE web console will not allow you to `Remove` the `local` directory storage**\
+> If you try that, PVE will just reenable the storage and mark **all** content types as supported.
-### _Enabling the thinpools within Proxmox VE_
+### Enabling the thinpools within Proxmox VE
-Here you're going to enable in your Proxmox VE datacenter all the thinpools you've created before.
+Here you're going to enable, at your Proxmox VE `Datacenter` level, all the thinpools you created before:
-1. In the web console, go to the `Datacenter > Storage` page, click on `Add` and choose the `LVM-Thin` storage option.
+1. In the web console, go to the `Datacenter > Storage` page, click on `Add` and choose the `LVM-Thin` storage option:
- 
+ 
-2. You'll reach the window in the snapshot below.
+2. You'll reach the window for adding an LVM thinpool:
- 
+ 
- You'll notice some differences from the form you filled when you created the `directories`. There are two new parameters, but no `Shared` or `Advanced` options.
+   You'll notice some differences from the form you filled when you added the directories. There are two new parameters, but no `Shared` or `Advanced` options.
- - `Volume group`: list where you must choose the VG in which the thinpool you want to enable resides.
- - `Thin Pool`: another list with the available thinpools in the chosen VG.
+ - `Volume group`\
+ List where you must choose the VG in which the thinpool you want to enable resides. Notice that the field is already filled with an automatically preselected value.
- If you click on the Backup Retention tab, you'll see that it's completely disabled with a warning: the LVM-Thin type **cannot** store Proxmox VE backups.
+ - `Thin Pool`\
+ Another list with the available thinpools in the chosen VG. Notice that the field is already filled with an automatically preselected value.
- 
+   If you click on the `Backup Retention` tab, you'll see that it is completely disabled with a warning indicating that **the LVM-Thin storage type cannot store Proxmox VE backups**.
-3. So, just fill the `General` tab for each thinpool as follows.
+ 
+
+3. So, just fill the `General` tab for each thinpool as follows:
- ID: `ssd_disks`, Volume group: `ssdint`, Thin Pool: `ssd_disks`, Content: `Disk image, Container`.
@@ -383,19 +442,25 @@ Here you're going to enable in your Proxmox VE datacenter all the thinpools you'
The form for the `ssd_disks` thinpool storage should look like as below.
- 
+ 
After filling it just click on `Add`.
-4. The new thinpool storages will appear both in the storage list and in the tree list on the left of your PVE web console. Since this view orders by `ID`, reorder by `Type` to see them better.
+4. The new thinpool storages will appear both in the storage list and in the tree list on the left of your PVE web console. Since this view is ordered by the `ID` field by default, reorder by `Type` to see them listed together:
- 
+ 
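+
+   As with the directories, these thinpool storages could also have been registered from the command line with `pvesm`. The following lines are only a rough equivalent of what you just did in the web console:
+
+   ~~~sh
+   $ sudo pvesm add lvmthin ssd_disks --vgname ssdint --thinpool ssd_disks --content images,rootdir
+   $ sudo pvesm add lvmthin hdd_data --vgname hddint --thinpool hdd_data --content images,rootdir
+   $ sudo pvesm add lvmthin hddusb_bkpdata --vgname hddusb --thinpool hddusb_bkpdata --content images,rootdir
+   ~~~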
## Configuration file
-The storage configuration at the `Datacenter` level is saved by Proxmox VE in the file `/etc/pve/storage.cfg`. After applying all the previous changes to your system, your `storage.cfg` will look like the following.
+The storage configuration at the `Datacenter` level is saved by Proxmox VE in the file `/etc/pve/storage.cfg`. After applying all the previous changes to your system, your `storage.cfg` should look like this:
+
+~~~sh
+dir: local
+ disable
+ path /var/lib/vz
+ content iso,backup,vztmpl
+ shared 0
-~~~bash
dir: hddusb_bkpvzdumps
path /mnt/hddusb_bkpvzdumps
content backup
@@ -404,21 +469,14 @@ dir: hddusb_bkpvzdumps
dir: hdd_templates
path /mnt/hdd_templates
- content iso,snippets,vztmpl
- prune-backups keep-all=1
- shared 0
-
-dir: local
- disable
- path /var/lib/vz
- content vztmpl,iso,backup
+ content iso,vztmpl,snippets
prune-backups keep-all=1
shared 0
lvmthin: ssd_disks
thinpool ssd_disks
vgname ssdint
- content rootdir,images
+ content images,rootdir
lvmthin: hdd_data
thinpool hdd_data
@@ -431,35 +489,43 @@ lvmthin: hddusb_bkpdata
content rootdir,images
~~~
-You can also modify this file manually to adjust your `Datacenter` tier's storage configuration.
+> [!NOTE]
+> The ordering of the storage blocks within the `storage.cfg` file may be different in your system.
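+
+You can also get a quick overview of the resulting storage configuration from the command line, for instance:
+
+~~~sh
+$ sudo pvesm status   # lists every storage with its type, whether it's active, and its usage
+~~~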
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/pve`
-### _Files_
+### Files
- `/etc/pve/storage.cfg`
## References
-### _About LVM (Logical Volume Manager)_
+### About Logical Volume Manager (LVM)
+
+- [Red Hat Enterprise Linux 7. Logical Volume Manager Administration](https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/logical_volume_manager_administration/index)
+- [DigitalOcean. An Introduction to LVM Concepts, Terminology, and Operations](https://www.digitalocean.com/community/tutorials/an-introduction-to-lvm-concepts-terminology-and-operations)
+- [Stack Overflow. LVM Thinpool - How to resize a thinpool LV?](https://stackoverflow.com/questions/40553483/lvm-thinpool-how-to-resize-a-thinpool-lv)
+- [TecMint. Setup Thin Provisioning Volumes in Logical Volume Management (LVM) – Part IV](https://www.tecmint.com/setup-thin-provisioning-volumes-in-lvm/)
+- [WARNING: Pool zeroing and 1.00 MiB large chunk size slows down thin provisioning?](https://www.linuxquestions.org/questions/linux-newbie-8/warning-pool-zeroing-and-1-00-mib-large-chunk-size-slows-down-thin-provisioning-4175638074/)
+
+### About Proxmox VE storage configuration
+
+- [Proxmox VE. Wiki. Storage](https://pve.proxmox.com/wiki/Storage)
+
+- [Proxmox. Forum. thin lvm and zeroing/chunk size](https://forum.proxmox.com/threads/thin-lvm-and-zeroing-chunk-size.56262/)
-- [Logical Volume Manager Administration](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/logical_volume_manager_administration/index)
-- [An Introduction to LVM Concepts, Terminology, and Operations](https://www.digitalocean.com/community/tutorials/an-introduction-to-lvm-concepts-terminology-and-operations)
-- [LVM Thinpool - How to resize a thinpool LV?](https://stackoverflow.com/questions/40553483/lvm-thinpool-how-to-resize-a-thinpool-lv)
-- [Setup Thin Provisioning Volumes in Logical Volume Management (LVM) – Part IV](https://www.tecmint.com/setup-thin-provisioning-volumes-in-lvm/)
+- [Proxmox VE Administration Guide](https://pve.proxmox.com/pve-docs/pve-admin-guide.html)
+ - [Host System Administration. Logical Volume Manager (LVM)](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_lvm)
+ - [Proxmox VE Storage](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_storage)
+ - [Backup and Restore. Backup Protection](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#vzdump_protection)
-### _About Proxmox VE storage configuration_
+- [Proxmox configuration overview](https://imgur.com/a/promox-configuration-overview-OFHyxiU)
-- [Proxmox VE wiki. Storage](https://pve.proxmox.com/wiki/Storage)
-- [PVE admin guide. Logical Volume Manager (LVM)](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_lvm)
-- [PVE admin guide. Storage](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_storage)
-- [Proxmox configuration overview](https://imgur.com/a/OFHyxiU)
-- [How to add storage to Proxmox](https://nubcakes.net/index.php/2019/03/05/how-to-add-storage-to-proxmox/)
-- [Advice on storage structure](https://www.reddit.com/r/Proxmox/comments/bdss9x/advice_on_storage_structure/el0nxl5/)
+- [Reddit. Proxmox. Advice on storage structure](https://www.reddit.com/r/Proxmox/comments/bdss9x/advice_on_storage_structure/el0nxl5/)
## Navigation
diff --git a/G020 - K3s cluster setup 03 ~ Debian VM creation.md b/G020 - K3s cluster setup 03 ~ Debian VM creation.md
index 6027b26..d126101 100644
--- a/G020 - K3s cluster setup 03 ~ Debian VM creation.md
+++ b/G020 - K3s cluster setup 03 ~ Debian VM creation.md
@@ -1,172 +1,226 @@
# G020 - K3s cluster setup 03 ~ Debian VM creation
-Your Proxmox VE system is now configured well enough for you to start creating in it the virtual machines you require. In this guide, I'll show you how to create a rather generic VM with Debian. This Debian VM will be the base over which you'll build, in the following guides, a more specialized VM template for your K3s cluster's nodes.
+- [You can start creating VMs in your Proxmox VE server](#you-can-start-creating-vms-in-your-proxmox-ve-server)
+- [Preparing the Debian ISO image](#preparing-the-debian-iso-image)
+ - [Obtaining the latest stable Debian ISO image](#obtaining-the-latest-stable-debian-iso-image)
+ - [Storing the ISO image in the Proxmox VE platform](#storing-the-iso-image-in-the-proxmox-ve-platform)
+- [Building a Debian virtual machine](#building-a-debian-virtual-machine)
+ - [Setting up a new virtual machine](#setting-up-a-new-virtual-machine)
+ - [Adding an extra network device to the new VM](#adding-an-extra-network-device-to-the-new-vm)
+ - [Installing Debian on the new VM](#installing-debian-on-the-new-vm)
+- [Note about the VM's `Boot Order` option](#note-about-the-vms-boot-order-option)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories](#directories)
+ - [Files](#files)
+- [References](#references)
+ - [Debian](#debian)
+ - [Virtual Machines on Proxmox VE](#virtual-machines-on-proxmox-ve)
+ - [Units of measurement for storage](#units-of-measurement-for-storage)
+- [Navigation](#navigation)
+
+## You can start creating VMs in your Proxmox VE server
+
+Your Proxmox VE system is now configured well enough for you to start creating the virtual machines you require in it. This chapter will show you how to create a rather generic VM with Debian. This Debian VM will be the base over which you'll build, in the following chapters, a more specialized VM template for your K3s cluster's nodes.
## Preparing the Debian ISO image
-### _Obtaining the latest Debian ISO image_
+Since the operating system chosen for the VMs is Debian Linux, first you need to get its ISO image and store it in your Proxmox VE server.
-First, you'll need to download the Debian ISO image. At the time of writing this, the latest version is [**11.1.0 Bullseye**](https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-11.1.0-amd64-netinst.iso).
+### Obtaining the latest stable Debian ISO image
-You'll find the _net install_ version of the ISO for the latest Debian version right at the [Debian project's webpage](https://www.debian.org/).
+At the time of writing this, the latest stable version of Debian is [**13.0.0 "trixie"**](https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-13.0.0-amd64-netinst.iso).
-
+You can find the _net install_ version of the ISO for the latest Debian version right at the [Debian project's webpage](https://www.debian.org/):
-After clicking on that big `Download` button your browser should start downloading the `debian-11.1.0-amd64-netinst.iso` file automatically.
+
-### _Storing the ISO image in the Proxmox VE platform_
+After clicking on that big `Download` button, your browser should start downloading the `debian-13.0.0-amd64-netinst.iso` file.
-Proxmox VE needs that ISO image saved in the proper storage space it has to be able to use it. This means that you need to upload the `debian-11.1.0-amd64-netinst.iso` file to the storage you configured to hold such files, the one called `hdd_templates`.
+### Storing the ISO image in the Proxmox VE platform
-1. Open the web console and unfold the tree of available storages under your `pve` node. Click on `hdd_templates` and you'll reach its `Summary` page.
+Proxmox VE can only use the ISO image once it is saved in a suitable storage space. Therefore, you need to upload the `debian-13.0.0-amd64-netinst.iso` file to the storage space you configured to hold such files, the one called `hdd_templates`:
- 
+1. Open the web console and unfold the tree of available storages under your `pve` node. Click on `hdd_templates` to reach its `Summary` page:
-2. Click on the `ISO Images` tab, and you'll see the page of available ISO images.
+ 
- 
+2. Click on the `ISO Images` tab, and you'll see the page of available ISO images:
+
+ 
You'll find it empty at this point.
-3. Click on the `Upload` button to raise the dialog below.
+3. Click on the `Upload` button to raise the dialog below:
+
+ 
+
+ > [!NOTE]
+    > **Ensure you have enough free storage available in your Proxmox VE's root filesystem before uploading files**\
+    > Notice the warning about where Proxmox VE temporarily stores the file you upload before moving it to its definitive place. The `/var/tmp/` path lies in the root filesystem of your PVE server, so make sure there is enough room in it or the upload will fail.
- 
+4. Click on `Select File`, find and select your `debian-13.0.0-amd64-netinst.iso` file in your computer, then click on `Upload`. The same dialog will show the upload progress:
-4. Click on `Select File`, find and select your `debian-11.1.0-amd64-netinst.iso` file in your computer, then click on `Upload`. The same dialog will show the upload progress.
+ 
- 
+ When the upload is finished, Proxmox VE will show you another dialog with the result of the task that moves the ISO file from the `/var/tmp/` PVE system path to the `hdd_templates` storage:
-5. After the upload is completed, the upload dialog will close on its own and you'll be back to the `ISO Images` page. Refresh the page, then you'll see the list now updated with your newly uploaded ISO image.
+ 
- 
+5. Close the Task viewer dialog to return to the `ISO Images` page. You will see that the list now shows your newly uploaded Debian ISO image:
- Notice how the ISO is only identified by its file name. Sometimes ISOs don't have detailed names like the one for the Debian distribution, so be sure of giving the ISOs you upload meaningful and unique names so you can tell them apart.
+ 
-6. Finally, you can check out in the `hdd_templates` storage's `Summary` how much space you have left (`Usage` field).
+    Notice how the ISO is only identified by its file name. Sometimes ISOs don't have detailed names like the one for the Debian distribution, so be sure to give the ISOs you upload meaningful and unique names so you can tell them apart.
- 
+6. Finally, you can check out in the `hdd_templates` storage's `Summary` how much space you have left (`Usage` field):
-Now you only have one ISO image but, over time, you may accumulate a number of them. So be always mindful of the free space you have left, and remove old images or container templates you're not using anymore.
+ 
+
+Now you only have one ISO image but, over time, you may accumulate a number of them. Be mindful of the free space you have left, and prune old images or container templates you are not using anymore.
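+
+If you prefer working from a shell, you can also skip the browser upload and download the ISO directly into the `hdd_templates` storage from a root session on your PVE host. This is just a sketch that assumes the default `template/iso/` subfolder layout Proxmox VE uses for directory storages:
+
+~~~sh
+# Download the Debian netinst ISO straight into the ISO folder of the
+# hdd_templates directory storage, avoiding the temporary copy in /var/tmp/.
+wget -P /mnt/hdd_templates/template/iso/ \
+  https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-13.0.0-amd64-netinst.iso
+~~~
+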
## Building a Debian virtual machine
In this section you'll see how to create a basic and lean Debian VM, then how to turn it into a VM template.
-### _Setting up a new virtual machine_
+### Setting up a new virtual machine
-First, you need to configure the VM itself.
+First, you need to create and configure a new VM:
-1. Click on the `Create VM` button found at the web console's top right.
+1. Click on the `Create VM` button found at the web console's top right:
- 
+ 
-2. You'll see the `Create: Virtual Machine` window, opened at the `General` tab.
+2. You'll see the `Create: Virtual Machine` window, opened at the `General` tab:
- 
+ 
From it, only worry about the following parameters:
- - `Node`: in a cluster with several nodes you would need to choose where to put this VM. Since you only have one standalone node, just leave the default value.
+ - `Node`\
+ In a cluster with several nodes you would need to choose where to put this VM. Since you only have one standalone node, just leave the default `pve` value.
+
+ - `VM ID`\
+ A numerical identifier for the VM.
- - `VM ID`: a numerical identifier for the VM.
- > **BEWARE!**
- > Proxmox VE doesn't allow IDs lower than 100.
+ > [!NOTE]
+ > Proxmox VE does not allow IDs lower than `100`.
- - `Name`: this must be a valid DNS name, like `debiantpl` or something longer such as `debiantpl.your.pve.domain`.
- > **BEWARE!**
- > The official Proxmox VE says that this name is `a free form text string you can use to describe the VM`, which contradicts what the web console actually validates as correct.
+ - `Name`\
+ This field must be a valid DNS name, like `debiantpl` or something longer such as `debiantpl.homelab.cloud`.
- - `Resource Pool`: here you can indicate to which pool of resources (you have none defined at this point) you want to make this VM a member of.
+ > [!NOTE]
+      > The official Proxmox VE documentation says that this name is `a free form text string you can use to describe the VM`, which contradicts what the web console actually validates as correct.
- The only value you need to set here is the name, which in this case could be `debiantpl` (for Debian Template).
+ - `Resource Pool`\
+      Here you can indicate which resource pool (you have none defined at this point) this VM should be a member of.
- 
+    The only value you really need to set here is the name, which in this case could be `debiantpl` (for Debian Template).
-3. Click on the `Next` button and you'll reach the `OS` tab.
+ 
- 
+3. Click on the `Next` button and you'll reach the `OS` tab:
+
+ 
In this form, you only have to choose the Debian ISO image you uploaded before. The `Guest OS` options are already properly set up for the kind of OS (a Linux distribution) you're going to install in this VM.
- So, be sure of having the `Use CD/DVD disc image file` option enabled, so you can select the proper `Storage` (you should only see here the `hdd_templates` one) and `ISO image` (you just have one ISO right now).
+    Therefore, make sure the `Use CD/DVD disc image file` option is enabled, then select the proper `Storage` (you should only see the `hdd_templates` one here) and `ISO image` (you just have one ISO right now).
- 
+ 
-4. The next tab you should go to is `System`.
+4. The next tab you should go to is `System`:
- 
+ 
- Here, only tick the `Qemu Agent` box and leave the rest with their default values.
+ Here, only tick the `Qemu Agent` checkbox and leave the rest with their default values.
- 
+ 
- The `Qemu Agent` option _"lets Proxmox VE know that it can use its features to show some more information, and complete some actions (for example, shutdown or snapshots) more intelligently"_.
+ The QEMU agent _"lets Proxmox VE know that it can use its features to show some more information, and complete some actions (for example, shutdown or snapshots) more intelligently"_.
-5. Hit on `Next` to reach the `Disk` tab.
+5. Hit on `Next` to advance to the `Disks` tab:
- 
+ 
- In this step you have a form in which you can add several storage drives to your VM, but there are certain parameters that you need to see to create a virtual SSD drive. So, enable the `Advanced` checkbox at the bottom of this window and you'll see some extra parameters which I've highlighted in the next snapshot.
+ In this step you have a form in which you can add several storage drives to your VM, but there are certain parameters that you need to see to create a virtual SSD drive. So, enable the `Advanced` checkbox at the bottom of this window and you'll get some extra parameters which I've highlighted in the next snapshot:
- > **BEWARE!**
- > The `Advanced` checkbox affects all the steps of this wizard, but not all of those steps have advanced parameters to show.
+ > [!NOTE]
+ > Although the `Advanced` checkbox appears in all the steps of this wizard, not all of those steps have advanced parameters to offer.
- 
+ 
From the many parameters showing now in this form, just pay attention to the following ones:
- - `Storage`: here you must choose on which storage you will place the disk image of this VM. At this point, in the list you'll only see the thinpools you created in the [**G019** guide](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md).
+ - `Storage`\
+ Here you must choose on which storage you will place the disk image of this VM. At this point, in the list you'll only see the thinpools you created in the [chapter **G019**](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md).
+
+ - `Disk size (GiB)`\
+ How big you want the main disk for this VM, [in gibibytes](https://en.wikipedia.org/wiki/Gigabyte#Base_2_(binary)).
+
+ - `Discard`\
+      Since this drive will be put on thin-provisioned storage, you can enable this option so the drive's image shrinks when space is marked as freed after removing files within the VM.
- - `Disk size (GiB)`: how big you want the main disk for this VM, in gigabytes.
+ - `SSD emulation`\
+      If your storage is on an SSD drive (like the `ssd_disks` thinpool), you can turn this on.
- - `Discard`: since this drive will be put on a thin provisioned storage, you can enable this option so this drive's image get's shrunk when space is marked as freed after removing files within the VM.
+ - `IO thread`\
+      With this option enabled, IO activity will have its own CPU thread in the VM, which could improve the VM's performance. Enabling it also sets the `SCSI Controller` of the drive to _VirtIO SCSI single_.
- - `SSD emulation`: if your storage is on an SSD drive, (like the `ssd_disks` thinpool), you can turn this on.
+ - `Async IO`\
+      The official documentation does not explain this value, but in short it selects which method the drive uses for asynchronous IO.
- - `IO thread`: with this option enabled, IO activity will have its own CPU thread in the VM, which could improve the VM's performance and also will change the `SCSI Controller` of the drive to _VirtIO SCSI single_.
+ > [!NOTE]
+ > [Know more about the async IO property in this Proxmox Forum thread](https://forum.proxmox.com/threads/async-io-settings-for-virtual-disks-documentation-how-to-set.114932/).
- > **BEWARE!**
- > The `Bandwidth` tab allows you to adjust the read/write capabilities of this drive, so only adjust them if you're really sure about how to set them up.
+ > [!IMPORTANT]
+ > **The `Bandwidth` tab allows you to adjust the read/write capabilities of the storage drive**\
+ > Only adjust the bandwidth options if you are really sure about how to set them up.
- Knowing all that, choose the `ssd_disks` thinpool as `Storage`, put a small number as `Disk Size` (such as 10 GiB), and enable the `Discard`, `SSD emulation` and `IO thread` options.
+    Knowing all this, choose the `ssd_disks` thinpool as `Storage`, put a small number as `Disk Size` (such as 10 GiB), and enable the `Discard` and `SSD emulation` options. Leave the `IO thread` option enabled (as it is by default), and do not change the default value already set in the `Async IO` parameter. Do not change any of the remaining parameters in this dialog.
- 
+ 
- This way, you've configured the `scsi0` drive you see listed in the column at the window's left. If you wanted to add more drives, click on the `Add` button and a new drive will be added with default values.
+ This way, you've configured the `scsi0` drive you see listed in the column at the window's left. If you want to add more drives, click on the `Add` button and a new drive will be added with default values.
-6. The next tab to fill is `CPU`. Since you have enabled the `Advanced` checkbox, you'll see the advanced parameters of this and following steps right away.
+6. The next tab to fill is `CPU`. Since you have enabled the `Advanced` checkbox in the previous `Disks` tab, you'll see the advanced parameters of this and following steps right away:
- 
+ 
    The parameters on this tab are very dependent on the real capabilities of your host's CPU. The main parameters you have to care about at this point are:
- - `Sockets`: a way of saying how many CPUs you want to assign to this VM. Just leave it with the default value (`1`).
+ - `Sockets`\
+ A way of saying how many CPUs you want to assign to this VM. Just leave it with the default value (`1`).
- - `Cores`: how many cores you want to give to this VM. When unsure on how many to assign, just put `2` here.
- > **BEWARE!**
- > Never put here a number greater than the real cores count in your CPU, or Proxmox VE won't start your VM!
+ - `Cores`\
+      How many cores you want to give to this VM. When unsure about how many to assign, just put `2` here.
- - `Type`: this indicates the type of CPU you want to emulate, and the closer it is to the real CPU running your system the better. There's a type `host` which'll make the VM's CPU have exactly the same flags as the real CPU running your Proxmox VE platform, but VMs with such CPU type will only run on CPUs that include the same flags. So, if you migrated such VM to a new Proxmox VE platform that runs on a CPU lacking certain flags expected by the VM, the VM won't run there.
+ > [!IMPORTANT]
+ > Never put here a number greater than the real cores count in your CPU, or Proxmox VE won't start your VM!
- - `Enable NUMA`: if your host supports [**NUMA**](https://en.wikipedia.org/wiki/Non-uniform_memory_access), enable this option.
+ - `Type`\
+      This indicates the type of CPU you want to emulate, and the closer it is to the real CPU running your system the better. There's a type `host` which will make the VM's CPU have exactly the same flags as the real CPU running your Proxmox VE platform, but VMs with that `host` CPU type will only run on CPUs that include the same flags. So, if you migrated such a VM to a new Proxmox VE platform that runs on a CPU lacking certain flags expected by the VM, the VM won't run there.
- - `Extra CPU Flags`: flags to enable special CPU options on your VM. Only enable the ones actually available in the CPU `Type` you chose, otherwise the VM **won't run**. If you choose the type `Host`, you can see the flags available in your real CPU in the `/proc/cpuinfo` file within your Proxmox VE host.
+ - `Enable NUMA`\
+ If your host supports [**NUMA**](https://en.wikipedia.org/wiki/Non-uniform_memory_access), enable this option.
- ~~~bash
+ - `Extra CPU Flags`\
+ Flags to enable special CPU options on your VM. Only enable the ones actually available in the CPU `Type` you chose, otherwise the VM **won't run**. If you choose the type `Host`, you can see the flags available in your real CPU in the `/proc/cpuinfo` file within your Proxmox VE host.
+
+ ~~~sh
$ less /proc/cpuinfo
~~~
- This file lists the specifications of each core on your CPU. For instance, the first core (called `processor` in the file) on my Intel Pentium J2900 is detailed as follows.
+ This file lists the specifications of each core on your CPU. For instance, the first core (called `processor` in the file) on the host where I'm running this guide's whole Proxmox VE setup is detailed as follows:
~~~properties
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 55
- model name : Intel(R) Pentium(R) CPU J2900 @ 2.41GHz
+ model name : Intel(R) Celeron(R) CPU J1900 @ 1.99GHz
stepping : 8
microcode : 0x838
- cpu MHz : 1440.392
+ cpu MHz : 2211.488
cache size : 1024 KB
physical id : 0
siblings : 4
@@ -178,397 +232,418 @@ First, you need to configure the VM itself.
fpu_exception : yes
cpuid level : 11
wp : yes
- flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm sse4_1 sse4_2 movbe popcnt tsc_deadline_timer rdrand lahf_lm 3dnowprefetch epb pti ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid tsc_adjust smep erms dtherm ida arat md_clear
+ flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm sse4_1 sse4_2 movbe popcnt tsc_deadline_timer rdrand lahf_lm 3dnowprefetch epb pti ibrs ibpb stibp tpr_shadow flexpriority ept vpid tsc_adjust smep erms dtherm ida arat vnmi md_clear
vmx flags : vnmi preemption_timer invvpid ept_x_only flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest
- bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only
- bogomips : 4833.33
+ bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only mmio_unknown
+ bogomips : 4000.00
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management:
~~~
- The flags available in the CPU are listed on the `flags` line, and below you can see also the `bugs` list.
+    The capabilities available in the CPU are listed in the `flags` and `vmx flags` lines, while its known hardware issues appear in the `bugs` line.
+
+    Bearing in mind the capabilities of the real CPU running this guide's Proxmox VE setup, the form is filled like this:
- Being aware of the capabilities of the real CPU used in this guide series, this form could be filled like shown below.
+ 
- 
+7. The next tab is `Memory`, with its advanced parameters already shown:
-7. The next tab is `Memory`, also with advanced parameters shown.
+ 
- 
+ The parameters to set in this dialog are:
- The parameters to set here are:
+ - `Memory (MiB)`\
+ The maximum amount of RAM this VM will be allowed to use.
- - `Memory (MiB)`: the maximum amount of RAM this VM will be allowed to use.
- - `Minimum memory (MiB)`: the minimum amount of RAM Proxmox VE must guarantee for this VM.
+ - `Minimum memory (MiB)`\
+ The minimum amount of RAM Proxmox VE must guarantee for this VM.
If the `Minimum memory` is a different (thus lower) value than the `Memory` one, Proxmox VE will use `Automatic Memory Allocation` to dynamically balance the use of the host RAM among the VMs you may have in your system, which you can also configure with the `Shares` attribute in this form. Better check the documentation to understand [how Proxmox VE handles the VMs' memory](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#qm_memory).
- 
+ 
- With the arrangement above, the VM will start with 1 GiB and should only be able to take as much as 1.5 GiB from the host's available RAM.
+ With the arrangement above, the VM will start with 1 GiB and will only be able to take as much as 2 GiB from the host's available RAM.
-8. The next tab to go to is `Network`, which shows too its advanced parameters.
+8. The next tab to go to is `Network`, which also has advanced parameters:
- 
+ 
- Notice how the `Bridge` parameter is set by default with the `vmbr0` Linux bridge. We'll come back to this parameter later but, for now, you don't need to configure anything in this step, the default values are correct for this VM.
+    Notice how the `Bridge` parameter is set by default with the `vmbr0` Linux bridge. You will come back to this parameter later but, for now, you do not need to configure anything in this step; the default values are correct for this Debian template VM.
-9. The last tab you'll reach is `Confirm`.
+9. The last tab you'll reach is `Confirm`:
- 
+ 
Here you'll be able to give a final look to the configuration you've assigned to your new VM before you create it. If you want to readjust something, just click on the proper tab or press `Back` to reach the step you want to change.
- Also notice the `Start after created` check. If enabled, it'll make Proxmox VE boot up your new VM right after its creation.
+    Also notice the `Start after created` checkbox. Do NOT enable it, since it would make Proxmox VE boot up your new VM right after its creation, something you don't want at this point.
-10. Click on `Finish` and the creation should proceed. Check the `Tasks` log at the bottom of your web console to see its progress.
+10. Click on `Finish` and the creation should proceed. Check the `Tasks` log at the bottom of your web console to see its progress:
- 
+ 
- Notice how the new VM appear in the `Server View` tree of your PVE node. Click on it to see its `Summary` view, as you can also see in the capture above.
+ Notice how the new VM appears in the `Server View` tree of your PVE node. Click on it to see its `Summary` view as shown in the capture above.
-The configuration file for the VM is stored at `/etc/pve/nodes/pve/qemu-server` (notice that **it's relative to the `pve` node**) as `[VM ID].cfg`. So, for this new VM that has the VM ID `100`, the file is `/etc/pve/nodes/pve/qemu-server/100.cfg` and looks like below.
+The configuration file for the new VM is stored at `/etc/pve/nodes/pve/qemu-server` (notice that **the path is specific to the `pve` node**) as `[VM ID].conf`. So, for this new VM that has the VM ID `100`, the file is `/etc/pve/nodes/pve/qemu-server/100.conf`:
~~~properties
agent: 1
balloon: 1024
boot: order=scsi0;ide2;net0
cores: 2
-cpu: host,flags=+md-clear;+spec-ctrl
-ide2: hdd_templates:iso/debian-11.1.0-amd64-netinst.iso,media=cdrom
-memory: 1512
-meta: creation-qemu=6.1.0,ctime=1637241576
+cpu: host
+ide2: hdd_templates:iso/debian-13.0.0-amd64-netinst.iso,media=cdrom,size=754M
+memory: 2048
+meta: creation-qemu=10.0.2,ctime=1756890694
name: debiantpl
-net0: virtio=CE:AE:AB:C6:8B:7F,bridge=vmbr0,firewall=1
+net0: virtio=BC:24:11:3E:B9:39,bridge=vmbr0,firewall=1
numa: 0
ostype: l26
scsi0: ssd_disks:vm-100-disk-0,discard=on,iothread=1,size=10G,ssd=1
scsihw: virtio-scsi-single
-smbios1: uuid=88136df8-90b0-4dc5-9ae7-2b302dd559c9
+smbios1: uuid=8fce9b8d-d716-4e7b-9817-3d727b40eb9f
sockets: 1
-vmgenid: 4e4b10c7-f323-48d8-87c7-6a2c4fdc787c
+vmgenid: 08b95c71-2feb-4338-8970-c3cfba8a6e94
~~~
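+
+You can also print this same configuration from a root shell on the PVE node with the `qm` tool, which is handy for checking a VM's settings without browsing `/etc/pve`:
+
+~~~sh
+# Prints the current configuration of the VM with ID 100.
+qm config 100
+~~~
+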
-### _Adding an extra network device to the new VM_
+### Adding an extra network device to the new VM
-Proxmox VE doesn't allow you to assign, in the VM creation wizard, more than one device of any kind. So, if you want to have an extra network device in your VM, you have to do it **after** you've created the VM in Proxmox VE. And why the extra network device? To allow your future K3s cluster's nodes to communicate directly with each other through the other Linux bridge you already created in the [**G017** guide](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md).
+In the VM creation wizard, Proxmox VE does not allow you to configure more than one network device. To add an extra network device to your VM, **you have to do it after you've created the VM in Proxmox VE**. And why the extra network device? To allow your future K3s cluster's nodes to communicate directly with each other through the other Linux bridge you already created in the [**G017** chapter](G017%20-%20Virtual%20Networking%20~%20Network%20configuration.md). Let's add that extra network device to your new Debian template VM.
-So, let's add that extra network device to your new VM.
+1. Go to the `Hardware` tab of your new VM:
-1. Go to the `Hardware` tab of your new VM.
+ 
- 
+2. Click on `Add` to see the list of devices you can aggregate to the VM, then choose `Network Device`:
-2. Click on `Add` to see the list of devices you can aggregate to the VM, then choose `Network Device`.
+ 
- 
+3. You'll see that the raised window is exactly the same as the `Network` tab you saw while creating the VM:
-3. You'll see that the raised window is exactly the same as the `Network` tab you saw while creating the VM.
+ 
- 
+    Notice that the `Advanced` options are also enabled, probably because the Proxmox VE web console remembers that you enabled the checkbox in the creation wizard. Here you have to adjust only two parameters:
- You have to adjust two fields:
+ - `Bridge`\
+ You must set the `vmbr1` bridge.
- - `Bridge` parameter: here you must set the `vmbr1` bridge.
+ - `Firewall`\
+ Since this network device is strictly meant for internal networking, you won't need the firewall active here. Disable this checkbox.
- - `Firewall` checkbox: since this network device is strictly meant for internal networking, you won't need the firewall active here. Disable this checkbox.
+ 
- 
+4. Click on `Add` and you'll see that the new network device appears immediately as `net1` at the end of your VM's hardware list:
-4. Click on `Add` and you'll see that the new network device appears immediately as `net1` at the end of your VM's hardware list.
+ 
- 
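+
+The same extra network device could also be attached from a root shell on the PVE node. A minimal sketch, assuming your template VM keeps the ID `100` used in this guide:
+
+~~~sh
+# Adds a second VirtIO NIC attached to the vmbr1 bridge, with the firewall disabled.
+qm set 100 --net1 virtio,bridge=vmbr1,firewall=0
+~~~
+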
+### Installing Debian on the new VM
-### _Installing Debian on the new VM_
+At this point, your Debian template VM has the minimal virtual hardware setup you need for installing Debian in it.
-At this point, your new VM has the minimal virtual hardware setup you need for installing Debian in it.
+1. Go back to the `Summary` tab of your new VM so you can see its status and performance statistics, then press the `Start` button to start the VM up:
-1. Go back to the `Summary` tab of your new VM and press the `Start` button to start the VM up.
+ 
- 
+ Right after starting the VM, click on the `>_ Console` button to raise a `noVNC` shell window.
- Right after it, click on the `>_ Console` button to raise a `noVNC` shell window.
+2. In the `noVNC` shell window, you should end up seeing the Debian installer boot menu:
-2. In the `noVCN` shell window you should see the following screen.
+ 
- 
+3. In this menu, **you must choose the `Install` option** to run the installation of Debian in text mode:
-3. In this menu, you **must** choose the `Install` option to run the installation of Debian in text mode.
+ 
- 
+ It will take a few seconds to load the next step's screen.
-4. The next screen will ask you about what language you want to use in the installation process and apply to the installed system.
+4. The next screen asks you about what language you want to use in the installation process and apply to the installed system:
- 
+ 
Just choose whatever suits you and press `Enter`.
-5. The following step is about your geographical location. This will determine your VM's timezone and locale.
+5. The following step is about your geographical location. This will determine your VM's timezone and locale:
- 
+ 
Again, highlight whatever suits you and press `Enter`.
-6. If you choose an unexpected combination of language and location, the installer will ask you about what locale to apply on your system.
+6. If you choose an unexpected combination of language and location, the installer will ask you about what locale to apply on your system:
- 
+ 
- Oddly enough, it only shows the options you see in the screenshot above so, in case of doubt, just stick with the default `United States - en_US.UTF-8` option.
+ Oddly enough, it only offers the options shown in the screenshot above. In case of doubt, just stick with the default `United States - en_US.UTF-8` option.
-7. Next, you'll have to choose the keyboard configuration that suits you better.
+7. Next, you'll have to choose your preferred keyboard configuration:
- 
+ 
-8. At this point it will show some progress bars while the installer retrieves components and scans the VM hardware.
+8. At this point it will show some progress bars while the installer retrieves additional components and scans the VM's hardware:
- 
+ 
-9. After some seconds you'll end seeing the following screen about the network configuration.
+9. After a few seconds you'll reach the screen about configuring the network:
- 
+ 
- Since this VM has two virtual Ethernet network cards, the installer must know which one to use as the primary network device. Leave the default option (the card with the lowest `ens##` number) and press `Enter`.
+ Since this VM has two virtual Ethernet network cards, the installer must know which one to use as the primary network device. Leave the default option (the card with the lowest `ens##` number, like the `ens18` in the snapshot) and press `Enter`.
-10. Next, you'll see how the installer tries to setup the network in a few progress bars.
+10. Next, you'll see how the installer tries to setup the network in a few progress bars:
- 
+ 
-11. If the previous network autosetup process is successful, you'll end up in the following screen.
+11. If the previous network autosetup process is successful, you'll end up in the following screen:
- 
+ 
- In the text box, type in the `hostname` for this system, bearing in mind that this VM will become just a template to build others. Preferably, input the same name you used previously in the creation of the VM, which in this guide was `debiantpl`.
+ In the text box, type in the `hostname` for this system, bearing in mind that this VM will become just a template to build others. Preferably, input the same name you used previously in the creation of the VM, which in this guide is `debiantpl`.
-12. In the next step you'll have to specify a domain name.
+12. In the next step you'll have to specify a domain name:
- 
+ 
- Here use the same one you set for your Proxmox VE system.
+ Here use the same one you set for your Proxmox VE system, which in this guide is `homelab.cloud`.
-13. The following screen is about setting up the password for the `root` user.
+13. The following screen is about setting up the password for the `root` user:
- 
+ 
- Since this VM is going to be just a template, there's no need for you to type a difficult or long password.
+ Since this VM is going to be just a template, there's no need here for you to type a difficult or long password.
-14. The installer will ask you to confirm the `root` password.
+14. The installer will ask you to confirm the `root` password:
- 
+ 
-15. The next step is about creating a new user that you should use instead of the `root` one.
+15. The next step is about creating a new user that you should use instead of the `root` one:
- 
+ 
- In this screen you are expected to type the new user's full name, but since this is going to be your administrative one, input something more generic like `System Manager User` for instance.
+ In this screen you are expected to type the new user's full name, but since this is going to be your administrative one, input something more generic like `System Manager` for instance.
-16. The following screen is about typing a username for the new user.
+16. The following screen is about typing a username for the new user:
- 
+ 
- By default, the installer will take the first word you set in the full name and use it (in lowercase) as username. In this guide, this user will be called `mgrsys`, following the same criteria used for creating the alternative manager user for the Proxmox VE host.
+ By default, the installer will take the first word you set as the user's full name and use it (in lowercase) as username. In this guide, this user will be called `mgrsys`, following the same criteria used for creating the alternative manager user for the Proxmox VE host.
-17. On this step you input the password for this new user. Again, since this VM is going to be a template, don't put a complex password here.
+17. On this step you input the password for this new administrative user. Again, since this VM is going to be just a template, do not enter a complex password here:
- 
+ 
-18. You'll have to confirm the new user's password.
+18. You'll have to confirm the new user's password:
- 
+ 
-19. This step might not show up in your case, but it's just about choosing the right timezone within the country you chose before.
+19. This step is just about setting up the clock of this VM. First, the Debian installer will try to find a proper time server:
- 
+ 
-20. After seeing a few more loading bars, you'll reach the step regarding the disk partitioning.
+    Then, the installer may or may not ask you for your specific timezone within the country you picked earlier:
- 
+ 
- Be sure of choosing the **SECOND** guided option to give the VM's disk a more flexible partition structure with the LVM system, like the one you have in your Proxmox VE host.
+20. After a few more loading bars, you'll advance to the step for disk partitioning:
- 
+ 
-21. The following screen is about choosing the storage drive to partition.
+    **Be sure to choose the SECOND guided option** to give the VM's disk a more flexible partition structure with the LVM system, like the one you have in your Proxmox VE host.
- 
+ 
- There's only one disk attached to this VM, so there's no other option but the one shown.
+21. The following screen is about choosing the storage drive to partition:
-22. Next, the installer will ask you about how to set up the `home` and other directories.
+ 
- 
+ There is only one disk attached to this VM, so there's no other option but the one shown.
- This VM is going to be a template for servers, so you shouldn't ever need to mount a separate partition for the `home` directory. Something else could be said about the `var` or the `tmp` directories, whose contents can grow notably. But, since you can increase the size of the VM's storage easily from Proxmox VE, just leave the default highlighted option and press `Enter`.
+22. Next, the installer asks you which partitioning scheme you want to apply:
-23. The next screen is the confirmation of the disk partitioning you've setup in the previous steps.
+ 
- 
+ This VM is going to be a template for servers, so you shouldn't ever need to mount a separate partition for the `home` directory. Something else could be said about directories like `var` or `srv`, or even the swap, whose contents can potentially grow notably. But, since you can increase the size of the VM's storage easily from Proxmox VE, just leave the default highlighted option and press `Enter`.
+
+23. The next screen is the confirmation of the disk partitioning you've setup in the previous steps:
+
+ 
If you are sure that the partition setup is what you wanted, highlight `Yes` and press `Enter`.
-24. The following step asks you about the size assigned to the LVM group in which the system partitions are going to be created.
+24. The following step asks you about the size assigned to the LVM group in which the system partitions are going to be created:
- 
+ 
Unless you know better, just stick with the default value and move on.
-25. After a few loading screens, you'll reach the disk partitioning final confirmation screen.
+25. The installer will apply the selected partition scheme and, after a few more quick progress windows, you'll reach the final disk partitioning confirmation screen:
- 
+ 
Choose `Yes` to allow the installer to finally write the changes to disk.
-26. After the partitioning is finished, you'll reach the progress bar of the base system installation.
+26. After the partitioning is finished, you'll see the progress bar for the base system installation:
- 
+ 
- The installer will take a bit in finishing this task.
+ The Debian installer will need a bit of time to finish this task.
-27. When the base system is installed, you'll see the following screen.
+27. When the base system is installed, the following dialog appears:
- 
+ 
- You don't have any additional media to use, so just answer `No` to this screen.
+ Since you do not have any additional media to use, just answer `No` to this dialog.
-28. The following step is about setting the right mirror servers location for your `apt` configuration.
+28. The following step is about setting the right mirror servers location for your `apt` configuration:
- 
+ 
- By default, the option highlighted will be the same country you chose at the beginning of this installation. Chose the country that suits you best.
+ By default, the option highlighted will be the same country you chose at the beginning of this installation. Pick the country that suits you best.
-29. The next screen is about choosing a concrete mirror server for your `apt` settings.
+29. The next screen is about choosing a concrete mirror server for your `apt` settings:
- 
+ 
Stick with the default option, or change it if you identify a better alternative for you in the list.
-30. A window will arise asking you to input your `HTTP proxy information`, if you happen to be connecting through one.
+30. A window will appear asking you to input your `HTTP proxy information`, in case your PVE node happens to connect through one:
- 
+ 
In this guide it's assumed that you're not using a proxy, so that field should be left blank.
-31. The installer will take a bit while configuring the VM's `apt` system.
+31. The installer will take a moment to configure the VM's `apt` system:
- 
+ 
-32. When the `apt` configuration has finished, you'll see the following question about allowing some script to get some usage statistics of packages on your system.
+32. When the `apt` configuration has finished, you'll see the following question about allowing a script to get some usage statistics of packages on your system:
- 
+ 
- Choose whatever you like here, although bear in mind that the security restrictions that you'll have to apply later to this Debian VM system may end blocking that script's functionality.
+    Choose whatever you like here, although bear in mind that the security restrictions you'll have to apply in a later chapter to this Debian VM system may end up blocking that script's functionality.
-33. After another loading bar, you'll reach the `Software selection` screen.
+33. After another loading bar, you'll reach the `Software selection` screen:
- 
+ 
- You'll see that, by default, the installer is setting the VM to be a graphical environment. You only need the two last options enabled (`SSH server` and `standard system utilities`), so change them so they look like below and then press `Continue`.
+    You'll see that, by default, the installer is setting the VM to be a graphical environment. Since your VM is going to be the basic template for server VMs, you only need the last two options enabled (`SSH server` and `standard system utilities`). Change the options here to make them look like below and then press `Continue`.
- 
+ 
-34. Another progress windows will show up and the installer will proceed with the remainder of the installation process.
+34. Another progress window will show up and the installer will proceed with the remainder of the software installation process:
- 
+ 
-35. In the installation process, the installer will ask you if you want to install the GRUB boot loader in the VM's primary storage drive.
+35. After finishing the software installation, the installer will ask you if you want to install the GRUB boot loader in the VM's primary storage drive:
- 
+ 
The default `Yes` option is the correct one, so just press `Enter` on this screen.
-36. Next, the installer will ask you on which drive you want to install GRUB.
-
- 
+36. Next, the installer will ask you on which drive you want to install GRUB:
- Highlight the `/dev/sda` option and press `Enter` to continue the process.
+ 
- 
+ Highlight the `/dev/sda` option and press `Enter` to continue the process:
-37. After installing the GRUB boot loader, you'll see for a few seconds how the installer finishes the process.
+ 
- 
+37. After configuring the GRUB boot loader, the installer will perform the necessary tasks to finish the Debian installation:
-38. After the installation has finished, the installer will warn you about removing the media you used to launch the whole procedure.
+ 
- 
+38. After the installation has finished, the installer will warn you about removing the media you used to launch the whole procedure:
- > **BEWARE!**
- > Keep calm, **DON'T** press `Enter` yet and read the following steps.
+ > [!WARNING]
+ > **Keep calm, _DO NOT_ press `Enter` yet and read the next steps**\
+ > If you `Continue`, the VM will reboot and, if the installer media is still in place, the installer will boot up again.
- If you `Continue`, the VM will reboot and, if the installer media is still in place, the installer will boot up again.
+ 
-39. Go back to your Proxmox VE web console, and open the `Hardware` tab of your VM. Then, choose the `CD/DVD Drive` item and press the `Edit` button.
+39. Go back to your Proxmox VE web console, and open the `Hardware` tab of your VM. Then, choose the `CD/DVD Drive` item and press the `Edit` button (or just double click on the item):
- 
+ 
-40. You'll see the Edit window for the `CD/DVD Drive` you chose.
+40. You'll see the `Edit` window for the `CD/DVD Drive` you chose:
- 
+ 
Here, choose the `Do not use any media` option and click on `OK`.
- 
+ 
-41. Back in the Hardware screen, you'll see how the CD/DVD Drive is now set to `none`.
+41. Back in the Hardware screen, you'll see how the CD/DVD Drive is now set to `none`:
- 
+ 
- > **BEWARE!**
- > This doesn't mean that the change has been applied to the still running VM. Usually, changes like these will require a reboot of the VM.
+ > [!IMPORTANT]
+ > **This does not mean that the change has been applied to the still running VM**\
+ > Usually, changes like these will require a reboot of the VM.
-42. Now that the VM's CD/DVD Drive is configured to be empty, you can go back to the noVNC shell and press on `Enter` to finish the Debian installation. If everything goes as it should, the VM will reboot into the GRUB screen of your newly installed Debian system.
+42. Now that the VM's CD/DVD drive is configured to be empty, you can go back to the noVNC shell and press on `Enter` to finish the Debian installation. If everything goes as it should, the VM will reboot into the GRUB screen of your newly installed Debian system:
- 
+ 
-43. Press Enter with the default highlighted option or allow the timer to reach `0`. Then, after the usual system booting shell output, you should reach the login.
+43. Press Enter with the default highlighted option or allow the timer to reach `0`. Then, after the usual system booting shell output, you should reach the login:
- 
+ 
 44. As a final verification, log in either with `root` or with `mgrsys` to check that they work as expected.
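+
+As a side note, the detachment of the installer ISO you did through the web console in steps 39 to 41 can also be done from a root shell on the PVE node. A sketch, again assuming the VM ID `100`:
+
+~~~sh
+# Leaves the VM's CD/DVD drive (ide2) in place but with no media loaded.
+qm set 100 --ide2 none,media=cdrom
+~~~
+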
## Note about the VM's `Boot Order` option
-Go to the `Options` tab of your new Debian VM.
+Go to the `Options` tab of your new Debian VM:
-
+
-In the capture above, you can see highlighted the `Boot Order` list currently enabled in this VM. If you press on the `Edit` button, you'll be able to edit this `Boot Order` list.
+In the capture above, you can see the `Boot Order` list currently enabled in this VM highlighted. If you press the `Edit` button, you'll be able to edit this `Boot Order` list:
-
+
-Notice how PVE has already enabled the bootable hardware devices (hard disk, CD/DVD drive and network device) that were configured in the VM creation process. Also see how the network device added later, `net1`, is **NOT** enabled by default.
+Notice how PVE has already enabled the bootable hardware devices (hard disk, CD/DVD drive and network device) that were configured in the VM creation process. Also see how the network device added later, `net1`, **is NOT enabled by default**.
-> **BEWARE!**
+> [!IMPORTANT]
+> **New bootable devices in VMs are not enabled by default**\
> When you modify the bootable hardware devices of a VM, Proxmox VE **WON'T** enable automatically any new bootable device in the `Boot Order` list. You must revise or modify it whenever you make changes to the hardware devices available in a VM.
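+
+If you ever need to adjust the boot order outside the web console, the same option can be set from a root shell on the PVE node. A sketch, assuming the VM ID `100` and the device names shown in this chapter:
+
+~~~sh
+# Boots first from the hard disk, then the CD/DVD drive, then the first NIC.
+qm set 100 --boot 'order=scsi0;ide2;net0'
+~~~
+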
## Relevant system paths
-### _Directories_
+### Directories
- `/etc/pve`
- `/etc/pve/nodes/pve/qemu-server`
-### _Files_
+### Files
- `/etc/pve/user.cfg`
-- `/etc/pve/nodes/pve/qemu-server/100.cfg`
+- `/etc/pve/nodes/pve/qemu-server/100.conf`
## References
-### _Debian_
+### Debian
- [Debian](https://www.debian.org/)
-- [How to Install a Debian 10 (Buster) Minimal Server](https://www.howtoforge.com/tutorial/debian-minimal-server/)
+- [How to Install a Debian 12 (Bookworm) Minimal Server](https://www.howtoforge.com/tutorial/debian-minimal-server/)
+
+### Virtual Machines on Proxmox VE
+
+- [PVE admin guide. QEMU/KVM Virtual Machines](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_virtual_machines)
+ - [CPU](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#qm_cpu)
+ - [Memory](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#qm_memory)
+ - [Network Device](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#qm_network_device)
+
+- [PVE admin guide. Permission Management. Pools](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pveum_pools)
+
+- [Proxmox Forum. Async IO Settings for Virtual Disks: Documentation? How to Set?](https://forum.proxmox.com/threads/async-io-settings-for-virtual-disks-documentation-how-to-set.114932/)
+
+- [Reddit. Proxmox. Best practices on setting up Proxmox on a new server](https://www.reddit.com/r/Proxmox/comments/oz81qq/best_practices_on_setting_up_proxmox_on_a_new/)
+
+- [Non-uniform memory access](https://en.wikipedia.org/wiki/Non-uniform_memory_access)
-### _Virtual Machines on Proxmox VE_
+### Units of measurement for storage
-- [PVE admin guide. Qemu/KVM Virtual Machines](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_virtual_machines)
-- [PVE admin guide. User management. Pools](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#pveum_pools)
-- [PVE admin guide. Qemu/KVM Virtual Machines ~ CPU options](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#qm_cpu)
-- [PVE admin guide. Qemu/KVM Virtual Machines ~ Memory options](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#qm_memory)
-- [PVE admin guide. Qemu/KVM Virtual Machines ~ Network Device options](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#qm_network_device)
-- [Best practices on setting up Proxmox on a new server](https://www.reddit.com/r/Proxmox/comments/oz81qq/best_practices_on_setting_up_proxmox_on_a_new/)
+- [Wikipedia. Gigabyte](https://en.wikipedia.org/wiki/Gigabyte#Definition)
+ - [Definition. Base 2 (binary)](https://en.wikipedia.org/wiki/Gigabyte#Base_2_(binary))
## Navigation
diff --git a/G021 - K3s cluster setup 04 ~ Debian VM configuration.md b/G021 - K3s cluster setup 04 ~ Debian VM configuration.md
index 931d81f..e325cbb 100644
--- a/G021 - K3s cluster setup 04 ~ Debian VM configuration.md
+++ b/G021 - K3s cluster setup 04 ~ Debian VM configuration.md
@@ -1,257 +1,312 @@
# G021 - K3s cluster setup 04 ~ Debian VM configuration
-Now you have a functional Debian VM but, as you did with your Proxmox VE host, it needs to be configured. Therefore, this guide will show you most of the same procedures detailed among the [**G003**](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources%2C%20updates%20and%20extra%20tools.md) to [**G016**](G016%20-%20Host%20optimization%2002%20~%20Disabling%20transparent%20hugepages.md) guides, but in an condensed manner, while also adding some extra steps needed for setting up particular aspects on this VM.
-
-## Suggestion about IP configuration in your network
-
-Before you go further configuring your new VM or creating some more, you should consider organizing the IPs in your network. With this I mean that you should assign static IPs to all your devices present in your network, including the VMs you'll create in your Proxmox VE standalone node. This will allow you to know to what IPs to connect to through ssh easily, rather than being forced to check every time what IP is assigned to your VMs and Proxmox VE Host, and also avoid potential IP conflicts among your devices and VMs.
-
-I suggest you to figure out first, in a document or spreadsheet, the IP distribution you want within your network. This way you can see how to divide the IPs among all your present devices and future VMs. For example, you could have your devices in the `192.168.1.100 - 192.168.1.150` range and your VMs in the `192.168.1.10 - 192.168.1.40` one. Then, when you've figured out the organization of your network, apply it in your router or gateway.
+- [You have to configure your new Debian VM](#you-have-to-configure-your-new-debian-vm)
+- [Suggestion about the IP organization within your LAN](#suggestion-about-the-ip-organization-within-your-lan)
+- [Adding the `apt` sources for _non-free_ packages](#adding-the-apt-sources-for-non-free-packages)
+- [Installing extra packages](#installing-extra-packages)
+- [The QEMU guest agent comes enabled in Debian](#the-qemu-guest-agent-comes-enabled-in-debian)
+ - [Discovering the QEMU guest agent configuration](#discovering-the-qemu-guest-agent-configuration)
+- [Hardening the VM's access](#hardening-the-vms-access)
+ - [Enabling `sudo` to the administrative user](#enabling-sudo-to-the-administrative-user)
+ - [Assigning a TOTP code to the administrative user](#assigning-a-totp-code-to-the-administrative-user)
+ - [SSH key pair for the administrative user](#ssh-key-pair-for-the-administrative-user)
+- [Hardening the `sshd` service](#hardening-the-sshd-service)
+ - [Create group for SSH users](#create-group-for-ssh-users)
+ - [Backup of `sshd` configuration files](#backup-of-sshd-configuration-files)
+ - [Changes to the `/etc/pam.d/sshd` file](#changes-to-the-etcpamdsshd-file)
+ - [Changes to the `/etc/ssh/sshd_config` file](#changes-to-the-etcsshsshd_config-file)
+- [Configuring Fail2Ban for SSH connections](#configuring-fail2ban-for-ssh-connections)
+- [Disabling the `root` user login](#disabling-the-root-user-login)
+- [Configuring the VM with `sysctl`](#configuring-the-vm-with-sysctl)
+ - [TCP/IP stack hardening](#tcpip-stack-hardening)
+ - [Network optimizations](#network-optimizations)
+ - [Memory optimizations](#memory-optimizations)
+ - [Kernel optimizations](#kernel-optimizations)
+- [Reboot the VM](#reboot-the-vm)
+- [Disabling transparent hugepages on the VM](#disabling-transparent-hugepages-on-the-vm)
+- [Regarding the microcode `apt` packages for CPU vulnerabilities](#regarding-the-microcode-apt-packages-for-cpu-vulnerabilities)
+ - [The Debian installer has installed the microcode package](#the-debian-installer-has-installed-the-microcode-package)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories on Debian VM](#directories-on-debian-vm)
+ - [Files on Debian VM](#files-on-debian-vm)
+- [References](#references)
+ - [QEMU](#qemu)
+ - [About `sudo`](#about-sudo)
+ - [Disabling `root` login](#disabling-root-login)
+ - [Microcode packages on VMs](#microcode-packages-on-vms)
+- [Navigation](#navigation)
+
+## You have to configure your new Debian VM
+
+Now you have a functional Debian VM but, as you did with your Proxmox VE host, it needs to be configured. This chapter will show you most of the same setup procedures detailed in the chapters between [**G003**](G003%20-%20Host%20configuration%2001%20~%20Apt%20sources%2C%20updates%20and%20extra%20tools.md) and [**G016**](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md), but in a condensed manner. It will also add some extra steps needed for setting up particular aspects of this VM.
+
+## Suggestion about the IP organization within your LAN
+
+Before you go further configuring your new VM or creating more of them, you should consider organizing the IPs in your network. By this I mean that you should assign static IPs (if you haven't done it already) to your Proxmox VE server and your VMs, including your new one. This will let you know easily which IPs to connect to through SSH, rather than being forced to check every time which IP is assigned to your VMs and Proxmox VE host. It will also help you avoid potential IP conflicts among your devices and VMs. I suggest you figure out first, in a document or spreadsheet, the IP distribution you want within your network. This way you can see how to divide the IPs among all your present devices and future VMs.
+
+I'll tell you how I've done it in my own LAN as an example. I set my router's LAN as a `10.0.0.0/8` network, which gives a very big range of IP numbers. Then, I only set static IPs in specific subranges (defined just in a spreadsheet) for my Proxmox VE server and the VMs I run in it. My Proxmox VE standalone node's IP is in a particular subrange (`10.1.0.0`), while all my Proxmox-based VMs are in a different one (`10.4.0.0`). Since the range of IPs available in a `10.0.0.0` network is enormous for a home LAN, I can afford to let my user devices get random IPs (with randomized MACs for better security) without worrying about possible IP conflicts.
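+
+Just as an illustration, and assuming you set the addresses inside each VM rather than through static DHCP leases in your router, a static IPv4 address for a Debian VM could be declared in its `/etc/network/interfaces` file like in the sketch below. The interface name, address and gateway are hypothetical examples, so adjust them to your own IP plan:
+
+~~~properties
+# Purely illustrative static IPv4 configuration for the VM's main network interface.
+auto ens18
+iface ens18 inet static
+    address 10.4.0.10
+    netmask 255.0.0.0
+    gateway 10.0.0.1
+~~~
+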
## Adding the `apt` sources for _non-free_ packages
It might happen that you need to install `apt` packages in your VM that are _non-free_ for Debian standards. To allow that, you need to enable the right `apt` sources in your Debian VM.
-1. Start the VM, then open a `noVNC` shell and log in the VM as `root`, or open a ssh session and log in as your **other user** (the `sshd` server won't allow you to log in as `root` using a password). If you choose the ssh method, you'll have to become `root` with the `su` command.
+1. Start the VM, then open a `noVNC` shell and log in the VM as `root`, or open an SSH session and log in as your **other user** (the `sshd` server won't allow you to log in as `root` using a password). If you choose the SSH method, you'll have to become `root` with the `su` command:
- ~~~bash
+ ~~~sh
$ su root
~~~
-2. Then, `cd` to `/etc/apt/sources.list.d`.
+2. Then, `cd` to `/etc/apt/sources.list.d`:
- ~~~bash
+ ~~~sh
$ cd /etc/apt/sources.list.d
~~~
-3. Create a new file called `debian-nonfree.list`.
+3. Create a new file called `debian-nonfree.list`:
- ~~~bash
+ ~~~sh
$ touch debian-nonfree.list
~~~
-4. Edit the `debian-nonfree.list` file, filling it with the lines below.
+4. Edit the `debian-nonfree.list` file, filling it with the lines below:
- ~~~bash
- deb http://deb.debian.org/debian bullseye non-free
- deb-src http://deb.debian.org/debian bullseye non-free
+ ~~~sh
+ deb http://deb.debian.org/debian trixie non-free
+ deb-src http://deb.debian.org/debian trixie non-free
- deb http://deb.debian.org/debian-security/ bullseye-security non-free
- deb-src http://deb.debian.org/debian-security/ bullseye-security non-free
+ deb http://deb.debian.org/debian-security/ trixie-security non-free
+ deb-src http://deb.debian.org/debian-security/ trixie-security non-free
- deb http://deb.debian.org/debian bullseye-updates non-free
- deb-src http://deb.debian.org/debian bullseye-updates non-free
+ deb http://deb.debian.org/debian trixie-updates non-free
+ deb-src http://deb.debian.org/debian trixie-updates non-free
~~~
- > **BEWARE!**
- > This sources list is only for Debian 11 Bullseye!
+ > [!WARNING]
+ > This sources list is only for Debian 13 "trixie"!
-5. Save the file and update `apt`.
+5. Save the file and update `apt`:
- ~~~bash
+ ~~~sh
$ apt update
~~~
## Installing extra packages
-The Debian OS you have running in your VM is rather barebones, so let's install some packages that can be useful to have in a server. Some of them will be also necessary in the following steps described in this guide. As `root`, execute the following `apt` command.
+The Debian OS you have running in your VM is rather barebones, so let's install some packages that can be useful to have on a server. Some of them will also be necessary in the following steps described in this chapter. As `root`, execute the following `apt` command:
-~~~bash
+~~~sh
$ apt install -y ethtool fail2ban gdisk htop libpam-google-authenticator net-tools nut-client sudo tree vim
~~~
-## The QEMU guest agent comes enabled in Debian 11
+## The QEMU guest agent comes enabled in Debian
-To allow the Proxmox VE platform to control properly the VMs, you need to have in them the QEMU guest agent service. Debian 11 already comes with it installed and running, so you can go to the web console and check the `Status` block in your VM's `Summary` tab.
+To give the Proxmox VE platform better control over its VMs, you need to have the QEMU guest agent service deployed in them. Debian already comes with it installed and running, so you can go to the web console and check the `Status` block in your VM's `Summary` tab:
-
+
-The web console now shows the v4 and v6 IPs of the VM's current main network card and, if you click on `More`, you'll be able to see all the MACs and IPs assigned to all the network devices currently present in your VM.
+The web console now shows the v4 and v6 IPs of the VM's current main network card. If you click on `More`, you will see all the MACs and IPs assigned to all the network devices currently present in your VM:
-
+
-In the list above, you should recognize the network devices currently attached to your VM: the `localhost` (called `lo`), the `ens18` (`net0` for Proxmox VE in the VM's `Hardware` tab) and `ens19` (`net1`) ones. Any change to the network devices active in the VM will be also shown here.
+In the list above, you should recognize the network devices currently attached to your VM:
-Thanks to this agent, you can execute web console actions like `Shutdown` or execute VM snapshots properly.
+- The `localhost`, called `lo`.
+- The `ens18`, named `net0` for Proxmox VE in the VM's `Hardware` tab.
+- The `ens19`, named `net1` for Proxmox VE in the VM's `Hardware` tab.
-A last detail to take into account here is the configuration of this QEMU agent. Although in this case it works just with the default values, you should know where to find its configuration files.
+Any change to the network devices active in the VM will also be shown in this list.
-- `/etc/qemu` : directory for the agent configuration files.
+Thanks to this QEMU agent, you can execute PVE web console actions like `Shutdown` or execute VM snapshots properly.
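+
+If you want to double-check that the agent is alive, you can look at its service inside the VM and, from a shell on your Proxmox VE host, ping it with the `qm` command. This is just a quick sketch; replace `<vmid>` with the ID of your VM:
+
+~~~sh
+# Inside the Debian VM: the guest agent service should be active (running).
+$ systemctl status qemu-guest-agent.service
+
+# On the Proxmox VE host: a silent return means the agent answered the ping.
+$ sudo qm agent <vmid> ping
+~~~
+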
-- `/etc/qemu/qemu-ga.conf` : this is the configuration file for the agent. Oddly enough, you won't find one created, meaning the agent is working with default values set either in some other file or just hardcoded in the program.
+### Discovering the QEMU guest agent configuration
-- `/usr/sbin/qemu-ga` : the path to the agent program itself. For some (probably security) reason, it's setup in such a way that you won't be able to execute it like any other command.
+Another important detail to take into account is the configuration of this QEMU agent. Although in the case of your first Debian VM it works well just with the default values, you should know where to find its configuration files:
-So, if you want to know what concrete configuration has your agent, do the following.
+- `/etc/qemu`
+ Directory for the agent configuration files.
-1. As `root`, cd to `/usr/sbin`.
+- `/etc/qemu/qemu-ga.conf`
+ This is the configuration file for the agent. Oddly enough, you will not find one created in your Debian VM, meaning the agent is working with default values set either in some other file or just hardcoded in the program.
- ~~~bash
+- `/usr/sbin/qemu-ga`
+ The path to the agent program itself. Probably for security reasons, it is set up in such a way that you will not be able to execute it like a regular command.
+
+If you want to know the concrete configuration of the QEMU agent running in your VM:
+
+1. As `root`, cd to `/usr/sbin`:
+
+ ~~~sh
$ cd /usr/sbin
~~~
-2. Execute the `qemu-ga` command as follows.
+2. Execute the `qemu-ga` command as follows:
- ~~~bash
+ ~~~sh
$ ./qemu-ga -D
~~~
-The previous command will return you an output like the following.
+3. The previous command will return an output like the following:
-~~~properties
-[general]
-daemon=false
-method=virtio-serial
-path=/dev/virtio-ports/org.qemu.guest_agent.0
-pidfile=/var/run/qemu-ga.pid
-statedir=/var/run
-verbose=false
-retry-path=false
-blacklist=
-~~~
+ ~~~properties
+ [general]
+ daemon=false
+ method=virtio-serial
+ path=/dev/virtio-ports/org.qemu.guest_agent.0
+ pidfile=/var/run/qemu-ga.pid
+ statedir=/var/run
+ verbose=false
+ retry-path=false
+ block-rpcs=
+ allow-rpcs=
+ ~~~
-The lines returned are in the format proper for the agent's `/etc/qemu/qemu-ga.conf` configuration file.
+ The lines returned have the same format used in the agent's `/etc/qemu/qemu-ga.conf` configuration file.
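+
+Should you ever need to override any of those defaults, a minimal sketch of a `/etc/qemu/qemu-ga.conf` file could look like the one below (the `verbose` key is taken from the defaults listed above; this change is not required in this guide):
+
+~~~properties
+[general]
+# Example override only: make the agent log verbosely.
+verbose=true
+~~~
+
+Remember that, after editing that file, you would have to restart the `qemu-guest-agent` service so the agent picks up the new values.
+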
## Hardening the VM's access
-The user you created in the Debian installation process, which in this guide series is called `mgrsys`, needs its login to be hardened with TFA and a ssh key pair, while also enabling it to use the `sudo` command. This way, that user will become a proper administrative user for your system. On the other hand, after properly setting up that user, you won't really need to use `root` any more. So, in this section you'll also see how to completely disable the `root` login access to the VM.
+The user you created in the Debian installation process, which in this guide is called `mgrsys`, needs its login hardened with TFA and an SSH key pair, while also being enabled to use the `sudo` command. This way, that user will become a proper administrative user for your system. On the other hand, after properly setting up that user, you won't really need to use `root` any more in this VM, so later in this chapter you will also see how to completely disable the `root` login access to the VM.
-### _Enabling `sudo` to the administrative user_
+### Enabling `sudo` to the administrative user
-1. Log in as `root` in a `noVNC` shell on your VM, then add the `mgrsys` user to the `sudo` group.
+1. Log in as `root` in a `noVNC` shell on your VM, then add the `mgrsys` user to the `sudo` group:
- ~~~bash
+ ~~~sh
$ adduser mgrsys sudo
~~~
- > **BEWARE!**
- > You won't be able to execute the `adduser` command from a SSH shell with `mgrsys`, even after becoming `root` with `su` as you've just done in the previous section. You must be in a **noVNC** shell or it won't work.
+ > [!WARNING]
+ > **You will not be able to execute the `adduser` command from an SSH shell with `mgrsys`**\
+ > It does not matter if you become `root` with `su` as you've just done in the previous section. You must be `root` within a **noVNC** shell or the `adduser` command won't work.
-2. Now login as the `mgrsys` user and test that `sudo` is working with a harmless command like `ls`.
+2. Now login as the `mgrsys` user with a regular SSH shell, then test that `sudo` is working with a harmless command like `ls`:
- ~~~bash
+ ~~~sh
$ sudo ls -al
~~~
- The command will output a warning about using `sudo`, then ask you the user's password.
-
- ~~~bash
- We trust you have received the usual lecture from the local System
- Administrator. It usually boils down to these three things:
-
- #1) Respect the privacy of others.
- #2) Think before you type.
- #3) With great power comes great responsibility.
+ The command will just ask you for your `mgrsys` user's password.
+
+ ~~~sh
[sudo] password for mgrsys:
~~~
Type the password and `ls` will be executed.
-### _Assigning a TOTP code to the administrative user_
+### Assigning a TOTP code to the administrative user
-1. As your administrative user, create a TOTP token with the `google-authenticator` program as follows.
+Having a TOTP code hardens the login of your administrative user:
- ~~~bash
- $ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q UTF8 -i debiantpl.deimos.cloud -l mgrsys@debiantpl
+1. As your administrative user, create a TOTP token with the `google-authenticator` program as follows:
+
+ ~~~sh
+ $ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q UTF8 -i debiantpl.homelab.cloud -l mgrsys@debiantpl
~~~
- > **BEWARE!**
- Remember to replace the values at the `-i` (issuer) and `-l` (label) options with your own!
+ > [!IMPORTANT]
+ > Remember to replace the values at the `-i` (issuer) and `-l` (label) options with your own!
-2. Copy all the codes given by the `google-authenticator` command in a safe location, like a password manager.
+2. Copy all the codes given by the `google-authenticator` command to a safe location, like a password manager.
-> **BEWARE!**
-> The configuration for the TOTP code is saved in the HOME directory of the user, in a `.google_authenticator` file.
+> [!NOTE]
+> The configuration for the TOTP code is saved in the administrative user's `HOME` directory, in a `.google_authenticator` file.
-### _SSH key pair for the administrative user_
+### SSH key pair for the administrative user
-It's much better if you login with a SSH key pair, so let's create one for your administrative user.
+It is much better if you login as your administrative user with an SSH key pair:
-1. Logged in as your administrative user, execute the `ssh-keygen` command.
+1. Logged in as your administrative user, execute the `ssh-keygen` command:
- ~~~bash
- $ ssh-keygen -t rsa -b 4096 -C "mgrsys@debiantpl"
+ ~~~sh
+ $ ssh-keygen -t ed25519 -a 250 -C "mgrsys@debiantpl"
~~~
- > **BEWARE!**
- The `ssh-keygen` command will ask you for a passphrase, but you can leave it empty. Also take into account that you'll use TOTP codes when connecting through ssh so, if you put a passphrase to the ssh private key, you'll have to login using both the TOTP code and the passphrase.
+ > [!NOTE]
+ > **The `ssh-keygen` command will ask you for a passphrase, but you can leave it empty**\
+ > Ideally, you want to set up a passphrase for your key pair. Still, take into account that you will also use TOTP codes when connecting through SSH after finishing this chapter. This means that, when you log in, you will have to enter both the TOTP code and the passphrase, if you specified one. Depending on the SSH client you use and on the time limit set for entering the login credentials, this may be cumbersome.
-2. Authorize the public key of your newly generated pair.
+2. Authorize the public key of your newly generated pair:
- ~~~bash
+ ~~~sh
$ cd .ssh/
$ touch authorized_keys ; chmod 600 authorized_keys
- $ cat id_rsa.pub >> authorized_keys
+ $ cat id_ed25519.pub >> authorized_keys
~~~
-3. Export this key pair and save it in a safe location. Remember that you'll need to generate the `.ppk` file from the private key so you can connect from Windows clients. Check out the [**G901** appendix guide](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md) to see how.
+3. Export this key pair and keep it in a safe location. Remember that you will need to generate the `.ppk` file from the private key so you can connect from Windows clients. Check out the [**G901** appendix chapter](G901%20-%20Appendix%2001%20~%20Connecting%20through%20SSH%20with%20PuTTY.md) to see how.
+
+> [!WARNING]
+> **You won't be able to login as the administrative user with its new SSH key pair yet!**\
+> Since the `publickey` method is still not enabled in the `sshd` service's configuration, the SSH server in your Debian VM will reject your key pair if you attempt to log in with it.
+>
+> Just use the password and the TOTP code for now. In the next [Hardening the `sshd` service](#hardening-the-sshd-service) section you will finally enable the `publickey` method, allowing you to log in remotely with your key pair.
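+
+Once the `publickey` method is enabled, a login with the new key pair from a Linux client could look like the sketch below, where `<vm-ip>` is a placeholder for your VM's address:
+
+~~~sh
+# Log in as mgrsys using the private key generated earlier.
+$ ssh -i ~/.ssh/id_ed25519 mgrsys@<vm-ip>
+~~~
+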
## Hardening the `sshd` service
-As you did in the [**G007**](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md) and [**G009** guides](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md), you'll need to change two configuration files.
+As you did in the [**G007**](G007%20-%20Host%20hardening%2001%20~%20TFA%20authentication.md) and [**G009** chapters](G009%20-%20Host%20hardening%2003%20~%20SSH%20key%20pairs%20and%20sshd%20service%20configuration.md), you need to change two configuration files:
- `/etc/pam.d/sshd`
- `/etc/ssh/sshd_config`
-Also, you'll need to create a new `pam` group for grouping the users that will connect through ssh to the VM, like the administrative user `mgrsys`.
+Also, you'll need to create a new `pam` group for grouping the users that will connect through SSH to the VM, like the administrative user `mgrsys`.
+
+### Create group for SSH users
-### _Create group for ssh users_
+In a shell opened as your `mgrsys` user:
-1. Create a new group called `sshauth`.
+1. Create a new group called `sshauth`:
- ~~~bash
+ ~~~sh
$ sudo addgroup sshauth
~~~
-2. Add the administrative user to this group.
+2. Add the administrative user to this group:
- ~~~bash
+ ~~~sh
$ sudo adduser mgrsys sshauth
~~~
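+
+If you want to confirm the result of both commands, `getent` can show you the new group and its members. The GID in the example output below is just illustrative:
+
+~~~sh
+$ getent group sshauth
+sshauth:x:1001:mgrsys
+~~~
+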
-### _Backup of `sshd` configuration files_
+### Backup of `sshd` configuration files
-Open a terminal as your `mgrsys` user and make a backup of the current `sshd` related configuration.
+Open a terminal as your `mgrsys` user and make a backup of the current `sshd` related configuration:
-~~~bash
+~~~sh
$ cd /etc/pam.d ; sudo cp sshd sshd.orig
$ cd /etc/ssh ; sudo cp sshd_config sshd_config.orig
~~~
-### _Changes to the `/etc/pam.d/sshd` file_
+### Changes to the `/etc/pam.d/sshd` file
-1. Comment out the `@include common-auth` line found at at its top.
+1. Comment out the first `@include common-auth` line found at its top:
- ~~~bash
+ ~~~sh
# Standard Un*x authentication.
#@include common-auth
~~~
-2. Append the following lines.
+2. Append the following lines:
- ~~~bash
+ ~~~sh
# Enforcing TFA with Google Authenticator tokens
auth required pam_google_authenticator.so
~~~
-### _Changes to the `/etc/ssh/sshd_config` file_
+### Changes to the `/etc/ssh/sshd_config` file
-1. Edit the `/etc/ssh/sshd_config` file and replace **all** its content with the following one.
-
- ~~~bash
- # $OpenBSD: sshd_config,v 1.103 2018/04/09 20:41:22 tj Exp $
+1. Edit the `/etc/ssh/sshd_config` file and **replace all its content** with this:
+ ~~~sh
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
- # This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin
+ # This sshd was compiled with PATH=/usr/local/bin:/usr/bin:/bin:/usr/games
# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented. Uncommented options override the
# default value.
+ Include /etc/ssh/sshd_config.d/*.conf
+
#Port 22
AddressFamily inet
#ListenAddress 0.0.0.0
@@ -294,13 +349,15 @@ $ cd /etc/ssh ; sudo cp sshd_config sshd_config.orig
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes
- # To disable tunneled clear text passwords, change to no here!
+ # To disable tunneled clear text passwords, change to "no" here!
PasswordAuthentication no
#PermitEmptyPasswords no
- # Change to yes to enable challenge-response passwords (beware issues with
- # some PAM modules and threads)
- ChallengeResponseAuthentication yes
+ # Change to "yes" to enable keyboard-interactive authentication. Depending on
+ # the system's configuration, this may involve passwords, challenge-response,
+ # one-time passwords or some combination of these and other methods.
+ # Beware issues with some PAM modules and threads.
+ KbdInteractiveAuthentication yes
# Kerberos options
#KerberosAuthentication no
@@ -316,13 +373,13 @@ $ cd /etc/ssh ; sudo cp sshd_config sshd_config.orig
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
- # be allowed through the ChallengeResponseAuthentication and
+ # be allowed through the KbdInteractiveAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
- # PAM authentication via ChallengeResponseAuthentication may bypass
- # the setting of "PermitRootLogin without-password".
+ # PAM authentication via KbdInteractiveAuthentication may bypass
+ # the setting of "PermitRootLogin prohibit-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
- # and ChallengeResponseAuthentication to 'no'.
+ # and KbdInteractiveAuthentication to 'no'.
UsePAM yes
#AllowAgentForwarding yes
@@ -340,7 +397,7 @@ $ cd /etc/ssh ; sudo cp sshd_config sshd_config.orig
#ClientAliveInterval 0
#ClientAliveCountMax 3
#UseDNS no
- #PidFile /var/run/sshd.pid
+ #PidFile /run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
@@ -349,8 +406,8 @@ $ cd /etc/ssh ; sudo cp sshd_config sshd_config.orig
# no default banner path
#Banner none
- # Allow client to pass locale environment variables
- AcceptEnv LANG LC_*
+ # Allow client to pass locale and color environment variables
+ AcceptEnv LANG LC_* COLORTERM NO_COLOR
# override default of no subsystems
Subsystem sftp /usr/lib/openssh/sftp-server
@@ -373,45 +430,45 @@ $ cd /etc/ssh ; sudo cp sshd_config sshd_config.orig
AuthenticationMethods none
~~~
-2. Save the file and restart the `sshd` service.
+2. Save the file and restart the `sshd` service:
- ~~~bash
+ ~~~sh
$ sudo systemctl restart sshd.service
~~~
-3. Try to login opening a new non-shared ssh session with your `mgrsys` user. Also, you can try to log in as `root` and verify that it's not possible to connect with that user at all through ssh.
+3. Try to log in by opening a new non-shared SSH session with your `mgrsys` user. Also, try to log in as `root` and verify that it is not possible to connect with that user at all through SSH.
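+
+If the `sshd` service refuses to restart or rejects your connections after these changes, a quick sanity check is asking it to validate its own configuration, which prints nothing when the syntax is correct:
+
+~~~sh
+$ sudo sshd -t
+~~~
+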
## Configuring Fail2Ban for SSH connections
-Fail2Ban is already enabled for SSH connections in the VM, but it needs a more refined configuration, as you did back in the [**G010** guide](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md).
+Fail2Ban is already enabled for SSH connections in the VM, but it needs a more refined configuration, as you did back in the [chapter **G010**](G010%20-%20Host%20hardening%2004%20~%20Enabling%20Fail2Ban.md).
-1. As `mgrsys`, `cd` to `/etc/fail2ban/jail.d` and create an empty file called `01_sshd.conf`.
+1. As `mgrsys`, `cd` to `/etc/fail2ban/jail.d` and create an empty file called `01_sshd.conf`:
- ~~~bash
+ ~~~sh
$ cd /etc/fail2ban/jail.d ; sudo touch 01_sshd.conf
~~~
-2. Edit the `01_sshd.conf` file by inserting the configuration lines below.
+2. Edit the `01_sshd.conf` file by inserting the configuration lines below:
- ~~~bash
+ ~~~sh
[sshd]
enabled = true
port = 22
maxretry = 3
~~~
- > **BEWARE!**
- > Remember to set the `maxretry` parameter the same as the `MaxAuthTries` in the `sshd` configuration, so they correlate.
+ > [!IMPORTANT]
+ > Remember to set the `maxretry` parameter with the same value as the `MaxAuthTries` in the `sshd` configuration!
-3. Save the changes and restart the fail2ban service.
+3. Save the changes and restart the fail2ban service:
- ~~~bash
+ ~~~sh
$ sudo systemctl restart fail2ban.service
~~~
-4. Check the current status of the fail2ban service with the `fail2ban-client` command.
+4. Check the current status of the fail2ban service with the `fail2ban-client` command:
- ~~~bash
+ ~~~sh
$ sudo fail2ban-client status
Status
|- Number of jail: 1
@@ -420,63 +477,64 @@ Fail2Ban is already enabled for SSH connections in the VM, but it needs a more r
## Disabling the `root` user login
-Now that your VM has an administrative `sudo` user like `mgrsys` and a hardened ssh service, you can disable the `root` user login altogether.
+Now that your Debian VM has an administrative `sudo` user like `mgrsys` and a hardened SSH service, you can disable the `root` user login altogether:
-1. As your administrative user, `cd` to `/etc` and check the `passwd` files present there.
+1. As your administrative user, `cd` to `/etc` and check the `passwd` files present there:
- ~~~bash
+ ~~~sh
$ cd /etc/
$ ls -al pass*
- -rw-r--r-- 1 root root 1452 Apr 21 13:47 passwd
- -rw-r--r-- 1 root root 1452 Apr 21 13:47 passwd-
+ -rw-r--r-- 1 root root 1285 Sep 4 11:52 passwd
+ -rw-r--r-- 1 root root 1239 Sep 3 14:00 passwd-
~~~
The `passwd-` is a backup made by some commands (like `adduser`) that modify the `passwd` file. Other files like `group`, `gshadow`, `shadow` and `subgid` also have backups of this kind.
-2. Edit the `passwd` file, by only changing the `root` line to make it look as shown below.
+2. Edit the `passwd` file, by only changing the `root` line to make it look as shown below:
- ~~~bash
+ ~~~sh
root:x:0:0:root:/root:/usr/sbin/nologin
~~~
- With this setup, any program that requires a shell for login won't be able to do so with the `root` user.
+ With this setup, any program that requires a shell for login will not be able to do so with the `root` user.
-3. Then, lock the `root` password.
+3. Then, lock the `root` password:
- ~~~bash
+ ~~~sh
$ sudo passwd -l root
~~~
- A user with the password locked can't login: the `passwd -l` command has corrupted, by putting a `!` character, the `root` password hash stored in the `/etc/shadow` file.
+ A user with a locked password cannot log in at all. The `passwd -l` command invalidates the `root` password by prepending a `!` character to its hash stored in the `/etc/shadow` file.
- ~~~bash
- root:!$7$HVw1KYN.qAC.lOMC$zb3vRm1oqqdR.gITdV.Lce9XuTjkv7CZ2z4R7diVsduplK.cAGeByZc1Gk3wfhQA6pzuzls3VT9/GhcjehiX70:18739:0:99999:7:::
+ ~~~sh
+ root:!$7$HVw1KYN.qAC.lOMC$zb3vRm1oqqdR.gITdV.Lce9XuTjkv7CZ2z4R7diVsduplK.cAGeByZc1Gk3wfhQA6pzuzls3VT9/GhcjehiX70:20336:0:99999:7:::
~~~
-To check that you cannot login with the `root` user, open a noVNC terminal on the VM from the web console and try to login as `root`. You'll get a `Login incorrect` message back everytime you try.
+To check that you cannot login with the `root` user, open a noVNC terminal on the VM from the web console and try to login as `root`. You'll get a `Login incorrect` message back every time you try.
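+
+You can also confirm the lock from your administrative user with `passwd -S`, which reports the password status (`L` means locked; the date and figures in the example output below will differ in your system):
+
+~~~sh
+$ sudo passwd -S root
+root L 09/04/2025 0 99999 7 -1
+~~~
+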
## Configuring the VM with `sysctl`
-Next thing to do is to harden and improve the configuration of the VM with `sysctl` settings, as you did in the [**G012**](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md) and [**G015**](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md) guides for your Proxmox VE host. Since your VM is also running a Debian system, the `sysctl` values applied here will be mostly the same as the ones applied to your PVE node.
+Next thing to do is to harden and improve the configuration of the VM with `sysctl` settings, as you did in the chapters [**G012**](G012%20-%20Host%20hardening%2006%20~%20Network%20hardening%20with%20sysctl.md) and [**G015**](G015%20-%20Host%20optimization%2001%20~%20Adjustments%20through%20sysctl.md) for your Proxmox VE host. Since your VM is also running a Debian system, the `sysctl` values applied here will be mostly the same as the ones applied to your PVE node.
-> **BEWARE!**
-> This `sysctl` configuration is kind of generic but oriented to support virtualization and containers, as the Proxmox VE platform does. Still, later in this guide series, you'll have to change some of this settings in the VMs you'll use as nodes of your Kubernetes cluster.
+> [!NOTE]
+> **This `sysctl` configuration is kind of generic but oriented to support virtualization and containers, as the Proxmox VE platform does**\
+> In later chapters of this guide, you will have to change some of these settings in the VMs you will use as nodes of your Kubernetes cluster.
So, as `mgrsys`, `cd` to `/etc/sysctl.d/` and apply the configuration files detailed in the following subsections.
-~~~bash
+~~~sh
$ cd /etc/sysctl.d
~~~
-### _TCP/IP stack hardening_
+### TCP/IP stack hardening
-1. Create a new empty file called `80_tcp_hardening.conf`.
+1. Create a new empty file called `80_tcp_hardening.conf`:
- ~~~bash
+ ~~~sh
$ sudo touch 80_tcp_hardening.conf
~~~
-2. Edit `80_tcp_hardening.conf`, adding the following content to it.
+2. Edit `80_tcp_hardening.conf`, adding the following content to it:
~~~properties
## TCP/IP stack hardening
@@ -494,22 +552,18 @@ $ cd /etc/sysctl.d
net.ipv4.tcp_fin_timeout = 10
# IP loose spoofing protection or source route verification.
+ # Complements the rule set in /usr/lib/sysctl.d/pve-firewall.conf for all interfaces.
# Set to "loose" (2) to avoid unexpected networking problems in usual scenarios.
- net.ipv4.conf.all.rp_filter = 2
net.ipv4.conf.default.rp_filter = 2
# Ignore ICMP echo requests, or pings.
- # Commented by default since it might be needed to do pings to this host.
+ # Commented by default since Proxmox VE or any other monitoring tool might
+ # need to do pings to this host.
# Uncomment only if you're sure that your system won't need to respond to pings.
# net.ipv4.icmp_echo_ignore_all = 1
# net.ipv6.icmp.echo_ignore_all = 1
- # Protect against tcp time-wait assassination hazards,
- # drop RST packets for sockets in the time-wait state.
- net.ipv4.tcp_rfc1337 = 1
-
# Disable source packet routing; this system is not a router.
- net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.accept_source_route = 0
# Ignore send redirects; this system is not a router.
@@ -521,8 +575,10 @@ $ cd /etc/sysctl.d
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.default.secure_redirects = 0
- net.ipv6.conf.all.accept_redirects = 0
- net.ipv6.conf.default.accept_redirects = 0
+
+ # Protect against tcp time-wait assassination hazards,
+ # drop RST packets for sockets in the time-wait state.
+ net.ipv4.tcp_rfc1337 = 1
# Only retry creating TCP connections twice.
# Minimize the time it takes for a connection attempt to fail.
@@ -544,21 +600,21 @@ $ cd /etc/sysctl.d
# net.ipv4.conf.default.log_martians = 1
~~~
-3. Save the `80_tcp_hardening.conf` file and apply it in your system.
+3. Save the `80_tcp_hardening.conf` file and apply it in your system:
- ~~~bash
+ ~~~sh
$ sudo sysctl -p 80_tcp_hardening.conf
~~~
-### _Network optimizations_
+### Network optimizations
-1. Create a new empty file called `85_network_optimizations.conf`.
+1. Create a new empty file called `85_network_optimizations.conf`:
- ~~~bash
+ ~~~sh
$ sudo touch 85_network_optimizations.conf
~~~
-2. Edit the `85_network_optimizations.conf` file, adding the following content to it.
+2. Edit the `85_network_optimizations.conf` file, adding the following content to it:
~~~properties
## NETWORK optimizations
@@ -693,21 +749,21 @@ $ cd /etc/sysctl.d
net.unix.max_dgram_qlen = 1024
~~~
-3. Save the `85_network_optimizations.conf` file and apply the changes.
+3. Save the `85_network_optimizations.conf` file and apply the changes:
- ~~~bash
+ ~~~sh
$ sudo sysctl -p 85_network_optimizations.conf
~~~
-### _Memory optimizations_
+### Memory optimizations
-1. Create a new empty file called `85_memory_optimizations.conf`.
+1. Create a new empty file called `85_memory_optimizations.conf`:
- ~~~bash
+ ~~~sh
$ sudo touch 85_memory_optimizations.conf
~~~
-2. Edit the `85_memory_optimizations.conf` file, adding the following content to it.
+2. Edit the `85_memory_optimizations.conf` file, adding the following content to it:
~~~properties
## Memory optimizations
@@ -773,21 +829,21 @@ $ cd /etc/sysctl.d
vm.max_map_count = 262144
~~~
-3. Save the `85_memory_optimizations.conf` file and apply the changes.
+3. Save the `85_memory_optimizations.conf` file and apply the changes:
- ~~~bash
+ ~~~sh
$ sudo sysctl -p 85_memory_optimizations.conf
~~~
-### _Kernel optimizations_
+### Kernel optimizations
-1. Create a new empty file called `85_kernel_optimizations.conf`.
+1. Create a new empty file called `85_kernel_optimizations.conf`:
- ~~~bash
+ ~~~sh
$ sudo touch 85_kernel_optimizations.conf
~~~
-2. Edit the `85_kernel_optimizations.conf` file, adding the following content to it.
+2. Edit the `85_kernel_optimizations.conf` file, adding the following content to it:
~~~properties
## Kernel optimizations
@@ -800,12 +856,6 @@ $ cd /etc/sysctl.d
# Process Scheduler related settings
#
- # Determines how long a migrated process has to be running before the kernel
- # will consider migrating it again to another core. So, a higher value makes
- # the kernel take longer before migrating again an already migrated process.
- # Value in MILLISECONDS.
- kernel.sched_migration_cost_ns = 5000000
- #
# This setting groups tasks by TTY, to improve perceived responsiveness on an
# interactive system. On a server with a long running forking daemon, this will
# tend to keep child processes from migrating away as soon as they should.
@@ -816,11 +866,11 @@ $ cd /etc/sysctl.d
# than the number of containers
kernel.keys.maxkeys = 2000
- # increase kernel hardcoded defaults by a factor of 512 to allow running more
+ # Increase kernel hardcoded defaults by a factor of 512 to allow running more
# than a very limited count of inotfiy hungry CTs (i.e., those with newer
# systemd >= 240). This can be done as the memory used by the queued events and
# watches is accounted to the respective memory CGroup.
-
+ #
# 2^23
fs.inotify.max_queued_events = 8388608
# 2^16
@@ -829,9 +879,9 @@ $ cd /etc/sysctl.d
fs.inotify.max_user_watches = 4194304
~~~
-3. Save the `85_kernel_optimizations.conf` file and apply the changes.
+3. Save the `85_kernel_optimizations.conf` file and apply the changes:
- ~~~bash
+ ~~~sh
$ sudo sysctl -p 85_kernel_optimizations.conf
~~~
@@ -839,54 +889,54 @@ $ cd /etc/sysctl.d
Although you've applied the changes with the `sysctl -p` command, restart the VM.
-~~~bash
+~~~sh
$ sudo reboot
~~~
-Then, login as `mgrsys` and check the log files (such as `syslog`) under the `/var/log` directory to look for possible errors or warnings related to your changes.
+Then, login as `mgrsys` and check the system's journal (`journalctl` command) to look for possible errors or warnings related to your changes.
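+
+For instance, a quick way of listing only the warnings and errors logged since the last boot could be the following:
+
+~~~sh
+# Show journal entries of priority "warning" or higher for the current boot.
+$ sudo journalctl -b -p warning
+~~~
+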
## Disabling transparent hugepages on the VM
-Back in the [**G016** guide](G016%20-%20Host%20optimization%2002%20~%20Disabling%20transparent%20hugepages.md), you disabled the transparent hugepages in your Proxmox VE host. Now you'll do the same in this VM.
+Back in the [**G016** chapter](G016%20-%20Host%20optimization%2002%20~%20Disabling%20the%20transparent%20hugepages.md), you disabled the transparent hugepages in your Proxmox VE host. Now you'll do the same in this VM.
-1. Check the current status of the transparent hugepages.
+1. Check the current status of the transparent hugepages:
- ~~~bash
+ ~~~sh
$ cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never
~~~
It is set as `always`, which means it is probably already in use. Check its current usage then:
- ~~~bash
+ ~~~sh
$ grep AnonHuge /proc/meminfo
- AnonHugePages: 4096 kB
+ AnonHugePages: 6144 kB
~~~
- In this case, 4 MiB are currently in use for transparent hugepages.
+ In this case, 6 MiB are currently in use for transparent hugepages.
-2. To switch the `/sys/kernel/mm/transparent_hugepage/enabled` value to `never`, first `cd` to `/etc/default/` and make a backup of the original `grub` file.
+2. To switch the `/sys/kernel/mm/transparent_hugepage/enabled` value to `never`, first `cd` to `/etc/default/` and make a backup of the original `grub` file:
- ~~~bash
+ ~~~sh
$ cd /etc/default/ ; sudo cp grub grub.orig
~~~
-3. Edit the `grub` file, modifying the `GRUB_CMDLINE_LINUX=""` line as follows.
+3. Edit the `grub` file, modifying the `GRUB_CMDLINE_LINUX=""` line as follows:
~~~properties
GRUB_CMDLINE_LINUX="transparent_hugepage=never"
~~~
-4. Update the grub and reboot the system.
+4. Update GRUB and reboot the system:
- ~~~bash
+ ~~~sh
$ sudo update-grub
$ sudo reboot
~~~
-5. After the reboot, check the current status of the transparent hugepages.
+5. After the reboot, check the current status of the transparent hugepages:
- ~~~bash
+ ~~~sh
$ cat /sys/kernel/mm/transparent_hugepage/enabled
always madvise [never]
$ grep AnonHuge /proc/meminfo
@@ -895,11 +945,35 @@ Back in the [**G016** guide](G016%20-%20Host%20optimization%2002%20~%20Disabling
## Regarding the microcode `apt` packages for CPU vulnerabilities
-In the [**G013** guide](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md), you applied a microcode package to mitigate vulnerabilities found within your host's CPU. You could think that you also need to apply such package in the VM, but installing it in a VM is useless, since the hypervisor won't allow the VM to apply such microcode to the real CPU installed in your Proxmox VE host. So, don't worry about CPU microcode packages in VMs.
+In the [chapter **G013**](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%20vulnerabilities.md), you applied a microcode package to mitigate vulnerabilities found within your host's CPU. You could think that you also need to apply such a package in the VM, but installing it in a VM is useless, since the hypervisor won't allow the VM to apply such microcode to the real CPU installed in your Proxmox VE host. In short, never install CPU microcode packages in VMs.
+
+### The Debian installer has installed the microcode package
+
+While monitoring the installation of the Debian VM, I noticed that the installer installed the microcode package. This is unnecessary in a VM, so you should uninstall it as follows:
+
+1. Purge the microcode package with `apt`:
+
+ ~~~sh
+ $ sudo apt -y purge intel-microcode
+ ~~~
+
+ Replace `intel-microcode` with `amd64-microcode` if your microcode package is the one for AMD CPUs.
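+
+ If you are not sure which microcode package, if any, ended up installed in your VM, you can list it first:
+
+ ~~~sh
+ $ dpkg -l | grep microcode
+ ~~~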
+
+2. To remove any packages related to the purged microcode one that are no longer required in your Debian VM, use `apt` again:
+
+ ~~~sh
+ $ sudo apt -y autoremove
+ ~~~
+
+3. Reboot the VM:
+
+ ~~~sh
+ $ sudo reboot
+ ~~~
## Relevant system paths
-### _Directories on Debian VM_
+### Directories on Debian VM
- `/dev`
- `/etc`
@@ -914,7 +988,7 @@ In the [**G013** guide](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%
- `$HOME`
- `$HOME/.ssh`
-### _Files on Debian VM_
+### Files on Debian VM
- `/etc/apt/sources.list.d/debian-nonfree.list`
- `/etc/default/grub`
@@ -936,28 +1010,27 @@ In the [**G013** guide](G013%20-%20Host%20hardening%2007%20~%20Mitigating%20CPU%
- `/usr/sbin/qemu-ga`
- `$HOME/.google_authenticator`
- `$HOME/.ssh/authorized_keys`
-- `$HOME/.ssh/id_rsa`
-- `$HOME/.ssh/id_rsa.pub`
+- `$HOME/.ssh/id_ed25519`
+- `$HOME/.ssh/id_ed25519.pub`
## References
-### _QEMU guest agent_
+### [QEMU](https://www.qemu.org/)
-- [Proxmox wiki ~ Qemu-guest-agent](https://pve.proxmox.com/wiki/Qemu-guest-agent)
-- [QEMU wiki ~ Features/GuestAgent](https://wiki.qemu.org/Features/GuestAgent)
-- [QEMU](https://www.qemu.org/)
+- [QEMU. Wiki. Features/GuestAgent](https://wiki.qemu.org/Features/GuestAgent)
+- [Proxmox VE. Wiki. Qemu-guest-agent](https://pve.proxmox.com/wiki/Qemu-guest-agent)
-### _About `sudo`_
+### About `sudo`
- [Debian superuser rights (sudo, visudo)](https://serverspace.us/support/help/debian-superuser-rights-sudo-visudo/)
-### _Disabling `root` login_
+### Disabling `root` login
- [4 Ways to Disable Root Account in Linux](https://www.tecmint.com/disable-root-login-in-linux/)
- [Should I disable the root account on my Debian PC for security?](https://unix.stackexchange.com/a/383309)
- [Who creates /etc/{group,gshadow,passwd,shadow}-?](https://unix.stackexchange.com/questions/27717/who-creates-etc-group-gshadow-passwd-shadow)
-### _Microcode packages on VMs_
+### Microcode packages on VMs
- [What if you have a microcode package installed on the VM?](https://www.reddit.com/r/debian/comments/bqk2z0/is_it_necessary_to_install_intelmicrocode_to/eo5zt6i?utm_source=share&utm_medium=web2x&context=3)
diff --git a/G022 - K3s cluster setup 05 ~ Connecting the VM to the NUT server.md b/G022 - K3s cluster setup 05 ~ Connecting the VM to the NUT server.md
index a9cb1cb..3a83ca8 100644
--- a/G022 - K3s cluster setup 05 ~ Connecting the VM to the NUT server.md
+++ b/G022 - K3s cluster setup 05 ~ Connecting the VM to the NUT server.md
@@ -1,20 +1,48 @@
# G022 - K3s cluster setup 05 ~ Connecting the VM to the NUT server
-In the [**G004** guide](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md) you set up a standalone NUT server so your PVE node could monitor the UPS unit it is plugged in. Now that you started creating VMs, it'll be very convenient to make them aware of your UPS unit so, when there's a power cut and the UPS kicks in, you can automate a proper shutdown sequence for your whole server setup. Think that your VMs, in this context, will be like real computers plugged in to the same UPS unit as your Proxmox VE host, so they'll be also directly affected to whatever happens to your PVE host's power supply.
-
-## Reconfiguring the NUT `master` server on your **Proxmox VE host**
-
-First, you need to change the configuration of the NUT `master` server running in your Proxmox VE host, so it can also serve the NUT `slave` clients in any of your VMs.
-
-### _Changing the `nut.conf` file_
-
-1. Open a shell as `mgrsys` on your Proxmox VE host, then `cd` to `/etc/nut` and make a backup of the `nut.conf` file.
-
- ~~~bash
+- [Make your VMs aware of your UPS unit with NUT](#make-your-vms-aware-of-your-ups-unit-with-nut)
+- [Reconfiguring the NUT server on your Proxmox VE host](#reconfiguring-the-nut-server-on-your-proxmox-ve-host)
+ - [Changing the `nut.conf` file](#changing-the-nutconf-file)
+ - [Reconfiguring the UPS driver with the `ups.conf` file](#reconfiguring-the-ups-driver-with-the-upsconf-file)
+ - [Adding access control rules in the `upsd.conf` file](#adding-access-control-rules-in-the-upsdconf-file)
+ - [Adding a NUT client user in the `upsd.users` file](#adding-a-nut-client-user-in-the-upsdusers-file)
+ - [Declaring executable actions related to concrete NUT events](#declaring-executable-actions-related-to-concrete-nut-events)
+ - [Opening the `upsd` port on the Proxmox VE node](#opening-the-upsd-port-on-the-proxmox-ve-node)
+- [Configuring the NUT client on your Debian VM](#configuring-the-nut-client-on-your-debian-vm)
+- [Checking the connection between the VM NUT client and the PVE node NUT server](#checking-the-connection-between-the-vm-nut-client-and-the-pve-node-nut-server)
+- [Testing a Forced ShutDown sequence (`FSD`) with NUT](#testing-a-forced-shutdown-sequence-fsd-with-nut)
+ - [FSD event shutdown sequence](#fsd-event-shutdown-sequence)
+ - [Executing the FSD test](#executing-the-fsd-test)
+- [Relevant system paths](#relevant-system-paths)
+ - [Directories on Debian VM](#directories-on-debian-vm)
+ - [Files on Debian VM](#files-on-debian-vm)
+ - [Directories on Proxmox VE host](#directories-on-proxmox-ve-host)
+ - [Files on Proxmox VE host](#files-on-proxmox-ve-host)
+- [References](#references)
+ - [NUT (Network UPS Tool) configuration](#nut-network-ups-tool-configuration)
+ - [Articles about NUT configuration](#articles-about-nut-configuration)
+ - [Proxmox VE](#proxmox-ve)
+- [Navigation](#navigation)
+
+## Make your VMs aware of your UPS unit with NUT
+
+In the [chapter **G004**](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md) you set up a standalone NUT server so your PVE node could monitor the UPS unit it is plugged into. Now that you have started creating VMs, it will be very convenient for you to make them aware of your UPS unit. When there's a power cut and the UPS kicks in, you can automate a proper shutdown sequence for your whole server setup. Think of your VMs, in this context, as real computers plugged into the same UPS unit as your Proxmox VE host, so they will also be directly affected by whatever happens to your PVE host's power supply.
+
+In short, you have to make your VMs monitor the UPS unit supporting your setup. You can do this by connecting the NUT client you already have installed in your first Debian VM to your Proxmox VE NUT server.
+
+## Reconfiguring the NUT server on your Proxmox VE host
+
+First, you need to change the configuration of the NUT server running in your Proxmox VE host, so it can also serve the NUT client enabled in your VMs.
+
+### Changing the `nut.conf` file
+
+1. Open a shell as `mgrsys` on your Proxmox VE host, then `cd` to `/etc/nut` and make a backup of the `nut.conf` file:
+
+ ~~~sh
$ cd /etc/nut ; sudo cp nut.conf nut.conf.bkp
~~~
-2. Edit the `nut.conf` file and change the value of the `MODE` parameter to `netserver`.
+2. Edit the `nut.conf` file and change the value of the `MODE` parameter to `netserver`:
~~~properties
MODE=netserver
@@ -22,58 +50,62 @@ First, you need to change the configuration of the NUT `master` server running i
3. Save the changes on the `nut.conf` file.
-### _Reconfiguring the UPS driver with the `ups.conf` file_
+### Reconfiguring the UPS driver with the `ups.conf` file
-1. Remain at `/etc/nut`, and make a backup of the `ups.conf` file.
+1. Remain at `/etc/nut`, and make a backup of the `ups.conf` file:
- ~~~bash
+ ~~~sh
$ sudo cp ups.conf ups.conf.bkp
~~~
-2. Edit the `ups.conf` file. At its end, you should have just the configuration block for your UPS unit you configured previously in the [**G004** guide](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#connecting-your-ups-with-your-pve-node-using-nut).
+2. Edit the `ups.conf` file. At its end, you should have just the configuration block for your UPS unit you configured previously in the [chapter **G004**](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md#connecting-your-ups-with-your-pve-node-using-nut):
~~~properties
- [apc]
- driver = usbhid-ups
- port = auto
- desc = "APC Back-UPS ES 700"
+ [eaton]
+ driver = usbhid-ups
+ port = auto
+ desc = "Eaton 3S700D"
~~~
- Let's add to this block a few more parameters, so it looks as the following.
+ Let's add to this block a few more parameters, so it looks like the following:
~~~properties
- [apc]
- driver = usbhid-ups
- port = auto
- desc = "APC Back-UPS ES 700"
- offdelay = 60
- ondelay = 70
- lowbatt = 80
+ [eaton]
+ driver = usbhid-ups
+ port = auto
+ desc = "Eaton 3S700D"
+ offdelay = 60
+ ondelay = 70
+ lowbatt = 80
~~~
Here's a brief explanation of the three new parameters added to the `[upsunit]` configuration.
- - `offdelay` : Value in seconds, default is 20 seconds. Time that passes between the `upsdrvctl` shutdown command and the moment the UPS shuts itself down.
+ - `offdelay`\
+ Value in seconds, default is 20 seconds. Time that passes between the `upsdrvctl` shutdown command and the moment the UPS shuts itself down.
- - `ondelay` : Value in seconds, default is 30 seconds. Time that must pass between the `upsdrvctl` shutdown command and the moment when the UPS will react to the return of wall power and turn on the power to the system.
- > **BEWARE!**
- > This `ondelay` value **must be greater** than the `offdelay` number.
+ - `ondelay`\
+ Value in seconds, default is 30 seconds. Time that must pass between the `upsdrvctl` shutdown command and the moment when the UPS will react to the return of wall power and turn on the power to the system.
- - `lowbatt` : Value in percentage. Percentage of battery charge remaining in the UPS unit that should be considered as "low charge".
+ > [!WARNING]
+ > This `ondelay` value **must be greater** than the `offdelay` number!
- To set up those parameters properly, you must be fully aware of the real capacity of your UPS unit to hold your system running in case of a power cut. In my case, my UPS unit can only hold for just very few minutes before discharging completely, so I'm forced to set very conservative values (in particular in the `lowbatt` parameter). Of course, how long your UPS unit can run, depends a lot on how much (or how fast) energy will be drained from its battery when in use. Also, bear in mind the battery degradation over time (even if it's never used), and how long does your server take to shutdown **gracefully** (including your VMs and services running within them).
+ - `lowbatt`\
+ Value in percentage. Percentage of battery charge remaining in the UPS unit that should be considered as "low charge".
+
+ To set up those parameters properly, you must be fully aware of the real capacity of your UPS unit to hold your system running in case of a power cut. In my case, my UPS unit can only hold for just a few minutes before discharging completely, so I'm forced to set very conservative values (in particular in the `lowbatt` parameter). Of course, how long your UPS unit can run depends on how much (or how fast) energy will be drained from its battery when in use. Also, bear in mind the battery degradation over time (even if it's never used), and **how long your server takes to shut down gracefully** (including your VMs and the services running within them).
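+
+ To get an idea of the margin your unit gives you, you can query its battery-related variables with `upsc`. The variables available depend on your particular UPS unit, and `eaton` is the UPS name used in this guide's examples:
+
+ ~~~sh
+ $ upsc eaton battery.charge
+ $ upsc eaton battery.runtime
+ ~~~
+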
3. Save the changes on the `ups.conf` file.
-### _Adding access control rules in the `upsd.conf` file_
+### Adding access control rules in the `upsd.conf` file
-1. Still at `/etc/nut`, make a backup of the `upsd.conf` file.
+1. Still at `/etc/nut`, make a backup of the `upsd.conf` file:
- ~~~bash
+ ~~~sh
$ sudo cp upsd.conf upsd.conf.bkp
~~~
-2. Edit the `upsd.conf` file, jump to the `LISTEN` lines and modify them as follows.
+2. Edit the `upsd.conf` file, jump to the `LISTEN` lines and modify them as follows:
~~~properties
# LISTEN []
@@ -82,41 +114,41 @@ First, you need to change the configuration of the NUT `master` server running i
# LISTEN ::1 3493
~~~
- By setting `0.0.0.0` as listening address, you're making the `upsd` service listen for traffic from **all** IP v4 sources available in your Proxmox VE host.
+ By setting `0.0.0.0` as the listening address, you're making the `upsd` service listen **on all the IPv4 addresses available in your Proxmox VE host**. This is necessary to allow the NUT server to respond to the NUT clients running in any of your VMs.
3. Save the changes on the `upsd.conf` file.
-### _Adding a NUT `slave` client user in the `upsd.users` file_
+### Adding a NUT client user in the `upsd.users` file
-1. Still at `/etc/nut`, make a backup of the `upsd.users` file.
+1. Still at `/etc/nut`, make a backup of the `upsd.users` file:
- ~~~bash
+ ~~~sh
$ sudo cp upsd.users upsd.users.bkp
~~~
-2. Edit the `upsd.users` file by **appending** the following block.
+2. Edit the `upsd.users` file by appending the following block:
~~~properties
[upsmonclient]
password = s3c4R3_Kly3Nt_p4sSw0rD!
- upsmon slave
+ upsmon secondary
~~~
- Notice the `upsmon slave` line, which indicates that the `upsmonclient` will be the user of `slave` client systems.
+ Notice the `upsmon secondary` line, which indicates that the `upsmonclient` will be the common user for all monitor client systems (the VMs running in your Proxmox VE server).
3. Save the changes on the `upsd.users` file.
-After adding this user, your NUT system will have three different NUT users: one administrator user, one monitor user to be used only locally in the `master` NUT server, and other monitor user only to be used remotely from `slave/client` NUT systems such as the VMs you'll create later on your PVE node.
+After adding this user, your NUT system will have three different NUT users: one administrator user, one monitor user to be used only locally in the `primary` NUT server, and another monitor user to be used only remotely from `secondary` client NUT systems, such as the VMs you'll create later on your PVE node.
-### _Declaring executable actions related to concrete NUT events_
+### Declaring executable actions related to concrete NUT events
-1. Yet remaining at `/etc/nut`, now make a backup of the current `upsmon.conf` file.
+1. Still in the `/etc/nut` directory, make a backup of the current `upsmon.conf` file:
- ~~~bash
+ ~~~sh
$ sudo cp upsmon.conf upsmon.conf.bkp
~~~
-2. Edit the `upsmon.conf` file, adding **below** the customized `SHUTDOWNCMD` parameter a new `NOTIFYCMD` line.
+2. Edit the `upsmon.conf` file, adding a new `NOTIFYCMD` line under the customized `SHUTDOWNCMD` parameter:
~~~properties
SHUTDOWNCMD "logger -t upsmon.conf \"SHUTDOWNCMD calling /sbin/shutdown to shut down system\" ; /sbin/shutdown -h +0"
@@ -125,7 +157,7 @@ After adding this user, your NUT system will have three different NUT users: one
In `NOTIFYCMD`, the configured `upssched` command is a NUT program that provides a rich set of actions to execute in response to events detected by `upsmon`.
-3. Also, modify the customized `NOTIFYFLAG` lines for the `ONLINE`, `ONBATT` and `LOWBATT` events by adding a +EXEC flag.
+3. Also, modify the customized `NOTIFYFLAG` lines for the `ONLINE`, `ONBATT` and `LOWBATT` events by adding a `+EXEC` flag:
~~~properties
NOTIFYFLAG ONLINE SYSLOG+EXEC
@@ -135,16 +167,16 @@ After adding this user, your NUT system will have three different NUT users: one
The `+EXEC` flag makes `upsmon` call the program configured in the `NOTIFYCMD` parameter.
-4. Save the changes on the `upsmon.conf` file.
+4. Save the changes on the `upsmon.conf` file.
-5. Rename the `upssched.conf` file to `upssched.conf.orig`, and create a new empty `upssched.conf` file.
+5. Rename the `upssched.conf` file to `upssched.conf.orig`, and create a new empty `upssched.conf` file:
- ~~~bash
+ ~~~sh
$ sudo mv upssched.conf upssched.conf.orig
$ sudo touch upssched.conf ; sudo chmod 640 upssched.conf
~~~
-6. Edit the `upssched.conf` file and put in it the following content.
+6. Edit the new `upssched.conf` file and put the following content in it:
~~~properties
# upssched.conf
@@ -152,26 +184,34 @@ After adding this user, your NUT system will have three different NUT users: one
PIPEFN /var/run/nut/upssched.pipe
LOCKFN /var/run/nut/upssched.lock
- AT ONLINE apc@localhost EXECUTE online
- AT ONBATT apc@localhost EXECUTE onbatt
- AT LOWBATT apc@localhost EXECUTE lowbatt
+ AT ONLINE eaton@localhost EXECUTE online
+ AT ONBATT eaton@localhost EXECUTE onbatt
+ AT LOWBATT eaton@localhost EXECUTE lowbatt
~~~
- The parameters above mean the following.
+ The parameters in this configuration are explained next:
+
+ - `CMDSCRIPT`\
+ Points to a user script that will be executed by `upssched` in response to the UPS events notified by the `upsmon` NUT monitor service.
+
+ > [!IMPORTANT]
+ > **This script is provided by the system administrator, which is you in this case**\
+ > Do not confuse this script with the `/usr/sbin/upssched` command, which is the NUT program that will call your script. On the other hand, the name `upssched-cmd` is the assumed standard in the NUT community: **do not change it**.
+
+ - `PIPEFN`\
+ Socket file used for communication between `upsmon` and `upssched`.
- - `CMDSCRIPT` : points to a user script that will be executed by `upssched` in response to the UPS events notified by the `upsmon` NUT monitor service.
- > **BEWARE!**
- > This script is provided by the system administrator, you in this case. Don't confuse it with the `/usr/sbin/upssched` command, which is the NUT program that will call your script. On the other hand, the name `upssched-cmd` is the assumed standard in the NUT community, so **don't change it**.
+ > [!IMPORTANT]
+ > The directory containing this file should be accessible only by the NUT software and nothing else.
- - `PIPEFN` : socket file used for communication between `upsmon` and `upssched`.
- > **BEWARE!**
- > The directory containing this file should be accessible only by the NUT software and nothing else.
+ - `LOCKFN`\
+     File required by the `upsmon` NUT daemon to avoid race conditions.
- - `LOCKFN` : file required by the upsmon NUT daemon to avoid race conditions.
- > **BEWARE!**
- > The directory should be the same as for the `PIPEFN` file.
+ > [!IMPORTANT]
+ > The directory should be the same as for the `PIPEFN` file.
- - `AT` lines: declarations for actions to `EXECUTE` with the `CMDSCRIPT` only `AT` the events defined in this configuration file. These declarations follow the pattern below:
+ - `AT` lines\
+ Declarations for actions to `EXECUTE` with the `CMDSCRIPT` only `AT` the events defined in this configuration file. These declarations follow the pattern below:
~~~properties
AT notifytype UPSunit-name command
@@ -181,24 +221,24 @@ After adding this user, your NUT system will have three different NUT users: one
- `notifytype` is the code identifying a NUT-monitored event.
- - `UPSunit-name` is the name of the monitored UPS unit. This argument admits the use of the wildcard `*` to refer to all available UPS units, although it's not recommended; you'll usually prefer to have particular rules for each of those UPS units.
+     - `UPSunit-name` is the name of the monitored UPS unit. This argument admits the wildcard `*` to refer to all available UPS units, although this is not recommended; you will usually prefer specific rules for each UPS unit.
- `command` specifies what to do when the event happens. The `EXECUTE` command is just one of several available.
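+     For instance, if you also wanted to react to the battery-replacement warning (the `REPLBATT` notify type in NUT), you could add one more declaration following the same pattern. The line below is only a hypothetical illustration, not part of this chapter's setup:
+     ~~~properties
+     AT REPLBATT eaton@localhost EXECUTE replbatt
+     ~~~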
-7. Create an empty `/usr/sbin/upssched-cmd` file with the right permissions.
+7. Create an empty `/usr/sbin/upssched-cmd` file with the right permissions:
- ~~~bash
+ ~~~sh
$ sudo touch /usr/sbin/upssched-cmd ; sudo chmod 755 /usr/sbin/upssched-cmd
~~~
-8. Edit the new `upssched-cmd` so it has the following shell script code.
+8. Edit the new `upssched-cmd` so it has the following shell script code:
- ~~~bash
+ ~~~sh
#!/bin/bash -u
# upssched-cmd
logger -i -t upssched-cmd Calling upssched-cmd $1
- UPS="apc"
+ UPS="eaton"
STATUS=$( upsc $UPS ups.status )
CHARGE=$( upsc $UPS battery.charge )
CHMSG="[$STATUS]:$CHARGE%"
@@ -215,199 +255,232 @@ After adding this user, your NUT system will have three different NUT users: one
notify-send-all "$MSG"
~~~
- > **BEWARE!**
- > This script is just an example of what you can do which, in this case, is just some logging and sending messages regarding a particular set of UPS events. Notice how it has one input argument (`$1`), and the options appearing in the `case` block are used in the `AT` declarations defined in the `upssched.conf` file.
+   Do not forget to specify your own UPS unit's name in the `UPS` variable of this script.
-9. Save the `upssched-cmd` file and restart the NUT related services already running in your Proxmox VE host.
+ > [!NOTE]
+ > **This script is just an example of what you can do**\
+   > In this case, it just does some logging and sends messages about a particular set of UPS events. Notice how it takes one input argument (`$1`), and how the options appearing in the `case` block match the `AT` declarations defined in the `upssched.conf` file.
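+   If you want to try the script by hand before `upssched` ever calls it, you can run it directly with one of the event names used in the `AT` declarations. This is just a quick sanity check, assuming the NUT services are already running on the host:
+   ~~~sh
+   $ sudo /usr/sbin/upssched-cmd onbatt
+   ~~~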
- ~~~bash
+9. Save the `upssched-cmd` file and restart the NUT related services already running in your Proxmox VE host:
+
+ ~~~sh
$ sudo systemctl restart nut-server.service nut-monitor.service
~~~
-### _Opening the `upsd` port on the Proxmox VE node_
+### Opening the `upsd` port on the Proxmox VE node
-You need to make the `upsd` port (the NUT standard `3493` in this guide) accessible in your PVE host, so the NUT client monitors in your VMs can connect to it. To achieve this, you'll need to set up proper firewall rules in Proxmox VE, both at the Datacenter and PVE node levels of the PVE web console.
+You need to make the `upsd` port (in this guide, the standard NUT port `3493`) accessible in your PVE host, so the NUT client monitors in your VMs can connect to it. To achieve this, you'll need to set up proper firewall rules in Proxmox VE, both at the Datacenter and PVE node levels of the PVE web console.
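+Before touching the firewall, you can verify on the PVE host itself that `upsd` is really listening on that port. A quick check could be the following (the addresses listed will depend on the `LISTEN` directives in your `upsd.conf`):
+~~~sh
+$ sudo ss -tlnp | grep 3493
+~~~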
-1. First, get the IP of your, at this point, sole Debian VM. You can see it in the VM's `Summary` screen, in the `Status` block.
+1. First, get the IP of your (at this point) sole Debian VM. You can see it in the VM's `Summary` screen, in the `Status` block:
- 
+ 
   This VM only has one external IP, assigned by your router or gateway to the only network card currently enabled in the VM.
-2. Lets give an `Alias` to the VM's IP. Go to `Datacenter > Firewall > Alias` and click on `Add`.
+2. Let's give an `Alias` to the VM's IP. Go to `Datacenter > Firewall > Alias` and click on `Add`:
- 
+ 
-3. You'll see the following form.
+3. You'll get the following form:
- 
+ 
Let's fill it in a meaningful manner:
- - `Name`: careful with this field, not any string will be considered valid here.
- - Use only alphanumerical characters plus the '_' symbol.
- - Use only letters from the english alphabet.
- > **BEWARE!**
- > This behavior is not explained in the Proxmox VE official documentation.
+ - `Name`\
+     Be careful with this field; not just any string is considered valid here.
+
+ - Only use alphanumerical characters plus the `_` symbol.
+ - Only use letters from the English alphabet.
+
+ > [!NOTE]
+ > These particularities are not explained in the Proxmox VE official documentation.
- - `IP/CIDR`: this value can be just a single IP (like `1.2.3.4`) or a network range (as `1.2.3.0/24`). For this case, you'll just input the VM's main IP here (the one assigned to `net0`).
+     To fill this field, it is better to follow a naming pattern like `[vm_hostname]_[vm_network_device_name]`. Then, for the sole VM existing in your PVE system at this point, this value would be `debiantpl_net0`.
- - `Comment`: you can use this field to put more specific info about the alias.
+ - `IP/CIDR`\
+ This value can be just a single IP (like `1.2.3.4`) or a network range (as `1.2.3.0/24`). For this case, you'll just input the VM's main IP here (the one assigned to the VM's `net0` network device).
- 
+ - `Comment`\
+ You can use this field to put more specific info about the alias.
-4. After clicking on `Add`, you'll see how the new alias appears listed.
+ 
- 
+4. After clicking on `Add`, you'll see how your new alias is listed:
-5. Next, create an IP set to hold all the IPs related to the Kubernetes cluster you'll build in a later guide. Go to the `Firewall` at the `Datacenter` level, open the `IPSet` screen and click on `Create`.
+ 
- 
+5. Next, create an IP set to hold all the IPs related to the Kubernetes cluster you will build in a later chapter. Go to the `Firewall` at the `Datacenter` level, open the `IPSet` screen and click on `Create`:
-6. A simple form will raise.
+ 
- 
+6. A simple form will appear:
- Let's give the new IP set a meaningful `Name` such as `k3s_nodes_net0_ips`.
+ 
- 
+ Let's give the new IP set a meaningful `Name` such as `k3s_nodes_net0_ips`:
+
+ 
After filling the form, just click on `OK` to create the new IP set.
-7. The `IPSet` screen will now show the new IP set on its left side. Select the new `k3s_nodes_net0_ips` IP set, then click on the now enabled `Add` button over the right side.
+7. The `IPSet` screen will now show the new IP set on its left side. Select the new `k3s_nodes_net0_ips` IP set, then click on the now enabled `Add` button over the right side:
- 
+ 
-8. You'll meet a form where to input the IP to add to the IP set.
+8. You'll get a form where you can input the IP to add to the IP set:
- 
+ 
This form has three fields.
- - `IP/CIDR`: here you can type an IP, or just choose an aliased IP from the unfolded list.
+ - `IP/CIDR`\
+ Here you can type an IP, or just choose an aliased IP from the unfolded list.
+
+ - `nomatch`\
+     Enabling this option explicitly excludes the IP entered in the `IP/CIDR` field from the set.
- - `nomatch`: enabling this option explicitly excludes from the set the IP put in the `IP/CIDR` field.
+ - `Comment`\
+ Any string you might want to type here, although try to make it meaningful.
- - `Comment`: any string you might want to type here, although try to make it meaningful.
+ Choose your previously aliased IP so the form looks like below:
- Choose your previously aliased IP so the form looks like below.
+ 
- 
+9. Click on `Create` and you'll see your aliased IP added to the set on the `IP/CIDR` list on the right:
-9. Click on `Create` and you'll see your aliased IP added to the set on the `IP/CIDR` list on the right.
+ 
- 
+    Notice in the screenshot how Proxmox VE has prefixed the IP alias with `dc/`. That prefix probably stands for "datacenter" and seems to indicate the scope the alias belongs to (I have not found an explanation for this prefix in the Proxmox VE documentation).
-10. Next step is to create a `Security Group`. A security group is just a way of grouping together firewall rules, making it easier to handle them later. So, go to `Datacenter > Firewall > Security Group` in your PVE web console. Then, click on `Create`.
+10. The next step is to create a `Security Group`. A security group is just a way of grouping firewall rules together, making them easier to handle later. So, go to `Datacenter > Firewall > Security Group` in your PVE web console. Then, click on `Create`:
- 
+ 
-11. A simple form will raise.
+11. A simple form will appear:
- 
+ 
- This group will hold the firewall rules related to the NUT port, so let's give it a `Name` like `nut_port_accept_in`.
+ This group will hold the firewall rules related to the NUT port, so let's give it a meaningful `Name` like `nut_port_accept_in`:
- > **BEWARE!**
- > Careful with the `Name` field here, as it happened with the IP alias name field before, you can only use alphanumerical english characters plus the '_' symbol.
+ > [!WARNING]
+ > **Careful with the `Name` field in this form**\
+ > Like the IP alias `Name` field from before, the `Name` field here can only use alphanumerical English characters plus the `_` symbol.
- 
+ 
-12. Click on the `Create` button, and see how your new security group appears on the left half of the `Security Group` page. Then, select the new group and click on `Add` on the right half.
+12. Click on the `Create` button, and see how your new security group appears on the left half of the `Security Group` page. Then, select the new group and click on `Add` on the right half:
- 
+ 
-13. In the editor that appears, you're going to define a firewall rule to allow access to the NUT `3493` port from the IP set you defined a few steps before.
+13. In the editor that appears, you're going to define a firewall rule to allow access to the NUT `3493` port from the IP set you defined a few steps before:
- 
+ 
- The explanation for each field is the following.
+ Here is the explanation for each field:
- - `Direction`: this only has two options, either the rule is about `IN`coming or `OUT`going connections. Choose `in` to allow incoming connections to the NUT server.
+ - `Direction`\
+     This field only offers two options: the rule applies either to `IN`coming or `OUT`going connections. Choose `in` to allow incoming connections to the NUT server.
- - `Action`: three possibilities here, `ACCEPT` the connection, `DROP` or `REJECT` it. In this case, choose `ACCEPT`.
- > **BEWARE!**
- > The difference between `DROP` and `REJECT` is that `DROP` rejects connections silently, whereas `REJECT` makes the firewall answer back a rejection code. For stopping connection attempts, always use `DROP` unless you really need to make the rejection noticeable.
+ - `Action`\
+     There are three options available here: `ACCEPT`, `DROP` or `REJECT` the connection. In this case, choose `ACCEPT`.
- - `Enable`: as its name implies, ticking this option enables the rule. Leave it off for now.
+ > [!NOTE]
+ > **Difference between the `DROP` and `REJECT` actions**\
+ > The difference between `DROP` and `REJECT` is that `DROP` rejects connections silently, whereas `REJECT` makes the firewall answer back a rejection code. For stopping connection attempts, always use `DROP` unless you really need to make the rejection noticeable.
- - `Macro`: this is a list of services known by Proxmox VE. If you choose one, some fields of this rule editor will be filled automatically. Leave it empty, since it doesn't have an option for NUT.
+ - `Enable`\
+ As its name implies, ticking this option enables the rule. Leave it off for now.
- - `Protocol`: this a list of known net protocols, from which you'll have to choose the right one. For NUT, choose `tcp`.
+ - `Macro`\
+ This is a list of services known by Proxmox VE. If you choose one, some fields of this rule editor will be filled automatically. Leave it empty, since it does not have an option for NUT.
- - `Source`: indicates the IP where the connection comes from. Be mindful that, depending on the value of the `Direction` field, `Source` can refer to an external origin (an INcoming connection) or to your own system (an OUTgoing connection). This field also offers a list of IP sets and IP aliases, so you just have to click on which one you want rather than typing IPs on the field.
+ - `Protocol`\
+     This is a list of known network protocols, from which you'll have to choose the right one. For NUT, choose `tcp`.
- 
+ - `Source`\
+ Indicates the IP where the connection comes from. Be mindful that, depending on the value of the `Direction` field, `Source` can refer to an external origin (an INcoming connection) or to your own system (an OUTgoing connection). This field also offers a list of IP sets and IP aliases, so you just have to click on which one you want rather than typing IPs on the field.
+
+ 
For the NUT rule, choose the IP set you defined before, the `k3s_nodes_net0_ips`. This way, any other IP you put into that IP set will be already covered by this firewall rule.
- - `Source port`: which port on the source can the connection come from. Leave it empty for accepting any port.
+ - `Source port`\
+     The source port the connection may come from. Leave it empty to accept connections from any port.
- - `Destination`: is the IP the incoming or outgoing connection wants to reach. This field is of the same kind as `Source`. For the NUT rule, leave it empty for now (unless you've already defined an alias or IP set for your PVE node's IPs).
+ - `Destination`\
+     The IP the incoming or outgoing connection wants to reach. This field is of the same kind as `Source`. For the NUT rule, leave it empty for now (unless you've already defined an alias or IP set for your PVE node's IPs).
- - `Dest. port`: port in which the connection wants to connect. Specify the NUT port here: `3493` (or the whichever one you have put in your NUT configuration).
+ - `Dest. port`\
+     The port the connection wants to reach. Specify the NUT port here: `3493` (or whichever one you have set in your NUT configuration).
- - `Comment`: any meaningful string you might like to put here.
+ - `Comment`\
+ Any meaningful string you might like to put here.
- - `Log level`: if you want this rule to log the connections that go through it, change the default `nolog` value to any other that suits the severity of the event. For NUT, it shouldn't be more than `info`, usually.
+ - `Log level`\
+     This is the only `Advanced` option available in this form. If you want the rule to log the connections that go through it, change the default `nolog` value to another one that suits the severity of the event. For NUT, it usually shouldn't be more than `info`.
- 
+ 
-14. Click on `Add`, and see the updated `Rules` list of your Security Group on the right side of the page.
+14. Click on `Add`, and see the updated `Rules` list of your Security Group on the right side of the page:
- 
+ 
Notice that the rule is NOT active, since the checkbox on the `On` column is NOT ticked. Leave it that way for now.
-15. Go to your PVE node, and browse to the now ruleless `Firewall` section and click on `Insert: Security Group`.
+15. Go to your PVE node, and browse to the now ruleless `Firewall` section and click on `Insert: Security Group`:
- 
+ 
-16. A form will raise where you'll be able to define a firewall rule that applies a security group.
+16. A form will appear where you'll be able to define a firewall rule that applies a security group:
- 
+ 
- Here's a brief explanation for each field.
+ Here's a brief explanation for each field:
- - `Security Group`: here you must choose a preexisting security group defined at the `Datacenter` level. In this case, you only have the one you've created before: `nut_port_accept_in`.
+ - `Security Group`\
+ Here you must choose a preexisting security group defined at the `Datacenter` level. In this case, you only have the one you've created before, `nut_port_accept_in`.
- - `Interface`: this field **only admits the name** (not IPs or MACs) of an existing node network interface, or just being left empty. In this case you'll need to put the name of the `vmbr0` bridge, since it's the network device through which your Proxmox VE host communicates.
+ - `Interface`\
+ This field **only admits the name** (not IPs or MACs) of an existing node network interface, or just being left empty. In this case you'll need to enter the name of the `vmbr0` bridge, since it's the network device through which your Proxmox VE host communicates.
- - `Enable`: by ticking this on, you enable this rule on the firewall.
+ - `Enable`\
+     Tick this checkbox to enable the rule on the firewall.
- - `Comment`: any string you may want to put here, like a brief explanation of what the rule does.
+ - `Comment`\
+     Any meaningful string you may want to put here, like a brief explanation of what the rule does.
- 
+ 
-17. Click on `Add` and the new rule will appear immediately in the Firewall of your node.
+17. Click on `Add` and the new rule will be listed immediately in your `pve` node's firewall:
- 
+ 
-## Configuring the NUT `slave` client on your Debian VM
+## Configuring the NUT client on your Debian VM
-The previous section has been all about configuring the server side of your NUT setup. Now you'll deal with the client side of it in your Debian VM. Remember that, in your VM, you only installed the `nut-client` package of NUT, since you won't need the server components for a pure client configuration.
+The previous section has been all about configuring the server side of your NUT setup. Now you'll deal with the client side of it in your Debian VM. Remember that, in your VM, you only installed the `nut-client` package of NUT, since you won't need the server components for just monitoring the system's UPS unit.
-1. Open a shell in your Debian VM as `mgrsys`, then `cd` to `/etc/nut` and make a backup of the `nut.conf` file.
+1. Open a shell in your Debian VM as `mgrsys`, then `cd` to `/etc/nut` and make a backup of the `nut.conf` file:
- ~~~bash
+ ~~~sh
$ cd /etc/nut/ ; sudo cp nut.conf nut.conf.orig
~~~
-2. Edit the `nut.conf` file and just change the value of the `MODE` parameter to `netclient`.
+2. Edit the `nut.conf` file and just change the value of the `MODE` parameter to `netclient`:
~~~properties
MODE=netclient
~~~
-3. Save the changes to `nut.conf`, then make a backup of the `upsmon.conf` file.
+3. Save the changes to `nut.conf`, then make a backup of the `upsmon.conf` file:
- ~~~bash
+ ~~~sh
$ sudo cp upsmon.conf upsmon.conf.orig
~~~
-4. Edit `upsmon.conf` as follows.
+4. Edit `upsmon.conf` as follows:
- - Search for and comment out the active lines of the `RBWARNTIME` and `SHUTDOWNCMD` parameters.
+ - Search for and comment out the active lines of the `RBWARNTIME` and `SHUTDOWNCMD` parameters:
~~~properties
# --------------------------------------------------------------------------
@@ -429,18 +502,33 @@ The previous section has been all about configuring the server side of your NUT
#
# upsmon runs this command when the system needs to be brought down.
#
- # This should work just about everywhere ... if it doesn't, well, change it.
+ # This should work just about everywhere ... if it doesn't, well, change it,
+ # perhaps to a more complicated custom script.
+ #
+ # Note that while you experiment with the initial setup and want to test how
+ # your configuration reacts to power state changes and ultimately when power
+ # is reported to go critical, but do not want your system to actually turn
+ # off, consider setting the SHUTDOWNCMD temporarily to do something benign -
+ # such as posting a message with 'logger' or 'wall' or 'mailx'. Do be careful
+ # to plug the UPS back into the wall in a timely fashion.
+ #
+ # For Windows setup use something like:
+ # SHUTDOWNCMD "C:\\WINDOWS\\system32\\shutdown.exe -s -t 0"
+ # If you have command line using space character you have to add double quote to them, like this:
+ # SHUTDOWNCMD "\"C:\\Program Files\\some command.bat\" -first_arg -second_arg"
+ # Or use the old DOS 8.3 file name, like this:
+ # SHUTDOWNCMD "C:\\PROGRA~1\\SOMECO~1.bat -first_arg -second_arg"
#SHUTDOWNCMD "/sbin/shutdown -h +0"
~~~
- - **Append** the following lines to the `upsmon.conf` file.
+ - **Append** the following lines to the `upsmon.conf` file:
~~~properties
# --------------------------------------------------------------------------
# Customized settings
- MONITOR apc@192.168.1.107 1 upsmonclient s3c4R3_Kly3Nt_p4sSw0rD! slave
+ MONITOR eaton@10.1.0.1 1 upsmonclient s3c4R3_Kly3Nt_p4sSw0rD! secondary
SHUTDOWNCMD "logger -t upsmon.conf \"SHUTDOWNCMD calling /sbin/shutdown to shut down system\" ; /sbin/shutdown -h +0"
NOTIFYCMD /usr/sbin/upssched
@@ -469,16 +557,18 @@ The previous section has been all about configuring the server side of your NUT
RBWARNTIME 7200 # 2 hours
~~~
- The configuration above is mostly the same as the one you have in the `upsmon.conf` on your PVE node, save the MONITOR line which defines to what UPS unit to connect and with what user. Notice that in that MONITOR line you should set the user you've defined, previously in this guide, as an unprivileged NUT `slave` user in your PVE host's `upsd.users` file. This is much safer than using the fully-privileged administrator user just for monitoring your UPS unit from a VM. Also see how an IP is being used to refer to the UPS unit to monitor, this IP is the NUT server IP. Also, instead of an IP you could also use the NUT server's `hostname` if your network is able to resolve it to the right IP.
+   The configuration above is mostly the same as the one you have in the `upsmon.conf` on your PVE node, save for the `MONITOR` line, which defines which UPS unit to connect to and with which user. In that `MONITOR` line you must set the client user you defined, previously in this chapter, as an unprivileged NUT secondary user in your PVE host's `upsd.users` file. This is much safer than using the fully-privileged administrator user just for monitoring your UPS unit from a VM.
+
+   Also, see how an IP is used to refer to the UPS unit to monitor; this IP is the NUT server's IP. Instead of an IP, you could also use the NUT server's `hostname`, but only if your network is able to resolve it to the right IP.
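+   For instance, assuming your network resolved the hostname `pve` to the NUT server's address, the same MONITOR line could be written like this (keep the IP form if you are not sure about name resolution):
+   ~~~properties
+   MONITOR eaton@pve 1 upsmonclient s3c4R3_Kly3Nt_p4sSw0rD! secondary
+   ~~~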
-5. Save the `upsmon.conf` file, then rename the `upssched.conf` file to `upssched.conf.orig`, and create a new empty `upssched.conf` file.
+5. Save the `upsmon.conf` file, then rename the `upssched.conf` file to `upssched.conf.orig`, and create a new empty `upssched.conf` file:
- ~~~bash
+ ~~~sh
$ sudo mv upssched.conf upssched.conf.orig
$ sudo touch upssched.conf ; sudo chmod 640 upssched.conf ; sudo chgrp nut upssched.conf
~~~
-6. Edit the `upssched.conf` adding the following lines.
+6. Edit the `upssched.conf` adding the following lines:
~~~properties
# upssched.conf
@@ -486,27 +576,27 @@ The previous section has been all about configuring the server side of your NUT
PIPEFN /var/run/nut/upssched.pipe
LOCKFN /var/run/nut/upssched.lock
- AT ONLINE apc@192.168.1.107 EXECUTE online
- AT ONBATT apc@192.168.1.107 EXECUTE onbatt
- AT LOWBATT apc@192.168.1.107 EXECUTE lowbatt
+ AT ONLINE eaton@10.1.0.1 EXECUTE online
+ AT ONBATT eaton@10.1.0.1 EXECUTE onbatt
+ AT LOWBATT eaton@10.1.0.1 EXECUTE lowbatt
~~~
   Like the previous configuration file, this is almost the same `upssched.conf` file as the one set up in your PVE host. The only difference is that, in the `AT` declarations, where there was a reference to `localhost`, now you must put the NUT server IP or its `hostname`.
-7. Save the `upssched.conf` file. Then create an empty `/usr/sbin/upssched-cmd` file with the right permissions.
+7. Save the `upssched.conf` file. Then create an empty `/usr/sbin/upssched-cmd` file with the right permissions:
- ~~~bash
+ ~~~sh
$ sudo touch /usr/sbin/upssched-cmd ; sudo chmod 755 /usr/sbin/upssched-cmd
~~~
-8. Edit the new `/usr/sbin/upssched-cmd` so it has the following shell script code.
+8. Edit the new `/usr/sbin/upssched-cmd` so it has the following shell script code:
- ~~~bash
+ ~~~sh
#!/bin/bash -u
# upssched-cmd
logger -i -t upssched-cmd Calling upssched-cmd $1
- UPS="apc@192.168.1.107"
+ UPS="eaton@10.1.0.1"
STATUS=$( upsc $UPS ups.status )
CHARGE=$( upsc $UPS battery.charge )
CHMSG="[$STATUS]:$CHARGE%"
@@ -523,93 +613,107 @@ The previous section has been all about configuring the server side of your NUT
notify-send-all "$MSG"
~~~
- This script is almost the same one you already configured in your PVE host. The only thing that changes is the `UPS` variable, which now also needs the IP of the NUT server specified.
+ This script is essentially the same one you already configured in your PVE host. The only thing that changes is the `UPS` variable, which now also needs the IP of the NUT server specified.
-9. Save the `upssched-cmd` file and start the NUT related services already running in your VM.
+9. Save the `upssched-cmd` file and restart the NUT-related services already running in your VM:
- ~~~bash
+ ~~~sh
$ sudo systemctl restart nut-client.service nut-monitor.service
~~~
-## Checking the connection between the VM NUT `slave` client and the PVE node NUT `master` server
+## Checking the connection between the VM NUT client and the PVE node NUT server
-You have everything configured, so it's time you test the NUT connection between your VM and the Proxmox VE node. To do this, execute the `upsc` command on your VM as follows.
+You have everything configured, so it's time you test the NUT connection between your Debian VM and the Proxmox VE node. To do this, execute the `upsc` command on your VM as follows:
-~~~bash
-$ sudo upsc apc@192.168.1.107
+~~~sh
+$ sudo upsc eaton@10.1.0.1
~~~
-And this command should return the following output.
+And this command should return the following output:
-~~~bash
+~~~sh
Error: Connection failure: Connection timed out
~~~
-Yes, there's a problem with the connection. You can also see this error in the `/var/log/syslog` file in your VM, in which you should find a number of lines like the following one.
+Yes, there's a problem with the connection. You can also see this error in repeated log entries written in the journal of your VM. To see those entries, and many others that you cannot see as a regular user, you must open the journal as `root` or, better, use `sudo` while logged in as `mgrsys`:
+
+~~~sh
+$ sudo journalctl
+~~~
+
+This is the only way you will be able to see all the logs being registered in the journal of your VM (and this is also true for your Proxmox VE server). In particular, for this NUT connectivity issue, look for lines like this one:
~~~log
-Nov 19 19:44:27 debiantpl upsmon[593]: UPS [apc@192.168.1.107]: connect failed: Connection failure: Connection timed out
+Sep 05 20:01:46 debiantpl nut-monitor[903]: UPS [eaton@10.1.0.1]: connect failed: Connection failure: Connection timed out
~~~
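+To avoid scrolling through the whole journal, you can also narrow the output down to the NUT monitor unit you restarted earlier, and limit it to recent entries; for example:
+~~~sh
+$ sudo journalctl -u nut-monitor.service --since "15 minutes ago"
+~~~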
-The NUT monitor daemon on your VM tries to connect every few seconds to the NUT server on your PVE node, but something is wrong and the connection times out. So, what's missing? Just a small detail, regarding the security group you created in the firewall at the `Datacenter` level of your Proxmox VE platform. So, go back to the PVE web console, browse to `Datacenter > Firewall > Security Group` and select the group you created there before, the `nut_port_accept_in` one.
+The NUT monitor daemon on your VM tries to connect every few seconds to the NUT server on your PVE node, but something is wrong and the connection times out. What's missing? Just a small detail, regarding the security group you created in the firewall at the `Datacenter` level of your Proxmox VE platform.
-
+Go back to your PVE web console, browse to `Datacenter > Firewall > Security Group` and select the group you created there before, the `nut_port_accept_in` one:
-You only declared one rule in the security group, and I told you to leave it disabled. This way, I've shown you a bit how the relationship between levels within Proxmox VE works regarding firewall rules. Remember that, at your `pve` node tier, you have created and **enabled** a firewall rule that used the security group itself; but only the rules **enabled in the security group** will be also enforced by the firewall. So, to enable access to the NUT port in your PVE host, you must also activate the rule you prepared right for it. To do so, just click on the checkbox the rule has at the `On` column.
+
-
+You only declared one rule in the security group, and I told you to leave it disabled. This way, I've shown you a bit of how the relationship between levels within Proxmox VE works regarding firewall rules. Remember that, at your `pve` node tier, you have created and **enabled** a firewall rule that uses the security group itself; but only the rules **enabled within the security group** will also be enforced by the firewall. So, to enable access to the NUT port in your PVE host, you must also activate the rule you prepared exactly for it. To do so, just click on the checkbox the rule has in the `On` column:
-The rule will be enforced by the firewall right away, so you can already check if now you can communicate with the NUT server from your VM.
+
-~~~bash
-$ sudo upsc apc@192.168.1.107
+The rule will be enforced by the firewall right away, so you can now check whether you can communicate with the NUT server from your VM:
+
+~~~sh
+$ sudo upsc eaton@10.1.0.1
Init SSL without certificate database
battery.charge: 100
battery.charge.low: 80
-battery.charge.warning: 50
-battery.date: not set
+battery.runtime: 3360
+battery.type: PbAc
+device.mfr: EATON
+device.model: Eaton 3S 700
+device.serial: Blank
+device.type: ups
...
-ups.vendorid: 051d
+ups.vendorid: 0463
~~~
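+You can also query a single variable instead of the full listing, the same way the `upssched-cmd` script does internally. For instance, the following should print `OL` (on line) while the UPS runs on mains power:
+~~~sh
+$ sudo upsc eaton@10.1.0.1 ups.status
+~~~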
-Now you should get a proper answer from the NUT server, in this case information about your UPS unit. On the other hand, and maybe a few seconds after you enabled the rule in the firewall, in the `/var/log/syslog` of your VM you should see the following line once.
+Now you should get a proper answer from the NUT server, in this case information about your UPS unit. On the other hand, and maybe a few seconds after you enabled the rule in the firewall, in the journal of your VM you should see the following line once:
~~~log
-Nov 19 19:47:49 debiantpl upsmon[593]: UPS apc@192.168.1.107: Communications (re-)established.
+Sep 05 20:19:26 debiantpl nut-monitor[903]: UPS eaton@10.1.0.1: Communications (re-)established.
~~~
-The NUT monitor in your VM now is properly connected to the NUT server in your PVE node. On the other hand, in the `/var/log/syslog` file of your PVE host you should also find an output like below.
+The NUT monitor in your VM is now properly connected to the NUT server in your PVE node. On the other hand, in the journal (opened with `sudo`) of your PVE host you should also find an output like the one below:
~~~log
-Nov 19 19:47:49 pve upsd[923]: User upsmonclient@192.168.1.10 logged into UPS [apc]
+Sep 05 20:19:26 pve nut-server[1617]: User upsmonclient@10.4.0.1 logged into UPS [eaton]
~~~
-So, the NUT connection on the server side is also verified. Mind you that, on the server side, you won't see the connection failures, since those happened due to the firewall blocking the connection.
+This log verifies the NUT client connection on the server side. Mind that, on the server side, you will not get logs about the earlier connection failures. Those happened because the firewall was blocking all possible incoming connections from NUT clients, so they were only seen by the clients.
+
+As a final note on this matter, browse in your PVE web console to the `Datacenter > Firewall`:
-As a final note on this matter, browse in your PVE web console to the `Datacenter Firewall`.
+
-
+As you can see on this page, there are no visible rules whatsoever. This may look strange, since you created the security group at the `Datacenter` tier; and let's not forget the rules enabled in the `iptables` running underneath, which are not shown here either.
-As you can see, there are no visible rules whatsoever, although you created the security group at the `Datacenter` tier, and let's not forget the rules enabled in the `iptables` running underneath, but those are not show here either. Essentially, from the firewall point of view, the datacenter and node levels are the same logical network "host" zone, although each level have their own firewall configuration page (with different options available, mind you). Just be aware of this particularity of Proxmox VE whenever you have to deal with its firewall.
+The way I understand this is that, from the firewall's point of view, the datacenter and node levels are the same logical network "host" zone, although each level has its own firewall configuration page (with different options available, mind you). Just be aware of this particularity of Proxmox VE whenever you have to deal with its firewall.
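+If you want to peek at what the firewall is actually enforcing underneath the web console, you can query it from a shell on the PVE host. For instance, the following should report the firewall as enabled and running, and show the `iptables` rule holding the NUT port (the exact output format may vary between Proxmox VE versions):
+~~~sh
+$ sudo pve-firewall status
+$ sudo iptables -L -n | grep 3493
+~~~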
## Testing a Forced ShutDown sequence (`FSD`) with NUT
To test if your whole NUT setup works as expected, you can provoke a Forced ShutDown (`FSD`) event with just one NUT command. But first you must understand what's going to happen after provoking the `FSD` event.
-### _FSD event shutdown sequence_
+### FSD event shutdown sequence
-1. The `upsmon` **master** daemon, running in your Proxmox VE host, notices and sets `FSD` (the _forced shutdown_ flag) to tell all **slave** systems that it will soon power down the load.
+1. The `upsmon` `primary` daemon, running in your Proxmox VE host, notices and sets `FSD` (the _forced shutdown_ flag) to tell all `secondary` systems that it will soon power down the load:
-2. The `upsmon` **slave** daemon in your VM sees the `FSD` event and:
+2. The `upsmon` `secondary` daemon in your VM sees the `FSD` event and:
- 1. Generate a `NOTIFY_SHUTDOWN` event.
- 2. Wait `FINALDELAY` seconds, by default 5.
- 3. Call its shutdown command specified by `SHUTDOWNCMD`.
- 4. Disconnect from `upsd`.
+ 1. Generates a `NOTIFY_SHUTDOWN` event.
+ 2. Waits `FINALDELAY` seconds, by default 5.
+ 3. Calls its shutdown command specified by `SHUTDOWNCMD`.
+ 4. Disconnects from `upsd`.
-3. The `upsmon` **master** daemon waits up to `HOSTSYNC` seconds (by default 15) for the **slaves** to disconnect from `upsd`. If any are connected after this time, `upsmon` stops waiting and proceeds with the shutdown process.
+3. The `upsmon` `primary` daemon waits up to `HOSTSYNC` seconds (by default 15) for the `secondary` systems to disconnect from `upsd`. If any are connected after this time, `upsmon` stops waiting and proceeds with the shutdown process.
-4. The `upsmon` **master**:
+4. The `upsmon` `primary`:
1. Generates a `NOTIFY_SHUTDOWN` event.
2. Waits `FINALDELAY` seconds, by default 5.
@@ -626,100 +730,93 @@ To test if your whole NUT setup works as expected, you can provoke a Forced Shut
There are a few takeaways to consider from this sequence.
-- **Timing is crucial**: you need to leave enough time for your **client** (`slaves` for NUT) systems, like your VM, to shutdown properly. At this point, you only have one VM that does nothing but keeping itself running, so it can shutdown in a few seconds. Something else will be when, in later guides, you build a Kubernetes cluster with two quite service-loaded VMs. You'll need to adjust the value of the `HOSTSYNC` parameter (declared in the `/etc/nut/upsmon.conf` file), both in the **master** NUT server and in the **slave** systems, to suit it to the longest period of time required to shutdown safely your VMs, while also keeping in mind the limitations of the battery on your UPS unit. Thus, you'll need to test your whole system to measure how much time does your setup requires to shutdown safely.
+- **Timing is crucial**\
+  You need to leave enough time for your **client** (`secondary` for NUT) systems, like your VM, to shut down properly. At this point, you only have one VM that does nothing but keep itself running, so it can shut down in a few seconds. It will be a different matter when, in later chapters, you build a Kubernetes cluster with two quite service-loaded VMs. You'll need to adjust the value of the `HOSTSYNC` parameter (declared in the `/etc/nut/upsmon.conf` file), both in the `primary` NUT server and in the `secondary` systems, to cover the longest period of time required to shut down your VMs safely, while also keeping in mind the limitations of the battery on your UPS unit. Thus, you'll need to test your whole system to measure how much time your setup requires to shut down safely (see the sketch after this list).
-- **Use of the SHUTDOWNCMD and NOTIFYCMD parameters**: you can prepare shell scripts able to do certain actions, like stopping services in a certain order, and then set them in the `SHUTDOWNCMD` or the `NOTIFYCMD` parameter (both declared in the `/etc/nut/upsmon.conf` file). This is exactly what you've seen previously in this guide, when you created the `/usr/sbin/upssched-cmd` script and set it in the NOTIFYCMD parameter. In fact, you could just improve the `upssched-cmd` script and associate it with more events.
+- **Use of the SHUTDOWNCMD and NOTIFYCMD parameters**\
+  You can prepare shell scripts able to do certain actions, like stopping services in a certain order, and then set them in the `SHUTDOWNCMD` or the `NOTIFYCMD` parameter (both declared in the `/etc/nut/upsmon.conf` file). This is exactly what you've seen previously in this chapter, when you created the `/usr/sbin/upssched-cmd` script and set it in the `NOTIFYCMD` parameter. In fact, you could just improve the `upssched-cmd` script and associate it with more events.
-- **The UPS unit will restart**: with this `FSD` sequence, the last thing the UPS unit will do is restart. Bear this in mind if you have other unrelated devices plugged in the same UPS unit.
+- **The UPS unit will restart**\
+ With this `FSD` sequence, the last thing the UPS unit will do is restart. Bear this in mind if you have other unrelated devices plugged in the same UPS unit.
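+As a minimal sketch of the timing adjustment mentioned in the first point, and assuming your VMs needed up to about a minute to stop cleanly, you could raise the relevant values in `/etc/nut/upsmon.conf` (on both the `primary` and the `secondary` systems), tuning the numbers to what you actually measure:
+~~~properties
+# Give the secondary systems more time to disconnect before the primary powers down the load
+HOSTSYNC 60
+# Seconds to wait after the NOTIFY_SHUTDOWN event before running SHUTDOWNCMD
+FINALDELAY 5
+~~~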
-### _Executing the FSD test_
+### Executing the FSD test
-1. First, as `mgrsys`, open one shell terminal on your Proxmox VE host and another one on your VM, then put them side to side.
+1. First, as `mgrsys`, open a shell terminal on your Proxmox VE host. Then, execute the following `upsmon` command:
-2. On the **Proxmox VE host** shell, execute the following `upsmon` command.
-
- ~~~bash
+ ~~~sh
$ sudo upsmon -c fsd
~~~
-3. Right after you execute this command, pay attention to the **VM** shell. You should immediately see the following output.
-
- ~~~bash
- Broadcast message from nut@debiantpl (somewhere) (Fri Nov 19 20:00:04 2021):
-
- Executing automatic power-fail shutdown
- ~~~
-
-4. On the Proxmox VE shell, you'll get the following output.
-
- ~~~bash
- Network UPS Tools upsmon 2.7.4
-
- Broadcast message from nut@pve (somewhere) (Fri Nov 19 20:00:09 2021):
+   In your terminal, this command only prints the name and version of the `upsmon` service:
- Executing automatic power-fail shutdown
+ ~~~sh
+ Network UPS Tools upsmon 2.8.1
~~~
-5. You'll lose connection to both terminals, first from the VM, then from the Proxmox VE system.
+2. You'll lose connection to the Debian VM first, then to the Proxmox VE system.
-6. A few seconds later, you should see (blinking leds) or hear (internal switching) your UPS unit rebooting.
+3. Your UPS unit may react in some way when it reboots, like blinking its LEDs or doing some internal (audible) switching.
-7. Switch your Proxmox VE server back on, and then also your VM.
+4. Switch your Proxmox VE server back on, and then also your VM.
-8. Like before, connect with a shell terminal to each system. Then, open the `/var/log/syslog` file on both terminals.
+5. Open a shell terminal to each system. Then, open the journal on both terminals:
- ~~~bash
- $ sudo less /var/log/syslog
+ ~~~sh
+ $ sudo journalctl
~~~
-9. On the **Proxmox VE** system terminal, look for lines like the following ones.
+6. **In the Proxmox VE system journal**, look for log lines like these:
~~~log
...
- Nov 19 20:00:01 pve upsmon[932]: Signal 10: User requested FSD
- Nov 19 20:00:01 pve upsd[923]: Client upsmon@127.0.0.1 set FSD on UPS [apc]
+ Sep 05 20:38:13 pve nut-monitor[1623]: Signal 10: User requested FSD
+ Sep 05 20:38:13 pve nut-server[1617]: Client upsmon@127.0.0.1 set FSD on UPS [eaton]
+ Sep 05 20:38:13 pve upsd[1617]: Client upsmon@127.0.0.1 set FSD on UPS [eaton]
...
- Nov 19 20:00:09 pve upsmon[932]: Executing automatic power-fail shutdown
- Nov 19 20:00:09 pve upsmon[932]: Auto logout and shutdown proceeding.
+ Sep 05 20:38:21 pve nut-monitor[1623]: Executing automatic power-fail shutdown
+ Sep 05 20:38:21 pve nut-monitor[1623]: Auto logout and shutdown proceeding.
+ Sep 05 20:38:21 pve nut-monitor[32897]: Network UPS Tools upsmon 2.8.1
...
~~~
- Those lines mark the beginning of the `FSD` sequence in the Proxmox VE host. Some seconds later, you should find this line.
+ Those lines mark the beginning of the `FSD` sequence in the Proxmox VE host. Some seconds later, you should encounter this other line:
- ~~~bash
- Nov 19 20:00:14 pve upsmon.conf: SHUTDOWNCMD calling /sbin/shutdown to shut down system
+   ~~~log
+ Sep 05 20:38:26 pve upsmon.conf[32964]: SHUTDOWNCMD calling /sbin/shutdown to shut down system
~~~
- That line indicates when the shutdown begins in the Proxmox VE server. Below it you'll see the log of VMs and services being stopped, storages being unmounted and so on.
+ That line indicates when the shutdown begins in the Proxmox VE server. Around it you'll also see the logs of VMs and services being stopped, storages being unmounted and so on.
-10. On the **VM** terminal, search for log lines like the ones next.
+7. **In your Debian VM journal**, search for log lines like these:
- ~~~bash
- Nov 19 20:00:04 debiantpl upsmon[593]: UPS apc@192.168.1.107: Forced shutdown in progress.
- Nov 19 20:00:04 debiantpl upsmon[593]: Executing automatic power-fail shutdown
- Nov 19 20:00:04 debiantpl upsmon[593]: Auto logout and shutdown proceeding.
+   ~~~log
+ Sep 05 20:38:16 debiantpl nut-monitor[903]: UPS eaton@10.1.0.1: Forced shutdown in progress.
+ Sep 05 20:38:16 debiantpl nut-monitor[903]: Executing automatic power-fail shutdown
+ Sep 05 20:38:16 debiantpl nut-monitor[1085]: Network UPS Tools upsmon 2.8.1
+ Sep 05 20:38:16 debiantpl nut-monitor[903]: Auto logout and shutdown proceeding.
+ Sep 05 20:38:16 debiantpl nut-monitor[1088]: Network UPS Tools upsmon 2.8.1
~~~
- That's the beginning of the `FSD` sequence on the VM. Not far below it, you should see this other line.
+   That's the beginning of the `FSD` sequence on the VM. Not far below it, you should come across this other line:
- ~~~bash
- Nov 19 20:00:09 debiantpl upsmon.conf: SHUTDOWNCMD calling /sbin/shutdown to shut down system
+   ~~~log
+ Sep 05 20:38:21 debiantpl upsmon.conf[1090]: SHUTDOWNCMD calling /sbin/shutdown to shut down system
~~~
- It's exactly like in the Proxmox VE server, and also indicates that the shutdown process of the VM truly starts at this point.
+ It is exactly like in the Proxmox VE server, and also indicates that the shutdown process of the Debian VM truly starts at this point.
-With this test, you can check out the validity of your NUT configuration and your associated shell scripts. Also, you may consider using this mechanism to shut down your whole system (VMs included) with just one command, but you must be aware that Proxmox VE also comes with capabilities regarding the automatic starting and stopping of its VMs.
+With this test, you can check the validity of your NUT configuration and of your associated shell scripts. You may also consider using this mechanism to shut down your whole system (VMs included) with just one command, although be aware that Proxmox VE also comes with more convenient features regarding the automatic starting and stopping of its VMs.
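+Those automatic start and stop capabilities are configured per VM, in its `Options` tab of the web console. If you ever wanted to set them from the command line instead, a hypothetical sketch for a VM with ID `100` could look like this:
+~~~sh
+$ sudo qm set 100 --startup order=1,up=30,down=60
+~~~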
## Relevant system paths
-### _Directories on Debian VM_
+### Directories on Debian VM
- `/etc/nut`
- `/usr/sbin`
- `/var/log`
- `/var/run/nut`
-### _Files on Debian VM_
+### Files on Debian VM
- `/etc/nut/nut.conf`
- `/etc/nut/nut.conf.orig`
@@ -733,14 +830,14 @@ With this test, you can check out the validity of your NUT configuration and you
- `/var/run/nut/upssched.pipe`
- `/var/run/nut/upssched.lock`
-### _Directories on Proxmox VE host_
+### Directories on Proxmox VE host
- `/etc/nut`
- `/usr/sbin`
- `/var/log`
- `/var/run/nut`
-### _Files on Proxmox VE host_
+### Files on Proxmox VE host
- `/etc/nut/nut.conf`
- `/etc/nut/nut.conf.bkp`
@@ -762,15 +859,18 @@ With this test, you can check out the validity of your NUT configuration and you
## References
-### _NUT configuration_
+### [NUT (Network UPS Tool)](https://networkupstools.org/) configuration
+
+- [NUT User manual](https://networkupstools.org/docs/user-manual.chunked/index.html)
+ - [Shutdown design](https://networkupstools.org/docs/user-manual.chunked/ar01s06.html#Shutdown_design)
+ - [Testing shutdowns](https://networkupstools.org/docs/user-manual.chunked/ar01s06.html#Testing_shutdowns)
+
+- [NUT config examples document on GitHub](https://github.com/networkupstools/ConfigExamples/releases/tag/book-3.0-20230319-nut-2.8.0)
-- [NUT (Network UPS Tool)](https://networkupstools.org/)
-- [NUT User manual (chunked)](https://networkupstools.org/docs/user-manual.chunked/index.html)
-- [NUT User manual (chunked). Shutdown design](https://networkupstools.org/docs/user-manual.chunked/ar01s06.html#Shutdown_design)
-- [NUT User manual (chunked). Testing shutdowns](https://networkupstools.org/docs/user-manual.chunked/ar01s06.html#Testing_shutdowns)
- [NUT documentation and scripts](http://rogerprice.org/NUT/)
-- [NUT config examples document on GitHub](https://github.com/networkupstools/ConfigExamples/releases/tag/book-2.0-20210521-nut-2.7.4)
-- [Monitorización de un SAI con GNU/Debian Linux](http://index-of.co.uk/SISTEMAS-OPERATIVOS/NUT%20Debian%20UPS%20Monitor.pdf) (in spanish)
+
+### Articles about NUT configuration
+
- [Instalar y configurar NUT por SNMP](https://blog.ichasco.com/instalar-y-configurar-nut-por-snmp/) (in spanish)
- [Monitoring a UPS with nut on Debian or Ubuntu Linux](https://blog.shadypixel.com/monitoring-a-ups-with-nut-on-debian-or-ubuntu-linux/)
- [Can't get upsmon service started to monitor (and respond to) remote UPS](https://serverfault.com/questions/865147/cant-get-upsmon-service-started-to-monitor-and-respond-to-remote-ups)
@@ -780,9 +880,10 @@ With this test, you can check out the validity of your NUT configuration and you
- [Configuring NUT for the Eaton 3S UPS on Ubuntu Linux](https://srackham.wordpress.com/2013/02/27/configuring-nut-for-the-eaton-3s-ups-on-ubuntu-linux/)
- [UPS HowTo](https://tldp.org/HOWTO/UPS-HOWTO/)
-### _Proxmox VE firewall_
+### [Proxmox VE](https://pve.proxmox.com/)
-- [Proxmox VE Administrator guide. 13. Proxmox VE Firewall](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pve_firewall)
+- [Proxmox VE Administrator guide](https://pve.proxmox.com/pve-docs/pve-admin-guide.html)
+ - [Proxmox VE Firewall](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pve_firewall)
## Navigation
diff --git a/G023 - K3s cluster setup 06 ~ Debian VM template and backup.md b/G023 - K3s cluster setup 06 ~ Debian VM template and backup.md
index 8fd615e..f55b295 100644
--- a/G023 - K3s cluster setup 06 ~ Debian VM template and backup.md
+++ b/G023 - K3s cluster setup 06 ~ Debian VM template and backup.md
@@ -1,217 +1,285 @@
# G023 - K3s cluster setup 06 ~ Debian VM template and backup
-Now that you have a Debian VM configured, you can turn it into a VM template. This way, you'll be able to create new Debian based VMs much faster just by cloning this template.
+- [Turn your Debian VM into a VM template](#turn-your-debian-vm-into-a-vm-template)
+- [Steps for transforming your Debian VM into a VM template](#steps-for-transforming-your-debian-vm-into-a-vm-template)
+- [VM template's backup](#vm-templates-backup)
+ - [Creating the backup of the VM template](#creating-the-backup-of-the-vm-template)
+ - [Restoring the VM template's backup](#restoring-the-vm-templates-backup)
+ - [Considerations about backups](#considerations-about-backups)
+- [Other considerations regarding VM templates](#other-considerations-regarding-vm-templates)
+- [References](#references)
+ - [Proxmox VE](#proxmox-ve)
+ - [Markdown](#markdown)
+- [Navigation](#navigation)
-## Turning the Debian VM into a VM template
+## Turn your Debian VM into a VM template
-To do this conversion, browse to your Proxmox VE web console and follow the steps below.
+With your first Debian VM configured, you can turn it into a VM template. This way, you will be able to create new Debian VMs much faster just by cloning this template.
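+As a glimpse of what that cloning will look like when the time comes, the `qm` tool can also do it from the command line. The IDs and name below are just made-up placeholders:
+~~~sh
+$ sudo qm clone 100 101 --name newdebianvm --full
+~~~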
-1. First, stop your VM by clicking on the `Shutdown` button.
+## Steps for transforming your Debian VM into a VM template
- 
+To do this conversion, browse into your Proxmox VE web console and follow the steps below:
- You'll have to confirm the `Shutdown` action.
+1. First, you must stop your VM by clicking on its `Shutdown` button:
- 
+ 
- It shouldn't take more than a few seconds for your VM to shut down.
+ You'll have to confirm the `Shutdown` action:
- 
+ 
- The VM's Status will change to `stopped`, and other indicators like CPU and memory usage won't show any kind of activity.
+ It shouldn't take more than a few seconds for your VM to shut down:
-2. Click on the `More` button and choose the `Convert to template` option.
+ 
- 
+ The VM's `Status` will change to `stopped`. Other indicators like CPU and memory usage will fall to 0.
-3. The web console will ask for your confirmation of this action.
+2. Click on the `More` button and choose the `Convert to template` option:
- 
+ 
-4. Just click on `Yes`, and Proxmox VE will turn the VM into a template in a few seconds. When the task is finished, the VM's `Summary` page will refresh automatically. There, you'll notice several changes.
+3. The web console will ask for your confirmation of this action:
- 
+ 
- See that the `Summary` no longer shows the VM's status or the usage statistics. Also notice that some tabs are missing below the `Summary` one: `Console`, `Monitor` and `Snapshot` are not available anymore. Further, the `Start`, `Shutdown` and `Console` buttons that used to be at the VM page's top are not there either. This is because VM templates **cannot be started**, they're just **read-only** models that you can clone to create new VMs. Another minor detail that has changed is the icon the VM has in the tree shown at the web console's left.
+4. Just click on `Yes`, and Proxmox VE will turn the VM into a template in a few seconds. When the task is finished, the VM's `Summary` page will change automatically into this:
-5. It's very advisable to leave a proper description in the `Notes` text block available in the `Summary` view.
+ 
- - Click on the `Notes`' gear icon.
+   See that the `Summary` no longer shows the VM's status or the usage statistics. Also notice that some tabs, such as `Console`, `Monitor` or `Snapshot`, are no longer available under the `Summary` one. Furthermore, the `Start`, `Shutdown` and `Console` buttons that used to be at the top of the VM page are not there either. This is because **VM templates cannot be started**. VM templates are just **read-only models** that you can clone to create new VMs. Another minor detail that has changed is the icon the VM has in the tree shown at the web console's left.
- 
+5. It is better to leave a proper description of the template in the `Notes` text block available in its `Summary` view:
- - You'll get an editor window in which you can type anything you want, and even use **Markdown** syntax.
+ - Click on the `Notes`' gear icon:
- 
+ 
- - For instance, you could type something like the following there.
+ - You will get an editor window where you can type anything you want, and even use **Markdown** syntax:
- > **BEWARE!**
- > The text below is formatted with **Markdown** so, if you use it as a template for your notes, be mindful of, among other things, the double spacing used at the end of each line. Double spacing in Markdown [forces a hard line break](https://spec.commonmark.org/0.30/#hard-line-breaks), equivalent to a `
` tag in html.
+ 
- ~~~markdown
- # Debian VM TEMPLATE
- Template created: 2021-11-19
- OS: **Debian 11 Bullseye**
- Root login disabled: yes
- Sysctl configuration: yes
- Transparent hugepages disabled: yes
- SSH access: yes
- TFA enabled: yes
- QEMU guest agent working: yes
- Fail2Ban working: yes
- NUT (UPS) client working: yes
- Utilities apt packages installed: yes
- ~~~
+ - For instance, you could type something like the following there:
- As you can see, you can use it as a reminder of what's inside your VM template.
+ > [!NOTE]
+ > **The text snippet below is formatted in Markdown**\
+     > If you use it as a template for your notes, be mindful of, among other things, the `\` character used at the end of each line. Double spacing and `\` in Markdown [force a hard line break](https://spec.commonmark.org/0.31.2/#hard-line-breaks), equivalent to a `<br/>` tag in html.
- - When you have the text ready, just click on `OK` and the `Notes` block will be updated in the `Summary` view.
+ ~~~markdown
+ # Debian VM TEMPLATE
+ Template created: 2025-09-06\
+ OS: Debian 13 "trixie"\
+ Root login disabled: yes\
+ Sysctl configuration: yes\
+ Transparent hugepages disabled: yes\
+ SSH access: yes\
+ Key-pair for SSH access: yes\
+ TFA enabled: yes\
+ QEMU guest agent working: yes\
+ Fail2Ban working: yes\
+ NUT (UPS) client working: yes\
+ Utilities apt packages installed: yes
+ ~~~
- 
+ As you can see, you can use it as a reminder of what is inside your VM template.
+
+ - When you have the text ready, just click on `OK` and the `Notes` block will be updated in the `Summary` view:
+
+ 
## VM template's backup
-It's convenient to have a backup of your VM template, just in case anything happens. For VMs and containers, the kind of backup you can do in Proxmox VE is a **vzdump**. These dumps have to be saved in a storage configured for it but, since you already configured a special Proxmox VE directory for that (in the external usb storage drive) in the [**G019** guide](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#setting-up-the-directories), now you just have to worry about doing the backup itself.
+It is convenient to have a backup of your VM template, just in case anything happens. For VMs and containers, the kind of backup you can do in Proxmox VE is a **vzdump**. These dumps have to be saved in a storage configured for it but, since you already configured a specific Proxmox VE directory for that (in the external USB storage drive) in the [chapter **G019**](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#setting-up-the-directories), now you just have to worry about doing the backup itself.
+
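+By the way, the same kind of dump can also be taken from the command line with the `vzdump` tool. A sketch, assuming the template got the (hypothetical) VM ID `100`, and using the storage and options discussed below:
+~~~sh
+$ sudo vzdump 100 --storage hddusb_bkpvzdumps --mode stop --compress zstd
+~~~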
+### Creating the backup of the VM template
+
+1. In the Proxmox VE web console, browse to the `Backup` view of your Debian VM template:
-### _Creating the backup of the VM template_
+ 
-1. In the Proxmox VE web console, browse to the `Backup` view of your VM template.
+   At this point, you'll find this view empty of backups. The two main things you must notice here are the `Backup now` button and the `Storage` unfoldable list on the right. There's only the `hddusb_bkpvzdumps` storage available for VM dumps, which you configured as the sole directory for holding vzdumps, back in the [chapter **G019**](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#setting-up-the-directories). Now you can take advantage of it and, since it's already selected as the storage of choice for VM dumps, just press on the `Backup now` button.
- 
+2. An editor window appears for creating a new backup:
- At this point, you'll find this view empty of backups. The two main things you must notice here are the `Backup now` button and the `Storage` unfoldable list on the right. There's only the `hddusb_bkpvzdumps` storage available for VM dumps, which you configured as the sole directory for holding vzdumps, back in the [**G019** guide](G019%20-%20K3s%20cluster%20setup%2002%20~%20Storage%20setup.md#setting-up-the-directories). Now you can take advantage of it and, since its already selected as the storage of choice for VM dumps, just press on the `Backup now`.
+ 
-2. An editor window appears for creating a new backup.
+ There you have the following parameters to fill:
- 
+ - `Storage`\
+ It is the same list you have available in the `Backup` view.
- There you have the following parameters to fill.
+ - `Mode`\
+   Indicates how you want to execute the backup on your VM. In the case of running VMs, you will have to consider whether you want to execute the backup with the VM still running (`Snapshot` and `Suspend` modes, which also use the QEMU guest agent if available in the VM) or stop it while the backup is being done (`Stop` mode). For a VM template backup, it shouldn't matter which mode you choose, although the only one that truly makes sense in this case is `Stop` mode.
- - `Storage`: it's the same list you have available in the `Backup` view.
+ > [!NOTE]
+ > **The behavior of these backup modes for containers is similar but not equivalent to how they work for VMs**\
+ > Check the Proxmox VE help to see the differences.
- - `Mode`: indicates how you want to execute the backup on your VM. In case of running VMs, you'll have to consider if you want to execute the backup in parallel with the VM still running (`Snapshot` and `Suspend` modes, which also use the QEMU guest agent if available in the VM) or stopping it while the backup is being done (`Stop` mode). For a VM template backup, it shouldn't matter which mode you choose, although the only one that makes sense to use in this case is `Stop` mode.
- > **BEWARE!**
- > The behavior of these backup modes for containers is similar but not equivalent to how they work for VMs. Check the Proxmox VE help to see the differences.
+ - `Compression`\
+   Offers you the possibility of compressing (or not) the backup of your VM or container. In a scenario with very limited storage like the one used in this guide series, it is mandatory to compress the dumps as much as possible. The default `ZSTD` option is the best choice since it is not only the fastest of the four options offered, but also multi-threaded.
- - `Compression`: offers you the possibility of compressing or not the backup of your VM or container. In a scenario with very limited storage like the one used in this guide series, its mandatory to compress the dumps as much as possible. The default `ZSTD` option is the best option since is not only the fastest algorithm of the three offered, but also is multi-threaded.
+ - `Notification`\
+   This allows you to choose how you want to notify users about this backup. The legacy method uses the `sendmail` service, while the modern method relies on certain Proxmox VE global settings which won't be covered in this guide. The `sendmail` method may not work depending on your network and security configuration.
- - `Send email to`: if you want Proxmox VE to send an email to someone about the execution of this backup, type their email (or emails) here. This may not work depending on your network and security configuration.
+ - `Protected`\
+ When enabled, Proxmox VE will protect this backup from removal actions. This is particularly useful when using automated cleaning processes that free storage by removing old backups, but you want to preserve some specific backups.
- Knowing all that, you may set the configuration for the new backup like below, just changing the `Mode` to `Stop`.
+ - `Notes`\
+   Text field you can use to leave some description of the backup. Notice how you can use certain template variables that will be replaced with their real values when the text is set in the backup.
- 
+ Knowing all that, you may set the configuration for the new backup like below, where the `Mode` has been changed to `Stop`:
-3. Click on the `Backup` button and you'll see a progress window like the one below.
+ 
- 
+   Notice also that I have protected the backup from unwanted removal actions and left some descriptive text.
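+
+   If you prefer working from the Proxmox VE host shell, the same backup can also be launched with the `vzdump` command. The sketch below only assumes the options discussed above (storage, mode, compression, protection and a notes template using Proxmox VE's template variables); adapt the values to your own setup, and mind that the `--protected` and `--notes-template` options require a reasonably recent Proxmox VE release:
+
+   ~~~sh
+   # Run on the Proxmox VE host, as root: back up VM 100 into the
+   # hddusb_bkpvzdumps storage, stopping the VM during the dump,
+   # compressing with zstd and marking the archive as protected.
+   $ vzdump 100 --storage hddusb_bkpvzdumps --mode stop --compress zstd \
+       --protected 1 --notes-template 'Backup of {{guestname}} (VM {{vmid}}) on node {{node}}'
+   ~~~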
- After a while, you should see in the `Output` log informative lines like the ones below.
+3. Click on the `Backup` button and you'll see a task progress window:
+
+ 
+
+ After a short while, the `Output` will print informative log lines like these:
~~~log
- INFO: image drive-scsi0: size=10737418240 zeros=8918171648 saved=1819246592
- INFO: archive file size: 600MB
- INFO: Finished Backup of VM 100 (00:01:15)
- INFO: Backup finished at 2021-11-19 20:36:06
+ INFO: backup is sparse: 8.26 GiB (82%) total zero data
+ INFO: transferred 10.00 GiB in 69 seconds (148.4 MiB/s)
+ INFO: stopping kvm after backup task
+ INFO: archive file size: 834MB
+ INFO: adding notes to backup
+ INFO: marking backup as protected
+ INFO: Finished Backup of VM 100 (00:01:12)
+ INFO: Backup finished at 2025-09-06 20:00:36
INFO: Backup job finished successfully
+ INFO: notified via target `mail-to-root`
TASK OK
~~~
This means that the dump has been done correctly.
-4. Close the status window to return to the `Backup` view. There, you'll see the new backup listed as a vzdump file compressed in `.zst` format.
+4. Close the status window to return to the `Backup` view. There, you'll see the new backup listed as a vzdump file compressed in `vma.zst` format:
+
+ 
+
+   Notice that this backup takes up 874.99 MiB, which is a very decent compression of the 2.16 GiB taken up by the VM template's `base-100-disk-0` disk image.
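+
+   If you also want to check the archive from the Proxmox VE host shell, a command like the one below (a hedged sketch; run it on the `pve` node) should list it among the backup contents of that storage:
+
+   ~~~sh
+   # Lists the backup archives stored in the hddusb_bkpvzdumps storage.
+   $ pvesm list hddusb_bkpvzdumps --content backup
+   ~~~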
- 
+### Restoring the VM template's backup
-Notice that, in this case, the backup takes up to 629.93 MiB, a decent compression of the 2.18 GiB used by the VM template's `base-100-disk-0` disk image.
+Restoring the backup of a VM or VM template is not much more complex than creating it:
-### _Restoring the VM template's backup_
+1. Go back to the `Backup` view of your VM template and select the only backup you have listed there:
-Restoring the backup of a VM or VM template is not much more complex than creating them.
+ 
-1. Go back to the `Backup` view of your VM template and select the only backup you have listed there.
+ When selecting a backup, all the buttons next to `Backup now` become active. The ones that you should pay attention to now are `Restore` and `Show Configuration`.
- 
+2. It may happen that it has been a while since you did the backup, and you don't remember what's inside it. To help you with this, you can press on `Show Configuration`:
- Notice how all the buttons next to `Backup now` are active. The ones that you should pay attention to are `Restore` and `Show Configuration`.
+ 
-2. It may happen that it has been a while since you did the backup, and you don't remember what's inside of it. To help you with this, you can press on `Show Configuration`.
+ This window shows you the configuration of the VM or VM template saved in the backup, including the notes (rendered as regular text) you may have added to the VM itself. This gives you an idea of what is going to be put back when you restore the backup.
- 
+3. Close the backup's `Configuration` window, then press on `Restore`:
- This window shows you the configuration of the VM or VM template saved in the backup, including the notes you may have added to the VM itself. This gives you an idea of what is going to be put back when you restore the backup.
+ 
-3. Close the backup's `Configuration` window, then press on `Restore`.
+ The fields you see mean the following:
- 
+ - `Source`\
+ The backup file from which you're going to restore the VM or VM template.
- The fields you see mean the following.
+ - `Storage`\
+   The storage where you want to restore the backup. If left at its default value, the backup will restore the VM's hard disks in the locations indicated by the VM's configuration.
- - `Source`: the backup file from which you're going to restore the VM or VM template.
+ - `VM`\
+   The ID of the VM you're restoring.
- - `Storage`: the storage where you want to restore the backup. Left by default, the backup will restore the VM hard disks in the locations indicated in its configuration.
+ - `Bandwidth Limit`\
+   This parameter restricts the storage bandwidth taken up by the restoration process, limiting the impact it will have on your system's performance.
- - `VM`: the id of the VM you're restoring.
+ - `Unique`\
+   This is a feature that generates new values for certain attributes of the restored VM, like its network interface MACs.
- - `Bandwidth Limit`: this parameter is to restrict the system's resources taken up by the restoration process, and limit the impact it will have in your system performance.
+ > [!WARNING]
+ > **Careful when using this attribute**\
+ > If you happen to have some configuration that relies on the attributes that get regenerated, like a router assigning static IPs to specific MACs, the new values may not fit and could "break" your setup.
- - `Start after restore`: makes the restored VM start immediately after being restore, although this won't work on a VM template.
+ - `Start after restore`\
+   Makes the restored VM start immediately after being restored, although this won't work on a VM template.
-4. With the default values you're fine, so press on `Restore` and you'll see the following confirmation window.
+ - `Override Settings`\
+ This is where you can give a different name to the restored VM, plus readjust its assigned CPU and RAM capacities. Changing these particular attributes is usually not a problem for Linux-based OSes like Debian, although you must be sure that the readjusted capacities will be enough for the needs of the processes that will run in the restored VM.
- 
+4. In this case the default values are fine, so press on `Restore` and you will see the following confirmation window:
- > **BEWARE!**
+ 
+
+ > [!WARNING]
+ > **Restoring the VM will remove its existing hard disk**\
> The restoration process will **replace** the hard disk you currently have linked to the VM template with the one stored within the backup.
-5. After accepting the confirmation, you'll reach the progress window of the `Restore` process.
+5. After accepting the confirmation, you'll reach the progress window of the `Restore` process:
- 
+ 
- After a while you should see in the output the `TASK OK` message, as a sign of the successful end of the process. Also, in those log lines, you may notice a line like the following.
+ After a while you should see in the output the `TASK OK` message, as a sign of the successful end of the process. Also, among those log entries, you may notice a line like the following:
- ~~~bash
- space reduction due to 4K zero blocks 4.93%
+ ~~~sh
+ space reduction due to 4K zero blocks 2.83%
~~~
- This means that the restoration procedure has found some empty (`zero`) blocks in the backup and, due to that, the space taken up by the restored VM has been reduced by a certain percentage (`4.93%` in the example above).
+ This means that the restoration procedure has found some empty (`zero`) blocks in the backup. Due to that, the space taken up by the restored VM has been reduced by a certain percentage (`2.83%` in the example above).
-### _Considerations about backups_
+### Considerations about backups
-- **Different restore commands for VM and containers**
+- **Different restore commands for VM and containers**\
   You can also restore backups from a shell with dedicated commands:
- - `qmrestore`: VM restore utility.
- - `pct restore`: container restore utility.
+ - `qmrestore`\
+ VM restore utility.
+
+ - `pct restore`\
+ Container restore utility.
- Check their `man` pages to see how they work.
+ Check their `man` pages to see how they work.
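+
+  As a hedged sketch, and using a placeholder archive path (yours will have a different, timestamped name), a VM restore from the shell could look like this:
+
+  ~~~sh
+  # The archive path below is hypothetical; point it to your real vzdump file.
+  $ qmrestore /path/to/vzdump-qemu-100.vma.zst 100 --storage ssd_disks
+  ~~~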
-- **Careful with storages**
- The hard disks attached to VMs or containers could be configured to storages that are no longer available in your system, or that have changed their names. So always check the configuration of the VM or container before you restore it, to see in which storage you can put it back.
+- **Careful with storages**\
+  The hard disks attached to VMs or containers could be assigned to storages that are no longer available in your system, or that have changed their names. So always check the configuration of the VM or container before you restore it, to see in which storage you can put it back.
-- **Hardware setup is saved in the backup**
- The hardware configuration of a VM that you see in the Proxmox VE web console is also stored in its backup. So, when you recover the backup of a VM, the hardware configuration will also be recovered, although the PVE web console may ask you about the differences it finds between the current VM configuration and the one stored in the backup.
+- **Hardware setup is saved in the backup**\
+ The hardware configuration of a VM that you see in the Proxmox VE web console is also stored in its backup. So, when you recover the backup of a VM, the hardware configuration will also be recovered, although the PVE web console may ask you about the differences it finds between the current VM configuration and the one stored in the backup.
- > This point is directly related with the previous one.
+ > [!NOTE]
+ > This point is directly related with the previous one.
## Other considerations regarding VM templates
-- **Nature of VM templates**
- It might seem odd that, in the Proxmox VE platform, the VM templates are treated almost as normal VMs. To understand this, you have to think about VM templates just as frozen-in-time VMs. Thanks to this nature, you can clone them in Proxmox VE to create new but similar VMs much faster.
+- **Nature of VM templates**\
+ It might seem odd that, in the Proxmox VE platform, the VM templates are treated almost as normal VMs. To understand this, you have to think about VM templates just as frozen-in-time VMs. Thanks to this nature, you can clone them in Proxmox VE to create new but similar VMs much faster.
+
+- **Proxmox VE does not compress the templates**\
+  Directly related to this "frozen VM" nature, you must bear in mind that a VM template's hard disk will not be compressed or shrunk in any way by Proxmox VE. Whatever storage space was used by the VM disk image, that's exactly the space the template will still take up. The only things that will change, as you've seen in this guide, are the write permission on the corresponding logical volume and its status, which is set to inactive.
+
+  If you were using a `qcow2` image file instead, this read-only restriction would be enforced differently. Given how the storage setup has already been configured, the `qcow2` format will not be covered in this guide.
-- **Proxmox VE doesn't compress the templates**
- Directly related to this "frozen VM" nature, you must bear in mind that a VM template's hard disk won't be compressed or shrunk in any way by Proxmox VE. Whatever storage space was used by the VM disk image, that's exactly the space the template will still take up. The only thing that will change, as you've seen in this guide, is the write permission on the corresponding light volume and its status to inactive. Now, in case you happened to be using a `qcow2` image file, how this read-only restriction is enforced will change. Given how the storage setup has been configured already, the `qcow2` format won't be covered in this guide series.
+- **Clone VM templates to update them**\
+ Since VM templates are read-only, **you cannot modify them**. If you want to update a template, you have to clone it into a new VM, update that VM and, then, turn the updated VM into a new VM template.
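+
+  For reference, and using hypothetical VM IDs (`100` for the existing template, `110` for the temporary clone), the whole cycle could be done from the Proxmox VE host shell roughly like this:
+
+  ~~~sh
+  # Clone the template into a regular, writable VM.
+  $ qm clone 100 110 --full --name debiantplupd
+  # Start VM 110, apply the updates inside it, then shut it down.
+  # Finally, turn the updated VM into the new template.
+  $ qm template 110
+  ~~~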
-- **Clone VM templates to update them**
- Since VM templates are read-only, **you can't modify them**. If you want to update a template, you'll have to clone it into a new VM, update that VM and, then, turn the updated VM into a new VM template.
+- **Hardware configuration can be changed**\
+ The part that really is read-only in a template is the storage drive that becomes the disk image, but the hardware setup can still be changed. If you have to do this, you must be careful that the changes do not contradict what the templated VM saved in the image knows about its hardware.
-- **Hardware configuration can be changed**
- The part that really is read-only in a template is the storage drive that becomes the disk image, but the hardware setup can still be changed. If you have to do this, you must be careful that the changes won't contradict what the templated VM saved in the image knows about its hardware. For instance, changing the number of vCPUs or the RAM won't usually give you any trouble, but removing a network device could have concerning consequences.
+ For instance, changing the number of vCPUs or the RAM won't usually give you any trouble. However, removing a network device could have concerning consequences.
## References
-- [CommonMark (Markdown) Spec](https://spec.commonmark.org/)
-- [Proxmox VE documentation. Backup and Restore](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump)
+### [Proxmox VE](https://pve.proxmox.com/)
+
+- [Proxmox VE documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html)
+ - [Backup and Restore](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_vzdump)
+
+- [A Step-by-Step Guide to Restoring Backups in Proxmox VE](https://www.vinchin.com/vm-backup/proxmox-restore-backup.html)
+
+### Markdown
+
+- [CommonMark Spec](https://spec.commonmark.org/)
+ - [Hard line breaks](https://spec.commonmark.org/0.31.2/#hard-line-breaks)
## Navigation
diff --git a/G024 - K3s cluster setup 07 ~ K3s node VM template setup.md b/G024 - K3s cluster setup 07 ~ K3s node VM template setup.md
index 450d468..5ba2c29 100644
--- a/G024 - K3s cluster setup 07 ~ K3s node VM template setup.md
+++ b/G024 - K3s cluster setup 07 ~ K3s node VM template setup.md
@@ -1,133 +1,175 @@
# G024 - K3s cluster setup 07 ~ K3s node VM template setup
-At this point, you have a plain Debian VM template ready. You can use that template to build any server system you want but, to create VMs that work as K3s Kubernetes cluster nodes, further adjustments are necessary. Since those changes are required for any node of your future K3s cluster, you'll want to have a more specialized VM template that comes with all those adjustments already configured.
+- [You need a more specialized VM template for building K3s nodes](#you-need-a-more-specialized-vm-template-for-building-k3s-nodes)
+- [Reasons for a new VM template](#reasons-for-a-new-vm-template)
+- [Creating a new VM based on the Debian VM template](#creating-a-new-vm-based-on-the-debian-vm-template)
+ - [Full cloning of the Debian VM template](#full-cloning-of-the-debian-vm-template)
+- [Setting a static IP for the main network device (`net0`)](#setting-a-static-ip-for-the-main-network-device-net0)
+- [Setting a proper hostname string](#setting-a-proper-hostname-string)
+- [Disabling the swap volume](#disabling-the-swap-volume)
+- [Changing the VG's name](#changing-the-vgs-name)
+- [Setting up the second network card](#setting-up-the-second-network-card)
+- [Setting up sysctl kernel parameters for K3s nodes](#setting-up-sysctl-kernel-parameters-for-k3s-nodes)
+- [Turning the VM into a VM template](#turning-the-vm-into-a-vm-template)
+- [Relevant system paths](#relevant-system-paths)
+ - [Folders on the VM](#folders-on-the-vm)
+ - [Files on the VM](#files-on-the-vm)
+- [References](#references)
+ - [Kubernetes](#kubernetes)
+ - [K3s](#k3s)
+ - [Debian and Linux SysOps](#debian-and-linux-sysops)
+ - [Changing the `Hostname`](#changing-the-hostname)
+ - [Disabling the swap](#disabling-the-swap)
+ - [Changing the VG's name of a `root` LV](#changing-the-vgs-name-of-a-root-lv)
+ - [Network interfaces configuration](#network-interfaces-configuration)
+- [Navigation](#navigation)
+
+## You need a more specialized VM template for building K3s nodes
+
+At this point, you have a plain Debian VM template ready. You can use that template to build any virtualized server system you want but, to create VMs that work as K3s Kubernetes cluster nodes, further adjustments are necessary. Since those changes are required for any node of your future K3s cluster, you will want to have a more specialized VM template that comes with all those adjustments already configured.
## Reasons for a new VM template
-Next, I'll list the main reasons or things necessary to do in a new VM, cloned from the Debian VM template you already have, so it can suit better the role of a K3s node.
+Next, I'll list the main adjustments needed in a new VM, cloned from the Debian VM template you already have, so it better suits the role of a K3s node:
-- **Disabling the swap volume**
-Since its beginnings, Kubernetes hasn't offered support for using swap memory, and demands to have it disabled in order to run. K3s, on the other hand, seems to be able to run with swap enabled but it will warn you about it when you validate its configuration (meaning that it could have problems when a node hits swap).
+- **Disabling the swap volume**\
+  By default, Kubernetes does not allow its workloads to use swap, mainly for performance reasons. Swap usage cannot be configured in a generic way either: you must assess the needs and particularities of each workload that might require swap to safeguard its stability when it runs out of memory. Bear in mind that workloads themselves only request memory, not swap; the use of swap is handled by Kubernetes itself, and it is a feature still being improved. Furthermore, [the official Kubernetes documentation advises using an independent physical SSD drive exclusively for swapping on each node, and avoiding the swap that is usually enabled on the root filesystem of any Linux system](https://kubernetes.io/docs/concepts/cluster-administration/swap-memory-management/#use-of-a-dedicated-disk-for-swap).
- On the other hand, at the time of writing this paragraph, support for swap is coming as an _alpha feature_ in the `1.22` version of Kubernetes, and it's expected to graduate to GA level at around the `1.25` version. [Find all the details about this enhancement in the Kubernetes enhancement proposal 2400 github page here](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2400-node-swap). In this guide I'll show you the "traditional" way of dealing with swap in a Kubernetes cluster, that is, by disabling it completely.
+ Given the limitations and scope of the homelab this guide builds, it is better to deal with the swap issue in the "traditional" Kubernetes way: by disabling it completely in the VM.
-- **Renaming the root VG**
-In the LVM storage structure of your VM, the name of the only VG present is based on your Debian 11 VM template's hostname. This is not a problem per se, but could be misleading while doing some system maintenance tasks. Instead, you should change it to a more suitable name fitting for all your future K3s nodes.
+- **Renaming the root VG**\
+  In the LVM storage structure of your Debian VM template, the name of the only VG present is based on the template's hostname. This is not a problem per se, but could be misleading while doing some system maintenance tasks. Instead, you should change it to a more suitable name fitting for all your future K3s nodes.
-- **Preparing the second network card**
-The VM template you've setup in the previous guides has two network cards, but only the principal NIC is active. The second one, connected to the isolated `vmbr1` bridge, is currently disabled but you'll need to activate it. This way, the only thing left to adjust on each K3s node will be the IP address.
+- **Preparing the second network card**\
+  The Debian VM template you have set up in the previous chapters has two network cards, but only the principal NIC is active. The second one, connected to the isolated `vmbr1` bridge, is currently disabled but you need to activate it. This way, the only thing left to adjust in this regard on each K3s node will be the IP address.
-- **Setting up sysctl parameters**
-With a particular experimental option enabled, K3s requires certain `sysctl` parameters to have concrete values. If they're not set up in such fashion, the K3s service refuses to run.
+- **Setting up sysctl parameters**\
+  With a particular option enabled, K3s requires certain `sysctl` parameters to have concrete values. If they're not set up that way, the K3s service refuses to run.
-These points affect all the nodes in the K3s cluster. Then, the smart thing to do is to set them right first in a VM which, in turn, will become the template from which you'll clone the final VMs that'll run as nodes of your K3s cluster.
+These aspects affect all the nodes in the K3s cluster. Therefore, the smart thing to do is to set them right in a VM which, in turn, will become the common template from which you can clone the final VMs that will run as nodes of your K3s cluster.
## Creating a new VM based on the Debian VM template
-### _Full cloning of the Debian VM template_
+This section covers the procedure of creating a new VM cloned from the Debian VM template you already have.
+
+### Full cloning of the Debian VM template
 Since you're going to modify this new VM's filesystem structure, let's make it a full clone of your Debian VM template.
-1. Go to your `debiantpl` template, then unfold the `More` options list. There you'll find the `Clone` option.
+1. Go to your `debiantpl` template, then unfold the `More` options list. There you'll find the `Clone` option:
- 
+ 
-2. Click on `Clone` to see its corresponding window.
+2. Click on `Clone` to see its corresponding window:
- 
+ 
I'll explain the form parameters below.
- - `Target node`: which node in the Proxmox VE cluster you want to place your cloned VM in. In your case you only have one standalone node, `pve`.
+ - `Target node`\
+ Which node in the Proxmox VE cluster you want to place your cloned VM in. In your case you only have one standalone node, `pve`.
+
+ - `VM ID`\
+ The numerical ID that Proxmox VE uses to identify this VM. Notice how the form already assigns the next available number, in this case `101`.
+
+ > [!NOTE]
+ > Proxmox VE does not allow IDs lower than `100`.
- - `VM ID`: the numerical ID that Proxmox VE uses to identify this VM. Notice how the form already assigns the next available number, in this case 101.
- > **BEWARE!**
- > Proxmox VE doesn't allow IDs lower than 100.
+ - `Name`\
+ This string must be a valid FQDN, like `debiantpl.homelab.cloud`.
- - `Name`: this string must be a valid FQDN, like `debiantpl.your.pve.domain`.
- > **BEWARE!**
- > The official Proxmox VE documentation says that this name is `a free form text string you can use to describe the VM`, which contradicts what the web console actually validates as correct.
+ > [!IMPORTANT]
+ > **The official Proxmox VE documentation is misleading about this field**\
+ > The official Proxmox VE documentation says that this name is `a free form text string you can use to describe the VM`, which contradicts what the web console actually validates as correct.
- - `Resource Pool`: here you can indicate to which pool you want to make this VM a member of.
+ - `Resource Pool`\
+   Indicates which resource pool this VM should be a member of.
- - `Mode`: this is a list with two options available.
+ - `Mode`\
+ This option offers two ways of cloning the new VM:
- - `Linked Clone`: this creates a clone that still refers to the original VM, therefore is _linked_ to it. This option can only be used with read-only VMs, or templates, since the linked clone uses the original VM's volume to run, saving in its own image only the differences. Also, linked clones must be stored in the same `Target Storage` where the original VM's storage is.
- > **BEWARE!**
- > Templates cannot be removed as long as they have **linked clones** attached to them.
+ - `Linked Clone`\
+     This creates a clone that still refers to the original VM, and is therefore _linked_ to it. This option can only be used with read-only VMs, or templates, since the linked clone uses the original VM's volume to run, saving in its own image only the differences. Also, linked clones must be stored in the same `Target Storage` where the original VM's storage is.
- - `Full Clone`: is a full copy of the original VM, so its not linked to it. Also, this type allows to be put in a different `Target Storage` if required.
+ > [!WARNING]
+ > **VM templates with attached linked clones are not removable**\
+ > Templates cannot be removed as long as they have linked clones attached to them.
- - `Target Storage`: here you can choose where you want to put the new VM, although you can only choose in case of making a **full clone**. In this list there are storage types that won't appear, like directories.
+ - `Full Clone`\
+     A full copy of the original VM, so it is not linked to it at all. Also, this type of clone can be placed in a different `Target Storage` if required.
- - `Format`: depending on the mode and target storage configured, you'll see that this value changes to adapt to those other two parameters. It just indicates in which format is going the new VM's volumes to be stored in the Proxmox VE system.
+ - `Target Storage`\
+ Here you can choose where you want to put the new VM, although you can only choose when making a **full clone**. There are storage types that do not appear in this list, like directories.
-3. Fill the `Clone` form to create a new **full clone** VM as follows.
+ - `Format`\
+   Depending on the mode and target storage configured, this value changes to adapt to those other two parameters. It just indicates in which format the new VM's volumes are going to be stored in the Proxmox VE system.
- 
+3. Fill the `Clone` form to create a new **Full Clone** VM as follows:
- Notice the name I've given (`k3snodetpl`), and that I've chosen explicitly the `ssd_disks` as target storage. I could have left the default `Same as source` option since the template volume is also placed in that storage, but I preferred to be explicit here for the sake of clarity.
+ 
-4. Click on `Clone` when ready and the window will disappear. You'll have to pay attention to the `Tasks` console at the bottom to see how the cloning process goes.
+ Notice that I've given the name of `k3snodetpl` to this full clone, and that I've explicitly chosen the `ssd_disks` as target storage. I could have left the default `Same as source` option since the template volume is also placed in that storage, but I wanted to be explicit here for the sake of clarity.
- 
+4. Click on `Clone` when ready and the form will disappear. Pay attention to the `Tasks` console at the bottom to see how the cloning process goes:
+
+ 
See how the new 101 VM appears with a lock icon in the tree at the left. Also, in its `Summary` view, you can see how Proxmox VE warns you that the VM is still being created with the `clone` operation, and even in the `Notes` you can see a reference to a `qmclone temporary file`.
-5. When you see in the `Tasks` log that the cloning task appears as `OK`, refresh the `Summary` view to see the VM unlocked and fully created.
+5. When you see in the `Tasks` log that the cloning task appears as `OK`, the `Summary` view will show the VM unlocked and fully created:
- 
+ 
- See how all the details in the `Summary` view are the same as what the original template had (like the `Notes` for instance).
+ See how all the details in the `Summary` view are the same as what the original Debian VM template had (like the `Notes` for instance).
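+
+By the way, the same full clone can also be created from the Proxmox VE host shell with the `qm clone` command. The line below is just a sketch of what was done through the web console, using the values shown above (adapt the IDs, name and target storage to your case):
+
+~~~sh
+$ qm clone 100 101 --full --name k3snodetpl.homelab.cloud --storage ssd_disks
+~~~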
-## Set an static IP for the main network device (`net0`)
+## Setting a static IP for the main network device (`net0`)
-Don't forget to set up a static IP for the main network device (the `net0` one) of this VM in your router or gateway, ideally following some criteria. You can see the MACs, in the `Hardware` view, as the value of the `virtio` parameter on each network device attached to the VM.
+Do not forget to set up a static IP for the main network device (the `net0` one) of this VM in your router or gateway, ideally following a consistent addressing scheme. You can see the MACs, in the VM's `Hardware` view, as the value of the `virtio` parameter on each network device attached to the VM.
-
+
## Setting a proper hostname string
-Since this new VM is a clone of the Debian 11 template you prepared before, its hostname is the same one set in the template (`debiantpl`). It's better, for coherence and clarity, to set up a more proper hostname for this particular VM which, in this case, will be called `k3snodetpl`. Then, to change the hostname string on the VM, do the following.
+Since this new VM is a clone of the Debian VM template you prepared before, its hostname is the same one set in the template (`debiantpl`). It is better, for coherence and clarity, to set up a more fitting hostname for this particular VM which, in this case, will be called `k3snodetpl`. Then, to change the hostname string on the VM, do the following:
-1. Start the VM, then login as `mgrsys` (with the same credentials used in the Debian VM template). To change the hostname value (`debiantpl` in this case) in the `/etc/hostname` file, better use the `hostnamectl` command.
+1. Start the VM, then login as `mgrsys` (with the same credentials used in the Debian VM template). To change the hostname value (`debiantpl` in this case) in the `/etc/hostname` file, better use the `hostnamectl` command:
- ~~~bash
+ ~~~sh
$ sudo hostnamectl set-hostname k3snodetpl
~~~
If you edit the `/etc/hostname` file directly instead, you'll have to reboot the VM to make it load the new hostname.
-2. Edit the `/etc/hosts` file, where you must replace the old hostname (again, `debiantpl`) with the new one. The hostname should only appear in the `127.0.1.1` line.
+2. Edit the `/etc/hosts` file, where you must replace the old hostname (again, `debiantpl`) with the new one. The hostname should only appear in the `127.0.1.1` line:
~~~properties
- 127.0.1.1 k3snodetpl.deimos.cloud k3snodetpl
+ 127.0.1.1 k3snodetpl.homelab.cloud k3snodetpl
~~~
-To see the change applied, exit your current session and log back. You should see that the new hostname shows up in your shell prompt.
+To see all these changes applied, exit your current session and log back in. You should see that now the new hostname shows up in your shell prompt.
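+
+Besides the shell prompt, a quick way to confirm the change is running `hostnamectl` without arguments, which should now report `k3snodetpl` as the static hostname:
+
+~~~sh
+$ hostnamectl
+~~~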
## Disabling the swap volume
-Follow the next steps to remove the swap completely from your VM.
+Follow the next steps to remove the swap completely from your VM:
-1. First disable the currently active swap memory.
+1. First disable the currently active swap memory:
- ~~~bash
+ ~~~sh
$ sudo swapoff -a
~~~
    The `swapoff` command disables the swap only temporarily; the system will reactivate it after a reboot. To verify that the swap is actually disabled, check the `/proc/swaps` file.
- ~~~bash
+ ~~~sh
$ cat /proc/swaps
- Filename Type Size Used Priority
+ Filename Type Size Used Priority
~~~
If there are no filenames listed in the output, that means the swap is disabled (although just till next reboot).
-2. Make a backup of the `/etc/fstab` file.
+2. Make a backup of the `/etc/fstab` file:
- ~~~bash
+ ~~~sh
$ sudo cp /etc/fstab /etc/fstab.orig
~~~
@@ -139,77 +181,84 @@ Follow the next steps to remove the swap completely from your VM.
...
~~~
-3. Edit the `/etc/initramfs-tools/conf.d/resume` file, commenting out with a '#' character the line related to the `swap_1` volume.
+3. Edit the `/etc/initramfs-tools/conf.d/resume` file, commenting out with a '#' character the line related to the `swap_1` volume:
~~~properties
#RESUME=/dev/mapper/debiantpl--vg-swap_1
~~~
- > **BEWARE!**
- Notice that I haven't told you to make a backup of this file. This is because the `update-initramfs` command would also read the backup file regardless of having a different name, and that would lead to an error.
+ > [!IMPORTANT]
+ > **Notice that I haven't told you to make a backup of this `resume` file**\
+ > This is because the `update-initramfs` command would also read the backup file regardless of it having a different name, and that would lead to an error.
+ >
+ > Of course, you could consider making the backup in some other folder, but that forces you to employ a particular (and probably forgettable) backup procedure only for this specific file. To sum it up, just be extra careful when modifying this particular file.
-4. Check with `lvs` the name of the swap LVM volume.
+4. Check with `lvs` the name of the swap LVM volume:
- ~~~bash
+ ~~~sh
$ sudo lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
- root debiantpl-vg -wi-ao---- 8.54g
- swap_1 debiantpl-vg -wi-a----- 976.00m
+ root debiantpl-vg -wi-ao---- <8.69g
+ swap_1 debiantpl-vg -wi-a----- 544.00m
~~~
- In the output above it's the `swap_1` light volume within the `debiantpl-vg` volume group. Now you can use `lvremove` on it to free that space.
+   In the output above, it is the `swap_1` logical volume within the `debiantpl-vg` volume group. Use `lvremove` on it to free that space:
- ~~~bash
+ ~~~sh
$ sudo lvremove debiantpl-vg/swap_1
Do you really want to remove active logical volume debiantpl-vg/swap_1? [y/n]: y
- Logical volume "swap_1" successfully removed
+ Logical volume "swap_1" successfully removed.
~~~
- Then, check again with `lvs` that the swap partition is gone.
+ Then, check again with `lvs` that the swap partition is gone:
- ~~~bash
+ ~~~sh
$ sudo lvs
- LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
- root debiantpl-vg -wi-ao---- 8.54g
+ LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
+ root debiantpl-vg -wi-ao---- <8.69g
~~~
- Also, see with `vgs` that the `debiantpl-vg` VG has now free space (`VFree` column).
+ Also, see with `vgs` that the `debiantpl-vg` VG now has some free space (`VFree` column):
- ~~~bash
+ ~~~sh
$ sudo vgs
- VG #PV #LV #SN Attr VSize VFree
- debiantpl-vg 1 1 0 wz--n- <9.52g 1000.00m
+ VG #PV #LV #SN Attr VSize VFree
+ debiantpl-vg 1 1 0 wz--n- 9.25g 580.00m
~~~
-5. To expand the `root` LV into the newly freed space, execute `lvextend` like below.
+5. To expand the `root` LV into the newly freed space, execute `lvextend` like this:
- ~~~bash
+ ~~~sh
$ sudo lvextend -r -l +100%FREE debiantpl-vg/root
~~~
- The options mean the following.
- - `-r` calls the `resize2fs` command right after resizing the LV, to also extend the filesystem in the LV over the added space.
- - `-l +100%FREE` indicates that the LV has to be extended over the 100% of free space available in the VG.
+ The `lvextend` options mean the following:
- Check with `lvs` the new size of the `root` LV.
+ - `-r`\
+ Calls the `resize2fs` command right after resizing the LV, to also extend the filesystem in the LV over the added space.
- ~~~bash
+ - `-l +100%FREE`\
+     Indicates that the LV has to be extended over 100% of the free space available in the VG.
+
+ Check with `lvs` the new size of the `root` LV:
+
+ ~~~sh
$ sudo lvs
- LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
- root debiantpl-vg -wi-ao---- <9.52g
+ LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
+ root debiantpl-vg -wi-ao---- 9.25g
~~~
- Also verify that there's no free space left in the `debiantpl-vg` VG.
+ Also verify that there's no free space left in the `debiantpl-vg` VG:
- ~~~bash
+ ~~~sh
$ sudo vgs
- VG #PV #LV #SN Attr VSize VFree
- debiantpl-vg 1 1 0 wz--n- <9.52g 0
+ VG #PV #LV #SN Attr VSize VFree
+ debiantpl-vg 1 1 0 wz--n- 9.25g 0
~~~
-6. The final touch is to modify the `swappiness` sysctl parameter, which you already left set with a low value in the `/etc/sysctl.d/85_memory_optimizations.conf` file. As usual, first make a backup of the file.
+6. The final touch is to modify the `swappiness` sysctl parameter, which you already left set with a low value in the `/etc/sysctl.d/85_memory_optimizations.conf` file. As usual, first make a backup of the file:
- ~~~bash
+ ~~~sh
$ sudo cp /etc/sysctl.d/85_memory_optimizations.conf /etc/sysctl.d/85_memory_optimizations.conf.bkp
~~~
@@ -223,188 +272,205 @@ Follow the next steps to remove the swap completely from your VM.
7. Save the changes, refresh the sysctl configuration and reboot.
- ~~~bash
+ ~~~sh
$ sudo sysctl -p /etc/sysctl.d/85_memory_optimizations.conf
$ sudo reboot
~~~
## Changing the VG's name
-The VG you have in your VM's LVM structure is the same one defined in your Debian VM template, meaning that it was made correlative to the hostname of the original system. Although this is not an issue per se, you'll probably prefer to give to the VG's name a more sensible string. Since this VM will become a template for all the VMs you'll use as K3s nodes, lets give the VG the name `k3snode-vg`. It's generic but still more meaningful for all the K3s nodes you'll create later than the `debiantpl-vg` string.
+The VG you have in your VM's LVM structure is the same one defined in your Debian VM template, meaning that it was made correlative to the hostname of the original system. This is not an issue per se, but it is better to give the VG a name that correlates with the VM's purpose. Since this VM will become a template for all the VMs you'll use as K3s nodes, let's give the VG the name `k3snode-vg`. It's generic, but still more meaningful for all the K3s nodes you'll create later than the `debiantpl-vg` string.
-> **BEWARE!**
+> [!WARNING]
+> **This procedure affects your VM's filesystem**\
 > Although this is not a difficult procedure, follow all the next steps carefully, or you may end up messing up your VM's filesystem!
1. Using the `vgrename` command, rename the VG with the suggested name `k3snode-vg`.
- ~~~bash
+ ~~~sh
$ sudo vgrename debiantpl-vg k3snode-vg
Volume group "debiantpl-vg" successfully renamed to "k3snode-vg"
~~~
Verify with `vgs` that the renaming has been done.
- ~~~bash
+ ~~~sh
$ sudo vgs
- VG #PV #LV #SN Attr VSize VFree
- k3snode-vg 1 1 0 wz--n- <9.52g 1000.00m
+ VG #PV #LV #SN Attr VSize VFree
+ k3snode-vg 1 1 0 wz--n- 9.25g 0
~~~
-2. Next you must edit the `/etc/fstab` file, replacing only the `debiantpl` string with `k3snode` in the line related to the `root` volume and, if you like, also in the commented `swap_1` line to keep it coherent.
+2. You must edit the `/etc/fstab` file, replacing only the `debiantpl` string with `k3snode` in the line related to the `root` volume and, if you like, also in the commented `swap_1` line to keep it coherent:
- ~~~bash
+ ~~~sh
...
- /dev/mapper/k3snode--vg-root / ext4 errors=remount-ro 0 1
+ /dev/mapper/k3snode--vg-root / ext4 errors=remount-ro 0 1
...
- #/dev/mapper/k3snode--vg-swap_1 none swap sw 0 0
+ #/dev/mapper/k3snode--vg-swap_1 none swap sw 0 0
...
~~~
- > **BEWARE!**
- > Careful of **not** reducing the double dash ('`--`') to only one, just replace the `debiantpl` part with the new `k3snode` string.
+ > [!WARNING]
+ > **Careful of NOT reducing the double dash ('`--`') to just one (`-`)**\
+ > Only replace the `debiantpl` part with the new `k3snode` string.
-3. Next, you must find and change all the `debiantpl` strings present in the `/boot/grub/grub.cfg` file. Although first don't forget to make a backup.
+3. Next, you must find and change all the `debiantpl` strings present in the `/boot/grub/grub.cfg` file. But first, don't forget to make a backup:
- ~~~bash
+ ~~~sh
$ sudo cp /boot/grub/grub.cfg /boot/grub/grub.cfg.orig
~~~
-4. Edit the `/boot/grub/grub.cfg` file (mind you, is **read-only** even for the `root` user) to change all the `debiantpl` name with the new `k3snode` one in lines that contain the string `root=/dev/mapper/debiantpl--vg-root`. To reduce the chance of errors when editing this critical system file, you can do the following.
+4. Edit the `/boot/grub/grub.cfg` file (mind you, it is **read-only** even for the `root` user) to replace the `debiantpl` name with the new `k3snode` one in all the lines that contain the string `root=/dev/mapper/debiantpl--vg-root`. To reduce the chance of errors when editing this critical system file, you can do the following:
- - Check first that `debiantpl--vg-root` **only** brings up the lines with `root=/dev/mapper/debiantpl--vg-root`.
+ - Check first that `debiantpl--vg-root` ONLY brings up the lines with `root=/dev/mapper/debiantpl--vg-root`:
- ~~~bash
+ ~~~sh
$ sudo cat /boot/grub/grub.cfg | grep debiantpl--vg-root
~~~
      Run it in another shell or dump the output in a temporary file, but keep the lines for later reference.
- - Apply the required modifications with `sed`.
+ - Apply the required modifications with `sed`:
- ~~~bash
+ ~~~sh
$ sudo sed -i 's/debiantpl--vg-root/k3snode--vg-root/g' /boot/grub/grub.cfg
~~~
If `sed` executes alright, it won't return any output.
- - Verify if all the lines that had the `debiantpl--vg-root` string have it now replaced with the `k3snode--vg-root` one.
+ - Verify if all the lines that had the `debiantpl--vg-root` string have it now replaced with the `k3snode--vg-root` one:
- ~~~bash
+ ~~~sh
$ sudo cat /boot/grub/grub.cfg | grep k3snode--vg-root
~~~
Compare this command's output with the one you got first. You should see the **same** lines as before, but with the string changed.
-5. Now you must update the initramfs with the `update-initramfs` command.
+5. Update the initramfs with the `update-initramfs` command:
- ~~~bash
+ ~~~sh
$ sudo update-initramfs -u -k all
~~~
-6. Reboot the system to load the changes.
+6. Reboot the system to load the changes:
- ~~~bash
+ ~~~sh
$ sudo reboot
~~~
-7. Next, you have to execute the `dpkg-reconfigure` command to regenerate the grub in your VM. To get the correct image to reconfigure, just autocomplete the command after typing `linux-image` and then type the one that corresponds with the kernel **currently running** in your VM.
+7. Execute the `dpkg-reconfigure` command to regenerate the GRUB configuration in your VM. To get the correct image to reconfigure, autocomplete the command after typing `linux-image` and choose the one that corresponds to the kernel **currently running** in your VM:
- ~~~bash
- $ sudo dpkg-reconfigure linux-image-5.10.0-9-amd64
+ ~~~sh
+ $ sudo dpkg-reconfigure linux-image-6.12.41+deb13-amd64
~~~
-8. Again, reboot the system to load the changes.
+ > [!NOTE]
+   > **The current kernel is shown at shell login**\
+   > Right after you log in to the VM, the very first line that Debian prints already tells you the current kernel version, among other details. For instance:
+ >
+ > ~~~sh
+ > Linux k3snodetpl 6.12.41+deb13-amd64 #1 SMP PREEMPT_DYNAMIC Debian 6.12.41-1 (2025-08-12) x86_64
+ > ~~~
+ >
+   > The kernel version string you must pay attention to is the one right after the hostname string (`k3snodetpl`).
+
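+   Alternatively, a quick way to get the exact string you need to complete the `linux-image` package name is `uname -r`, which prints only the running kernel's release:
+
+   ~~~sh
+   $ uname -r
+   6.12.41+deb13-amd64
+   ~~~
+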
+8. Reboot the system again to apply the changes:
- ~~~bash
+ ~~~sh
$ sudo reboot
~~~
## Setting up the second network card
-The VM has a second network card that is yet to be configured and enabled, and which is already set to communicate through the isolated `vmbr1` bridge of your Proxmox VE's virtual network. In order to set up this NIC properly, you have to do the following.
+The VM has a second network card that is yet to be configured and enabled, and which is already set to communicate through the isolated `vmbr1` bridge of your Proxmox VE's virtual network. To set up this NIC properly, do the following:
-1. First, you need the name of the second network interface to use it later. Execute the following `ip` command.
+1. Obtain the name of the second network interface with the following `ip` command:
- ~~~bash
+ ~~~sh
$ ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
- valid_lft forever preferred_lft forever
+ valid_lft forever preferred_lft forever
2: ens18: mtu 1500 qdisc fq state UP group default qlen 1000
- link/ether 96:2c:74:8a:e2:04 brd ff:ff:ff:ff:ff:ff
+ link/ether bc:24:11:2c:d0:e9 brd ff:ff:ff:ff:ff:ff
altname enp0s18
- inet 192.168.1.11/24 brd 192.168.1.255 scope global dynamic ens18
- valid_lft 85566sec preferred_lft 85566sec
+ altname enxbc24112cd0e9
+ inet 10.4.0.2/8 brd 10.255.255.255 scope global dynamic noprefixroute ens18
+ valid_lft 86162sec preferred_lft 75362sec
3: ens19: mtu 1500 qdisc noop state DOWN group default qlen 1000
- link/ether 3e:f1:88:9c:d1:86 brd ff:ff:ff:ff:ff:ff
+ link/ether bc:24:11:eb:73:5c brd ff:ff:ff:ff:ff:ff
altname enp0s19
+ altname enxbc2411eb735c
~~~
- In the output above, the second network device is the one named `ens19`, the one with `state DOWN` and no IP assigned (no `inet` line).
+   In the output above, the second network device is the one named `ens19`, which is in `state DOWN` and has no IP assigned (no `inet` line).
-2. Next, you'll have to configure the `ens19` NIC in the `/etc/network/interfaces` file. As usual, first make a backup of the file.
+2. Configure the `ens19` NIC in the `/etc/network/interfaces` file. As usual, first make a backup of the file:
- ~~~bash
+ ~~~sh
$ sudo cp /etc/network/interfaces /etc/network/interfaces.orig
~~~
Then, **append** the following configuration to the `interfaces` file.
- ~~~bash
+ ~~~sh
# The secondary network interface
allow-hotplug ens19
iface ens19 inet static
- address 10.0.0.220
- netmask 255.255.255.0
+ address 172.31.254.1
+ netmask 255.240.0.0
~~~
- Notice that I've set an IP address within the range I decided to use for the secondary NICs of the K3s nodes, but with a high fourth number (`220`) to have plenty of room between this address and the ones given to the VMs you'll create later.
+ Notice that I have set an IP address within the valid private network range I decided to use (`172.16.0.0` to `172.31.255.255`, or `172.16.0.0/12` with netmask `255.240.0.0`) for the secondary NICs of the K3s nodes.
- > **BEWARE!**
- > Remember to put the correct name of the network interface as it appears in your VM when you copy the configuration above!
+ > [!IMPORTANT]
+ > **Do not just blindly copy the configuration above!**\
+   > Ensure you put the correct name of the network interface as it appears in your own VM.
-3. You can enable the interface with the following `ifup` command.
+3. You can enable the interface with the following `ifup` command:
- ~~~bash
+ ~~~sh
$ sudo ifup ens19
~~~
- The command won't return any output.
+ The command will not return any output.
-4. Use the `ip` command to check out the new network setup.
+4. Use the `ip` command to check out your new network setup:
- ~~~bash
+ ~~~sh
$ ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
- valid_lft forever preferred_lft forever
+ valid_lft forever preferred_lft forever
2: ens18: mtu 1500 qdisc fq state UP group default qlen 1000
- link/ether 96:2c:74:8a:e2:04 brd ff:ff:ff:ff:ff:ff
+ link/ether bc:24:11:2c:d0:e9 brd ff:ff:ff:ff:ff:ff
altname enp0s18
- inet 192.168.1.11/24 brd 192.168.1.255 scope global dynamic ens18
- valid_lft 85409sec preferred_lft 85409sec
+ altname enxbc24112cd0e9
+ inet 10.4.0.2/8 brd 10.255.255.255 scope global dynamic noprefixroute ens18
+ valid_lft 84905sec preferred_lft 74105sec
3: ens19: mtu 1500 qdisc fq state UP group default qlen 1000
- link/ether 3e:f1:88:9c:d1:86 brd ff:ff:ff:ff:ff:ff
+ link/ether bc:24:11:eb:73:5c brd ff:ff:ff:ff:ff:ff
altname enp0s19
- inet 10.0.0.220/24 brd 10.0.0.255 scope global ens19
- valid_lft forever preferred_lft forever
+ altname enxbc2411eb735c
+ inet 172.31.254.1/12 brd 172.31.255.255 scope global ens19
+ valid_lft forever preferred_lft forever
~~~
- Your `ens19` interface is now active with a static IP address. You can also see that, thanks to the QEMU agent, the second IP appears immediately after applying the change in the `Status` block of the VM's `Summary` view, in the Proxmox VE web console.
+ Your `ens19` interface is now active with a static IP address. You can also see that, thanks to the QEMU agent, the second IP appears immediately after applying the change in the `Status` block of the VM's `Summary` view, in the Proxmox VE web console:
- 
+ 
-Thanks to this configuration, now you have an network interface enabled and connected to an isolated bridge. This will help to improve somewhat the hardening of the internal network of the K3s cluster you'll create in upcoming guides.
+Thanks to this configuration, you now have a network interface enabled and connected to an isolated bridge. This will help somewhat in hardening the internal network of the K3s cluster you will build in the upcoming chapters.
## Setting up sysctl kernel parameters for K3s nodes
-In the installation of the K3s cluster, which you'll do later in the [**G025** guide](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md), I'll tell you to use the `protect-kernel-defaults` option. With it enabled, you must set certain sysctl parameters to concrete values or the kubelet process executed by the K3s service won't run.
+In the installation of the K3s cluster, which you'll do later in the next [chapter **G025**](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md), I'll tell you to use the `protect-kernel-defaults` option. With it enabled, [you must set certain sysctl parameters to concrete values](https://docs.k3s.io/security/hardening-guide?_highlight=protect&_highlight=kernel&_highlight=defaults#ensure-protect-kernel-defaults-is-set) or the kubelet process executed by the K3s service won't run.
1. Create a new empty file in the path `/etc/sysctl.d/90_k3s_kubelet_demands.conf`.
- ~~~bash
+ ~~~sh
$ sudo touch /etc/sysctl.d/90_k3s_kubelet_demands.conf
~~~
@@ -432,45 +498,36 @@ In the installation of the K3s cluster, which you'll do later in the [**G025** g
kernel.panic_on_oops = 1
~~~
- > **BEWARE!**
- > If you skipped the [_memory optimizations_ step at the G021 guide](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#memory-optimizations), you need to adjust the `vm.overcommit_memory` flag here! Otherwise, the K3s service you'll set up later in the [chapter G025](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#k3s-installation-of-the-server-node-k3sserver01) won't be able to start.
+ > [!WARNING]
+   > **If you skipped the [_memory optimizations_ step of the chapter G021](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#memory-optimizations), you need to adjust the `vm.overcommit_memory` flag here!**\
+ > Otherwise, the K3s service you will set up later in the next [chapter **G025**](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#k3s-installation-of-the-server-node-k3sserver01) will not be able to start.
3. Save the `90_k3s_kubelet_demands.conf` file and apply the changes, then reboot the VM.
- ~~~bash
+ ~~~sh
$ sudo sysctl -p /etc/sysctl.d/90_k3s_kubelet_demands.conf
$ sudo reboot
~~~
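+
+After the reboot, you can check that the values demanded by the `protect-kernel-defaults` option are in place by querying the parameters directly (the names below come from the K3s hardening guide referenced above):
+
+~~~sh
+$ sysctl vm.panic_on_oom vm.overcommit_memory kernel.panic kernel.panic_on_oops
+~~~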
## Turning the VM into a VM template
-Now that the VM has been adjusted, you can turn it into a VM template. You have already done this in the previous [**G023** guide](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#turning-the-debian-vm-into-a-vm-template), so I'll just remind you that the `Convert to template` action is available as an option in the `More` list of any VM.
+With the VM tuned properly, you can turn it into a VM template. Since this procedure is already covered by the previous [chapter **G023**](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#turning-the-debian-vm-into-a-vm-template), here I'll just remind you that the `Convert to template` action is available as an option in the `More` list of any VM:
-> **BEWARE!**
+> [!IMPORTANT]
+> **You cannot turn a VM currently in use into a template**\
> Before executing the conversion, **first shut down the VM you're converting**.
-
-
-Moreover, update the `Notes` text of this VM with any new or extra detail you might think relevant, and don't forget to make a full backup of the template. This is something you also did in the [**G023** guide](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#turning-the-debian-vm-into-a-vm-template), so here I'll just remind you where the option is.
+
-
+Moreover, update the `Notes` text of this VM with any new or extra detail you might think relevant, and don't forget to make a full backup of the template. This is something you also did in the [chapter **G023**](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md#turning-the-debian-vm-into-a-vm-template), so here I'll just remind you where the option is:
-Remember that restoring backups can free some space (due to the restoration process detecting and ignoring the empty blocks within the image), so consider restoring the VM template immediately after doing the backup to recover some storage space. This is an action you also saw how to do back in the [**G023** guide](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md).
+
-## Protecting VMs and VM templates in Proxmox VE
-
-There's an interesting option available for VMs in Proxmox VE. Go to the `Options` page of any of your VMs or VM templates, and look for the `Protection` option.
-
-
-
-With this flag enabled, Proxmox VE disables the `remove` and `remove disk` operations on the VM. Very advisable to be enabled, in particular for your VM templates.
-
-> **BEWARE!**
-> When this option is enabled, a VM or VM template cannot be restored from a backup because Proxmox VE protects the VM image from being replaced.
+Remember that restoring backups can free some space (due to the restoration process detecting and ignoring the empty blocks within the image), so consider restoring the VM template immediately after doing the backup to recover some storage space. This is an action you also saw how to do back in the [chapter **G023**](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md).
## Relevant system paths
-### _Folders on the VM_
+### Folders on the VM
- `/boot/grub`
- `/etc`
@@ -479,7 +536,7 @@ With this flag enabled, Proxmox VE disables the `remove` and `remove disk` opera
- `/etc/sysctl.d`
- `/proc`
-### _Files on the VM_
+### Files on the VM
- `/boot/grub/grub.cfg`
- `/boot/grub/grub.cfg.orig`
@@ -497,40 +554,45 @@ With this flag enabled, Proxmox VE disables the `remove` and `remove disk` opera
## References
-### _Debian and Linux SysOps_
+### [Kubernetes](https://kubernetes.io/)
+
+- [Swap memory management](https://kubernetes.io/docs/concepts/cluster-administration/swap-memory-management/)
+
+- [Demystifying Swap in Kubernetes: A Handbook for DevOps Engineers](https://medium.com/@robertbotez/demystifying-swap-in-kubernetes-a-handbook-for-devops-engineers-e5ef934593e3)
-#### **Changing the `Hostname`**
+### [K3s](https://docs.k3s.io/)
+
+- [CIS Hardening Guide](https://docs.k3s.io/security/hardening-guide)
+ - [Host-level Requirements](https://docs.k3s.io/security/hardening-guide#host-level-requirements)
+ - [Ensure `protect-kernel-defaults` is set](https://docs.k3s.io/security/hardening-guide#ensure-protect-kernel-defaults-is-set)
+
+### Debian and Linux SysOps
+
+#### Changing the `Hostname`
- [How to Change Hostname in Debian](https://linuxhandbook.com/debian-change-hostname/)
-#### **Disabling the swap**
+#### Disabling the swap
- [Swap Off - why is it necessary?](https://discuss.kubernetes.io/t/swap-off-why-is-it-necessary/6879/4)
- [How to safely turn off swap permanently and reclaim the space? (on Debian Jessie)](https://unix.stackexchange.com/questions/224156/how-to-safely-turn-off-swap-permanently-and-reclaim-the-space-on-debian-jessie)
- [Permanently Disable Swap for Kubernetes Cluster](https://brandonwillmott.com/2020/10/15/permanently-disable-swap-for-kubernetes-cluster/)
-- [KEP-2400: Node system swap support (enhancement on Kubernetes)](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2400-node-swap)
-- [lvextend command examples in Linux](https://www.thegeekdiary.com/lvextend-command-examples-in-linux/)
-#### **Changing the VG's name of a `root` LV**
+#### Changing the VG's name of a `root` LV
- [Rename LVM Volume Group Holding Root File System Volume](https://oraganism.wordpress.com/2013/03/09/rename-lvm-vg-for-root-fs-lv/)
- [How to fix “volume group old-vg-name not found” at boot after renaming it?](https://unix.stackexchange.com/questions/579720/how-to-fix-volume-group-old-vg-name-not-found-at-boot-after-renaming-it)
-- [Rename a Volume Group on Debian](https://blog.raveland.org/post/rename_vg/)
+- [Rename a Volume Group (LVM / Debian)](https://blog.raveland.org/post/rename_vg/)
- [Unable to change Volume Group name](https://www.linuxquestions.org/questions/linux-newbie-8/unable-to-change-volume-group-name-4175676775/)
-#### **Network interfaces configuration**
+#### Network interfaces configuration
-- [Debian wiki. Network Configuration](https://wiki.debian.org/NetworkConfiguration)
-- [How To Configure VLAN Interface on Debian 10 (Buster)](https://techviewleo.com/how-to-configure-vlan-interface-on-debian/)
+- [Debian. Wiki. Network Configuration](https://wiki.debian.org/NetworkConfiguration)
- [How to setup a Static IP address on Debian Linux](https://linuxconfig.org/how-to-setup-a-static-ip-address-on-debian-linux)
- [Howto: Ubuntu Linux convert DHCP network configuration to static IP configuration](https://www.cyberciti.biz/tips/howto-ubuntu-linux-convert-dhcp-network-configuration-to-static-ip-configuration.html)
- [Debian Linux Configure Network Interface Cards – IP address and Netmasks](https://www.cyberciti.biz/faq/howto-configuring-network-interface-cards-on-debian/)
- [Net.bridge.bridge-nf-call and sysctl.conf](https://wiki.libvirt.org/page/Net.bridge.bridge-nf-call_and_sysctl.conf)
-### _K3s configuration_
-
-- [K3s CIS Hardening Guide. Ensure `protect-kernel-defaults` is set](https://rancher.com/docs/k3s/latest/en/security/hardening_guide/#ensure-protect-kernel-defaults-is-set)
-
## Navigation
[<< Previous (**G023. K3s cluster setup 06**)](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G025. K3s cluster setup 08**) >>](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md)
diff --git a/G025 - K3s cluster setup 08 ~ K3s Kubernetes cluster setup.md b/G025 - K3s cluster setup 08 ~ K3s Kubernetes cluster setup.md
index 9d9cae2..8f68f30 100644
--- a/G025 - K3s cluster setup 08 ~ K3s Kubernetes cluster setup.md
+++ b/G025 - K3s cluster setup 08 ~ K3s Kubernetes cluster setup.md
@@ -1,100 +1,218 @@
# G025 - K3s cluster setup 08 ~ K3s Kubernetes cluster setup
-Now that you have a more specialized VM template suited for creating K3s nodes, it's time to build your K3s cluster. First you'll create and set up the VMs you need, then you'll install the K3s software although with a concrete configuration on each of them.
+- [Build your virtualized K3s cluster](#build-your-virtualized-k3s-cluster)
+- [Criteria for the VMs' IPs and hostnames](#criteria-for-the-vms-ips-and-hostnames)
+ - [Criteria for IPs](#criteria-for-ips)
+ - [IP ranges for VM templates](#ip-ranges-for-vm-templates)
+ - [IP ranges for K3s server nodes](#ip-ranges-for-k3s-server-nodes)
+ - [IP ranges for K3s agent nodes](#ip-ranges-for-k3s-agent-nodes)
+ - [Naming convention for hostnames](#naming-convention-for-hostnames)
+- [Creation of VMs based on the K3s node VM template](#creation-of-vms-based-on-the-k3s-node-vm-template)
+ - [VM as K3s server node](#vm-as-k3s-server-node)
+ - [VM as K3s agent node](#vm-as-k3s-agent-node)
+ - [Assign a static IP to each VM](#assign-a-static-ip-to-each-vm)
+- [Preparing the VMs for K3s](#preparing-the-vms-for-k3s)
+ - [Customizing the hostname](#customizing-the-hostname)
+ - [Changing the second network card's IP address](#changing-the-second-network-cards-ip-address)
+ - [Changing the TOTP code](#changing-the-totp-code)
+ - [Changing the SSH key-pair](#changing-the-ssh-key-pair)
+ - [Changing the administrative user's password](#changing-the-administrative-users-password)
+ - [Creating the pending K3s agent node VM](#creating-the-pending-k3s-agent-node-vm)
+ - [Changing hostname and assigning IPs](#changing-hostname-and-assigning-ips)
+ - [Exporting the TOTP codes, the SSH key-pairs and reusing passwords](#exporting-the-totp-codes-the-ssh-key-pairs-and-reusing-passwords)
+- [Firewall setup for the K3s cluster](#firewall-setup-for-the-k3s-cluster)
+ - [Port mapping](#port-mapping)
+ - [Firewall configuration for the K3s node VMs](#firewall-configuration-for-the-k3s-node-vms)
+ - [Allowing access to the host's NUT port for all K3s nodes VMs](#allowing-access-to-the-hosts-nut-port-for-all-k3s-nodes-vms)
+ - [K3s node VMs' firewall setup](#k3s-node-vms-firewall-setup)
+ - [Port `6443` left closed on the server node's `net0` NIC](#port-6443-left-closed-on-the-server-nodes-net0-nic)
+- [Considerations before installing the K3s software](#considerations-before-installing-the-k3s-software)
+ - [The K3s installer's configuration file](#the-k3s-installers-configuration-file)
+- [K3s Server node setup](#k3s-server-node-setup)
+ - [Folder structure for K3s configuration files](#folder-structure-for-k3s-configuration-files)
+ - [Enabling graceful shutdown on the server node](#enabling-graceful-shutdown-on-the-server-node)
+ - [The `kubelet.conf` file](#the-kubeletconf-file)
+ - [Cleanup pods script](#cleanup-pods-script)
+ - [The `k3sserver01` node's `config.yaml` file](#the-k3sserver01-nodes-configyaml-file)
+ - [Installation of your K3s server node](#installation-of-your-k3s-server-node)
+ - [K3s installation command](#k3s-installation-command)
+ - [K3s installation of the server node `k3sserver01`](#k3s-installation-of-the-server-node-k3sserver01)
+ - [Enabling the `k3s-cleanup` service](#enabling-the-k3s-cleanup-service)
+- [K3s Agent nodes setup](#k3s-agent-nodes-setup)
+- [Understanding your cluster through `kubectl`](#understanding-your-cluster-through-kubectl)
+ - [The `kubectl` command has to be executed with `sudo` in server nodes](#the-kubectl-command-has-to-be-executed-with-sudo-in-server-nodes)
+ - [The `kubectl` command does not work on pure agent nodes](#the-kubectl-command-does-not-work-on-pure-agent-nodes)
+- [Enabling bash autocompletion for `kubectl`](#enabling-bash-autocompletion-for-kubectl)
+- [Regular K3s logs are journaled](#regular-k3s-logs-are-journaled)
+- [Rotating the `containerd.log` file](#rotating-the-containerdlog-file)
+- [K3s relevant paths](#k3s-relevant-paths)
+ - [K3s paths at SERVER nodes](#k3s-paths-at-server-nodes)
+ - [K3s server paths under the `/etc/rancher` folder](#k3s-server-paths-under-the-etcrancher-folder)
+ - [K3s server paths under the `/etc/systemd/system` folder](#k3s-server-paths-under-the-etcsystemdsystem-folder)
+ - [K3s server paths under the `/var/lib/rancher/k3s` folder](#k3s-server-paths-under-the-varlibrancherk3s-folder)
+ - [K3s paths at AGENT nodes](#k3s-paths-at-agent-nodes)
+ - [K3s agent paths under the `/etc/rancher` folder](#k3s-agent-paths-under-the-etcrancher-folder)
+ - [K3s agent paths under the `/etc/systemd/system` folder](#k3s-agent-paths-under-the-etcsystemdsystem-folder)
+ - [K3s agent paths under the `/var/lib/rancher/k3s` folder](#k3s-agent-paths-under-the-varlibrancherk3s-folder)
+- [Starting up and shutting down the K3s cluster nodes](#starting-up-and-shutting-down-the-k3s-cluster-nodes)
+ - [Automatic ordered start or shutdown of the K3s nodes VMs](#automatic-ordered-start-or-shutdown-of-the-k3s-nodes-vms)
+ - [Understanding the shutdown/reboot process of your Proxmox VE host with the K3s cluster running](#understanding-the-shutdownreboot-process-of-your-proxmox-ve-host-with-the-k3s-cluster-running)
+ - [Warning about the Kubernetes graceful shutdown feature](#warning-about-the-kubernetes-graceful-shutdown-feature)
+- [Relevant system paths](#relevant-system-paths)
+ - [Folders on the Proxmox VE host](#folders-on-the-proxmox-ve-host)
+ - [Files on the Proxmox VE host](#files-on-the-proxmox-ve-host)
+ - [Folders on the VMs/K3s nodes](#folders-on-the-vmsk3s-nodes)
+ - [Files on the VMs/K3s nodes](#files-on-the-vmsk3s-nodes)
+- [References](#references)
+ - [Proxmox VE](#proxmox-ve)
+ - [Debian and Linux SysOps](#debian-and-linux-sysops)
+ - [Changing the `Hostname`](#changing-the-hostname)
+ - [Network interfaces configuration](#network-interfaces-configuration)
+ - [Logrotate configuration](#logrotate-configuration)
+ - [Downloading files with `wget`](#downloading-files-with-wget)
+ - [K3s cluster setup](#k3s-cluster-setup)
+ - [Kubernetes](#kubernetes)
+ - [K3s configuration](#k3s-configuration)
+ - [Flannel](#flannel)
+ - [Graceful node shutdown](#graceful-node-shutdown)
+ - [Embedded software in K3s](#embedded-software-in-k3s)
+ - [YAML](#yaml)
+- [Navigation](#navigation)
+
+## Build your virtualized K3s cluster
+
+Having a more specialized VM template suited for creating K3s nodes makes it easier to build your own virtualized K3s cluster. First you have to create and set up the VMs you need. Then you install the K3s software on the VM nodes, although with a slightly different configuration on each of them.
## Criteria for the VMs' IPs and hostnames
-In the [**G021** guide](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#suggestion-about-ip-configuration-in-your-network), I made you a suggestion about how to organize the IPs within your network. Here, I'll deepen in the matter by showing you a simple IP arrangement for the K3s cluster nodes you'll create in this guide. Also, I'll show you a naming convention for the VMs' hostnames, since it's required that each node in a Kubernetes cluster has an unique hostname for identification purposes.
+In the [chapter **G021**](G021%20-%20K3s%20cluster%20setup%2004%20~%20Debian%20VM%20configuration.md#suggestion-about-the-ip-organization-within-your-lan) I made you a suggestion about how to organize the IPs within your network. Here, I'll go deeper into the matter by showing you a simple IP arrangement for the K3s cluster nodes you will create in this chapter. Also, I'll show you a naming convention for the VMs' hostnames, since it's required that each node in a Kubernetes cluster has a unique hostname for identification purposes.
-Another thing you must know is the two node types that exist in a K3s cluster:
+Another thing you must know is the two types of nodes that exist in a K3s cluster:
-- The **server** node, which corresponds with the **master** in a standard Kubernetes cluster.
-- The **agent** node, which is called **worker** in Kubernetes.
+- The **server** node, specialized in running the control plane of the Kubernetes cluster.
+- The **agent** node, where K3s runs the workloads in the cluster.
-A master can also act as an agent at the same time, but I won't show you that case in this guide series.
+> [!NOTE]
+> **Kubernetes no longer distinguishes between nodes**\
+> In older versions of Kubernetes, control plane nodes were called _masters_ and the rest _workers_. In modern Kubernetes clusters this distinction is no longer relevant, since the control plane's workloads can run on any node.
+>
+> Still, Kubernetes maintains some sort of default behavior that makes all [control plane components start on the same node](https://kubernetes.io/docs/concepts/architecture/#control-plane-components), and also avoids running any user container on that node.
-> **BEWARE!**
-> In this guide you'll see how to create a single-server/master K3s cluster that uses an embedded sqlite database as data storage. If you want a multiserver/multimaster cluster setup, you'll need to combine the instructions given here with the indications summarized in the [**G908** appendix guide](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md).
-> Also be aware that a sqlite-based cluster can only have one server node and **cannot** be converted to a multiserver one.
+A server can also act as an agent at the same time, but this chapter only explains the scenario using one "pure" server node.
-### _Criteria for IPs_
+> [!NOTE]
+> **This chapter shows you how to create a single-server K3s cluster that uses an embedded sqlite database as data storage**\
+> If you want a multiserver/multimaster cluster setup, you need to combine the instructions given here with the indications summarized in the [appendix chapter **G908**](G908%20-%20Appendix%2008%20~%20K3s%20cluster%20with%20two%20or%20more%20server%20nodes.md).
+> Also be aware that a sqlite-based cluster can only have one server node and cannot be converted to a multiserver one.
-I'll assume the most simple scenario, which is a single local network behind one router. This means that everything falls within a network range such as `192.168.1.0/24`, and no other subnets are present. Don't forget that the VMs will have two network cards: the primary NIC which will be the one exposed to the internet, and the secondary NIC which will be configured to communicate only through the isolated `vmbr1` bridge. Finally, also know that the cluster won't have a high number of nodes; in this guide I'm aiming to have only three nodes. With all this in mind, the IPs arrangement could be something like the following.
+### Criteria for IPs
-#### **IP range for VM templates**
+I'll assume the simplest scenario, which is a single local network behind one router. This means that everything falls within a [private network IPv4 range](https://en.wikipedia.org/wiki/Reserved_IP_addresses#IPv4) such as `10.0.0.0/8`, and no other subnets are present.
-- Main network card: between `192.168.1.11` and `192.168.1.20`.
-- Secondary network card: any address beyond `10.0.0.20`.
+> [!NOTE]
+> **I picked a big private network IP range to minimize conflicts**\
+> Nowadays, it is a common hardening feature to make devices use randomized MAC addresses to connect to networks. This makes those devices get a new random IP from the router every time they connect to a network. Depending on how well the router handles IP assignments, this can lead to conflicts with devices that have statically assigned IPs. These conflicts emerge when a device with a static IP happens to be temporarily unavailable on the network and another device then gets the same IP through dynamic assignment, because the router has not been programmed to take this situation into account. When the device with the static IP, usually a server, comes back and tries to claim its IP, it finds that the address has already been assigned to another device and is forced to get a different one.
+>
+> To help minimize this problem, I considered it better to use the widest valid network range available for private LANs in my own home network: `10.0.0.0/8`. This range provides up to 16,777,216 addresses, a big enough number to avoid IP conflicts between devices.
-#### **IP range for K3s SERVER nodes**
+Do not forget that the VMs will have two network cards:
-- Main network card: between `192.168.1.21` and `192.168.1.30`.
-- Secondary network card: between `10.0.0.1` and `10.0.0.10`.
+- **The primary NIC which is exposed to the internet**\
+ Its private static IP will be in the `10.0.0.0/8` range.
-#### **IP range for K3s AGENT nodes**
+- **The secondary NIC that only connects with other VMs through the isolated `vmbr1` bridge**\
+ Its private static IP will be in the `172.16.0.0/12` range.
-- Main network card: between `192.168.1.31` and `192.168.1.40`.
-- Secondary network card: between `10.0.0.11` and `10.0.0.20`.
+Finally, also know that the cluster will not have a high number of nodes. In this guide I'm aiming to have only three nodes. With all this in mind, the IP arrangement I propose for the K3s nodes is detailed in the next subsections.
-This is just a suggestion, although I'll use it to illustrate the setup of the K3s cluster.
+#### IP ranges for VM templates
-> **BEWARE!**
-> You must not forget to set up **static** IPs to **all** your other devices in your network, like your Proxmox VE host. Otherwise you'll risk having conflicts due to the dynamic IP assignment applied by your router.
+These proposed IP ranges also apply, in retrospect, to the main NIC of the first Debian VM created in the [chapter **G023**](G023%20-%20K3s%20cluster%20setup%2006%20~%20Debian%20VM%20template%20and%20backup.md):
-### _Naming convention for hostnames_
+- **Main network card**\
+ From `10.4.0.1` to `10.4.0.255`.
-The naming schema I'll use to assign hostnames to the new VMs is the following.
+- **Secondary network card**\
+ From `172.31.254.1` to `172.31.254.255`.
-- `k3snodetpl`: name for the VM that is the template of all the K3s cluster VM nodes.
-- `k3sserverXX`: name for VMs that will act as server nodes in the K3s cluster.
-- `k3sagentXX`: name for VMs that will act as agent nodes in the K3s cluster.
+#### IP ranges for K3s server nodes
+
+- **Main network card**\
+ From `10.4.1.1` to `10.4.1.255`.
+
+- **Secondary network card**\
+ From `172.16.1.1` to `172.16.1.255`.
+
+#### IP ranges for K3s agent nodes
+
+- **Main network card**\
+ From `10.4.2.1` to `10.4.2.255`.
+
+- **Secondary network card**\
+ From `172.16.2.1` to `172.16.2.255`.
+
+### Naming convention for hostnames
+
+This is the naming convention I'll use to assign a hostname to each new VM:
+
+- `k3snodetpl`\
+ Name for the VM that is the template of all the K3s cluster VM nodes.
+
+- `k3sserverXX`\
+ Name for VMs that will act as server nodes in the K3s cluster.
+
+- `k3sagentXX`\
+ Name for VMs that will act as agent nodes in the K3s cluster.
My intention is to have a K3s cluster with one server and two agents, therefore the hostnames for each VM will be:
- The VM acting as server will be called `k3sserver01`.
-- The other two VMs that will run as agents will be named `k3sagent01` and `k3sagent02`.
+- The other two VMs running as agents will be named `k3sagent01` and `k3sagent02`.
-As you can see above, the naming convention doesn't need to be complicated, but make sure that it makes sense and reflects the role given to each VM in the K3s cluster.
+As you can see above, the naming convention does not need to be complicated. Just make sure that it makes sense and reflects the role given to each VM in the K3s cluster.
## Creation of VMs based on the K3s node VM template
-For starters, you'll create two VMs: one will be a K3s server and the other an agent. After creating them, you'll have to configure a number of things in both.
+For starters, you have to create two VMs: one will be a K3s server and the other an agent. After creating them, you'll have to configure a number of things in both.
-To create these two, go to your latest VM template (the `k3snodetpl` one), unfold the `More` options and click on `Clone`. In the `Clone` window that appears:
+To create these two VMs, go to your latest VM template (the `k3snodetpl` one), unfold the `More` options and click on `Clone`. In the `Clone` window that appears:
- Make the `VM ID` somehow correspond to the IPs the VM will eventually have.
- Specify a significant `Name`, like `k3sserver01`.
- Leave the mode as `Linked Clone`, so the new VMs take up less space by reusing the data already present in the VM template.
-
+
-In the snapshot you see that I've given the ID `2101` to the VM, since this VM is going to have a primary IP ended in `21`, and a secondary IP ended in `01`. Also see that, since this VM is going to be the K3s server in the cluster, I've named this VM `k3sserver01`.
+In the snapshot you see that I've given the ID `411` to the VM, since the primary IP of this VM is going to be `10.4.1.1`. Also see that, since this VM is going to be the K3s server in the cluster, I've named this VM `k3sserver01`.
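+
+If you prefer working from the Proxmox VE host's shell instead of the web console, the clone could be done with a command roughly like the sketch below. Here `400` is only a placeholder for whatever VM ID your `k3snodetpl` template actually has; cloning from a template produces a linked clone by default:
+
+~~~sh
+$ sudo qm clone 400 411 --name k3sserver01
+~~~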
-The second VM is going to be an agent, so its name should be `k3sagent01`. The IPs I plan to give to this VM end in `31` and `11` respectively, so a good `ID` would be `3111`.
+The second VM is going to be an agent, so its name should be `k3sagent01`. The main IP for this VM will be `10.4.2.1`, so a good `ID` would be `421`.
-> **BEWARE!**
-> The VM IDs are important for configuring their automatic starting and shutting down by Proxmox VE. Later in this guide I'll tell you about it, but you can check how the VM IDs are important in such process [in the Proxmox VE official documentation](https://pve.proxmox.com/pve-docs/chapter-qm.html#qm_startup_and_shutdown).
+> [!NOTE]
+> **The VM IDs are important for configuring their automatic starting and shutting down by Proxmox VE**\
+> This chapter explains more about it later, but you can check how the VM IDs are important in such process [in the Proxmox VE official documentation](https://pve.proxmox.com/pve-docs/chapter-qm.html#qm_startup_and_shutdown).
-Again, my hostname schema and ID assignments are just suggestions. Put the names and IDs you find suitable for your particular preferences or requirements.
+Again, my hostname schema, static IP assignments and ID assignments are just suggestions. Put the names, IPs and IDs you find suitable for your particular preferences or requirements.
-> **BEWARE!**
-> Linked clones are attached to their VM template, so Proxmox VE won't allow you to remove the template unless you delete its linked clones first.
+> [!NOTE]
+> **Remember that linked clones are attached to their VM template**\
+> Proxmox VE will not allow you to remove the template unless you delete its linked clones first.
-### _VM as K3s server node_
+### VM as K3s server node
-K3s server nodes can run workloads (apps and services), but it's more appropriate to use them just for managing the cluster. Acting just as a server is a heavy duty job, even in a small setup such as the one you're building with this guide series. So, you can start assigning your `k3sserver01` node low hardware specs and increase them later depending on how well the node runs. Still, be aware that if the server node has a bad performance, the whole cluster won't run properly either. In my case I left this VM with rather low capabilities to begin with.
+K3s server nodes can run workloads (apps and services), but it is more appropriate to use them just for handling your Kubernetes cluster's control plane. Acting just as a server is a heavy duty job, even in a small setup such as the one you're building with this guide. So, you can start assigning your `k3sserver01` node low hardware specs and increase them later depending on how well the node runs. Still, be aware that if the server node has a bad performance, the whole cluster won't run properly either. In my case I left this VM with rather low capabilities to begin with:
- **Memory**: 1.00/1.50 GiB of RAM.
- **Processors**: 2 vCPUs.
-Those are really low hardware capabilities, so don't go lower than those or it will be too tight for the server node to run properly. Don't forget that, to change the hardware attached to a VM, just go to the `Hardware` tab of the VM. There, choose the item you want to modify and press on `Edit`.
+Those are really low hardware capabilities. Do not go lower than those or it will be too tight for the server node to run properly. Do not forget that, to change the hardware attached to a VM, just go to the `Hardware` tab of the VM. There, either just double click the item you want to modify or select it and press on `Edit`.
-
+
-> **NOTE**
-> Remember that in this guide series I'm working on a low end computer, but if yours happens to be more powerful, don't hesitate to assign more resources to the VMs if you want or need to, although be careful of not overloading your hardware.
+> [!NOTE]
+> **Remember that this guide is based on a low end computer**\
+> If your hardware setup happens to be more powerful, do not hesitate to assign more resources to the VMs if you want or need to. Just be careful of not overloading your hardware.
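+
+As an alternative to the `Hardware` tab, the same adjustment can be sketched from the Proxmox VE host's shell. This assumes the `411` VM ID chosen earlier; `qm set` takes memory values in MiB, so `1024`/`1536` correspond to the 1.00/1.50 GiB figures above:
+
+~~~sh
+$ sudo qm set 411 --balloon 1024 --memory 1536 --cores 2
+~~~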
-### _VM as K3s agent node_
+### VM as K3s agent node
K3s agents are the ones meant to run workloads in the cluster, so how much CPU and RAM you must assign to them depends heavily on the load they'll run. To play it safe you can do as I did and start by assigning them something like this.
@@ -103,165 +221,171 @@ K3s agents are the ones meant to run workloads in the cluster, so how much CPU a
As it happens with the server node VM, depending on how well your agent nodes run, later you'll have to adjust their capabilities to fit both their needs and the limits of your real hardware.
-### _Assign a static IP to each VM_
+### Assign a static IP to each VM
-After creating the VMs, go to your router or gateway and assign each a static IP. In this guide, I'll follow the criteria I explained at the beginning of this guide, so the nodes here will have the following IPs for their **primary network cards**.
+After creating the VMs, go to your router or gateway and assign each a static IP. Here I'll follow the [criteria explained earlier](#criteria-for-ips), so the nodes will have the following IPs for their **primary network cards**.
-- K3s **server** node 1: `192.168.1.21`
-- K3s **agent** node 1: `192.168.1.31`
+- K3s **server** node 1: `10.4.1.1`
+- K3s **agent** node 1: `10.4.2.1`
## Preparing the VMs for K3s
Now that you have the new VMs created, you might think that you can start creating the K3s cluster, right? Wrong. In the following subsections, you'll see the pending procedures that you must apply in your VMs to make them fully adequate to become K3s cluster nodes.
-### _Customizing the hostname_
+### Customizing the hostname
-As it happened when you set up the k3s node VM template, these two new VMs you've created both have the same hostname inherited from the VM template. You need to change it, as you did before, but customizing it to the role each node will play in the K3s cluster. So, if you apply the naming scheme I showed you back at the beginning of this guide, the hostname value for each VM will be as follows.
+As it happened when you set up the k3s node VM template, these two new VMs you have created both have the same hostname inherited from the VM template. You need to change it, as you did before, but customizing it to the role each node will play in the K3s cluster. So, if you apply the [naming scheme shown in a previous section](#naming-convention-for-hostnames), the hostname value for each VM will be as follows.
- The K3s server VM will be called `k3sserver01`.
- The other VM will run as an agent, so it'll be named `k3sagent01`.
With the naming scheme decided, you can change the hostname **to each VM** in the same way you did when you configured the K3s node VM template.
-> **BEWARE!**
-> At this point, your two new VMs will have the same credentials (meaning the same certificates, password and TFA TOTP code for the `mgrsys` user) as the ones used in the VM template they are clones of. Remember this when you try to connect remotely to those VMs through SSH.
+> [!WARNING]
+> **At this point, your two new VMs will have the same credentials**\
+> This means having the same certificates, password and TFA TOTP code for the `mgrsys` user as the ones set in the K3s node VM template those VMs are clones of.
+>
+> Remember this when you try to connect remotely to those VMs through SSH.
-1. Using the `hostnamectl` command, change the VM's hostname value (`k3snodetpl` now). For instance, for the K3s server VM it would be like below.
+1. Using the `hostnamectl` command, change the VM's hostname value (`k3snodetpl` now). For instance, for the K3s server VM it would be like below:
- ~~~bash
+ ~~~sh
$ sudo hostnamectl set-hostname k3sserver01
~~~
-2. Edit the `/etc/hosts` file, where you must replace the old hostname (`k3snodetpl`) with the new one. The hostname should only appear in the `127.0.1.1` line.
+2. Edit the `/etc/hosts` file, where you must replace the old hostname (`k3snodetpl`) with the new one. The hostname should only appear in the `127.0.1.1` line:
~~~properties
- 127.0.1.1 k3sserver01.deimos.cloud k3sserver01
+ 127.0.1.1 k3sserver01.homelab.cloud k3sserver01
~~~
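+
+   If you prefer a one-liner over opening an editor, a substitution like the following would achieve the same result (just a sketch; it rewrites every occurrence of the old hostname in place):
+
+   ~~~sh
+   $ sudo sed -i 's/k3snodetpl/k3sserver01/g' /etc/hosts
+   ~~~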
-3. Don't forget to do the same steps on the `k3sagent01` VM. First the `hostnamectl` command.
+3. Do not forget to do the same steps on the `k3sagent01` VM. First the `hostnamectl` command.
- ~~~bash
+ ~~~sh
$ sudo hostnamectl set-hostname k3sagent01
~~~
Then, changing the proper line in the `/etc/hosts` file.
~~~properties
- 127.0.1.1 k3sagent01.deimos.cloud k3sagent01
+ 127.0.1.1 k3sagent01.homelab.cloud k3sagent01
~~~
-Remember that to see the change applied, you have to exit your current shell session and log back into the VM. You'll see the new hostname in the shell prompt.
+Remember that to see the change applied, you have to exit your current shell session and log back into the VM. You will see the new hostname in the shell prompt.
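+
+If you want to double-check before logging out, a quick verification could be querying the hostname directly (just a sketch; `hostnamectl` prints the static hostname and `hostname -f` the FQDN resolved through `/etc/hosts`):
+
+~~~sh
+$ hostnamectl
+$ hostname -f
+~~~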
-### _Changing the second network card's IP address_
+### Changing the second network card's IP address
-The second network card of the new VMs has the same IP address that was configured in the template, something you must correct or the networking with this card won't work. Following the pattern I established before, the IPs I'm going to set for this VMs are the next.
+The secondary network card of the new VMs has the same IP address that was configured in their template, something you must correct or the networking with this card won't work. [Adhering to the criteria I established before](#criteria-for-ips), the static IPs I'm going to set for these VMs are:
-- K3s **server** node 1: `10.0.0.1`
-- K3s **agent** node 1: `10.0.0.11`
+- K3s **server** node 1: `172.16.1.1`
+- K3s **agent** node 1: `172.16.2.1`
1. To change this value on each VM, you just have to edit the `/etc/network/interfaces` file and replace the template IP there with the correct one. Of course, for extra safety, first do a backup.
- ~~~bash
+ ~~~sh
$ sudo cp /etc/network/interfaces /etc/network/interfaces.bkp
~~~
-2. Edit the `interfaces` file and just change the address assigned to the interface. In my case, it's the `ens19` interface, and this is how it would look in the K3s server node.
+2. Edit the `interfaces` file and just change the address assigned to the interface. In my case, it is the `ens19` interface, and this is how it would look in the K3s server node.
- ~~~bash
+ ~~~sh
# The secondary network interface
allow-hotplug ens19
iface ens19 inet static
- address 10.0.0.1
- netmask 255.255.255.0
+ address 172.16.1.1
+ netmask 255.240.0.0
~~~
3. To apply the change you'll have to restart the interface with the `ifdown` and `ifup` commands.
- ~~~bash
+ ~~~sh
$ sudo ifdown ens19
- RTNETLINK answers: Cannot assign requested address
+ Error: ipv4: Address not found.
$ sudo ifup ens19
~~~
- Don't mind the warning `ifdown` returns, `ifup` will be able to activate the interface just fine.
+   Don't mind the error `ifdown` returns; `ifup` will be able to activate the interface just fine.
4. Finally, check with the `ip` command that the interface has the new IP address.
- ~~~bash
+ ~~~sh
$ ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
- valid_lft forever preferred_lft forever
+ valid_lft forever preferred_lft forever
2: ens18: mtu 1500 qdisc fq state UP group default qlen 1000
- link/ether f6:26:b3:ce:76:a6 brd ff:ff:ff:ff:ff:ff
+ link/ether bc:24:11:81:81:59 brd ff:ff:ff:ff:ff:ff
altname enp0s18
- inet 192.168.1.21/24 brd 192.168.1.255 scope global dynamic ens18
- valid_lft 84332sec preferred_lft 84332sec
+ altname enxbc2411818159
+ inet 10.4.1.1/8 brd 10.255.255.255 scope global dynamic noprefixroute ens18
+ valid_lft 85227sec preferred_lft 74427sec
3: ens19: mtu 1500 qdisc fq state UP group default qlen 1000
- link/ether 5a:b0:5f:41:3c:48 brd ff:ff:ff:ff:ff:ff
+ link/ether bc:24:11:54:d4:fa brd ff:ff:ff:ff:ff:ff
altname enp0s19
- inet 10.0.0.1/24 brd 10.0.0.255 scope global ens19
- valid_lft forever preferred_lft forever
+ altname enxbc241154d4fa
+ inet 172.16.1.1/12 brd 172.31.255.255 scope global ens19
+ valid_lft forever preferred_lft forever
~~~
-Remember to change the secondary network card's IP, following the same steps, on your other VM!
+Remember to change the secondary network card's IP, following the same steps, on your other `k3sagent01` VM!
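+
+With both VMs reconfigured, you can optionally confirm that they reach each other through the isolated `vmbr1` bridge. For instance, from the `k3sserver01` node you could ping the agent's secondary IP set above:
+
+~~~sh
+$ ping -c 3 172.16.2.1
+~~~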
-### _Changing the TOTP code_
+### Changing the TOTP code
The VMs share the same TOTP code that you generated for the Debian VM template. Obviously, this is not secure, so you must change it **on each VM**. On the other hand, you probably won't want to complicate your system management too much for what is just a homelab setup. So, for the scenario contemplated in this guide, a middle-ground solution would be having one TOTP code for the K3s server node, and another TOTP code for all the K3s agent nodes. Regardless of the strategy you adopt, the procedure to change the TOTP code on each VM is the same.
You just have to execute the `google-authenticator` command, and it will overwrite the current content of the `.google_authenticator` file in the `$HOME` directory of your current user. For instance, in the K3s server node you would execute something like the following.
-~~~bash
-$ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q UTF8 -i k3sserverxx.deimos.cloud -l mgrsys@k3sserverxx
+~~~sh
+$ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q UTF8 -i k3sserverxx.homelab.cloud -l mgrsys@k3sserverxx
~~~
And the command for the agent node should be as shown next.
-~~~bash
-$ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q UTF8 -i k3sagentxx.deimos.cloud -l mgrsys@k3sagentxx
+~~~sh
+$ google-authenticator -t -d -f -r 3 -R 30 -w 3 -Q UTF8 -i k3sagentxx.homelab.cloud -l mgrsys@k3sagentxx
~~~
-> **BEWARE!**
+> [!IMPORTANT]
> Export and save all the codes and even the `.google_authenticator` file in a password manager or by any other secure method.
-### _Changing the ssh key-pair_
+### Changing the SSH key-pair
-As with the TOTP code, the ssh key-pair files are the same ones you created for your first VM template, and all the VMs you've created till now have the very same pair. Following the same strategy as with the TOTP code, create a different key-pair for the server node and for the agent node. The procedure in both cases is as follows.
+As with the TOTP code, the SSH key-pair files are the same ones you created for your first Debian VM template, and all the VMs you have created till now have the very same pair. Following the same strategy as with the TOTP code, create a different key-pair for the server node and for the agent node. The procedure in both cases is as follows.
1. Being sure that you're on your `$HOME` directory, remove the current key-pair completely.
- ~~~bash
+ ~~~sh
$ cd
$ rm -rf .ssh/
~~~
2. Now you can create a new ssh key-pair with the `ssh-keygen` command.
- ~~~bash
- $ ssh-keygen -t rsa -b 4096 -C "k3sserverxx.deimos.cloud@mgrsys"
+ ~~~sh
+ $ ssh-keygen -t ed25519 -a 250 -C "k3sserverxx.homelab.cloud@mgrsys"
~~~
- > **BEWARE!**
- > The comment (`-C`) in the command above is just an example, replace it with whatever string suits the node or your requirements.
+ > [!NOTE]
+ > **The comment (`-C`) in the command above is just an example**\
+ > Replace that comment with whatever string suits your requirements.
3. Now enable the public part of the key-pair.
- ~~~bash
- $ cat .ssh/id_rsa.pub >> .ssh/authorized_keys ; chmod 600 .ssh/authorized_keys
+ ~~~sh
+ $ cat .ssh/id_ed25519.pub >> .ssh/authorized_keys ; chmod 600 .ssh/authorized_keys
~~~
-Careful now, you have to export the new private key (the `id_rsa` file) so you can remotely connect through your ssh client. Don't close your current connection so you can use it to export the private key.
+Careful now, you have to export the new private key (the `id_ed25519` file) so you can remotely connect through your SSH client. Do not close your current connection so you can use it to export the private key.
-> **BEWARE!**
-> Don't forget to export and save the new ssh key-pairs in a password manager or by any other secure method.
+> [!IMPORTANT]
+> Do not forget to export and save the new ssh key-pairs in a password manager or by any other secure method.
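+
+If you do not have a file transfer tool at hand, a low-tech way to export the private key is printing it in the still-open session and pasting it into your password manager (a sketch; the file name corresponds to the `ed25519` key generated above):
+
+~~~sh
+$ cat ~/.ssh/id_ed25519
+~~~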
-### _Changing the administrative user's password_
+### Changing the administrative user's password
The `mgrsys` user also has the same password you gave it in the creation of the first Debian VM template. To change it, I'll follow the same strategy as with the TOTP code and the SSH key-pair: one password for the K3s server and another for all the K3s agents. To change the password, just execute the `passwd` command.
-~~~bash
+~~~sh
$ passwd
Changing password for mgrsys.
Current password:
@@ -270,43 +394,46 @@ Retype new password:
passwd: password updated successfully
~~~
-> **BEWARE!**
+> [!IMPORTANT]
> Save the password somewhere safe, like in a password manager.
-### _Creating the pending K3s agent node VM_
+### Creating the pending K3s agent node VM
-You've created and configured one VM that will act as a K3s server node, and another that will be a K3s agent node. Still, you're missing another agent node, so link-clone it to the K3s node template with a `3112` VM ID and for name the string `k3sagent02`. Then, you'll have to configure it as you did with the first K3s nodes you've created, but with some differences.
+You have created and configured one VM that will act as a K3s server node, and another that will be a K3s agent node. Still, you're missing another agent node, so link-clone it from the K3s node template with a `422` VM ID (following the same ID pattern as before, since its main IP will be `10.4.2.2`) and name it `k3sagent02`. Then, configure it as you did with the other K3s nodes you have already created, but with some differences.
-#### **Changing hostname and assigning IPs**
+#### Changing hostname and assigning IPs
-As you did with both the k3s server and the first agent nodes, you'll need to assign concrete static IPs to the network devices. Also, you'll need to change the hostname.
+As you did with both your K3s server and first agent nodes, you need to assign concrete static IPs to the network devices. Also, you need to change the hostname.
-1. Assign in your router or gateway the static main IPs for the new VM. In my criteria, its address would be as follows.
+1. Assign in your router or gateway the static main IP for the new VM. Following my criteria, its address would be as follows.
- - K3s **agent** node 2: `192.168.1.32`
+ - K3s **agent** node 2: `10.4.2.2`
-2. Change its hostname, as you've already seen before in this guide.
+2. Change its hostname, as you've already seen before in this chapter.
- K3s **agent** node 2: `k3sagent02`
-3. Finally, you'll also have to change the IP address of its second network interface.
+3. Finally, you also have to change the IP address of its secondary network interface.
- - K3s **agent** node 2: `10.0.0.12`
+ - K3s **agent** node 2: `172.16.2.2`
-#### **Exporting the TOTP codes, the ssh key-pairs and reusing passwords**
+#### Exporting the TOTP codes, the SSH key-pairs and reusing passwords
-To ease a bit the burden of system maintenance, and reduce the madness of many passwords and codes, let's reuse the ones you generated for the `mgrsys` user in the first agent node. It's not the safest configuration possible, true, but should be safe enough for the homelab you're building in this guide series.
+To ease a bit the burden of system maintenance, and reduce the madness of so many passwords and TOTP codes, let's reuse the ones you generated for the `mgrsys` user in your first agent node. It is not the safest configuration possible, true, but should be safe enough for the homelab you are building in this guide.
-The idea is that you export the TOTP code and the ssh key-pair from your `k3sagent01` VM to the `k3sagent02` one. Also, you would reuse the new password you've applied in the first agent. This way, at least you'll have different authorization codes for the different types of nodes in your K3s cluster.
+The idea is that you export the TOTP code and the SSH key-pair from your `k3sagent01` VM to the `k3sagent02` one. Also, you would reuse the new password you've applied in the first agent. This way, at least you'll have different authorization codes for the different types of nodes in your K3s cluster.
The files you have to export are:
-- `/home/mgrsys/.google_authenticator`: this is the file where the TOTP code is stored.
-- `/home/mgrsys/.ssh/`: in this folder are the ssh key-files. Export the whole folder.
+- `/home/mgrsys/.google_authenticator`\
+ The file where the TOTP code is stored.
+
+- `/home/mgrsys/.ssh/`\
+ In this folder are the SSH key-files. Export the whole folder.
The most convenient way to export files is packaging and compressing them with the `tar` command.
-~~~bash
+~~~sh
$ cd
$ tar czvf .ssh.tgz .ssh/
$ tar czvf .google_authenticator.tgz .google_authenticator
@@ -314,7 +441,7 @@ $ tar czvf .google_authenticator.tgz .google_authenticator
When you have exported these `.tgz` files (with a tool like WinSCP, for instance) to the second agent node, remove the ones already present and then decompress the `.tgz` files.
-~~~bash
+~~~sh
$ cd
$ rm -rf .ssh
$ rm -f .google_authenticator
@@ -331,181 +458,203 @@ Finally, don't forget to use the `passwd` command to change the password and put
A K3s cluster uses certain ports to work, although they're different on each K3s node type.
-### _Port mapping_
+### Port mapping
+
+In a default installation of a K3s cluster, the ports you need to have open are the following.
+
+- **On SERVER nodes**
+ - TCP `2379-2380`\
+ Necessary when using HA with embedded etcd database engines. These are not used in a one-server-node setup.
-In a **default** installation of a K3s cluster, the ports you need to have open are the following.
+ - TCP `6443`\
+ This is for connecting to the Kubernetes API server, necessary for cluster management tasks.
-- On SERVER nodes:
- - TCP `2379-2380`: necessary when using HA with embedded etcd database engines. These are not used in a one-server-node setup.
- - TCP `6443`: this is for connecting to the Kubernetes API server, necessary for cluster management tasks.
- - TCP `10250`: required to access to Kubelet metrics.
+ - TCP `10250`\
+   Required to access Kubelet metrics.
-- On AGENT nodes:
- - TCP `80`: used by the Traefik service.
- - TCP `443`: also used by the Traefik service.
- - TCP `10250`: required to access Kubelet metrics.
+- **On AGENT nodes**
+ - TCP `80`\
+ Used by the Traefik service.
+
+ - TCP `443`\
+ Also used by the Traefik service.
+
+ - TCP `10250`\
+ Required to access Kubelet metrics.
On the other hand, there's also the TCP port `22`, which you must keep open in the firewall to allow yourself SSH access into all your VMs. Also, you need to give all your VMs access to the TCP port `3493` in the Proxmox VE host, so they can connect to the NUT server monitoring the UPS in your system.
-Now that you know the ports required by K3s, you may wonder how these ports will be arranged in the network interface cards enabled in your VMs. Check it out below.
+Now that you know the ports required by K3s, you may wonder how these ports will be arranged in the network interface cards enabled in your VMs. Check it out below:
-- On SERVER nodes:
- - NIC `net0`/`ens18`, IP `192.168.1.x`:
- - TCP: `22`, `6443`.
- - NIC `net1`/`ens19`, IP `10.0.0.x`:
- - TCP: `2379-2380`, `6443`, `10250`.
+- **On SERVER nodes**
+ - NIC `net0`/`ens18`, IP `10.4.1.x`:
+ - TCP: `22`, `6443`.
-- On AGENT nodes:
- - NIC `net0`/`ens18`, IP `192.168.1.x`:
- - TCP: `22`, `80`, `443`.
- - NIC `net1`/`ens19`, IP `10.0.0.x`:
- - TCP: `10250`.
+ - NIC `net1`/`ens19`, IP `172.16.1.x`:
+ - TCP: `2379-2380`, `6443`, `10250`.
+
+- **On AGENT nodes**
+ - NIC `net0`/`ens18`, IP `10.4.2.x`:
+ - TCP: `22`, `80`, `443`.
+ - NIC `net1`/`ens19`, IP `172.16.2.x`:
+ - TCP: `10250`.
Remember that each NIC is meant for a particular use:
-- The `net0`/`ens18` interfaces are the firewalled ones facing the external network. You'll configure your K3s cluster to use these ones only for external traffic.
+- The `net0`/`ens18` interfaces are the ones that need to be firewalled since they face the external network. You will configure your K3s cluster to use these ones only for external traffic.
-- The `net1`/`ens19` interfaces are not firewalled but isolated through the vmbr1 bridge. Your K3s cluster will used these NICs for its internal networking needs.
+- The `net1`/`ens19` interfaces are not firewalled but isolated through the `vmbr1` bridge. Your K3s cluster will use these NICs for its internal networking needs.
-The upshot is that you'll need to open in your Proxmox VE firewall only the ports required on the `net0`/`ens18` interfaces. Also, you'll need to give your VMs explicit access to the `3493` TCP port.
+The upshot is that you will need to open in your Proxmox VE firewall only the ports required on the `net0`/`ens18` interfaces. Also, you'll need to give your K3s VMs explicit access to the `3493` TCP port to reach the NUT server running in the Proxmox VE system.
-### _Firewall configuration for the K3s node VMs_
+### Firewall configuration for the K3s node VMs
-Now that you have the ports setup visualized, let's get down to it. The process is like what you did in the [**G022** guide](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#opening-the-upsd-port-on-the-proxmox-ve-node), where you gave your first Debian VM access to the NUT TCP port `3493` at the Proxmox VE host. This time you'll handle more ports and IPs.
+Now that you have the port setup visualized, let's get down to it. The process is similar to what you did in the [chapter **G022**](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md#opening-the-upsd-port-on-the-proxmox-ve-node), where you gave your first Debian VM access to the NUT TCP port `3493` at the Proxmox VE host. This time you'll handle more ports and IPs.
-#### **Allowing access to the host's NUT port for all K3s nodes VMs**
+#### Allowing access to the host's NUT port for all K3s nodes VMs
-To give your VMs access to the NUT server running in your Proxmox VE host, you just have to include their main IPs to the IP set you already allowed to reach the NUT port.
+To give your VMs access to the NUT server running in your Proxmox VE host, you just have to add their main IPs to the IP set you already allowed to reach the NUT port:
-1. Open your Proxmox VE web console and go to the `Datacenter`, then to `Firewall > Alias`. There you must add the IPs of your VMs' `net0` NICs, the `192.168.1.x` ones. The view should end looking like below.
+1. Open your Proxmox VE web console and go to the `Datacenter`, then to `Firewall > Alias`. There you must add the IPs of your VMs' `net0` NICs, the `10.4.1.x` and `10.4.2.x` ones. The view should end looking like below:
- 
+ 
- Remember that you created, back in the [**G022** guide](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md), the alias `debiantpl_net0` you see already listed here.
+ Remember that you created, back in the [chapter **G022**](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md), the alias `debiantpl_net0` you see already listed here.
-2. Browse now to the `Datacenter > Firewall > IPSet` tab, where you'll find the already existing `k3s_nodes_net0_ips` IP set.
+2. Browse now to the `Datacenter > Firewall > IPSet` tab, where you can find the `k3s_nodes_net0_ips` IP set:
- 
+ 
-3. Select the `k3s_nodes_net0_ips` set, where you'll see the `debiantpl_net0` alias already added there. Since it's the IP of the first Debian VM template you've created, and VM templates can't be run, you can remove the `debiantpl_net0` alias from this IP set by pressing the `Remove` button.
+3. Select the `k3s_nodes_net0_ips` set, where you'll see the `debiantpl_net0` alias already added there. Since it's the IP of the first Debian VM template you've created, and VM templates can't be run, you can remove the `debiantpl_net0` alias from this IP set by pressing the `Remove` button:
- 
+ 
- You'll have to confirm the action.
+ You'll have to confirm the `Remove` action:
- 
+ 
-4. Now, using the `Add` button, aggregate to the `k3s_nodes_net0_ips` IP set the other aliases you've created before. The IP set should end looking like below.
+4. Use the `Add` button to aggregate the K3s node aliases you've created before to the `k3s_nodes_net0_ips` IP set. The IP set should end looking like below:
- 
+ 
-The connection of your VMs with the NUT server should be enabled now. Check it out with a NUT client command like `upsc apc@192.168.1.107`. Or go back to the [G004](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md) or [G022](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md) guides to remember how to use the NUT-related commands.
+The connection of your K3s nodes with the NUT server should be enabled now. Check it out with a NUT client command like `upsc eaton@10.1.0.1`. Or go back to the chapters [**G004**](G004%20-%20Host%20configuration%2002%20~%20UPS%20management%20with%20NUT.md) or [**G022**](G022%20-%20K3s%20cluster%20setup%2005%20~%20Connecting%20the%20VM%20to%20the%20NUT%20server.md) to see how to use the NUT-related commands.
-#### **K3s node VMs' firewall setup**
+#### K3s node VMs' firewall setup
-1. Browse back to the `Datacenter > Firewall > Alias` tab. In this page, add the whole IP range of your local network (`192.168.1.0/24`). The `Alias` view should end looking like in the next snapshot.
+1. Browse back to the `Datacenter > Firewall > Alias` tab. On this page, add the whole IP range of your local network (`10.0.0.0/8` in this guide):
- 
+ 
-2. Go to the `Datacenter > Firewall > IPSet` page and create an IP set for the local network IPs, called `local_network_ips`. In it, add the `local_network` alias.
+2. Go to the `Datacenter > Firewall > IPSet` page and create an IP set for the local network IPs, called `local_network_ips`. In it, add the `local_network` alias:
- 
+ 
- I could have used just the `local_network` alias directly for creating rules, but it's better to use sets, even for just one alias or IP, since they give you more flexibility to manipulate IPs within the sets.
+ I could have used just the `local_network` alias directly for creating rules, but it is better to use sets, even for just one alias or IP, since they give you more flexibility to manipulate IPs within the sets.
-3. Now jump to the `Datacenter > Firewall > Security Group` page. Here you'll define all the rules to give access to the ports you need open in your K3s cluster, in this case only the ones on your VM's `net0` interfaces. Next, I'll detail the rules, but organized in two distinct security groups.
+3. Jump to the `Datacenter > Firewall > Security Group` page. Define and enable here all the rules to give access to the ports you need open in your K3s cluster, which are only the ones on your VMs' `net0` interfaces. Next, I'll detail the rules, but organized in two distinct security groups:
- `k3s_srvrs_net0_in`:
- - Rule 1: Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `local_network_ips`, Dest. port `22`, Comment `SSH standard port open for entire local network`.
+
+ - Rule 1: Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `local_network_ips`, Dest. port `22`, Comment `SSH standard port open for entire local network`.
- `k3s_agnts_net0_in`:
- - Rule 1: Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `local_network_ips`, Dest. port `22`, Comment `SSH standard port open for entire local network`.
- - Rule 2: Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `local_network_ips`, Dest. port `80`, Comment `HTTP standard port open for entire local network`.
- - Rule 3: Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `local_network_ips`, Dest. port `443`, Comment `HTTPS standard port open for entire local network`.
- Don't mind the rules numbers, it just for your reference when you read them. Their numbering will be different when you add them in the security groups, and they should end looking like in the following snapshots.
+ - Rule 1: Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `local_network_ips`, Dest. port `22`, Comment `SSH standard port open for entire local network`.
- 
+ - Rule 2: Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `local_network_ips`, Dest. port `80`, Comment `HTTP standard port open for entire local network`.
- 
+ - Rule 3: Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `local_network_ips`, Dest. port `443`, Comment `HTTPS standard port open for entire local network`.
- > **BEWARE!**
- > Don't forget to **enable** the rules when you create them, so revise the `On` column and check the ones you may have left disabled.
+   Do not mind the rule numbers; they are just for your reference when you read them. Their numbering will be different when you add them to the security groups, and they should end looking like this:
- The security groups don't work just as they are, you have to apply them **on each** of your K3s node VMs. In particular, you have to insert the `k3s_srvrs_net0_in` security group in the K3s **server** VM firewall, and put the `k3s_agnts_net0_in` one in your other two K3s **agent** VMs firewalls.
+ 
-4. Next, I'll show you how the firewall should look in the `k3sserver01` firewall with it's corresponding security group inserted as a rule, after using the `Insert: Security Group` button.
+ 
- 
+ > [!IMPORTANT]
+ > **Do not forget to enable the rules when you create them**\
+ > Revise the `On` column and check the ones you may have left disabled.
+
+   The security groups do not take effect on their own, **you have to apply them on each of your K3s node VMs**:
+
+ - Insert the `k3s_srvrs_net0_in` security group in the K3s server VM firewall.
+ - Put the `k3s_agnts_net0_in` security group in your other two K3s agent VMs firewalls.
+
+4. Next, I'll show you how the `k3sserver01` VM's firewall should look with its corresponding security group inserted as a rule, after using the `Insert: Security Group` button:
+
+ 
   Notice that I've not only inserted and enabled the corresponding `k3s_srvrs_net0_in` group, but also applied it to the `net0` interface of this VM.
-5. Browse to the VM's `Firewall > IPSet` page and create an IP set for the `net0` network interface. In the case of the `k3sserver01` VM, it should be like in the following snapshot.
+5. Browse to the `k3sserver01` VM's `Firewall > IPSet` page and create an IP set for the `net0` network interface. It should end up like this:
- 
+ 
- Notice how the IP set's name follows the pattern `ipfilter-netX`, where "X" is the network interface's number in the VM. Keep the name as it is, since it's the one the Proxmox VE firewall expects for IP filtering. Also see how I've added only the aliased IP for the `net0` interface on the IP set, restricting to one the valid IPs that can initiate outgoing connections from that interface.
+   Notice how the IP set's name follows the pattern `ipfilter-netX`, where "X" is the network interface's number in the VM. Keep the name as it is, since it's the one the Proxmox VE firewall expects for IP filtering. Also see how I've only added the aliased IP for the `net0` interface to the IP set, restricting the valid IPs that can initiate outgoing connections from that interface to just that one.
-6. Now you can enable the firewall itself on `k3sserver01`. Go to the VM's `Firewall > Options` tab and `Edit` the `Firewall` field to enable it.
+6. Now you can enable the firewall itself on `k3sserver01`. Go to the VM's `Firewall > Options` tab and `Edit` the `Firewall` field to enable it:
- 
+ 
Notice that I've also adjusted some other options:
- - The `NDP` option is disabled because is only useful for IPv6 networking, which is not active in your VMs.
+   - The `NDP` option is disabled because it is only useful for IPv6 networking, which is not active in any of the existing VMs.
- The `IP filter` is enabled, which helps to avoid IP spoofing.
- - Remember that enabling the option is not enough. You need to specify the concrete IPs allowed on the network interface in which you want to apply this security measure, something you've just done in the previous step.
+
+ > [!NOTE]
+ > **Enabling the `IP filter` option is not enough**\
+ > You need to specify the concrete IPs allowed in the network interface on which you want to apply this security measure, something you've just done in the previous step.
- The `log_level_in` and `log_level_out` options are set to `info`, enabling the logging of the firewall on the VM. This allows you to see, in the `Firewall > Log` view of the VM, any incoming or outgoing traffic that gets dropped or rejected by the firewall.
- On the other hand, you must know that the firewall configuration you apply to the VM is saved as a `.fw` file in your Proxmox VE host, under the `/etc/pve/firewall` path. So, open a shell as `mgrsys` on your Proxmox VE host and `cd` to that directory.
+   On the other hand, be aware that the firewall configuration you apply to the VM is saved as a `.fw` file on your Proxmox VE host, under the `/etc/pve/firewall` path. Open a shell as `mgrsys` on your Proxmox VE host and `cd` to that directory:
- ~~~bash
+ ~~~sh
$ cd /etc/pve/firewall
~~~
- There, execute an `ls` and see what files are there.
+ There, execute an `ls` and see what files are there:
- ~~~bash
+ ~~~sh
$ ls
- 2101.fw cluster.fw
+ 411.fw cluster.fw
~~~
- The `2101.fw` is the firewall configuration for the `k3sserver01` VM, identified in Proxmox VE with the VM ID `2101`. The `cluster.fw` is the file containing the whole Proxmox VE datacenter firewall configuration. If you open the `2101.fw` file, you'll see the following content.
+ The `411.fw` file contains the firewall configuration for the `k3sserver01` VM, identified in Proxmox VE with the VM ID `411`. The `cluster.fw` is the file containing the whole Proxmox VE datacenter firewall configuration. If you open the `411.fw` file, you'll see the following content:
~~~properties
[OPTIONS]
- log_level_in: info
- log_level_out: info
- ndp: 0
enable: 1
+ ndp: 0
+ log_level_out: info
ipfilter: 1
+ log_level_in: info
[IPSET ipfilter-net0]
- k3sserver01_net0
+ dc/k3sserver01_net0
[RULES]
GROUP k3s_srvrs_net0_in -i net0
~~~
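+
+   Beyond reading the `.fw` files directly, the Proxmox VE host also offers the `pve-firewall` command. As a quick sketch, you can use it to check the firewall service status and to dump the ruleset it compiles from your configuration:
+
+   ~~~sh
+   $ sudo pve-firewall status
+   $ sudo pve-firewall compile | less
+   ~~~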
-7. As you've done with your K3s server node, now you have to apply the corresponding security group (`k3s_agnts_net0_in`) and other firewall configuration to the K3s agent node VMs, starting with the `k3sagent01` VM (with ID `3111`). Applying this configuration will generate a `/etc/pve/firewall/3111.fw` file that should look like below.
+7. As you've done with your K3s server node, now you have to apply the corresponding security group (`k3s_agnts_net0_in`) and other firewall configuration to the K3s agent node VMs, starting with the `k3sagent01` VM (with ID `421`). Applying this configuration will generate a `/etc/pve/firewall/421.fw` file that should look like below:
~~~properties
[OPTIONS]
- enable: 1
+ log_level_in: info
log_level_out: info
- ndp: 0
ipfilter: 1
- log_level_in: info
+ ndp: 0
+ enable: 1
[IPSET ipfilter-net0]
- k3sagent01_net0
+ dc/k3sagent01_net0
[RULES]
@@ -514,91 +663,91 @@ The connection of your VMs with the NUT server should be enabled now. Check it o
This firewall setup for `k3sagent01` is almost identical to the one for `k3sserver01`:
- - The lines under `OPTIONS` might show up ordered differently, as happens above, but they are the same as in the `k3sserver01` VM.
+ - The lines under `OPTIONS` might show up ordered differently, as it happens above, but they are the same as in the `k3sserver01` VM.
- - There's only one `IPSET` that applies an IP filter on the VM's `net0` network card.
+ - There is only one `IPSET` that applies an IP filter on the VM's `net0` network card.
   - The security group in the `GROUP` rule and the alias in the `IPSET` directive are the ones corresponding to the agent nodes.
8. Since the firewall configuration for the second K3s agent node is essentially the same as with the first one, instead of using the web console to set up the firewall for the other VM, open a shell as `mgrsys` on your Proxmox VE host and do the following.
- ~~~bash
+ ~~~sh
$ cd /etc/pve/firewall
- $ sudo cp 3111.fw 3112.fw
+ $ sudo cp 421.fw 422.fw
~~~
- Edit the new `3112.fw` file to replace just the IP alias in the `IPSET` block with the correct one (`k3sagent02_net0`) for the `3112` VM.
+ Edit the new `422.fw` file to replace just the IP alias in the `IPSET` block with the correct one (`k3sagent02_net0`) for the `422` VM:
~~~properties
[OPTIONS]
- enable: 1
+ log_level_in: info
log_level_out: info
- ndp: 0
ipfilter: 1
- log_level_in: info
+ ndp: 0
+ enable: 1
[IPSET ipfilter-net0]
- k3sagent02_net0
+ dc/k3sagent02_net0
[RULES]
GROUP k3s_agnts_net0_in -i net0
~~~
-With the necessary ports open for the right IPs, you can now install the K3s software in your VMs. Also remember that it's not necessary to have the VMs running to configure their firewalls, and that the changes in the firewall configuration are put in effect immediately after the rules are enabled.
+With the necessary ports open for the right IPs, you can now install the K3s software in your VMs. Also remember that it is not necessary to have the VMs running to configure their firewalls, and that changes in the firewall configuration take effect immediately after the rules are enabled.
-#### **Port `6443` left closed on the server nodes' `net0` NIC**
+#### Port `6443` left closed on the server node's `net0` NIC
-I listed the `6443` port as one to be opened in the `net0` network card of your server node. So, why I haven't told you to open it in the firewall already? This port, on the `net0` NIC of your server nodes, is meant to be open only for external clients from which you'll manage remotely your K3s cluster. I'll explain how to set up such a client in the upcoming [**G026** guide](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md), where you'll open the `6443` port on your server node accordingly.
+I listed the `6443` port as one of those to be opened in the `net0` network card of your server node. So why haven't I told you to open it in the firewall already? This port, on the `net0` NIC of your server nodes, is meant to be open only for the external clients from which you'll remotely manage your K3s cluster. I'll explain how to set up such a client in the upcoming [chapter **G026**](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md), where you will open the `6443` port on your server node accordingly.
-## Considerations before installing the K3s cluster nodes
+## Considerations before installing the K3s software
-To install the K3s software on any node, the procedure is essentially the same: you have to download and run a small executable and it will do everything to install and start the `k3s` service. The question is that, before you install the K3s software, you must plan in advance how you want to configure your cluster. This is because you'll not only configure the K3s service itself, but also which embedded services it'll (or won't) deploy from the start and how those will run in your K3s cluster. In other words, the initial configuration determines how the K3s cluster and its services are configured, and reconfiguring all of it later can be a more complicated matter.
+To install the K3s software on any node, the procedure is essentially the same: you download and run a small executable that does everything needed to install and start the `k3s` service. But, before you install the K3s software, you must plan in advance how you want to configure your cluster. This is because you will configure not only the K3s service itself, but also which embedded services it will (or won't) deploy from the start and how those will run in your K3s cluster. In other words, the initial configuration determines how the K3s cluster and its services are configured, and reconfiguring all of it later can be a more complicated matter.
-On the other hand, the K3s service installer supports two ways of being configured: by using arguments like any other command, or reading its settings from a configuration file. In this guide, I'll show you how to do it with the configuration file.
+On the other hand, the K3s service installer supports two ways of being configured: by using arguments like any other command, or reading its settings from a configuration file. Here I'll show you how to do it with the configuration file.
-### _The K3s installer's configuration file_
+### The K3s installer's configuration file
By default, the K3s installer looks for the `/etc/rancher/k3s/config.yaml` file in the system, although you can specify another path with the `--config` or `-c` argument.
-Another aspect you must be aware of is that the K3s installer arguments **take precedence** over the parameters set in the file. So, careful of using the same argument both in the command and in the `config.yaml` file, **only the command's value will stand**.
+Also know that the **K3s installer arguments take precedence over the parameters set in the file**. Avoid using the same argument both in the command and in the `config.yaml` file, because **only the command's value will stand**.
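+
+For example, if you wanted the installer to read its settings from a non-default location, you could append the `--config` option after the role argument; a sketch with a made-up path:
+
+~~~sh
+$ wget -qO - https://get.k3s.io | sh -s - server --config /opt/k3s/my-config.yaml
+~~~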
## K3s Server node setup
-The very first nodes you must install in a Kubernetes cluster are the **master** ones. These are called **servers** in K3s jargon, and you'll setup one for your cluster.
+The very first nodes you must deploy in a Kubernetes cluster are those that run the control plane managing the cluster itself. They are the _server_ nodes in K3s jargon and, in this guide's cluster, there is only one you have to set up.
-### _Folder structure for K3s configuration files_
+### Folder structure for K3s configuration files
-In your K3s cluster server node (`k3sserver01`), execute the following.
+In your K3s server node (`k3sserver01`), execute the following.
-~~~bash
-$ sudo mkdir -p /etc/rancher/k3s/ /etc/rancher/k3s.config.d/
+~~~sh
+$ sudo mkdir -p /etc/rancher/k3s/ /etc/rancher/config.yaml.d/
~~~
-Notice that I've created two folders, the `/etc/rancher/k3s/` expected by K3s by default and the `/etc/rancher/k3s.config.d/`. The idea is to put your own configuration files in the `k3s.config.d` directory and symlink them in the `k3s` one. This is convenient because, if you happen to uninstall the K3s software for some reason, the uninstaller also removes the `/etc/rancher/k3s/` folder completely.
+Notice that I've created two folders, the `/etc/rancher/k3s/` expected by K3s by default and the `/etc/rancher/config.yaml.d/`. The idea is to put your own configuration files in the `config.yaml.d` directory and symlink them in the `k3s` one. This is convenient because, if you happen to uninstall the K3s software for some reason, the uninstaller also removes the `/etc/rancher/k3s/` folder completely.
-### _Enabling graceful shutdown on the server node_
+### Enabling graceful shutdown on the server node
-Since the release `1.21`, the graceful node shutdown capacity is available (as beta for now) in Kubernetes, and you can use it in your K3s cluster. This way, you can protect better your cluster nodes against unexpected failures or just ensuring that, when your Proxmox VE host shuts down, your nodes also shutdown but in a graceful manner.
+[The graceful node shutdown feature is available (as beta for now) and enabled by default in Kubernetes](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/), and you can use it in your K3s cluster. This way, you can better protect your cluster nodes against unexpected failures, or just ensure that, when your Proxmox VE host shuts down, your K3s nodes also shut down gracefully.
-To configure this functionality in your cluster, you need to setup a configuration file with two specific parameters and, optionally, a system service for cleaning up any Kubernetes pods that could get stuck in shutdown phase.
+To configure this functionality in your cluster, you need to set up a configuration file with two specific parameters and, optionally, a system service for cleaning up any Kubernetes pods that could get stuck in their shutdown phase.
-#### **The `kubelet.config` file**
+#### The `kubelet.conf` file
The configuration file you need to enable the graceful node shutdown can be called anything and be on any path accessible by the K3s service. So, let's use the folder structure created before and create in it the required file with a significant name.
-1. Create a `kubelet.config` file at `/etc/rancher/k3s.config.d`, and then symlink it in the `/etc/rancher/k3s` folder.
+1. Create a `kubelet.conf` file at `/etc/rancher/config.yaml.d`, and then symlink it in the `/etc/rancher/k3s` folder:
- ~~~bash
- $ sudo touch /etc/rancher/k3s.config.d/kubelet.config
- $ sudo ln -s /etc/rancher/k3s.config.d/kubelet.config /etc/rancher/k3s/kubelet.config
+ ~~~sh
+ $ sudo touch /etc/rancher/config.yaml.d/kubelet.conf
+ $ sudo ln -s /etc/rancher/config.yaml.d/kubelet.conf /etc/rancher/k3s/kubelet.conf
~~~
- The file is named `kubelet.config` because it affects the configuration of the kubelet process thar runs on each node of any Kubernetes cluster. Symlinking this file is not really necessary, its just a manner of keeping all the configuration files accesible through the same default K3s folder for coherence.
+   The file is named `kubelet.conf` because it affects the configuration of the kubelet process that runs on each node of any Kubernetes cluster. Symlinking this file is not really necessary, it's just a way of keeping all the configuration files accessible through the same default K3s folder for coherence.
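+
+   As a quick sanity check, you can list the symlink to confirm it points to the file under `/etc/rancher/config.yaml.d`:
+
+   ~~~sh
+   $ ls -l /etc/rancher/k3s/kubelet.conf
+   ~~~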
-2. In the `kubelet.config` file, put the following lines.
+2. In the `kubelet.conf` file, put the following lines:
~~~yaml
# Kubelet configuration
@@ -609,27 +758,33 @@ The configuration file you need to enable the graceful node shutdown can be call
shutdownGracePeriodCriticalPods: 10s
~~~
- A few highlight from the yaml above.
+ The highlights from this YAML are:
- - `apiVersion`: the Kubernetes API used here is the beta one (`v1beta1`).
+ - `apiVersion`\
+ The Kubernetes API used here is the beta one (`v1beta1`).
- - `kind`: indicates the type of Kubernetes object specified in the file. In this case is a `KubeletConfiguration`, meant to be used for configuring the kubelet process that any Kubernetes node has running.
+ - `kind`\
+    Indicates the type of Kubernetes object specified in the file. In this case it is a `KubeletConfiguration`, meant to be used for configuring the kubelet process that any Kubernetes node has running.
- - `shutdownGracePeriod`: default value is `0`. Total delay period of the node's shutdown, which gives the regular pods this time MINUS the period specified in the `shutdownGracePeriodCriticalPods` parameter.
+ - `shutdownGracePeriod`\
+ Default value is `0`. Total delay period of the node's shutdown, which gives the regular pods this time MINUS the period specified in the `shutdownGracePeriodCriticalPods` parameter.
- - `shutdownGracePeriodCriticalPods`: default value is `0`. This is the grace period conceded only to pods marked as critical. This value has to be lower than `shutdownGracePeriod`.
+ - `shutdownGracePeriodCriticalPods`\
+ Default value is `0`. This is the grace period conceded only to pods marked as critical. This value has to be lower than the `shutdownGracePeriod`.
- - Both the `shutdownGracePeriod` and the `shutdownGracePeriodCriticalPods` must be set to **non-zero** values to enable the graceful shutdown functionality.
+ > [!IMPORTANT]
+ > **The two `shutdownGracePeriod` parameters must have a non-zero value**\
+ > Both the `shutdownGracePeriod` and the `shutdownGracePeriodCriticalPods` configuration options [must be set to **non-zero** values to enable the graceful shutdown functionality](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#configuring-graceful-node-shutdown).
- - With the values set in the yaml above, the node will have `20` seconds to terminate all regular pods running in it, and `10` to end the critical ones.
+ With the values set in the YAML above, the node will have `20` seconds to terminate all regular pods running in it, and `10` to end the critical ones.
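+
+   Later, once the K3s service is running on this node, you can confirm that the kubelet registered its graceful-shutdown hook by listing the systemd inhibitor locks; the kubelet should appear holding a `delay` lock on `shutdown` (a quick sketch, the exact output wording may vary between systemd versions):
+
+   ~~~sh
+   $ systemd-inhibit --list
+   ~~~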
-#### **Cleanup pods script**
+#### Cleanup pods script
-It might be that some pods don't shutdown in time and get stuck with the `Shutdown` status, and could reappear as dead unevicted pods in your cluster after a reboot. To clean them up, you can prepare a system service in your server node that can get rid of them.
+It might be that some pods don't shut down in time and get stuck with the `Shutdown` status. They could reappear later as dead unevicted pods in your cluster after a reboot. To clean them up, you can prepare a system service on your server node that gets rid of them:
1. Create the `k3s-cleanup.service` file at `/lib/systemd/system/`.
- ~~~bash
+ ~~~sh
$ sudo touch /lib/systemd/system/k3s-cleanup.service
~~~
@@ -655,66 +810,68 @@ It might be that some pods don't shutdown in time and get stuck with the `Shutdo
WantedBy=multi-user.target
~~~
- Notice how, in the `ExecStart` parameter, the service invokes a `kubectl` command. You don't have it installed in the server node, so you can't enable this service yet until you install the K3s software in the node.
+ Notice how, in the `ExecStart` parameter, the service invokes a `kubectl` command. Since it is not available in your server node yet, you cannot enable this service until you install the K3s software package in the VM.
-### _The `k3sserver01` node's `config.yaml` file_
+### The `k3sserver01` node's `config.yaml` file
-As I've already told you before, the `/etc/rancher/k3s/config.yaml` file is the one the K3s installer will try to read by default.
+As I've already told you before, the `/etc/rancher/k3s/config.yaml` file is the one the K3s installer will try to read by default:
-1. Create the `config.yaml` file as follows.
+1. Create the `config.yaml` file as follows:
- ~~~bash
- $ sudo touch /etc/rancher/k3s.config.d/config.yaml
- $ sudo ln -s /etc/rancher/k3s.config.d/config.yaml /etc/rancher/k3s/config.yaml
+ ~~~sh
+ $ sudo touch /etc/rancher/config.yaml.d/config.yaml
+ $ sudo ln -s /etc/rancher/config.yaml.d/config.yaml /etc/rancher/k3s/config.yaml
~~~
Notice how the `config.yaml` file is symlinked into the `/etc/rancher/k3s` folder, so the K3s installer can find it.
2. You'll also need some basic networking information from the VM itself, which you can get with the `ip` command.
- ~~~bash
+ ~~~sh
$ ip a
- 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
- link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
- inet 127.0.0.1/8 scope host lo
- valid_lft forever preferred_lft forever
- 2: ens18: mtu 1500 qdisc fq state UP group default qlen 1000
- link/ether f6:26:b3:ce:76:a6 brd ff:ff:ff:ff:ff:ff
- altname enp0s18
- inet 192.168.1.21/24 brd 192.168.1.255 scope global dynamic ens18
- valid_lft 73731sec preferred_lft 73731sec
- 3: ens19: mtu 1500 qdisc fq state UP group default qlen 1000
- link/ether 5a:b0:5f:41:3c:48 brd ff:ff:ff:ff:ff:ff
- altname enp0s19
- inet 10.0.0.1/24 brd 10.0.0.255 scope global ens19
- valid_lft forever preferred_lft forever
+   1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+   2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq state UP group default qlen 1000
+ link/ether bc:24:11:81:81:59 brd ff:ff:ff:ff:ff:ff
+ altname enp0s18
+ altname enxbc2411818159
+ inet 10.4.1.1/8 brd 10.255.255.255 scope global dynamic noprefixroute ens18
+ valid_lft 79472sec preferred_lft 68672sec
+   3: ens19: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq state UP group default qlen 1000
+ link/ether bc:24:11:54:d4:fa brd ff:ff:ff:ff:ff:ff
+ altname enp0s19
+ altname enxbc241154d4fa
+ inet 172.16.1.1/12 brd 172.31.255.255 scope global ens19
+ valid_lft forever preferred_lft forever
~~~
The values you must have at hand are the IP for the first interface (`ens18` in the output above), and the IP and name of the second interface (`ens19`).
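+
+   If you just want the addresses without the rest of the output, the brief mode of `ip` prints a compact summary per interface (purely a convenience):
+
+   ~~~sh
+   $ ip -brief -4 address show
+   ~~~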
-3. Next, edit the `/etc/rancher/k3s/config.yaml` file by adding the following content.
+3. Next, edit the `/etc/rancher/k3s/config.yaml` file by adding the following content:
~~~yaml
# k3sserver01
- cluster-domain: "deimos.cluster.io"
+ cluster-domain: "homelab.cluster"
tls-san:
- - "k3sserver01.deimos.cloud"
+ - "k3sserver01.homelab.cloud"
+ - "10.4.1.1"
flannel-backend: host-gw
flannel-iface: "ens19"
bind-address: "0.0.0.0"
https-listen-port: 6443
- advertise-address: "10.0.0.1"
+ advertise-address: "172.16.1.1"
advertise-port: 6443
- node-ip: "10.0.0.1"
- node-external-ip: "192.168.1.21"
+ node-ip: "172.16.1.1"
+ node-external-ip: "10.4.1.1"
node-taint:
- - "k3s-controlplane=true:NoExecute"
- log: "/var/log/k3s.log"
- kubelet-arg: "config=/etc/rancher/k3s/kubelet.config"
+ - "node-role.kubernetes.io/control-plane=true:NoSchedule"
+ kubelet-arg: "config=/etc/rancher/k3s/kubelet.conf"
disable:
- - metrics-server
- - servicelb
+ - metrics-server
+ - servicelb
protect-kernel-defaults: true
secrets-encryption: true
agent-token: "SomeReallyLongPassword"
@@ -722,84 +879,123 @@ As I've already told you before, the `/etc/rancher/k3s/config.yaml` file is the
The parameters from the `config.yaml` file above are explained next.
- - `cluster-domain`: specify the base domain name used **internally** in your cluster for assigning DNS records to pods and services. By default is `cluster.local`. If you change it, make it different to the main domain name you may want to use for accessing **externally** the services you'll deploy in your cluster later.
+ - `cluster-domain`\
+   Specify the base domain name used internally in your cluster for assigning DNS records to pods and services. By default it is `cluster.local`. If you change it, make it different from the main domain name you want to use for accessing externally the services you'll deploy in your cluster later.
+
+ - `tls-san`\
+   Additional hostnames or IPs, beyond the ones already specified in the `bind-address`, `advertise-address`, `node-ip` or `node-external-ip` parameters, that will be applied as Subject Alternative Names in the self-generated TLS certs of the K3s service.
+
+ Put here the VM's full hostname and also the external IP of this server node to ensure that both values get included as Subject Alternative Names in its autogenerated TLS certificate. Otherwise, you will not be able to connect to your K3s cluster remotely with the official Kubernetes client using either the external IP or the full server node's hostname. Both will be rejected as not being "recognized" by the TLS certificate.
+
+ > [!NOTE]
+ > How to configure and use the Kubernetes client is explained [in the next chapter **G026**](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md).
+
+ - `flannel-backend`\
+   [Flannel](https://github.com/coreos/flannel) is a plugin for handling the cluster's internal networking. Flannel supports several network backends, with `vxlan` being the default one. The `host-gw` backend set in the `config.yaml` above has better performance than `vxlan` for this guide's particular setup, something you really want in your rather hardware-constrained K3s cluster.
- - `tls-san`: additional hostnames or IPs that will be applied in the self-generated TLS certs of the K3s service, meaning not the ones specified in the `bind-address`, `advertise-address`, `node-ip` or `node-external-ip` parameters. For instance, you could put here the VM's hostname. Also notice that, in this scenario, the `tls-san` value is identical to the one set in the `/etc/hosts` file, previously specified in the [Customizing the hostname](#customizing-the-hostname) subsection.
+ > [!NOTE]
+ > **Know more about the Flannel backend options**\
+ > [In this related K3s documentation](https://docs.k3s.io/networking/basic-network-options#flannel-options) and [also here](https://stackoverflow.com/questions/45293321/why-host-gw-of-flannel-requires-direct-layer2-connectivity-between-hosts).
- - `flannel-backend`: [Flannel](https://github.com/coreos/flannel) is a plugin for handling the cluster's internal networking. Flannel supports four different network backend methods, being `vxlan` the default one. The `host-gw` backend set in the `config.yaml` above has better performance than `vxlan`, something you really want to have in your rather hardware-constrained K3s cluster. Know more about the Flannel backend options [in this related documentation](https://www.containerlabs.kubedaily.com/rancher/Networking/Networking-with-Flannel.html) and [also here](https://stackoverflow.com/questions/45293321/why-host-gw-of-flannel-requires-direct-layer2-connectivity-between-hosts).
- > **BEWARE!**
- > The backend you choose for your cluster will affect its performance, sometimes in a big way. For instance, the default `vxlan` proved to be an awful backend for my setup, making services respond not just slowly but erratically even. The solution was to use `host-gw`, which offered a very noticeable good performance. Therefore, be aware that you might need to run some tests to choose the right backend which suits your requirements and your cluster setup.
+ > [!IMPORTANT]
+ > **The backend you choose for your cluster will affect its performance, sometimes in a big way**\
+ > For instance, the default `vxlan` proved to be an awful backend for my setup, making services respond not just slowly but erratically even. The solution was to use `host-gw`, which offered a very noticeable good performance. Therefore, be aware that you might need to run some tests to choose the right backend which suits your requirements and your cluster setup.
- - `flannel-iface`: with this parameter you can specify through which interface you want Flannel to run. In this case, I'm making it use the bridged network.
+ - `flannel-iface`\
+   Specifies through which interface you want Flannel to run. In this case, I'm making it use the internal bridged network (enabled through the `vmbr1` bridge in Proxmox VE), which is not exposed to the LAN.
- - `bind-address`: the address on which the K3s api service will listen. By default is already `0.0.0.0` (listening on all interfaces), but I think it's convenient to make it explicit for clarity.
+ - `bind-address`\
+   The address on which the K3s API service will listen. By default it is already `0.0.0.0` (listening on all interfaces), but I find it convenient to make this and other default values explicit for clarity.
- - `https-listen-port`: the port through which the K3s api service will listen. By default is `6443`, change it if needed (but don't forget to update your firewall rules accordingly!).
+ - `https-listen-port`\
+   The port on which the K3s API service will listen. By default it is `6443`; change it if needed (but do not forget to update your firewall rules accordingly!).
- - `advertise-address`: the K3s api server will advertise through this IP to the other nodes in the cluster. See how I've used the IP meant for the bridged network.
+ - `advertise-address`\
+   The K3s API server will advertise itself through this IP to the other nodes in the cluster. See how I've used the IP meant for the internal bridged network.
- - `advertise-port`: this is the port used only to advertise the api to the other nodes in the cluster. By default takes the value set in the `https-listen-port` parameter.
+ - `advertise-port`\
+   Port used only to advertise the API to the other nodes in the cluster. By default it takes the value set in the `https-listen-port` parameter.
- - `node-ip`: the internal IP the node uses to advertise itself in the cluster. Again, here I'm using the IP for the bridged network.
+ - `node-ip`\
+ The internal IP the node uses to advertise itself in the cluster. Again, here I'm using the IP for the bridged network.
- - `node-external-ip`: the public or external IP where the node also advertises itself and through which it offers its services.
+ - `node-external-ip`\
+ The public or external IP where the node also advertises itself and through which it offers its services.
- - `node-taint`: a taint is a way used in Kubernetes to mark nodes and other objects with certain characteristics. In this case, with the `k3s-controlplane=true:NoExecute` taint applied, this node will only do control-plane work. Therefore, it won't run any normal workloads, not even deployments for embedded services like Traefik.
+ - `node-taint`\
+   A taint is a Kubernetes mechanism for marking nodes so that pods are kept away from them unless they tolerate the taint. In this case, with the `node-role.kubernetes.io/control-plane=true:NoSchedule` taint applied, this node will only perform control-plane tasks. Therefore, the `k3sserver01` node will not run any user workload, not even deployments for embedded services like Traefik (only system pods that tolerate this taint will be scheduled on it).
- - `log`: the K3s service logs by default in the `/var/log/daemon.log` and the `/var/log/syslog` files. Putting here another file path makes the K3s service to write its log lines there, which helps uncluttering your VM's other log files.
+ - `kubelet-arg`\
+ Allows you to specify parameters to the kubelet process that runs in this node.
- - `kubelet-arg`: allows you to specify parameters to the kubelet process that runs in this node.
- - `config`: path to a configuration file with parameters overwriting the kubelet defaults. In this case, the file is the one you've configured before to enable the node's graceful shutdown.
+ - `config`\
+ Path to a configuration file with parameters overwriting the kubelet defaults. In this case, the file is the one you've configured before to enable the node's graceful shutdown.
- - `disable`: for specifying which embedded components are not to be deployed in the cluster. In this case, you can see that I've disabled two embedded services.
- - `metrics-server`: service for monitoring resources usage. It will be deployed as a regular service rather than an embedded one for adjusting its configuration to the particularities of the network setup used in this guide for the K3s cluster.
- - `servicelb`: this is the default load balancer. In the [**G027** guide](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md) I'll show you how to deploy a better alternative.
+ - `disable`\
+ For specifying which embedded components are not to be deployed in the cluster. In this case, you can see that I've disabled two embedded services:
- - `protect-kernel-defaults`: When set, the K3s service will return an error if certain kernel sysctl parameters are different than the defaults required by the kubelet service. You already set those parameters properly in the previous [**G024** guide](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#setting-up-sysctl-kernel-parameters-for-k3s-nodes).
+ - `metrics-server`\
+    Service for monitoring resource usage. It will be deployed later as a regular service rather than an embedded one, so its configuration can be adjusted to the particularities of the network setup used in this guide for the K3s cluster.
- - `secrets-encryption`: _experimental argument_. When the node is at rest, it will encrypt any secrets resources created in the cluster using a self-generated AES-CBC encrypted key. This option has to be set when the K3s cluster is created initially, not in a later reconfiguration, or you'll have authentication problems later with your cluster nodes. To know more about this feature, [read here about it](https://rancher.com/docs/k3s/latest/en/advanced/#secrets-encryption-config-experimental).
+ - `servicelb`\
+ This is the default load balancer. In the [chapter **G027**](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md) you will learn how to deploy a better alternative.
- - `agent-token`: _experimental argument_. Alternative shared password to be used only by agents to join the cluster.
+ - `protect-kernel-defaults`\
+ When set, the K3s service will return an error if certain Linux kernel `sysctl` parameters are different than the defaults required by the kubelet service. You already set those parameters properly in the previous [chapter **G024**](G024%20-%20K3s%20cluster%20setup%2007%20~%20K3s%20node%20VM%20template%20setup.md#setting-up-sysctl-kernel-parameters-for-k3s-nodes).
- > **BEWARE!**
- This password can be any string, but **all** the server nodes in a multiserver cluster **must** have configured the **same** value.
+ - `secrets-encryption`\
+    Enables encryption at rest for the secrets resources stored in the cluster, using a self-generated AES-CBC key. This option has to be set when the K3s cluster is created initially, not in a later reconfiguration. Otherwise, you will have authentication problems later with your cluster nodes. To know more about this feature, [read here about it](https://docs.k3s.io/security/secrets-encryption).
- Mind that this value is just the password part of what shall become the full agent token. The complete pattern of an agent token is as follows.
+ - `agent-token`\
+ Alternative shared password to be used only by agents to join the cluster.
- ~~~bash
- K10::node:
- ~~~
+ > [!IMPORTANT]
+ > **The `agent-token` value is a shared key among all server nodes of the cluster**\
+ > This password can be any string, but **all the server nodes of a cluster must have configured the same value**.
- The cluster CA certificate is the `/var/lib/rancher/k3s/server/tls/server-ca.crt` file of the server node (or the **first server** in a multiserver cluster). The sha256 sum is already calculated in the first portion of the server token saved in the `/var/lib/rancher/k3s/server/token` file. Alternatively, you can calculate the sha256 sum yourself with the command `sudo sha256sum /var/lib/rancher/k3s/server/tls/server-ca.crt`.
+ Mind that this value is **just the password part** of what shall become the full agent token. The complete pattern of an agent token is as follows.
- The `node` is the username used by default for agents, whereas for server nodes the username is `server` (you can see this in the server token string saved in the file `/var/lib/rancher/k3s/server/token`).
+ ~~~sh
+    K10<CA_CERT_SHA256SUM>::node:<AGENT_TOKEN_PASSWORD>
+ ~~~
-### _Installation of your K3s server node_
+ The cluster CA certificate is the `/var/lib/rancher/k3s/server/tls/server-ca.crt` file of the server node (or the first server in a multiserver cluster). The sha256 sum is already calculated in the first portion of the server token saved in the `/var/lib/rancher/k3s/server/token` file. Alternatively, you can calculate the sha256 sum yourself with the command `sudo sha256sum /var/lib/rancher/k3s/server/tls/server-ca.crt`.
-With the `config.yaml` and other files ready, you can launch the installation of K3s in your VM.
+ The `node` string is the username used by default for agents, whereas for server nodes the username is `server` (you can see this in the server token string saved in the file `/var/lib/rancher/k3s/server/token`).
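+
+    For instance, once the server node is installed (next section), you can gather both pieces of the full token from the two files mentioned above; just a convenience sketch:
+
+    ~~~sh
+    # Full server token; its first portion already contains the CA certificate hash:
+    $ sudo cat /var/lib/rancher/k3s/server/token
+    # Recalculate the CA certificate's sha256 sum yourself, if you want to double-check it:
+    $ sudo sha256sum /var/lib/rancher/k3s/server/tls/server-ca.crt
+    ~~~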
-#### **K3s installation command**
+### Installation of your K3s server node
+
+With the `config.yaml` and the other configuration files ready, you can launch the installation of K3s in your VM.
+
+#### K3s installation command
The command to execute in the server node is the following.
-~~~bash
-$ wget -qO - https://get.k3s.io | INSTALL_K3S_VERSION="v1.22.3+k3s1" sh -s - server
+~~~sh
+$ wget -qO - https://get.k3s.io | INSTALL_K3S_VERSION="v1.33.4+k3s1" sh -s - server
~~~
The command will find the `config.yaml` file in the default path on the VM, and apply it in the installation. Also notice the following details about the command above.
-- `wget`: this is the command that downloads the K3s installer. Instead of saving the executable into a file, `wget` dumps it into the shell output (option `-O` followed by `-`) to be consumed by the following pipe (`|`) command.
+- `wget`\
+ Command for downloading the K3s installer. Instead of saving the executable into a file, `wget` dumps it into the shell output (option `-O` followed by `-`) to be consumed by the following pipe (`|`) command.
+
+ > [!NOTE]
+ > **The official K3s installation method uses `curl` for downloading the installer**\
+  > Debian does not include the `curl` command by default, but it does ship `wget`, which is why this guide uses `wget` instead.
-- `INSTALL_K3S_VERSION`: with this environment variable you can control what version of K3s you're installing on your system. It's optional and, when omitted altogether, the K3s installer will download and install the lastest release of K3s. To know which releases are available, check the [Releases page of K3s on GitHub](https://github.com/k3s-io/k3s/releases).
+- `INSTALL_K3S_VERSION`\
+  [Environment variable](https://docs.k3s.io/reference/env-variables) for controlling what version of K3s you're installing on your system. It is optional and, when omitted altogether, the K3s installer will download and install the latest stable release of K3s. To know which releases are available, check the [Releases page of K3s on GitHub](https://github.com/k3s-io/k3s/releases).
-- `server`: makes the node run as a server in the cluster.
+- `server`\
+ Option to make the node run as a server in the cluster.
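+
+If piping a downloaded script straight into your shell makes you uneasy, you can also save the installer first, inspect it, and only then run it; a sketch using the same URL and environment variable as above:
+
+~~~sh
+$ wget -qO k3s-install.sh https://get.k3s.io
+$ less k3s-install.sh
+$ INSTALL_K3S_VERSION="v1.33.4+k3s1" sh k3s-install.sh server
+~~~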
-#### **K3s installation of the server node `k3sserver01`**
+#### K3s installation of the server node `k3sserver01`
-1. Execute the installation command on the `k3sserver01` VM.
+1. Execute the installation command on the `k3sserver01` VM:
- ~~~bash
- $ wget -qO - https://get.k3s.io | INSTALL_K3S_VERSION="v1.22.3+k3s1" sh -s - server
+ ~~~sh
+ $ wget -qO - https://get.k3s.io | INSTALL_K3S_VERSION="v1.33.4+k3s1" sh -s - server
[INFO] Using v1.33.4+k3s1 as release
[INFO] Downloading hash https://github.com/k3s-io/k3s/releases/download/v1.33.4+k3s1/sha256sum-amd64.txt
[INFO] Downloading binary https://github.com/k3s-io/k3s/releases/download/v1.33.4+k3s1/k3s
@@ -818,72 +1014,77 @@ The command will find the `config.yaml` file in the default path on the VM, and
[INFO] systemd: Starting k3s
~~~
-2. The installer won't take too long to do its job. Still, give some time to the `k3s.service`, which will start at the end of the installation, to setup itself. Open another shell session to this VM and check with the `kubectl` command the current status of this new K3s server node.
+2. The installer does not take too long to do its job. Still, give some time to the `k3s.service`, which will start at the end of the installation, to setup itself. Open another shell session to this VM and check with the `kubectl` command the current status of this new K3s server node:
- ~~~bash
+ ~~~sh
$ sudo kubectl get nodes
NAME STATUS ROLES AGE VERSION
- k3sserver01 NotReady control-plane,master 14s v1.22.3+k3s1
+ k3sserver01 NotReady control-plane,master 14s v1.33.4+k3s1
~~~
- > **BEWARE!**
- > The AGE column means how **old** is the node since it was created, **not** how long it has been running in the current session.
+ > [!IMPORTANT]
+ > **The AGE column means how old the node is since it was created**\
+ > It is NOT about how long the node has been running in the current session.
- At first, it will probably show the `NotReady` status for a moment. This depends on the capabilities given to your VM, so wait a bit and then execute again the `kubectl` command. But this time, lets use `kubectl` with a couple of extra options to get more information about the cluster.
+   At first, it will probably show the `NotReady` status for a moment. This depends on the resources given to your VM, so wait a bit and then execute the `kubectl` command again. But this time, let's use `kubectl` with a couple of extra options to get more information about the cluster:
- ~~~bash
+ ~~~sh
$ sudo kubectl get nodes -o wide
- NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
- k3sserver01 Ready control-plane,master 2m20s v1.22.3+k3s1 10.0.0.1 192.168.1.21 Debian GNU/Linux 11 (bullseye) 5.10.0-9-amd64 containerd://1.5.7-k3s2
+ NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+ k3sserver01 Ready control-plane,master 2m6s v1.33.4+k3s1 172.16.1.1 10.4.1.1 Debian GNU/Linux 13 (trixie) 6.12.41+deb13-amd64 containerd://2.0.5-k3s2
~~~
- See how now the `k3sserver01` node is `Ready` now. Also notice the roles it has: is not only a `master` of the cluster, but also has a `control-plane` role. This means that it will only worry about managing the cluster, not running workloads, unless the workloads are compatible with the `k3s-controlplane=true:NoExecute` taint given before to this server in the installation. On the other hand, you can verify that the IPs are assigned as expected, the one of the first network interface is the `EXTERNAL-IP` and the one of the second NIC is the `INTERNAL-IP`. Moreover, now you can see other information about the container runtime that is running in the cluster.
+    See how the `k3sserver01` node is `Ready` now. Also notice which roles it has assigned: it is not only a `master` of the cluster, but also has the `control-plane` role. This K3s node will only worry about managing the cluster, not running workloads, unless a workload tolerates the `node-role.kubernetes.io/control-plane=true:NoSchedule` taint given to this server node during its installation.
-3. If at this point the installer hasn't returned the control to the prompt yet, just get ouf of it by pressing `Ctrl+C`.
+    On the other hand, you can verify that the IPs are assigned as expected: the IP of the first network interface is the `EXTERNAL-IP`, and the IP of the second NIC is the `INTERNAL-IP`. Moreover, now you can also see which container runtime is running in the cluster.
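+
+    If you want to double-check the taint itself, `kubectl describe` shows it (just a quick verification):
+
+    ~~~sh
+    $ sudo kubectl describe node k3sserver01 | grep -i taints
+    Taints:             node-role.kubernetes.io/control-plane=true:NoSchedule
+    ~~~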
-#### **Enabling the `k3s-cleanup` service**
+3. If at this point the installer hasn't returned control to your shell prompt yet, just get out of it by pressing `Ctrl+C`.
-With the K3s software installed in your server node, you can enable the `k3s-cleanup` service you prepared before for cleaning up automatically any pods that get stuck with a shutdown status after some reboot.
+#### Enabling the `k3s-cleanup` service
-~~~bash
+With the K3s software installed in your server node, you can enable the `k3s-cleanup` service you prepared before for automatically cleaning up any pods that get stuck with a shutdown status after a reboot:
+
+~~~sh
$ sudo systemctl enable k3s-cleanup.service
$ sudo systemctl start k3s-cleanup.service
~~~
-Also, check it's status.
+Also, check its current status:
-~~~bash
+~~~sh
$ sudo systemctl status k3s-cleanup.service
● k3s-cleanup.service - k3s-cleanup
- Loaded: loaded (/lib/systemd/system/k3s-cleanup.service; enabled; vendor preset: enabled)
- Active: active (exited) since Sat 2021-11-27 21:11:03 CET; 2s ago
- Process: 4803 ExecStart=kubectl delete pods --field-selector status.phase=Failed -A --ignore-not-found=true (code=exited, status=0/SUCCESS)
- Main PID: 4803 (code=exited, status=0/SUCCESS)
- CPU: 1.135s
-
-Nov 27 21:11:03 k3sserver01 systemd[1]: Starting k3s-cleanup...
-Nov 27 21:11:03 k3sserver01 kubectl[4803]: No resources found
-Nov 27 21:11:03 k3sserver01 systemd[1]: Finished k3s-cleanup.
+ Loaded: loaded (/usr/lib/systemd/system/k3s-cleanup.service; enabled; preset: enabled)
+ Active: active (exited) since Wed 2025-09-10 12:51:50 CEST; 5s ago
+ Invocation: 8495690ffa7747488b97ef03338a58a3
+ Process: 5652 ExecStart=kubectl delete pods --field-selector status.phase=Failed -A --ignore-not-found=true (code=exited, status=0/SUCCESS)
+ Main PID: 5652 (code=exited, status=0/SUCCESS)
+ Mem peak: 22M
+ CPU: 510ms
+
+Sep 10 12:51:49 k3sserver01 systemd[1]: Starting k3s-cleanup.service - k3s-cleanup...
+Sep 10 12:51:50 k3sserver01 kubectl[5652]: No resources found
+Sep 10 12:51:50 k3sserver01 systemd[1]: Finished k3s-cleanup.service - k3s-cleanup.
~~~
-In the command's output, you'll notice that in the last lines you can see the output of the `kubectl` command this service invokes for cleaning up pods. This time there was nothing to clean up, which is expected since you don't have anything running yet in your still incomplete cluster.
+In this service status output, you can see in its last lines the logs of the `kubectl` command this service invokes for cleaning up pods. This time there was nothing to clean up, which is expected since you don't have anything running yet in your still incomplete cluster.
-This service **can only run on server nodes**, because those are the ones which can fully run the `kubectl` command. I'll explain a bit more about this detail later in this guide.
+This service **can only run on server nodes**, because those are the ones which can fully run the `kubectl` command. I'll explain a bit more about this detail later in this chapter.
## K3s Agent nodes setup
-The procedure to setup your two remaining VMs as K3s agent nodes is mostly the same as with the server nodes. The few things that change are the parameters specified in the `config.yaml` files and an argument in the installer command.
+The procedure to set up your two remaining VMs as K3s agent nodes is mostly the same as with the server node. The few things that change are the parameters specified in the `config.yaml` files and an argument in the installer command:
-1. Create the `config.yaml` file for the installer.
+1. Create the `config.yaml` file for the installer:
- ~~~bash
- $ sudo mkdir -p /etc/rancher/k3s.config.d /etc/rancher/k3s
- $ sudo touch /etc/rancher/k3s.config.d/config.yaml /etc/rancher/k3s.config.d/kubelet.config
- $ sudo ln -s /etc/rancher/k3s.config.d/config.yaml /etc/rancher/k3s/config.yaml
- $ sudo ln -s /etc/rancher/k3s.config.d/kubelet.config /etc/rancher/k3s/kubelet.config
+ ~~~sh
+ $ sudo mkdir -p /etc/rancher/config.yaml.d /etc/rancher/k3s
+ $ sudo touch /etc/rancher/config.yaml.d/config.yaml /etc/rancher/config.yaml.d/kubelet.conf
+ $ sudo ln -s /etc/rancher/config.yaml.d/config.yaml /etc/rancher/k3s/config.yaml
+ $ sudo ln -s /etc/rancher/config.yaml.d/kubelet.conf /etc/rancher/k3s/kubelet.conf
~~~
-2. Put in the `kubelet.config` the same content as in the server node.
+2. Put in the `kubelet.conf` the same content as in the server node:
~~~yaml
# Kubelet configuration
@@ -894,35 +1095,33 @@ The procedure to setup your two remaining VMs as K3s agent nodes is mostly the s
shutdownGracePeriodCriticalPods: 10s
~~~
-3. Edit the `config.yaml` files, being aware of the particular values required on each agent node.
+3. Edit the `config.yaml` files, being aware of the particular values required on each agent node:
- Content for the `k3sagent01` node.
+ Configuration for the `k3sagent01` node:
~~~properties
# k3sagent01
flannel-iface: "ens19"
- node-ip: "10.0.0.11"
- node-external-ip: "192.168.1.31"
- server: "https://10.0.0.1:6443"
+ node-ip: "172.16.2.1"
+ node-external-ip: "10.4.2.1"
+ server: "https://172.16.1.1:6443"
token: "K10<CA_CERT_SHA256SUM>::node:<AGENT_TOKEN_PASSWORD>"
- log: "/var/log/k3s.log"
- kubelet-arg: "config=/etc/rancher/k3s/kubelet.config"
+ kubelet-arg: "config=/etc/rancher/k3s/kubelet.conf"
protect-kernel-defaults: true
~~~
- Content for the `k3sagent02` node.
+ Configuration for the `k3sagent02` node:
~~~properties
# k3sagent02
flannel-iface: "ens19"
- node-ip: "10.0.0.12"
- node-external-ip: "192.168.1.32"
- server: "https://10.0.0.1:6443"
+ node-ip: "172.16.2.2"
+ node-external-ip: "10.4.2.2"
+ server: "https://172.16.1.1:6443"
token: "K10<CA_CERT_SHA256SUM>::node:<AGENT_TOKEN_PASSWORD>"
- log: "/var/log/k3s.log"
- kubelet-arg: "config=/etc/rancher/k3s/kubelet.config"
+ kubelet-arg: "config=/etc/rancher/k3s/kubelet.conf"
protect-kernel-defaults: true
~~~
@@ -934,142 +1133,150 @@ The procedure to setup your two remaining VMs as K3s agent nodes is mostly the s
- The password is the one defined as `agent-token` in the server node.
-4. With the `config.yaml` and `kubelet.config` files ready, you can launch the K3s installer on **both** your `k3sagentXX` VMs.
+4. With the `config.yaml` and `kubelet.conf` files ready, you can launch the K3s installer on **both** your `k3sagentXX` VMs:
- ~~~bash
- $ wget -qO - https://get.k3s.io | INSTALL_K3S_VERSION="v1.22.3+k3s1" sh -s - agent
+ ~~~sh
+ $ wget -qO - https://get.k3s.io | INSTALL_K3S_VERSION="v1.33.4+k3s1" sh -s - agent
~~~
Notice that at the end of the command there's an `agent` parameter, indicating that the installer will set up and launch a `k3s-agent.service`, which only runs agent workloads.
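+
+   On the agent VMs themselves you can check on that service with the usual `systemctl` queries, for instance:
+
+   ~~~sh
+   $ sudo systemctl status k3s-agent.service
+   ~~~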
-5. On the server node, run a `watch sudo kubectl get nodes` and monitor how the agents join your cluster.
+5. On the server node, execute `watch sudo kubectl get nodes -o wide` to monitor how your new agent nodes join your K3s cluster:
- ~~~bash
- Every 2.0s: sudo kubectl get nodes -o wide k3sserver01: Sun Nov 28 14:12:33 2021
+ ~~~sh
+ Every 2.0s: sudo kubectl get nodes -o wide k3sserver01: Wed Sep 10 13:11:50 2025
- NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
- k3sserver01 Ready control-plane,master 110m v1.22.3+k3s1 10.0.0.1 192.168.1.21 Debian GNU/Linux 11 (bullseye) 5.10.0-9-amd64 containerd://1.5.7-k3s2
- k3sagent01 Ready 24s v1.22.3+k3s1 10.0.0.11 192.168.1.31 Debian GNU/Linux 11 (bullseye) 5.10.0-9-amd64 containerd://1.5.7-k3s2
- k3sagent02 Ready 21s v1.22.3+k3s1 10.0.0.12 192.168.1.32 Debian GNU/Linux 11 (bullseye) 5.10.0-9-amd64 containerd://1.5.7-k3s2
+   NAME          STATUS   ROLES                  AGE   VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                       KERNEL-VERSION        CONTAINER-RUNTIME
+   k3sagent01    Ready    <none>                 50s   v1.33.4+k3s1   172.16.2.1    10.4.2.1      Debian GNU/Linux 13 (trixie)   6.12.41+deb13-amd64   containerd://2.0.5-k3s2
+   k3sagent02    Ready    <none>                 51s   v1.33.4+k3s1   172.16.2.2    10.4.2.2      Debian GNU/Linux 13 (trixie)   6.12.41+deb13-amd64   containerd://2.0.5-k3s2
+   k3sserver01   Ready    control-plane,master   25m   v1.33.4+k3s1   172.16.1.1    10.4.1.1      Debian GNU/Linux 13 (trixie)   6.12.41+deb13-amd64   containerd://2.0.5-k3s2
~~~
- > **BEWARE!**
+ > [!NOTE]
> The new nodes will need some time to appear in the list and reach the `Ready` status.
The `watch` command executes a command every two seconds by default, and constantly displays its newest output. To get out of this command, use `Ctrl+C`.
-6. If the K3s installer on any of your agent nodes doesn't return control to the prompt after that node has reached the `Ready` state, just `Ctrl+C` out of it.
+6. If the K3s installer on any of your agent nodes does not return control to the prompt after that node has reached the `Ready` state, just `Ctrl+C` out of it.
With the agent nodes running, your K3s Kubernetes cluster is complete, and it will immediately deploy on the agents all the embedded services it is allowed to run initially.
## Understanding your cluster through `kubectl`
-Congratulations, your K3s cluster is up and running! Now you'd like to know how to get all the information possible from your cluster, right? In your current setup, the command for managing the cluster and getting all the information is `kubectl`.
+Congratulations, your K3s cluster is up and running! Now you'd like to know how to get all the information possible from your cluster, right? In your current setup, the command for managing the cluster and getting all the information is `kubectl`:
+
+- `kubectl help`\
+ Lists the commands supported by `kubectl`.
-- `kubectl help`: lists the commands supported by `kubectl`.
-- `kubectl --help`: shows the help relative to a concrete command, for instance `kubectl get --help` will return extensive information about the `get` command.
-- `kubectl options`: lists all the options that can be passed to ANY `kubectl` command.
-- `kubectl api-resources`: lists the resources supported in the K3s cluster, like nodes or pods.
+- `kubectl <command> --help`\
+ Shows the help relative to a concrete command, for instance `kubectl get --help` will return extensive information about the `get` command.
-> **NOTE**
-> Since the help texts can be lenghty, use a `| less` after the commands listed above to have a paginated output, rather than having the text directly dumped in the shell.
+- `kubectl options`\
+ Lists all the options that can be passed to ANY `kubectl` command.
-In particular, the main command for retrieving information about what's going on in your cluster is `kubectl get`. You've already used it to see the nodes, but you can also see other resources like pods, services and many other. Next I list a few examples to give you an idea of this command's usage.
+- `kubectl api-resources`\
+ Lists the resources supported in the K3s cluster, like nodes or pods.
-- `kubectl get pods -Ao wide`: information about the pods running in your cluster. Notice here that only agent nodes are running the pods, since the servers are tainted not to do so.
+> [!NOTE]
+> **Use `less` to paginate long outputs**\
+> Since the help texts can be lengthy, use a `| less` after any of the commands listed above to get a paginated output, rather than having the text directly dumped in your shell.
- ~~~bash
+In particular, the main command for retrieving information about what's going on in your cluster is `kubectl get`. You've already used it to see the nodes, but you can also see other resources like pods, services and many others. Next, I list a few examples to give you an idea of this command's usage:
+
+- `kubectl get pods -Ao wide`\
+  Information about the pods running in your cluster. Notice that regular workloads like Traefik only run on the agent nodes, since the server is tainted with `NoSchedule`; a few K3s system pods (such as CoreDNS or the local-path provisioner) tolerate that taint and still run on the server node:
+
+ ~~~sh
$ sudo kubectl get pods -Ao wide
- NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- kube-system coredns-85cb69466-9l6ws 1/1 Running 0 111m 10.42.1.5 k3sagent01
- kube-system local-path-provisioner-64ffb68fd-zxm2v 1/1 Running 0 111m 10.42.1.2 k3sagent01
- kube-system helm-install-traefik-crd--1-bjv95 0/1 Completed 0 111m 10.42.1.3 k3sagent01
- kube-system helm-install-traefik--1-zb5gb 0/1 Completed 1 111m 10.42.1.4 k3sagent01
- kube-system traefik-74dd4975f9-tdv42 0/1 Running 0 34s 10.42.2.2 k3sagent02
+   NAMESPACE     NAME                                      READY   STATUS      RESTARTS   AGE   IP          NODE          NOMINATED NODE   READINESS GATES
+   kube-system   coredns-64fd4b4794-klv8f                  1/1     Running     0          39m   10.42.0.3   k3sserver01   <none>           <none>
+   kube-system   helm-install-traefik-crd-qg8c4            0/1     Completed   0          39m   10.42.2.3   k3sagent01    <none>           <none>
+   kube-system   helm-install-traefik-pqljk                0/1     Completed   2          39m   10.42.2.2   k3sagent01    <none>           <none>
+   kube-system   local-path-provisioner-774c6665dc-gwmhv   1/1     Running     0          39m   10.42.0.2   k3sserver01   <none>           <none>
+   kube-system   traefik-c98fdf6fb-zmkcn                   1/1     Running     0          15m   10.42.1.2   k3sagent02    <none>           <none>
~~~
See that each pod has its own IP, and that it has nothing to do with the one defined as internal IP for the cluster.
-- `kubectl get services -Ao wide`: information about the services running in your cluster. Be aware that a service could be running in several pods at the same time.
+- `kubectl get services -Ao wide`\
+ Information about the services running in your cluster. Be aware that a service could be backed by several pods at the same time:
- ~~~bash
+ ~~~sh
$ sudo kubectl get services -Ao wide
- NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
- default kubernetes ClusterIP 10.43.0.1 443/TCP 112m
- kube-system kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 112m k8s-app=kube-dns
- kube-system traefik LoadBalancer 10.43.110.37 80:30963/TCP,443:32446/TCP 59s app.kubernetes.io/instance=traefik,app.kubernetes.io/name=traefik
+ NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+ default kubernetes ClusterIP 10.43.0.1 443/TCP 41m
+ kube-system kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 41m k8s-app=kube-dns
+ kube-system traefik LoadBalancer 10.43.174.63 80:30512/TCP,443:32647/TCP 16m app.kubernetes.io/instance=traefik-kube-system,app.kubernetes.io/name=traefik
~~~
See here that each service has its own cluster IP, different from the internal IP (configured in the installation process) and the pods' IPs.
-- `kubectl get pv -Ao wide`: lists the persistent volumes active in the cluster. See that instead of using the full `persistentvolumes` resource name, I've typed the shortname `pv`. Not all resources have a shortname, something you should check in the list returned by `kubectl api-resources`.
+- `kubectl get pv -Ao wide`\
+ Lists the persistent volumes active in the cluster. See that instead of using the full `persistentvolumes` resource name, I've typed the shortname `pv`. Not all resources have a shortname, something you should check in the list returned by `kubectl api-resources`.
- ~~~bash
+ ~~~sh
$ sudo kubectl get pv -Ao wide
No resources found
~~~
- At this point, you won't have any persistent volume active in your cluster.
+ At this point, you don't have any persistent volume active in your cluster.
-- `kubectl get pvc -Ao wide`: returns the persitent volume clamins (`pvc` is shorthand for `persistentvolumeclaims`) active in the cluster.
+- `kubectl get pvc -Ao wide`\
+ Returns the persistent volume claims (`pvc` is shorthand for `persistentvolumeclaims`) active in the cluster.
- ~~~bash
+ ~~~sh
$ sudo kubectl get pvc -Ao wide
No resources found
~~~
- Like it happens with the persistent volumes, there won't be any persistent volume claims active in your cluster at this point.
+ Like the persistent volumes, there are no persistent volume claims active in your cluster at this point.
-- `kubectl get all -Ao wide`: gives you information of all resources active in your cluster. Since this list can be long, append `| less` to this command to see the output paginated.
+- `kubectl get all -Ao wide`\
+ Gives you information about all the resources active in your cluster. Since this list can be long, append `| less` to this command to see the output paginated.
- ~~~bash
+ ~~~sh
$ sudo kubectl get all -Ao wide
- NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
- default kubernetes ClusterIP 10.43.0.1 443/TCP 112m
- kube-system kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 112m k8s-app=kube-dns
- kube-system traefik LoadBalancer 10.43.110.37 80:30963/TCP,443:32446/TCP 59s app.kubernetes.io/instance=traefik,app.kubernetes.io/name=traefik
- mgrsys@k3sserver01:~$ sudo kubectl get all -Ao wide
- NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- kube-system pod/coredns-85cb69466-9l6ws 1/1 Running 0 112m 10.42.1.5 k3sagent01
- kube-system pod/local-path-provisioner-64ffb68fd-zxm2v 1/1 Running 0 112m 10.42.1.2 k3sagent01
- kube-system pod/helm-install-traefik-crd--1-bjv95 0/1 Completed 0 112m 10.42.1.3 k3sagent01
- kube-system pod/helm-install-traefik--1-zb5gb 0/1 Completed 1 112m 10.42.1.4 k3sagent01
- kube-system pod/traefik-74dd4975f9-tdv42 1/1 Running 0 86s 10.42.2.2 k3sagent02
-
- NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
- default service/kubernetes ClusterIP 10.43.0.1 443/TCP 112m
- kube-system service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 112m k8s-app=kube-dns
- kube-system service/traefik LoadBalancer 10.43.110.37 80:30963/TCP,443:32446/TCP 86s app.kubernetes.io/instance=traefik,app.kubernetes.io/name=traefik
-
- NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
- kube-system deployment.apps/local-path-provisioner 1/1 1 1 112m local-path-provisioner rancher/local-path-provisioner:v0.0.20 app=local-path-provisioner
- kube-system deployment.apps/coredns 1/1 1 1 112m coredns rancher/mirrored-coredns-coredns:1.8.4 k8s-app=kube-dns
- kube-system deployment.apps/traefik 1/1 1 1 86s traefik rancher/mirrored-library-traefik:2.5.0 app.kubernetes.io/instance=traefik,app.kubernetes.io/name=traefik
-
- NAMESPACE NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
- kube-system replicaset.apps/local-path-provisioner-64ffb68fd 1 1 1 112m local-path-provisioner rancher/local-path-provisioner:v0.0.20 app=local-path-provisioner,pod-template-hash=64ffb68fd
- kube-system replicaset.apps/coredns-85cb69466 1 1 1 112m coredns rancher/mirrored-coredns-coredns:1.8.4 k8s-app=kube-dns,pod-template-hash=85cb69466
- kube-system replicaset.apps/traefik-74dd4975f9 1 1 1 86s traefik rancher/mirrored-library-traefik:2.5.0 app.kubernetes.io/instance=traefik,app.kubernetes.io/name=traefik,pod-template-hash=74dd4975f9
-
- NAMESPACE NAME COMPLETIONS DURATION AGE CONTAINERS IMAGES SELECTOR
- kube-system job.batch/helm-install-traefik-crd 1/1 110m 112m helm rancher/klipper-helm:v0.6.6-build20211022 controller-uid=95e189fc-71b1-4299-928d-17668ca12a33
- kube-system job.batch/helm-install-traefik 1/1 110m 112m helm rancher/klipper-helm:v0.6.6-build20211022 controller-uid=ba1a2858-2044-40a1-8bf0-30d35bd76540
+ NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+ kube-system pod/coredns-64fd4b4794-klv8f 1/1 Running 0 44m 10.42.0.3 k3sserver01
+ kube-system pod/helm-install-traefik-crd-qg8c4 0/1 Completed 0 44m 10.42.2.3 k3sagent01
+ kube-system pod/helm-install-traefik-pqljk 0/1 Completed 2 44m 10.42.2.2 k3sagent01
+ kube-system pod/local-path-provisioner-774c6665dc-gwmhv 1/1 Running 0 44m 10.42.0.2 k3sserver01
+ kube-system pod/traefik-c98fdf6fb-zmkcn 1/1 Running 0 19m 10.42.1.2 k3sagent02
+
+ NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+ default service/kubernetes ClusterIP 10.43.0.1 443/TCP 44m
+ kube-system service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 44m k8s-app=kube-dns
+ kube-system service/traefik LoadBalancer 10.43.174.63 80:30512/TCP,443:32647/TCP 19m app.kubernetes.io/instance=traefik-kube-system,app.kubernetes.io/name=traefik
+
+ NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
+ kube-system deployment.apps/coredns 1/1 1 1 44m coredns rancher/mirrored-coredns-coredns:1.12.3 k8s-app=kube-dns
+ kube-system deployment.apps/local-path-provisioner 1/1 1 1 44m local-path-provisioner rancher/local-path-provisioner:v0.0.31 app=local-path-provisioner
+ kube-system deployment.apps/traefik 1/1 1 1 19m traefik rancher/mirrored-library-traefik:3.3.6 app.kubernetes.io/instance=traefik-kube-system,app.kubernetes.io/name=traefik
+
+ NAMESPACE NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
+ kube-system replicaset.apps/coredns-64fd4b4794 1 1 1 44m coredns rancher/mirrored-coredns-coredns:1.12.3 k8s-app=kube-dns,pod-template-hash=64fd4b4794
+ kube-system replicaset.apps/local-path-provisioner-774c6665dc 1 1 1 44m local-path-provisioner rancher/local-path-provisioner:v0.0.31 app=local-path-provisioner,pod-template-hash=774c6665dc
+ kube-system replicaset.apps/traefik-c98fdf6fb 1 1 1 19m traefik rancher/mirrored-library-traefik:3.3.6 app.kubernetes.io/instance=traefik-kube-system,app.kubernetes.io/name=traefik,pod-template-hash=c98fdf6fb
+
+ NAMESPACE NAME STATUS COMPLETIONS DURATION AGE CONTAINERS IMAGES SELECTOR
+ kube-system job.batch/helm-install-traefik Complete 1/1 24m 44m helm rancher/klipper-helm:v0.9.8-build20250709 batch.kubernetes.io/controller-uid=551768ff-ea03-4961-b8c7-1eeb7a95a156
+ kube-system job.batch/helm-install-traefik-crd Complete 1/1 24m 44m helm rancher/klipper-helm:v0.9.8-build20250709 batch.kubernetes.io/controller-uid=ea8fb86b-942d-4b0b-8f57-7b95fa23ff8a
~~~
-In all those examples you see that I've used the `-Ao wide` options, which are related to the `get` command.
+In all these examples, notice that I've used the `-Ao wide` options, which belong to the `get` command:
- The `A` is for getting the resources from all the namespaces present in the cluster.
- The `o` is for indicating the format of the output returned by `kubectl get`. The `wide` string just indicates one of the formats available in the command.
-Also, don't forget that you can combine those `kubectl` commands with `watch` whenever you need to have a real time monitoring in your shell of the entities running in your cluster.
+Also, don't forget that you can combine those `kubectl` commands with `watch` whenever you need real-time monitoring, from your shell, of the entities active in your cluster.
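+
+For instance, a minimal sketch of such real-time monitoring (the two-second refresh interval is just an arbitrary choice of mine):
+
+~~~sh
+# Refresh the wide pod listing every 2 seconds; stop it with Ctrl+C.
+$ watch -n 2 sudo kubectl get pods -Ao wide
+~~~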
-### _The `kubectl` command has to be executed with `sudo`_
+### The `kubectl` command has to be executed with `sudo` in server nodes
-Given how you've set up your cluster, `kubectl` must be executed with `sudo` in the cluster's nodes, otherwise you'll only see the following warning output.
+Given how the K3s cluster is configured, **`kubectl` must be executed with `sudo` in the cluster's server nodes**. Otherwise, you will only see the following warning output:
-~~~bash
+~~~sh
$ kubectl version
-WARN[0000] Unable to read /etc/rancher/k3s/k3s.yaml, please start server with --write-kubeconfig-mode to modify kube config permissions
+WARN[0000] Unable to read /etc/rancher/k3s/k3s.yaml, please start server with --write-kubeconfig-mode or --write-kubeconfig-group to modify kube config permissions
error: error loading config file "/etc/rancher/k3s/k3s.yaml": open /etc/rancher/k3s/k3s.yaml: permission denied
~~~
@@ -1081,53 +1288,66 @@ write-kubeconfig-mode: "0644"
...
~~~
-Above you see how you can change the mode of the `k3s.yaml` file, with values used with the `chmod` command. But, again, the **safe** configuration is the default one. The proper thing to do is to access from a remote client that has the `kubectl` command installed and configured in it, something I'll explain you in the upcoming [**G026** guide](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md).
+Above you see how you can change the mode of the `k3s.yaml` file, with values used with the `chmod` command. But, again, **the safe configuration is the default one**. The proper thing to do is to access from a remote client that has the `kubectl` command installed and configured in it, a matter that gets explained in the upcoming [chapter **G026**](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md).
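+
+If you just want to verify which permissions the kubeconfig file currently has on your server node, a quick hedged check could be:
+
+~~~sh
+# With the default K3s settings this file is typically mode 0600 and owned by root.
+$ sudo ls -l /etc/rancher/k3s/k3s.yaml
+~~~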
-### _The `kubectl` command doesn't work on pure agent nodes_
+### The `kubectl` command does not work on pure agent nodes
-For the most part, `kubectl` won't work on pure agent nodes like the ones in your cluster. This is because pure agents are not meant to manage cluster, hence don't have the required server components running within them for such tasks. Without those components, `kubectl` will fail to run. This can be seen already just by executing the `kubectl version` command on any agent node.
+The `kubectl` command does not work on pure agent nodes like the ones in your K3s cluster. This is because pure agents are not meant to manage the cluster, and lack the required control plane components for such tasks. Without those components, `kubectl` will fail to run. You can confirm this by executing the `kubectl version` command on any agent node:
-~~~bash
+~~~sh
$ sudo kubectl version
-Client Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.3+k3s1", GitCommit:"61a2aab25eeb97c26fa3f2b177e4355a7654c991", GitTreeState:"clean", BuildDate:"2021-11-04T00:24:35Z", GoVersion:"go1.16.8", Compiler:"gc", Platform:"linux/amd64"}
+Client Version: v1.33.4+k3s1
+Kustomize Version: v5.6.0
The connection to the server localhost:8080 was refused - did you specify the right host or port?
~~~
See that the final output line warns about the server at `localhost:8080` refusing the connection; that server is precisely the component the `kubectl` program needs in order to work.
-You'll be asking yourself, why the `kubectl` command is also installed in the agent nodes? This is because it's embedded in the K3s software, and the installation makes it also available in whichever system K3s gets installed.
+You may be asking yourself why the `kubectl` command is also installed on the agent nodes. This is because it is embedded in the K3s software package, so the installation makes it available on whichever system K3s gets installed.
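+
+As a side note, since `kubectl` is embedded in the `k3s` binary, you can also invoke it as a subcommand of `k3s` itself; a minimal sketch, run on the server node:
+
+~~~sh
+# Same result as calling kubectl directly, but invoked through the k3s binary.
+$ sudo k3s kubectl get nodes
+~~~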
## Enabling bash autocompletion for `kubectl`
-To make the use of the `kubectl` command a bit easier, you can enable the bash autocompletion in your **server** node, a hack which will help you type `kubectl` commands more agilely.
+To make the use of the `kubectl` command a bit easier, you can enable the bash autocompletion in your server node. This little hack will help you enter `kubectl` commands a bit faster:
-1. Execute the following only in your **server** node (remember, `kubectl` is useless in the agent nodes).
+1. **Execute the following only in your server node** (remember, `kubectl` is useless in the agent nodes):
- ~~~bash
+ ~~~sh
$ sudo touch /usr/share/bash-completion/completions/kubectl
$ sudo kubectl completion bash | sudo tee /usr/share/bash-completion/completions/kubectl
~~~
+ The second command will output a long script, which is what gets written into the `kubectl` file.
+
2. Then, execute the following `source` command to enable the new bash autocompletion rules.
- ~~~bash
+ ~~~sh
source ~/.bashrc
~~~
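+
+If you want to double-check that the completion file was generated correctly, an optional (and purely illustrative) verification could be:
+
+~~~sh
+# The file should start with the bash completion script generated by kubectl.
+$ head -n 5 /usr/share/bash-completion/completions/kubectl
+~~~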
-## Enabling the `k3s.log` file's rotation
+## Regular K3s logs are journaled
-Your `k3s.service` is configured to log in the `/var/log/k3s.log` file in **all** your nodes. The problem you have now is that this file, like any other log file, will grow over time and you'll have to empty it regularly. Instead of doing this manually, you can make the system take care of it automatically by using the `logrotate` service. Apply the following procedure in **all** your cluster nodes.
+The `k3s.service` running on your K3s server node (and its `k3s-agent.service` counterpart on the agent nodes) is configured to leave its logs in the systemd journal. To see the K3s server logs specifically, you must execute the following command:
-1. Create the file `/etc/logrotate.d/k3s`.
+~~~sh
+$ sudo journalctl -u k3s
+~~~
- ~~~bash
- $ sudo touch /etc/logrotate.d/k3s
+Try this on the server node first, which is where you will see the most log entries. The agents will not have much logged yet, since they are not running any workloads.
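+
+A couple of hedged variations you might also find handy; the `k3s-agent` unit name matches the agent service described later in this chapter:
+
+~~~sh
+# Follow the K3s server logs live.
+$ sudo journalctl -u k3s -f
+# On agent nodes, query the k3s-agent unit instead.
+$ sudo journalctl -u k3s-agent
+~~~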
+
+## Rotating the `containerd.log` file
+
+There is one log in the K3s setup whose rotation you need to configure with `logrotate`: the file `/var/lib/rancher/k3s/agent/containerd/containerd.log`. Since it belongs to the agent functionality, which is present on every node (remember that servers also run the agent processes), you have to do the following in all your nodes:
+
+1. Create the file `/etc/logrotate.d/k3s-containerd`.
+
+ ~~~sh
+ $ sudo touch /etc/logrotate.d/k3s-containerd
~~~
-2. Edit the `k3s` file by adding the following configuration block to it.
+2. Edit the `k3s-containerd` file by adding the following configuration block to it.
- ~~~bash
- /var/log/k3s.log {
+ ~~~sh
+ /var/lib/rancher/k3s/agent/containerd/containerd.log {
daily
rotate 5
missingok
@@ -1140,28 +1360,41 @@ Your `k3s.service` is configured to log in the `/var/log/k3s.log` file in **all*
The logrotate directives in the file mean the following.
- - `daily`: the log rotation will be done daily.
- - `rotate`: how many times a log file is rotated before is finally deleted.
- - `missingok`: if there's no log file to rotate, it's just ignored rather than provoking an error.
- - `notifempty`: if the current log file is empty, it's not rotated.
- - `dateext`: adds a date extension to the rotated log files, by default it's a string following the `YYYYMMDD` schema.
- - `compress`: logrotate compresses with gzip the rotated log files.
- - `delaycompress`: the previous log to the current one will be rotated but not compressed.
+ - `daily`\
+ The log rotation will be done daily.
+
+ - `rotate`\
+ How many times a log file is rotated before it is finally deleted.
+
+ - `missingok`\
+ If there is no log file to rotate, it's just ignored rather than provoking an error.
+
+ - `notifempty`\
+ If the current log file is empty, it's not rotated.
+
+ - `dateext`\
+ Adds a date extension to the rotated log files, by default it's a string following the `YYYYMMDD` schema.
+
+ - `compress`\
+ Makes logrotate compress with gzip the rotated log files.
+
+ - `delaycompress`\
+ The most recently rotated log is kept uncompressed; it will only be compressed on the next rotation cycle.
To know more about the logrotate directives, just check `man logrotate` in any of your cluster nodes.
-3. To test that the configuration works, execute the logrotate task manually.
+3. Test this new configuration.
- ~~~bash
- $ sudo logrotate -d /etc/logrotate.d/k3s
+ ~~~sh
+ $ sudo logrotate -d /etc/logrotate.d/k3s-containerd
~~~
- With the `-d` option, the `logrotate` command will do a dry run and also print some debug information, enough to see if there's something wrong with the configuration. The output should look like the following.
+ With the `-d` option, `logrotate` will do a dry run and also print some debug information:
- ~~~bash
- WARNING: logrotate in debug mode does nothing except printing debug messages! Consider using verbose mode (-v) instead if this is not what you want.
+ ~~~sh
+ warning: logrotate in debug mode does nothing except printing debug messages! Consider using verbose mode (-v) instead if this is not what you want.
- reading config file /etc/logrotate.d/k3s
+ reading config file /etc/logrotate.d/k3s-containerd
Reading state from file: /var/lib/logrotate/status
Allocating hash table for state file, size 64 entries
Creating new state
@@ -1171,208 +1404,175 @@ Your `k3s.service` is configured to log in the `/var/log/k3s.log` file in **all*
Creating new state
Creating new state
Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
- Creating new state
Handling 1 logs
- rotating pattern: /var/log/k3s.log after 1 days (5 rotations)
- empty log files are not rotated, old logs are removed
- considering log /var/log/k3s.log
+ rotating pattern: /var/lib/rancher/k3s/agent/containerd/containerd.log after 1 days empty log files are not rotated, (5 rotations), old logs are removed
+ considering log /var/lib/rancher/k3s/agent/containerd/containerd.log
Creating new state
- Now: 2021-11-22 14:53
- Last rotated at 2021-11-22 14:00
+ Now: 2025-09-10 16:30
+ Last rotated at 2025-09-10 16:00
log does not need rotating (log has already been rotated)
~~~
- Notice the final line which says that the `log does not need rotating`. This means that, if you execute the same `logrotate` command but without the `-d` parameter, it won't rotate the log at this moment.
-
-## Enabling the `containerd.log` file's rotation
-
-There's another log in the K3s setup that for which you need also to configure its rotation, the file `/var/lib/rancher/k3s/agent/containerd/containerd.log`. Notice that it's an agent node log file, so you'll find it in all your nodes. It's logrotate configuration is essentially the same one applied for the `k3s.log`, and you should also apply this in **all** your cluster nodes.
-
-1. Create the file `/etc/logrotate.d/k3s-containerd`.
-
- ~~~bash
- $ sudo touch /etc/logrotate.d/k3s-containerd
- ~~~
-
-2. Edit the `k3s-containerd` file by adding the following configuration block to it.
-
- ~~~bash
- /var/lib/rancher/k3s/agent/containerd/containerd.log {
- daily
- rotate 5
- missingok
- notifempty
- dateext
- compress
- delaycompress
- }
- ~~~
-
-3. Test this new configuration.
-
- ~~~bash
- $ sudo logrotate -d /etc/logrotate.d/k3s-containerd
- ~~~
+ Notice the final line which says that the `log does not need rotating`. It means that, if you execute the same `logrotate` command but without the `-d` parameter, it won't rotate the log at this moment.
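+
+If you want to go beyond the dry run shown in the last step, a hedged example of forcing an immediate rotation in verbose mode (only useful for testing, since the daily schedule takes care of it otherwise):
+
+~~~sh
+# -v prints what logrotate does, -f forces the rotation even if it is not due yet.
+$ sudo logrotate -vf /etc/logrotate.d/k3s-containerd
+~~~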
## K3s relevant paths
-Something you should also know is where the K3s relevant paths are. On all the nodes of your K3s cluster you'll have the two following paths.
+Something else you should know is where the relevant K3s paths are. On all the nodes of your K3s cluster you have the following two paths:
- `/etc/rancher`
- `/var/lib/rancher/k3s`
From here on, the differences start between the server and the agent nodes. In the following subsections I list some paths you should be aware of on each node type.
-### _K3s paths at SERVER nodes_
+### K3s paths at SERVER nodes
-#### **Under the `/etc/rancher` folder**
+#### K3s server paths under the `/etc/rancher` folder
-- `/etc/rancher/k3s/k3s.yaml`:
- Kubeconfig file in which your K3s setup has its cluster, context and credentials defined; in other words, this file is used to configure access to the Kubernetes cluster. By default, this file can only be accessed by the `root` user, forcing you to use `sudo` to execute `kubectl` commands.
+- `/etc/rancher/k3s/k3s.yaml`\
+ Kubeconfig file in which your K3s setup has its cluster, context and credentials defined. In other words, this file is used to configure access to the Kubernetes cluster. By default, this file can only be accessed by the system's `root` user, forcing you to use `sudo` to execute `kubectl` commands.
-- `/etc/rancher/node/password`:
- Randomly generated password for the node, used by agents to register in the cluster (servers also have the agent processes active, since they're also K3s nodes).
+- `/etc/rancher/node/password`\
+ Randomly generated password for the node, used by agents to register in the cluster (servers also have the agent processes active, since they are also K3s nodes).
-#### **Under the `/etc/systemd/system` folder**
+#### K3s server paths under the `/etc/systemd/system` folder
-- `/etc/systemd/system/k3s.service`:
+- `/etc/systemd/system/k3s.service`\
Service script that runs the K3s server node. Like with any other systemd services, you can manage it with `systemctl` commands, for instance `sudo systemctl status k3s.service`.
-- `/etc/systemd/system/k3s.service.env`:
- An associated file for setting environment variables for the `k3s.service`. It's already called by the `k3s.service` script, but the installer creates it empty.
+- `/etc/systemd/system/k3s.service.env`\
+ An associated file for setting environment variables for the `k3s.service`. It is already called by the `k3s.service` script, but the installer creates it empty.
-#### **Under the `/var/lib/rancher/k3s` folder**
+#### K3s server paths under the `/var/lib/rancher/k3s` folder
-- `/var/lib/rancher/k3s/agent`:
+- `/var/lib/rancher/k3s/agent`\
This root-restricted folder is related to agent functionality. Contains configuration files, certificates and manifests.
-- `/var/lib/rancher/k3s/data`:
+- `/var/lib/rancher/k3s/data`\
Holds the busybox related binaries of command tools that come included with the K3s installation. Also has the configuration for the embedded [strongSwan](https://strongswan.org/) IPsec solution which provides encryption and authentication to the K3s cluster nodes.
-- `/var/lib/rancher/k3s/server`:
+- `/var/lib/rancher/k3s/server`\
This root-restricted folder is related to server functionality, so you'll only find it in server nodes. Contains configuration files and certificates related to the server functionality like, for instance, the internal database.
-- `/var/lib/rancher/k3s/server/token`:
+- `/var/lib/rancher/k3s/server/token`\
File containing a self-generated token used for authenticating when joining the cluster. It is symlinked by a `node-token` file also present in the same folder.
-- `/var/lib/rancher/k3s/server/manifests`:
+- `/var/lib/rancher/k3s/server/manifests`\
Contains the yaml manifests that configure the services and resources which run in a default K3s installation. For instance, here you'll find the manifests that configure the embedded Traefik service running in your K3s cluster.
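+
+A hedged way of taking a quick look at a couple of these server-side paths (they are root-restricted, hence the `sudo`):
+
+~~~sh
+# List the manifests that configure the default K3s services, such as Traefik.
+$ sudo ls /var/lib/rancher/k3s/server/manifests
+# Print the cluster join token (treat it as a secret).
+$ sudo cat /var/lib/rancher/k3s/server/token
+~~~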
-### _K3s paths at AGENT nodes_
+### K3s paths at AGENT nodes
-#### **Under the `/etc/rancher` folder**
+#### K3s agent paths under the `/etc/rancher` folder
-- `/etc/rancher/node/password`:
+- `/etc/rancher/node/password`\
Randomly generated password for the node, used by agents to register in the cluster.
-#### **Under the `/etc/systemd/system` folder**
+#### K3s agent paths under the `/etc/systemd/system` folder
-- `/etc/systemd/system/k3s-agent.service`:
+- `/etc/systemd/system/k3s-agent.service`\
Service script that runs the K3s agent node. Like with any other systemd services, you can manage it with `systemctl` commands, for instance `sudo systemctl status k3s-agent.service`.
-- `/etc/systemd/system/k3s-agent.service.env`:
- An associated file for setting environment variables for the `k3s-agent.service`. It's already called by the `k3s-agent.service` script, but the installer creates it empty.
+- `/etc/systemd/system/k3s-agent.service.env`\
+ An associated file for setting environment variables for the `k3s-agent.service`. It is already called by the `k3s-agent.service` script, but the installer creates it empty.
-#### **Under the `/var/lib/rancher/k3s` folder**
+#### K3s agent paths under the `/var/lib/rancher/k3s` folder
-- `/var/lib/rancher/k3s/agent`:
+- `/var/lib/rancher/k3s/agent`\
This root-restricted folder is related to agent functionality. Contains configuration files, certificates and manifests.
-- `/var/lib/rancher/k3s/data`:
- Holds the busybox related binaries of command tools that come included with the K3s installation. Also has the configuration for the embedded [strongSwan](https://strongswan.org/) IPsec solution which provides encryption and authentication to the K3s cluster nodes.
+- `/var/lib/rancher/k3s/data`\
+ Holds the busybox related binaries of command tools that come included with the K3s installation.
## Starting up and shutting down the K3s cluster nodes
-In a K3s/Kubernetes cluster you need to apply a certain order to start the whole cluster properly.
+In a K3s/Kubernetes cluster you need to apply a certain order to start the whole cluster properly:
- First the server nodes or, at least, one of them.
- Then the agent nodes, but only when you have started at least one server.
-And what about the shutting down order? As you may suppose, it's the same but just in reverse: agents first, then the servers. Be sure of always **shut down** (not halting, mind you) the agents first and doing it, of course, gracefully (using the feature you've specifically configured during the K3s installation). If you happen to shut down the servers first, the agents could get hang up waiting for the servers to be available again. This could force you to halt the agent VMs ungracefully, or to kill all the K3s process inside them before proceeding to shut those VMs down. Of course, this is a messy way of stopping the cluster so, again, remember: first the agents, then the servers.
+And what about the shutting down order? As you may suppose, it is the same but in reverse: agents first, then the servers. **Be sure to always shut down (NOT halt, mind you) the agents first**, and to do it, of course, gracefully (using the feature you've specifically configured during the K3s installation). If you happen to shut down the servers first, the agents could hang waiting for the servers to become available again. This could force you to halt the agent VMs ungracefully, or to kill all the K3s processes inside them before proceeding to shut those VMs down. Of course, this is a messy way of stopping the cluster so, again, remember: **shut down the agents first, then the servers**.
-The question now is, do you have to start and shutdown the VMs of your K3s cluster manually every time? Not at all, Proxmox VE has the ability to start your VMs when your host boots up, and even allows you to specify in which order. The VM IDs are also relevant in the order in which the VMs are start or shut down, something I've mentioned you previously in this guide (in the section about link-cloning the VMs to the K3s node VM template). Read the next section to learn about this ordering.
+The question now is, do you have to start and shut down the VMs of your K3s cluster manually every time? Not at all: Proxmox VE has the ability to start your VMs when your host boots up, and even allows you to specify in which order. The VM IDs are also relevant to the order in which the VMs are started or shut down, something [previously mentioned in this chapter](#creation-of-vms-based-on-the-k3s-node-vm-template).
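+
+Just in case you ever had to do this by hand from a shell on your Proxmox VE host (as root), a minimal sketch with the `qm` command, using the VM IDs of this guide (411 for the server, 421 and 422 for the agents):
+
+~~~sh
+# Graceful shutdown: agents first, then the server.
+$ qm shutdown 421 && qm shutdown 422 && qm shutdown 411
+# Startup: the server first, then the agents.
+$ qm start 411 && qm start 421 && qm start 422
+~~~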
-### _Automatic ordered start or shutdown of the K3s nodes VMs_
+### Automatic ordered start or shutdown of the K3s nodes VMs
-1. In your Proxmox VE web console, go to your K3s server VM (the `k3sserver01` one) and open its `Options` page.
+1. In your Proxmox VE web console, go to your `k3sserver01` server VM and open its `Options` page:
- 
+ 
The two options highlighted above are the ones related to the automatic start and shutdown process managed by Proxmox VE.
-2. Select `Start at boot` and then press on the `Edit` button above; you'll see that this option is just a simple checklist field.
+2. Edit `Start at boot`, which consists of a single checkbox:
+
+ 
+
+ Enable it and press `OK`. Be aware that this action won't start the VM at that moment; it only marks it to autostart at your Proxmox VE host's boot time.
+
+3. Now edit the `Start/Shutdown order` option:
+
+ 
- 
+ I'll explain this feature's parameters below:
- Enable it and press `OK`. Be aware that this action won't start the VM in that moment, it's only marking it to autostart at your host's boot time.
+ - `Start/Shutdown order`\
+ This is an integer number that indicates in which order you want to start AND shut down this VM. The ordering works as follows:
-3. Now select the option right below, `Start/Shutdown order`, and `Edit` it too.
+ - **At boot time, lower numbers start before higher numbers**\
+ A VM with order `1` will be booted up by Proxmox VE before another having a `2` or a higher value.
- 
+ - **At shutdown, higher numbers get shutdown before lower numbers**\
+ A VM with order `2` or higher will shut down _before_ another with any lower value.
- I'll explain next the three fields of this option.
+ - If two or more VMs have the same value in this field, they'll be ordered by VM ID using the same criteria as with the order value. So, if your K3s nodes have the same order, let's say `1`, the VM with the lower ID will start before another with a higher one.
- - `Start/Shutdown order`: this is an integer number that indicates in which order you want to start AND shutdown this VM. The ordering works as follows:
- - At boot time, **lower** numbers start before _higher_ numbers. So, a VM with order `1` will be booted up by Proxmox VE before another having a `2` or a higher value.
- - At shutdown, **higher** numbers are shutdown by Proxmox VE before _lower_ numbers. So, a VM with order `2` or higher will shutdown _before_ another with any lower value.
- - If two or more VMs have the same value on this field, they'll be ordered among each other by VM ID using the same criteria as with the order value. So, if your K3s nodes have the same order, lets say `1`, the VM with the lower ID will start before another with a higher one.
- - VMs with the default value `any` in this field will always start **after** the ones that have a concrete number set here.
- - The ordering behaviour for VMs that have this value set as `any` is not explicitly explained in the Proxmox VE official documentation, but I think it can be safely assumed that it would be like as if they have the same order value, hence the VM ID can be expected to be the one used as ordering value.
+ - VMs with the default value `any` in this field will always start after the ones that have a concrete number set here.
- - `Startup delay`: number of seconds that Proxmox VE must wait before it can boot up the next VM _after_ this one. So, setting this field with a value of 30, it would make Proxmox VE wait 30 seconds till it can start the next VM in order. The default value here is 0 seconds (this is not explicitly detailed in the Proxmox VE documentation).
+ - The ordering behaviour of Proxmox VE for VMs that have this value set as `any` is not explicitly explained in the Proxmox VE official documentation. I think it can be safely assumed that they are treated as if they had the same order value, hence the VM ID can be expected to be used as the ordering value.
- - `Shutdown timeout`: number of seconds that Proxmox VE concedes to the VM to shutdown gracefully. If the VM hasn't shutdown by the time this countdown reaches 0, Proxmox VE will halt the VM forcefully. The default value is 180 seconds.
+ - `Startup delay`\
+ Number of seconds that Proxmox VE must wait before it can boot up the next VM after this one. For instance, setting this field to 30 would make Proxmox VE wait 30 seconds before it can start the next VM in order. The default value here is 0 seconds (not explicitly detailed in the Proxmox VE documentation).
- > **BEWARE!**
- > The `Start/Shutdown order` option only works among VMs that reside within the same Proxmox VE host. So, if you happened to have a Proxmox VE cluster of two or more nodes (instead of the standalone node setup used in this guide series), this option wouldn't be shared cluster wide, it just works in a node-per-node basis. To have cluster-wide ordering, you have to use the HA (High Availability) manager, which offers its own ways to do such things. Further, VMs managed by HA skip this `Start/Shutdown order` option altogether.
+ - `Shutdown timeout`\
+ Number of seconds that Proxmox VE gives the VM to shut down gracefully. If the VM hasn't shut down by the time this countdown reaches 0, Proxmox VE will halt the VM forcefully. The default value is 180 seconds.
-4. So, set the server node with a `Start/Shutdown order` of 1, a `Startup delay` of 10 seconds, and leave the `Shutdown timeout` with the `default` value. Press `OK` and return to the VM's `Options` page.
+ > [!IMPORTANT]
+ > **The `Start/Shutdown order` option only works with VMs hosted in the same Proxmox VE node**\
+ > If you have a Proxmox VE cluster of two or more nodes (instead of the standalone node setup used in this guide), this option would not be shared cluster-wide; this feature works on a per-node basis only. To have cluster-wide ordering, you have to use the HA (High Availability) manager, which offers its own ways to do such things. Furthermore, VMs managed by HA ignore this `Start/Shutdown order` option altogether.
- 
+4. Set your server node with a `Start/Shutdown order` of 1, a `Startup delay` of 10 seconds, and leave the `Shutdown timeout` with the `default` value. Press `OK` and return to the VM's `Options` page:
- With the startup delay, which is shown as `up=10` in the `Start/Shutdown order` option, will make Proxmox VE wait 10 seconds before starting up any other VM that may come after this one. This is convenient to give some time to your server to fully start up before it can serve your agents in your K3s cluster.
+ 
-5. In the other VMs, the two that act as agents of your cluster, you'll have to edit the same options but, this time, just give a higher order number (like 2) to all of them.
+ The startup delay, shown as `up=10` in the `Start/Shutdown order` option, will make Proxmox VE wait 10 seconds before starting up any other VM that may come after this one. This is convenient to give your server some time to fully start up before it has to serve the agent nodes in your K3s cluster.
- 
+5. In the other VMs, those acting as agent nodes of your cluster, you have to edit the same options but, this time, give the same higher order number (like 2) to all of them (a CLI equivalent of these settings is sketched after this procedure):
- This way, the agent nodes will start in order of VM ID: first the VM 3111, then the VM 3112. And both of them will start **after** the delay of 10 seconds set in the `k3sserver01` node, the VM 2101.
+ 
-6. Reboot your Proxmox VE host, your only `pve` node in your `Datacenter`, with the `Reboot` button.
+ This way, the agent nodes will start in order of VM ID: first the VM 421, then the VM 422. And both of them will start after the delay of 10 seconds set in the `k3sserver01` node, the VM 411.
- 
+6. Reboot your Proxmox VE host, your only `pve` node in your `Datacenter`, with the `Reboot` button:
-7. After rebooting your Proxmox VE host, get back inside the web console and open the `Tasks` log console at the bottom. There you'll see listed when the VMs start and shutdown tasks started. Pay particular attention to the time difference between the `VM 2101-Start` and the `VM 3111-Start` tasks, you'll see that it's exactly of 10 seconds.
+ 
- 
+7. After rebooting your Proxmox VE host, get back inside the web console and open the `Tasks` log console at the bottom. There you'll see when the VMs' start and shutdown tasks were launched. Pay particular attention to the time difference between the `VM 411 - Start` and the `VM 421-Start` tasks. In my case, it took more than the configured 10-second delay:
- You'll also see the lines `Start all VMs and Containers` and `Stop all VMs and Containers`, those are the tasks that do begin those two processes.
+ 
-### _Understanding the shutdown/reboot process of your Proxmox VE host with the K3s cluster running_
+ The log lines `Bulk shutdown VMs and Containers` and `Bulk start VMs and Containers` will also appear, representing the tasks that execute those two processes.
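+
+As mentioned in step 5, the same ordering settings can also be applied from a shell on your Proxmox VE host with `qm set`; a hedged CLI equivalent of steps 2 to 5 could be:
+
+~~~sh
+# Server VM: start at boot, order 1, and a 10-second delay before the next VM.
+$ qm set 411 --onboot 1 --startup order=1,up=10
+# Agent VMs: start at boot, order 2 (ties are resolved by VM ID).
+$ qm set 421 --onboot 1 --startup order=2
+$ qm set 422 --onboot 1 --startup order=2
+~~~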
-When you press the `Shutdown` or the `Reboot` button on the Proxmox VE web console, what you're really doing is sending the corresponding signal to your host to execute those actions. This is the same as executing the `reboot` or `shutdown` commands on a shell in your host, or just pressing the power or reset button on the machine itself. Your VMs also shutdown because they have the Qemu guest agent running, through which they automatically receive the same shutdown signal.
+### Understanding the shutdown/reboot process of your Proxmox VE host with the K3s cluster running
-On the other hand, you might be thinking now that the NUT configuration you've done in the VMs is now kind of redundant, since when the UPS kicks in it already provokes a shutdown action of your whole Proxmox VE system, including the VMs. About this, you have to consider that a shutdown provoked by the UPS is something unexpected, so having the NUT clients in your VMs allows you to apply more fine grained behaviours with shell scripts that can be launched whenever some UPS-related event happens.
+When you press the `Shutdown` or the `Reboot` button on the Proxmox VE web console, what you're really doing is sending the corresponding signal to your host to execute those actions. This is the same as executing the `reboot` or `shutdown` commands in a shell on your host, or just pressing the power or reset button on the machine running Proxmox VE. Your VMs also shut down because they have the Qemu guest agent running, through which they automatically receive the same shutdown signal.
-### _Warning about the Kubernetes graceful shutdown feature_
+On the other hand, you might be thinking now that the NUT configuration you've done in the VMs is kind of redundant. After all, when the UPS kicks in it already provokes a shutdown of your whole Proxmox VE system, including the VMs. Still, consider that a shutdown provoked by the UPS is something unexpected, and having the NUT clients in your VMs allows you to apply more fine-grained behaviours with shell scripts that can be launched within your VMs whenever some UPS-related event happens.
-Bear in mind that this is currently a beta feature, so it could give you surprises. For instance, in my experience with the release `v1.22.3+k3s1` of K3s (the one you've installed in this guide), after a graceful reboot the pods will appear as `Terminated` although they are actually running. This is something you can see with `kubectl` when checking the pods like in the output below.
+### Warning about the Kubernetes graceful shutdown feature
-~~~bash
+This feature is still considered beta, so it could give you surprises. I'll give you an example from my experience with the older K3s release `v1.22.3+k3s1`. After a graceful reboot, the pods appeared as `Terminated` although they were actually running. This is something you can detect with `kubectl` when checking the pods, like in the output below:
+
+~~~sh
$ kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
helm-install-traefik-crd--1-bjv95 0/1 Completed 0 30h
@@ -1383,22 +1583,22 @@ traefik-74dd4975f9-tdv42 1/1 Terminated 3 (21m ago) 29h
metrics-server-5b45cf8dbb-nv477 1/1 Terminated 1 (21m ago) 28m
~~~
-Notice the `STATUS` column. All the pods that appear `Terminated` there are in fact `Running`. How can you tell? The `READY` column informs you that there's 1 out of 1 pods ready (`1/1`) which should be 0 out of 1 if in fact no pod is running. Just be aware of this quirk, kind of unsurprising when using a beta feature.
+Notice the `STATUS` column. All the pods that appear as `Terminated` there are in fact `Running`. How can you tell? The `READY` column informs you that 1 out of 1 pods is ready (`1/1`), but it should be 0 out of 1 if no pod were actually running. Just be aware of this quirk, kind of unsurprising when using a beta feature.
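+
+If you ever hit this quirk and want to double-check the real state of a pod, a hedged way to do it (the pod name below is just a placeholder taken from the listing above):
+
+~~~sh
+# The .status.phase field reports the actual lifecycle phase of the pod.
+$ kubectl get pod traefik-74dd4975f9-tdv42 -n kube-system -o jsonpath='{.status.phase}{"\n"}'
+~~~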
## Relevant system paths
-### _Folders on the Proxmox VE host_
+### Folders on the Proxmox VE host
- `/etc/pve/firewall`
-### _Files on the Proxmox VE host_
+### Files on the Proxmox VE host
- `/etc/pve/firewall/2101.fw`
- `/etc/pve/firewall/3111.fw`
- `/etc/pve/firewall/3112.fw`
- `/etc/pve/firewall/cluster.fw`
-### _Folders on the VMs/K3s nodes_
+### Folders on the VMs/K3s nodes
- `/etc`
- `/etc/bash_completion.d`
@@ -1406,7 +1606,7 @@ Notice the `STATUS` column. All the pods that appear `Terminated` there are in f
- `/etc/network`
- `/etc/rancher`
- `/etc/rancher/k3s`
-- `/etc/rancher/k3s.config.d`
+- `/etc/rancher/config.yaml.d`
- `/etc/rancher/node`
- `/etc/sysctl.d`
- `/etc/systemd/system`
@@ -1419,16 +1619,15 @@ Notice the `STATUS` column. All the pods that appear `Terminated` there are in f
- `/var/lib/rancher/k3s/server`
- `/var/lib/rancher/k3s/server/manifests`
- `/var/lib/rancher/k3s/server/tls`
-- `/var/log`
-### _Files on the VMs/K3s nodes_
+### Files on the VMs/K3s nodes
- `/etc/bash_completion.d/kubectl`
- `/etc/logrotate.d/k3s`
- `/etc/network/interfaces`
- `/etc/rancher/k3s/config.yaml`
- `/etc/rancher/k3s/k3s.yaml`
-- `/etc/rancher/k3s.config.d/config.yaml`
+- `/etc/rancher/config.yaml.d/config.yaml`
- `/etc/rancher/node/password`
- `/etc/systemd/system/k3s.service`
- `/etc/systemd/system/k3s.service.env`
@@ -1439,108 +1638,117 @@ Notice the `STATUS` column. All the pods that appear `Terminated` there are in f
- `/var/lib/rancher/k3s/agent/containerd/containerd.log`
- `/var/lib/rancher/k3s/server/token`
- `/var/lib/rancher/k3s/server/tls/server-ca.crt`
-- `/var/log/k3s.log`
## References
-### _Proxmox VE_
+### Proxmox VE
- [Firewall IP sets](https://pve.proxmox.com/wiki/Firewall#pve_firewall_ip_sets)
- [IPFilter vs IPSet](https://forum.proxmox.com/threads/ipfilter-vs-ipset.36127/#post-384591)
- [How to apply proxmox firewall rules to VMs?](https://serverfault.com/questions/801617/how-to-apply-proxmox-firewall-rules-to-vms)
-- [How to prevent stealing of (others) IP addresses on KVM/Proxmox based virtualisation hosts?](https://serverfault.com/questions/1030177/how-to-prevent-stealing-of-others-ip-addresses-on-kvm-proxmox-based-virtualisa)
- [Startup Order](https://forum.proxmox.com/threads/startup-order.13629/)
- [Automatic Start and Shutdown of Virtual Machines](https://pve.proxmox.com/pve-docs/chapter-qm.html#qm_startup_and_shutdown)
- [Clean scheduled reboot](https://forum.proxmox.com/threads/clean-scheduled-reboot.38386/)
-- [How to shutdown PROXMOX from the command line?](https://www.reddit.com/r/selfhosted/comments/jkmpgt/how_to_shutdown_proxmox_from_the_command_line/)
- [Shutdown VM's and CT's from Proxmox Shutdown Command?](https://www.reddit.com/r/Proxmox/comments/agdfgj/shutdown_vms_and_cts_from_proxmox_shutdown_command/)
-- [Proxmox can’t stop VM – How we fix it!](https://bobcares.com/blog/proxmox-cant-stop-vm/)
+- [Fix Proxmox Can’t Stop VM Issue – Step-by-Step Solutions](https://bobcares.com/blog/proxmox-cant-stop-vm/)
- [UPS APC to shutdown VMs?](https://forum.proxmox.com/threads/ups-apc-to-shutdown-vms.54695/)
-### _Debian and Linux SysOps_
+### Debian and Linux SysOps
-#### **Changing the `Hostname`**
+#### Changing the `Hostname`
- [How to Change Hostname in Debian](https://linuxhandbook.com/debian-change-hostname/)
-#### **Network interfaces configuration**
+#### Network interfaces configuration
-- [Proxmox VE con Open vSwitch](https://www.doblefactor.com/en/seguridad/proxmox-ve-con-open-vswitch/)
-- [IEEE 802.1Q VLAN Tutorial](http://www.microhowto.info/tutorials/802.1q.html)
-- [Configure an Ethernet interface as a VLAN trunk (Debian)](http://www.microhowto.info/howto/configure_an_ethernet_interface_as_a_vlan_trunk_on_debian.html)
-- [How to configure 802.1Q VLAN Tagging on Debian 9](https://www.snel.com/support/how-to-configure-802-1q-vlan-tagging-on-debian-9/)
-- [How To Configure VLAN Interface on Debian 10 (Buster)](https://techviewleo.com/how-to-configure-vlan-interface-on-debian/)
- [Debian: add and configure VLAN](https://docs.gz.ro/debian-linux-vlan.html)
- [How to setup a Static IP address on Debian Linux](https://linuxconfig.org/how-to-setup-a-static-ip-address-on-debian-linux)
- [Howto: Ubuntu Linux convert DHCP network configuration to static IP configuration](https://www.cyberciti.biz/tips/howto-ubuntu-linux-convert-dhcp-network-configuration-to-static-ip-configuration.html)
- [Debian Linux Configure Network Interface Cards – IP address and Netmasks](https://www.cyberciti.biz/faq/howto-configuring-network-interface-cards-on-debian/)
- [For those wanting to play with VLANs](https://forums.virtualbox.org/viewtopic.php?f=1&t=38037)
-- [Open vSwitch Documentation about VLANs](https://docs.openvswitch.org/en/latest/faq/vlan/)
-- [Configure openvswitch in virtualization environment](https://www.humblec.com/configure-openvswitch-in-virt-environment/)
+- [Open vSwitch. Documentation. VLANs](https://docs.openvswitch.org/en/latest/faq/vlan/)
+- [Configure openvswitch in virtualization environment and use it for simple and complex (ex: vlan) testing in centos/rhel/fedora..etc](https://www.humblec.com/configure-openvswitch-in-virt-environment/)
- [How to setup and save vlans on ethernet](https://askubuntu.com/questions/660506/how-to-setup-and-save-vlans-on-ethernet)
- [Proxmox OVS VLANs any idea how to do ?](https://www.reddit.com/r/Proxmox/comments/7prxig/proxmox_ovs_vlans_any_idea_how_to_do/)
+- [Wikipedia. Reserved IP addresses. IPv4](https://en.wikipedia.org/wiki/Reserved_IP_addresses#IPv4)
-#### **Logrotate configuration**
+#### Logrotate configuration
- [logrotate(8) — Linux manual page](https://www.man7.org/linux/man-pages/man8/logrotate.8.html)
- [How to Setup and Manage Log Rotation Using Logrotate in Linux](https://www.tecmint.com/install-logrotate-to-manage-log-rotation-in-linux/)
- [How to Install and Configure logrotate on Linux](https://www.osradar.com/how-to-install-and-configure-logrotate-on-linux/)
-#### **Downloading files with `wget`**
+#### Downloading files with `wget`
-- [Execute Bash Script Directly From a URL](https://www.baeldung.com/linux/execute-bash-script-from-url#installation-and-usage---wget)
+- [Execute Bash Script Directly. Installation and Usage – wget](https://www.baeldung.com/linux/execute-bash-script-from-url#installation-and-usage---wget)
- [How to disable HTTP redirect in wget](https://www.xmodulo.com/disable-http-redirect-wget.html)
-### _K3s cluster setup_
+### K3s cluster setup
-#### **K3s configuration**
+#### [Kubernetes](https://kubernetes.io/)
+
+- [Cluster Architecture](https://kubernetes.io/docs/concepts/architecture/)
+ - [Control plane components](https://kubernetes.io/docs/concepts/architecture/#control-plane-components)
+
+- [kubectl completion](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_completion/)
-- [Releases](https://github.com/k3s-io/k3s/releases)
-- [Installation](https://rancher.com/docs/k3s/latest/en/installation/)
-- [Server Configuration Reference](https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/)
-- [Agent Configuration Reference](https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/)
-- [Installation Options. Configuration File](https://rancher.com/docs/k3s/latest/en/installation/install-options/#configuration-file)
-- [Secrets Encryption Config (Experimental)](https://rancher.com/docs/k3s/latest/en/advanced/#secrets-encryption-config-experimental)
-- [Architecture](https://rancher.com/docs/k3s/latest/en/architecture/)
-- [Is there any way to bind K3s / flannel to another interface?](https://stackoverflow.com/questions/66449289/is-there-any-way-to-bind-k3s-flannel-to-another-interface)
-- [5 Ways to Install K3s](https://www.youtube.com/watch?v=O3s3YoPesKs)
-- [Intro to K3s Online Training: Lightweight Kubernetes](https://www.youtube.com/watch?v=vRjk3r9fwFo)
-- [K3s – lightweight kubernetes made ready for production – Part 1](https://digitalis.io/blog/kubernetes/k3s-lightweight-kubernetes-made-ready-for-production-part-1/)
-- [bash auto-completion on Linux](https://kubernetes.io/docs/tasks/tools/included/optional-kubectl-configs-bash-linux/)
- [How to Enable kubectl Autocompletion in Bash?](https://www.digitalocean.com/community/questions/how-to-enable-kubectl-autocompletion-in-bash)
- [How to enable kubernetes commands autocomplete](https://stackoverflow.com/questions/53444924/how-to-enable-kubernetes-commands-autocomplete)
+
+#### K3s configuration
+
+- [K3s. Releases](https://github.com/k3s-io/k3s/releases)
+
+- [Architecture](https://docs.k3s.io/architecture)
+
+- [Installation](https://docs.k3s.io/installation)
+ - [Configuration Options. Configuration File](https://docs.k3s.io/installation/configuration#configuration-file)
+
+- [CLI Tools. k3s server](https://docs.k3s.io/cli/server)
+- [CLI Tools. k3s agent](https://docs.k3s.io/cli/agent)
+
+- [Basic Network Options](https://docs.k3s.io/networking/basic-network-options)
+
+- [K3s. Issues. Node taint k3s-controlplane=true:NoExecute](https://github.com/k3s-io/k3s/issues/1401)
+
+- [Secrets Encryption Config](https://docs.k3s.io/security/secrets-encryption)
+
+- [Is there any way to bind K3s / flannel to another interface?](https://stackoverflow.com/questions/66449289/is-there-any-way-to-bind-k3s-flannel-to-another-interface)
- [401 Unauthorized message when joining a single-node cluster](https://github.com/k3s-io/k3s/issues/2463)
- [k3s + Gitlab. Remote Access with kubectl](https://github.com/apk8s/k3s-gitlab#remote-access-with-kubectl)
- [How to Install and Configure K3s on Ubuntu 18.04](https://www.liquidweb.com/kb/how-to-install-and-configure-k3s-on-ubuntu-18-04/)
- [Home Server with k3s](https://www.publish0x.com/awesome-self-hosted/home-server-with-k3s-xdnwrmx)
- [Questions regarding the server node's agent-token parameter](https://github.com/k3s-io/k3s/discussions/3443)
- [Question: Problem setting up k3s in my VPS](https://www.reddit.com/r/k3s/comments/nl85h2/question_problem_setting_up_k3s_in_my_vps/)
-- [K3s Network Options](https://rancher.com/docs/k3s/latest/en/installation/network-options/)
-#### **Flannel**
+#### Flannel
- [Flannel Backends](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md)
-- [Networking with Flannel](https://www.containerlabs.kubedaily.com/rancher/Networking/Networking-with-Flannel.html)
+- [K3s. Basic Network Options](https://docs.k3s.io/networking/basic-network-options)
+ - [Flannel Options](https://docs.k3s.io/networking/basic-network-options#flannel-options)
- [K3S Supports Container Network Interface (CNI) and Flannel](https://www.henrydu.com/2020/11/16/k3s-cni-flannel/)
- [why host-gw of flannel requires direct layer2 connectivity between hosts?](https://stackoverflow.com/questions/45293321/why-host-gw-of-flannel-requires-direct-layer2-connectivity-between-hosts)
- [Flannel Networking Demystify](https://msazure.club/flannel-networking-demystify/)
- [Configure Kubernetes Network With Flannel](https://dzone.com/articles/configure-kubernetes-network-with-flannel)
- [Configure Kubernetes Network with Flannel](https://appfleet.com/blog/configure-kubernetes-network-with-flannel/)
-- [Networking with Flannel](https://www.containerlabs.kubedaily.com/rancher/Networking/Networking-with-Flannel.html)
- [flannel host-gw network](https://hustcat.github.io/flannel-host-gw-network/)
- [Kubernetes the not so hard way with Ansible - Harden the instances - (K8s v1.21)](https://www.tauceti.blog/posts/kubernetes-the-not-so-hard-way-with-ansible-harden-the-instances/)
-#### **Graceful Shutdown**
+#### Graceful node shutdown
- [Graceful Node Shutdown Goes Beta](https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/)
-- [Graceful node shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown)
+
+- [Node Shutdowns](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/)
+ - [Graceful node shutdown](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown)
+ - [Configuring graceful node shutdown](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#configuring-graceful-node-shutdown)
+
- [Set Kubelet parameters via a config file](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/)
-- [Graceful Node shutdown Single Node Cluster](https://github.com/k3s-io/k3s/discussions/4319)
-#### **Embedded software in K3s**
+- [K3s. Graceful Node shutdown Single Node Cluster](https://github.com/k3s-io/k3s/discussions/4319)
+
+#### Embedded software in K3s
- [Kubernetes](https://kubernetes.io/)
-- [strongSwan](https://strongswan.org/)
- [CoreDNS](https://coredns.io/)
- [Traefik](https://traefik.io/)
- [Traefik on GitHub](https://github.com/traefik/traefik)
@@ -1552,14 +1760,12 @@ Notice the `STATUS` column. All the pods that appear `Terminated` there are in f
- [Containerd K3s version on GitHub](https://github.com/k3s-io/containerd)
- [Runc](https://github.com/opencontainers/runc)
- [Flannel](https://github.com/flannel-io/flannel)
-- [Flannel on GitHub](https://github.com/flannel-io/flannel)
- [Metrics-server](https://github.com/kubernetes-sigs/metrics-server)
- [Helm-controller](https://github.com/k3s-io/helm-controller)
- [Local-path-provisioner](https://github.com/rancher/local-path-provisioner)
-### _YAML_
+### [YAML](http://yaml.org/)
-- [YAML Ain't Markup Language](http://yaml.org/)
- [YAML Tutorial](https://www.tutorialspoint.com/yaml/index.htm)
## Navigation
diff --git a/G026 - K3s cluster setup 09 ~ Setting up a kubectl client for remote access.md b/G026 - K3s cluster setup 09 ~ Setting up a kubectl client for remote access.md
index fe5e349..4341057 100644
--- a/G026 - K3s cluster setup 09 ~ Setting up a kubectl client for remote access.md
+++ b/G026 - K3s cluster setup 09 ~ Setting up a kubectl client for remote access.md
@@ -1,74 +1,100 @@
# G026 - K3s cluster setup 09 ~ Setting up a `kubectl` client for remote access
-To manage a K3s Kubernetes cluster through `kubectl` is recommended not to do it directly from the _server_ nodes, but to connect remotely from another computer. This way, you won't have to copy your `.yaml` files describing your deployments or configurations directly on any of your server nodes.
-
-## Scenario
-
-In this guide I'll assume that you want to access remotely your K3s cluster from a Debian-based Linux computer. This guide will use the `curl` command, which may not come installed in your Linux distribution. In a Debian system, install the `curl` package as follows:
-
-~~~bash
+- [Never handle your Kubernetes cluster directly from server nodes](#never-handle-your-kubernetes-cluster-directly-from-server-nodes)
+- [Description of the `kubectl` client system](#description-of-the-kubectl-client-system)
+- [Getting the right version of `kubectl`](#getting-the-right-version-of-kubectl)
+- [Installing `kubectl` on your client system](#installing-kubectl-on-your-client-system)
+- [Getting the configuration for accessing the K3s cluster](#getting-the-configuration-for-accessing-the-k3s-cluster)
+- [Opening the `6443` port in the K3s server node](#opening-the-6443-port-in-the-k3s-server-node)
+- [Enabling bash autocompletion for `kubectl`](#enabling-bash-autocompletion-for-kubectl)
+- [Validate Kubernetes configuration files with `kubeconform`](#validate-kubernetes-configuration-files-with-kubeconform)
+- [Relevant system paths](#relevant-system-paths)
+ - [Folders in client system](#folders-in-client-system)
+ - [Files in client system](#files-in-client-system)
+ - [Folder in K3s server node](#folder-in-k3s-server-node)
+ - [File in K3s server node](#file-in-k3s-server-node)
+- [References](#references)
+ - [Kubernetes](#kubernetes)
+ - [K3s](#k3s)
+ - [kubeconform](#kubeconform)
+- [Navigation](#navigation)
+
+## Never handle your Kubernetes cluster directly from server nodes
+
+It is recommended not to manage a K3s Kubernetes cluster through `kubectl` directly from the server nodes, but to connect remotely from another client computer. This way, you don't have to copy the `.yaml` files describing your deployments or configurations onto any of your server nodes.
+
+## Description of the `kubectl` client system
+
+This chapter assumes that you want to access your K3s cluster remotely from a Debian-based Linux client system. For convenience, it uses the `curl` command, which may not come installed by default in your client system. In a Debian-based system, you can install the `curl` package as follows:
+
+~~~sh
$ sudo apt install -y curl
~~~
## Getting the right version of `kubectl`
-The first thing you must know is the version of the K3s cluster you're going to connect to. This is important because `kubectl` is guaranteed to be compatible only with its own correlative version or those that are at one _minor_ version of difference from it. For instance, at the time of writing this guide, the latest `kubectl` _minor_ version is 1.22, meaning that it's compatible with the 1.21, 1.22 and future 1.23 versions of the Kubernetes api. K3s follows the same versioning system, since it's "just" a particular distribution of Kubernetes.
+The first thing you must know is the version of the K3s cluster you're going to connect to. This is important because `kubectl` is only guaranteed to be compatible with clusters running its own _minor_ version, or one _minor_ version above or below it.
+
+For instance, at the time of writing this chapter, the latest `kubectl` _minor_ version is 1.34, meaning that it's compatible with the 1.33, 1.34 and future 1.35 versions of the Kubernetes API. K3s follows the same versioning system, since it is "just" a particular distribution of Kubernetes.
-To check the version of your K3s cluster, connect to your `k3sserver01` **serve node** and execute the following `k3s` command.
+Open a shell to your `k3sserver01` server node and check your K3s software version with this `k3s` command:
-~~~bash
+~~~sh
$ sudo k3s --version
-k3s version v1.22.3+k3s1 (61a2aab2)
-go version go1.16.8
+k3s version v1.33.4+k3s1 (148243c4)
+go version go1.24.5
~~~
-The line `k3s version` is the one you have to pay attention to and, in particular, read the `v1.22.3` part. This K3s server node is running Kubernetes version `1.22.3`, and you can connect to it with the latest `1.22` version you can get of the `kubectl` command.
+Look at the `k3s version` line and read the `v1.33.4` part. This K3s server node is running Kubernetes version `1.33.4`, and you can connect to it with the latest `1.34` release of the `kubectl` command.
-To know which is the latest stable release of Kubernetes, check [this `stable.txt` file](https://dl.k8s.io/release/stable.txt) online. It just contains a version string which, at the time of writing this, is `v1.22.4`.
+To know which is the latest stable release of Kubernetes, check [this `stable.txt` file](https://dl.k8s.io/release/stable.txt) online. It just contains a version string which, at the time of writing this, is `v1.34.1`.
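+
+For instance, you can read that version string straight from your terminal with the same `curl` call used later in this chapter; the output shown below is simply the file's content at the time of writing:
+
+~~~sh
+$ curl -L -s https://dl.k8s.io/release/stable.txt
+v1.34.1
+~~~
+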
## Installing `kubectl` on your client system
-Now you know the version of `kubectl` you have to put in your client system, but first you must prepare it to install the command. First of all, don't install `kubectl` with a software manager like apt or yum. This is to avoid that a regular update changes your version of the command to an uncompatible one with your cluster, or not being able to upgrade `kubectl` because some reason of other. So, better make a manual, non system-wide installation of `kubectl` in your client computer.
+Your client system has to be prepared before you can install the `kubectl` command in it. First of all, **do not install `kubectl` with a software manager like apt or yum**. This prevents a regular update from changing your version of the command to one incompatible with your cluster, and avoids situations where you cannot upgrade `kubectl` for some reason or other. Instead, make a manual, non system-wide installation of `kubectl` in your client computer.
-Said that, let's proceed with this manual installation of `kubectl`.
+That said, let's proceed with this manual installation of `kubectl`:
-1. Get into your client system as your preferred user and open a shell terminal in it. Then, remaining in the $HOME directory of your user, execute the following `mkdir` command.
+1. Get into your Linux client system as your preferred user and open a shell terminal in it. Then, remaining in your user's `$HOME` directory, execute the following `mkdir` command:
- ~~~bash
+ ~~~sh
$ mkdir -p $HOME/bin $HOME/.kube
~~~
- The folders I've created are the following.
+ These new folders are created for specific reasons:
- - `$HOME/bin`: in some Linux systems like Debian, this directory is already in the `$PATH` of any user, but has to be created.
- - `$HOME/.kube`: where the kubectl command will look for the configuration file to connect to the K3s cluster.
+ - `$HOME/bin`\
+ Folder where you have to put the `kubectl` command to make it available only for your user. In some Linux systems like Debian, this directory is already specified in the user's `$PATH`, but must be explicitly created.
-2. Download the `kubectl` command in your `$HOME/bin/kubectl-bin` folder with `curl` as follows.
+ - `$HOME/.kube`\
+ This folder is where the `kubectl` command looks by default for the configuration file to connect to the Kubernetes cluster.
- ~~~bash
+2. Download the `kubectl` command into your `$HOME/bin` folder with `curl` as follows:
+
+ ~~~sh
$ curl -L "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" -o $HOME/bin/kubectl
~~~
- Check with `ls` the file you've downloaded.
+ Check with `ls` the file you have just downloaded:
- ~~~bash
+ ~~~sh
$ ls bin
kubectl
~~~
-3. Adjust the permission of this file so only your current user can execute it.
+3. Adjust the `kubectl` file permissions so that your current user is the only one who can execute it:
- ~~~bash
+ ~~~sh
$ chmod 700 $HOME/bin/kubectl
~~~
-At this point, **DON'T** execute the `kubectl` command yet! You still need to get the configuration for connecting with the cluster, so keep reading.
+At this point, **DO NOT execute the `kubectl` command yet!** You still need to get the configuration for connecting with your K3s cluster, so keep on reading this chapter.
## Getting the configuration for accessing the K3s cluster
-The configuration file you need is inside the server nodes of your K3s cluster.
+The configuration file you need is inside the server node of your K3s cluster:
-1. Get into your K3s cluster's server node, and open the `/etc/rancher/k3s/k3s.yaml` file in it. It should look like below.
+1. Get into your K3s cluster's server node, and open the `/etc/rancher/k3s/k3s.yaml` file in it. It should look like this:
~~~yaml
apiVersion: v1
@@ -92,154 +118,152 @@ The configuration file you need is inside the server nodes of your K3s cluster.
client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpCTkxJK0ZtNFNqZUlqUFBiSnFNRWlDWmtuU1dJL0JOYnNWWVM1VkhydTZvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMm1mQ1FYeDFaOWNUMTZmcUMweVRLQVhZdmxXNEVGME1GVTVtSjRHV1llbkFaOVhFclNMSApCZk5sNEVzbDk4b2FTa3VnVUREMGg5SHFCb0VPeVVVWnVRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
~~~
-2. Copy the `k3s.yaml` file in your client system, then move it to the `.kube` folder in your user's `$HOME` path but renamed as `config`.
+2. Copy the `k3s.yaml` file into your user's `$HOME/.kube` folder on your client system, then rename it to `config`:
- ~~~bash
- $ mv k3s.yaml $HOME/.kube/config
+ ~~~sh
+ $ mv $HOME/.kube/k3s.yaml $HOME/.kube/config
~~~
Then adjust the permissions and ownership of the `config` file as follows.
- ~~~bash
+ ~~~sh
$ chmod 640 $HOME/.kube/config
$ chown youruser:yourusergroup $HOME/.kube/config
~~~
- - Alternatively, you could just create the `config` file, then paste the contents of the `k3s.yaml` file in it.
+ - Alternatively, you could just create an empty `config` file, then paste the contents of the `k3s.yaml` file in it:
- ~~~bash
- $ touch $HOME/.kube/config ; chmod 640 $HOME/.kube/config
- ~~~
+ ~~~sh
+ $ touch $HOME/.kube/config ; chmod 640 $HOME/.kube/config
+ ~~~
-3. Edit the `config` file and edit the `server:` line present there. You'll have to replace the url with the external IP and port of the K3s server node from which you got the configuration file. For instance, it could be `https://192.168.1.21:6443` (the `k3sserver01` node in this guide series).
+3. Edit the `config` file and change the `server:` line in it. You'll have to replace the URL with the external IP and port of the K3s server node from which you got the configuration file. For instance, it could be `https://10.4.1.1:6443` (the `k3sserver01` node in this guide):
~~~yaml
...
- server: https://192.168.1.21:6443
+ server: https://10.4.1.1:6443
...
~~~
- Save the change to the file.
+ Save the change.
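+
+Optionally, you can double-check which endpoint `kubectl` will target without contacting the cluster yet (remember that the `6443` port is still closed at this point). A quick sanity check with `kubectl config view`, whose output below just reflects the IP used in this guide:
+
+~~~sh
+$ kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'
+https://10.4.1.1:6443
+~~~
+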
## Opening the `6443` port in the K3s server node
-Now is the moment to open the `6443` port on the external IPs of your K3s server node, but only for the IP of your `kubectl` client. I'll list you briefly the steps to do so.
-
-1. Go to your Proxmox VE web console, then go to the `Datacenter > Firewall > Alias` view. There, add your client's IP as a new alias, but giving it a significant name. For instance, you could use the following schema.
-
- ~~~bash
- ext_sys_[client's hostname]_net[network interface number]
- ~~~
+This is the moment for opening the `6443` port on the external IPs of your K3s server node:
- If the client happened to be called `kubectl_client`, the alias would be named `ext_sys_kubectl_client_net0`.
+1. Log in to your Proxmox VE web console, then browse to the `Datacenter > Firewall > Security Group` page. Add the following rule to the `k3s_srvrs_net0_in` security group.
-2. Browse to the `Datacenter > Firewall > IPSet` page and create a new IP set called `kubectl_ext_clients`. Add to it the alias created in the previous step.
+ - Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `local_network_ips`, Dest. port `6443`, Comment `K3s API server port open externally for LAN kubectl clients`.
-3. Go to the `Datacenter > Firewall > Security Group` page. Add to the `k3s_srvrs_net0_in` security group the following rule.
+ The `local_network_ips` source is an IP set you already created back [in the previous chapter **G025**](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#k3s-node-vms-firewall-setup). This allows you to connect as a `kubectl` client from any IP in your local network, something very convenient when your router uses dynamic IP assignment for your devices (which is the most common setup in household LANs).
- - Type `in`, Action `ACCEPT`, Protocol `tcp`, Source `kubectl_ext_clients`, Dest. port `6443`, Comment `K3s api server port open externally for kubectl external clients`.
+ The security group should look like this:
- The security group should end looking like in the next screenshot.
+ 
- 
+ > [!NOTE]
+ > **Consider a more restrictive access to the `6443` port by using static IPs in your local network**\
+ > If you only want a specific set of devices to be able to act as `kubectl` clients, you will have to assign them static IPs in your LAN. Then, in your Proxmox VE system, make an alias for each of those static IPs and put all those aliases in the same IP set. Finally, set that IP set as the source of the rule you have established in this step.
-4. To verify that you can connect to the cluster, try in your client the `kubectl cluster-info` command.
+2. To verify that you can connect to the cluster, try the `kubectl cluster-info` command from your client:
- ~~~bash
- $ kubectl cluster-info
- Kubernetes control plane is running at https://192.168.1.21:6443
- CoreDNS is running at https://192.168.1.21:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
- Metrics-server is running at https://192.168.1.21:6443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy
+ ~~~sh
+ $ kubectl cluster-info
+ Kubernetes control plane is running at https://10.4.1.1:6443
+ CoreDNS is running at https://10.4.1.1:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
~~~
-And this is it! From now on, you can execute any `kubectl` command you want on your cluster from this particular client.
+ This output confirms that you can start managing your cluster remotely from your `kubectl` client.
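+
+If `kubectl cluster-info` hangs or gets its connection refused, first verify plain TCP reachability of the API server port from your client. A quick check, assuming the `netcat` (`nc`) tool is available in your client system:
+
+~~~sh
+$ nc -zv 10.4.1.1 6443
+~~~
+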
## Enabling bash autocompletion for `kubectl`
-If you are using bash in your client system, you can enable the bash autocompletion for `kubectl`.
+If you are using bash in your client system, you can enable the bash autocompletion for `kubectl`:
-1. Open a terminal in your client system and do the following.
+1. Open a terminal in your client system and do the following:
- ~~~bash
+ ~~~sh
$ sudo touch /etc/bash_completion.d/kubectl
$ kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl
~~~
-2. Then, execute the following `source` command to enable the new bash autocompletion rules.
+2. Then, execute the following `source` command to enable the new bash autocompletion rules:
- ~~~bash
+ ~~~sh
source ~/.bashrc
~~~
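+
+Optionally, you can also give `kubectl` a shorter alias and hook the same completion rules to it. The snippet below is just a convenience sketch based on the usual `kubectl` completion setup, so adjust it to your liking:
+
+~~~sh
+$ echo 'alias k=kubectl' >> ~/.bashrc
+$ echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc
+$ source ~/.bashrc
+~~~
+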
-## **Kubeval**, tool for validating Kubernetes configuration files
+## Validate Kubernetes configuration files with `kubeconform`
-Since from now on you're going to deal with Kubernetes configuration files, you'd like to know if they're valid **before** you apply them in your K3s cluster. To help you with this task, there's a command line tool called [**Kubeval**](https://www.kubeval.com/) that you can install in your `kubectl` client system [as follows](https://www.kubeval.com/installation/).
+Since from now on you're going to deal with Kubernetes configuration files, you will want to know if they're valid before applying them in your K3s cluster. To help you with this task, there is a command line tool called [**kubeconform**](https://github.com/yannh/kubeconform) that you can install in your `kubectl` client system [as follows](https://github.com/yannh/kubeconform?tab=readme-ov-file#Installation).
-1. Download the compressed package containing the executable in your `$HOME/bin` directory (which you created already during the `kubectl` setup).
+1. Download the compressed package containing the executable into your `$HOME/bin` directory (which you already created during the `kubectl` setup):
- ~~~bash
- $ cd $HOME/bin ; wget https://github.com/instrumenta/kubeval/releases/latest/download/kubeval-linux-amd64.tar.gz
+ ~~~sh
+ $ cd $HOME/bin ; wget https://github.com/yannh/kubeconform/releases/download/v0.7.0/kubeconform-linux-amd64.tar.gz
~~~
-2. Unpackage the `tar.gz`'s contents.
+2. Unpack the `tar.gz` file:
- ~~~bash
- $ tar xvf kubeval-linux-amd64.tar.gz
+ ~~~sh
+ $ tar xvf kubeconform-linux-amd64.tar.gz
~~~
-3. The `tar` command will extract three files. One is the `kubeval` command, the other two are a `LICENSE` and a `README.md` file that you can delete together with the `tar.gz`.
+3. The `tar` command will extract two files. One is the `kubeconform` command, the other is a `LICENSE` file that you can delete together with the `tar.gz`:
- ~~~bash
- $ rm kubeval-linux-amd64.tar.gz LICENSE README.md
+ ~~~sh
+ $ rm kubeconform-linux-amd64.tar.gz LICENSE
~~~
-4. The kubeval command already comes enabled for execution, but you might like to restrict its permission mode so only your user can execute it.
+4. The `kubeconform` command already comes enabled for execution, but you might like to restrict its permission mode so only your user can execute it:
- ~~~bash
- $ chmod 700 kubeval
+ ~~~sh
+ $ chmod 700 kubeconform
~~~
5. Test the command by getting its version.
- ~~~bash
- $ kubeval --version
- Version: 0.16.1
- Commit: f5dba6b486fa18b9179b91e15eb6f2b0f7a5a69e
- Date: 2021-03-30T15:17:06Z
+ ~~~sh
+ $ kubeconform -v
+ v0.7.0
~~~
-> **BEWARE!**
-> Theres's an important issue with this command, as of its `0.16.1` version at least. It cannot validate the Kubernetes kustomize `kustomization.yaml` files (which you'll use a lot in upcoming guides) properly and returns errors where it shouldn't while checking them.
-
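+
+You will get plenty of manifests to validate in upcoming chapters. As a hedged illustration of a typical invocation (the `my-app.yaml` file name is just a placeholder, not a file from this guide):
+
+~~~sh
+$ kubeconform -strict -summary my-app.yaml
+~~~
+
+When a manifest declares custom resources, like the MetalLB ones coming in the next chapters, `kubeconform` may not find schemas for them; its `-ignore-missing-schemas` flag covers that case.
+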
## Relevant system paths
-### _Folders in client system_
+### Folders in client system
- `$HOME`
- `$HOME/.kube`
- `$HOME/bin`
-### _Files in client system_
+### Files in client system
- `$HOME/.kube/config`
- `$HOME/bin/kubectl`
-- `$HOME/bin/kubeval`
+- `$HOME/bin/kubeconform`
-### _Folder in K3s server node_
+### Folder in K3s server node
- `/etc/rancher/k3s`
-### _File in K3s server node_
+### File in K3s server node
- `/etc/rancher/k3s/k3s.yaml`
## References
-- [Install and Set Up `kubectl` on Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
-- [K3s cluster access](https://rancher.com/docs/k3s/latest/en/cluster-access/)
-- [Kubeval](https://www.kubeval.com/)
+### [Kubernetes](https://kubernetes.io/)
+
+- [Install and Set Up kubectl on Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
+
+### [K3s](https://docs.k3s.io/)
+
+- [Cluster Access](https://docs.k3s.io/cluster-access)
+
+### [kubeconform](https://github.com/yannh/kubeconform)
+
+- [Installation](https://github.com/yannh/kubeconform?tab=readme-ov-file#Installation)
## Navigation
diff --git a/G027 - K3s cluster setup 10 ~ Deploying the MetalLB load balancer.md b/G027 - K3s cluster setup 10 ~ Deploying the MetalLB load balancer.md
index 6b67d5e..1fa0fe8 100644
--- a/G027 - K3s cluster setup 10 ~ Deploying the MetalLB load balancer.md
+++ b/G027 - K3s cluster setup 10 ~ Deploying the MetalLB load balancer.md
@@ -1,98 +1,170 @@
# G027 - K3s cluster setup 10 ~ Deploying the MetalLB load balancer
-You've got your K3s cluster up and running, but it's missing a crucial component: a load balancer. I told you in the previous [**G025** guide](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#the-k3sserver01-nodes-configyaml-file) to disable the default one because here you'll deploy a more capable and popular alternative called [MetalLB](https://metallb.universe.tf/).
+- [MetalLB as the load balancer of choice](#metallb-as-the-load-balancer-of-choice)
+- [Considerations before deploying MetalLB](#considerations-before-deploying-metallb)
+ - [Choosing the right mode of operation for MetalLB](#choosing-the-right-mode-of-operation-for-metallb)
+ - [Reserve an IP range for services](#reserve-an-ip-range-for-services)
+ - [Ports used by MetalLB](#ports-used-by-metallb)
+ - [Deploying from an external `kubectl` client](#deploying-from-an-external-kubectl-client)
+- [Choosing the IP ranges for MetalLB](#choosing-the-ip-ranges-for-metallb)
+- [Deploying MetalLB on your K3s cluster](#deploying-metallb-on-your-k3s-cluster)
+ - [Preparing the Kustomize folder structure](#preparing-the-kustomize-folder-structure)
+ - [Setting up the configuration files](#setting-up-the-configuration-files)
+ - [Deploying MetalLB](#deploying-metallb)
+- [MetalLB's Kustomize project attached to this guide](#metallbs-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](#relevant-system-paths)
+ - [Folders on remote kubectl client](#folders-on-remote-kubectl-client)
+ - [Files on remote kubectl client](#files-on-remote-kubectl-client)
+- [References](#references)
+ - [MetalLB](#metallb)
+ - [Kustomize](#kustomize)
+- [Navigation](#navigation)
+
+## MetalLB as the load balancer of choice
+
+You have your K3s cluster up and running, but it is missing a crucial component: a load balancer.
+
+I told you in the previous [chapter **G025**](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#the-k3sserver01-nodes-configyaml-file) to disable the default one because here you will deploy a more capable and popular alternative called [MetalLB](https://metallb.io/).
## Considerations before deploying MetalLB
Before you can deploy MetalLB in your K3s cluster, there are certain points you must consider first.
-> **BEWARE!**
-> The configuration shown here is only valid for MetalLB versions **previous** to the `0.13.0` release. From that version onwards, the configuration has to be set in a different way. This doesn't meant that this whole guide is invalid, is just a change in how the configuration is specified to MetalLB. I'll remind you about this issue later in this guide, so you can apply the proper correction if you're using the `0.13.0` or superior version of MetalLB.
+### Choosing the right mode of operation for MetalLB
-### _Choosing the right mode of operation for MetalLB_
+MetalLB can work in one of these two modes:
-MetalLB can work in one of two modes: [**layer 2**](https://metallb.universe.tf/concepts/layer2/) or [**BGP**](https://metallb.universe.tf/concepts/bgp/). The layer 2 option is the one that fits your K3s cluster, and is the most simple and straightforward to configure and run. BGP, on the other hand, requires a more complex setup (including network traffic routing) more appropriate for large Kubernetes clusters.
+- [**Layer 2 (_L2_)**](https://metallb.io/concepts/layer2/).
+- [**BGP**](https://metallb.io/concepts/bgp/).
-### _Reserve an IP range for services_
+The layer 2 option is the one that fits your K3s cluster, and is the simplest and most straightforward mode to configure and run. BGP, on the other hand, requires a more complex setup (including network traffic routing) better suited to large Kubernetes clusters.
-You need to have a range, a continuous one if possible, of free IPs in your network. MetalLB, in layer 2 mode, will then assign IPs to each app you expose directly through it. This is to avoid collisions between services that happen to use the same ports, like the widely used 80 or 443. There's also the possibility of assigning just one IP to the load balancer, but it would imply micromanaging the ports of each service you deploy in your K3s cluster.
+### Reserve an IP range for services
-On the other hand, remember that you've setup your cluster to use two networks, one for internal communications and other to face the external network. You'll only have to reserve an IP range in your external network, since the internal communications will remain within your cluster. You'll have to ensure having enough IPs available for your services, something that could be problematic in your external network, since it's also where your other devices are connecting to. So, if you haven't done it already at this point, organize your external network by assigning static IPs to all your devices, and clear a range of IPs that MetalLB can then use freely.
+You need to reserve a range, continuous if possible, of free IP addresses in your network. MetalLB, in layer 2 mode, will then assign an IP from that range to each app you expose directly through it. This is to avoid collisions between services that happen to use the same ports, like the widely used 80 or 443. There is also the possibility of assigning just one IP to the load balancer, but that would imply micromanaging the ports of each service you deploy in your K3s cluster.
-### _Ports used by MetalLB_
+On the other hand, remember that you have configured your cluster to use two networks, one for internal communications and another facing the external network. You only have to reserve an IP range in your external network (your LAN), since the internal communications will remain within your cluster. You have to ensure you have enough IPs available for your services, something that could be problematic in your external network, since it is also where your other devices connect.
-MetalLB requires the `7946` port open both in TCP and UDP in all the nodes of your cluster, but only for internal communications among the MetalLB-related processes running on your cluster nodes. So, this `7946` port will be seen only in the internal network that runs through your isolated `vmbr1` bridge. This means that you don't have to worry about adding specific firewall rules to open this port on your K3s cluster nodes.
+You have two ways to deal with the issue of possible IP conflicts between your devices and the apps exposed to your LAN by MetalLB:
-### _Deploying from an external `kubectl` client_
+- **Assign static IPs to all devices in your LAN**\
+ Doable although cumbersome, since this demands managing all IP assignments in your LAN manually. Still, this is the option that almost guarantees your devices and apps will not collide in their IP assignments (conflicts may still happen if you also leave dynamic IP assignment enabled). If you opt for this method, be sure to clear a range of IPs in your router (meaning, do not assign any IP from that range to any device) so MetalLB can use it freely.
-In the previous [**G026** guide](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md) you've seen how to prepare an external `kubectl` client system for managing remotely your K3s cluster. Just don't forget to have such client ready and **always** use it for handling your K3s cluster. This guide and the following ones will assume that you're using this `kubectl` client system.
+- **Make your private network assign IPs from the `10.0.0.0/8` range**\
+ As I already explained [back in chapter **G025**](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md#criteria-for-ips), for my LAN I opted to use the biggest IPv4 range available for private networks: `10.0.0.0/8`. Still, this measure only mitigates the possibility of conflict between a device and an app exposed by MetalLB (or just with another device). This also depends on how capable your LAN's router is at handling IP assignments. The good thing is that you do not have to manually manage the IPs assigned to your devices.
+
+In my case, I opted to "risk it" and stick with the dynamic IP assignment to allow devices to connect with randomized MACs, which is the default behavior nowadays.
+
+### Ports used by MetalLB
+
+When using the L2 operating mode, MetalLB requires the `7946` port open both in TCP and UDP in all the nodes of your cluster, but only for internal communications among the MetalLB-related processes running on your cluster nodes. So, this `7946` port will be seen only in the internal network that runs through your isolated `vmbr1` bridge. This means that you do not have to worry about adding specific firewall rules to open this port on your K3s cluster nodes.
+
+### Deploying from an external `kubectl` client
+
+In the previous [chapter **G026**](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md) you prepared an external `kubectl` client system for managing your K3s cluster remotely. Just don't forget to have that client ready and always use it for handling your K3s cluster. This and the following chapters will assume that you're using this `kubectl` client system.
## Choosing the IP ranges for MetalLB
-You have to choose an IP range on the external network your K3s cluster is connected to. This IP range should leave out the IPs already used by the K3s nodes themselves, helping you in keeping the nodes differentiated from the services deployed in them. Also bear in mind that MetalLB links IPs to services, so when MetalLB moves a service from one node to another, the IP sticks to the service. So, any IP within the ranges managed by MetalLB can jump from node to node of your cluster as seen fit by the load balancer.
+You have to choose an IP range on the external network your K3s cluster is connected to. This IP range should leave out the IPs already used by the K3s nodes themselves, helping you keep the nodes differentiated from the services deployed in them. In this chapter, the chosen IP subrange "reserved" for MetalLB is `10.7.0.0-10.7.0.20`. Notice that it only has twenty-one IPs, enough for the small number of apps or services that are going to be exposed with external IPs in later chapters of this guide.
-In the external network `192.168.1.0` used in this guide series, all the VMs created previously don't go over the IP `192.168.1.40`. Assuming that all other devices (including the Proxmox VE host) have IPs beyond `192.168.1.100`, this means that a continuous IP range available for MetalLB starts at `192.168.1.41` and can end at `192.168.1.100`.
+> [!IMPORTANT]
+> **The bigger the range, the greater the risk of having IP conflicts**\
+> In a private network where IPs are dynamically assigned to devices, you want to keep the MetalLB IP range as small as possible to reduce the chance of IP conflicts.
+
+Also bear in mind that MetalLB links IPs to services. When MetalLB moves a service from one node to another, the IP sticks to the service. Any IP within the ranges managed by MetalLB can jump from node to node of your cluster as seen fit by the load balancer.
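+
+If you ever need a service to always receive one concrete IP from the pool, MetalLB can be asked for it with an annotation on the service itself. Take the following YAML only as a hedged sketch: the service name, selector and IP are hypothetical, and it assumes the `metallb.universe.tf/loadBalancerIPs` annotation available in recent MetalLB releases.
+
+~~~yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-app                  # hypothetical service name
+  annotations:
+    # Ask MetalLB for a specific IP within the pool range.
+    metallb.universe.tf/loadBalancerIPs: 10.7.0.5
+spec:
+  type: LoadBalancer
+  selector:
+    app: my-app                 # hypothetical pod selector
+  ports:
+    - port: 80
+      targetPort: 8080
+~~~
+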
## Deploying MetalLB on your K3s cluster
Next, I'll show you how to deploy MetalLB using `kubectl` and [Kustomize](https://kubectl.docs.kubernetes.io/guides/introduction/kustomize/). **Kustomize** is the official Kubernetes tool for customizing resource configuration without using templates or other techniques as is done with tools such as Helm. Kustomize is already integrated in the `kubectl` command, so you don't need to install anything else in your client system.
-### _Preparing the Kustomize folder structure_
+### Preparing the Kustomize folder structure
-It's better to treat each deployment as an independent project with its own folder structure. On this regard, there's the _overlay_ model [shown in the official introduction to Kustomize](https://kubectl.docs.kubernetes.io/guides/introduction/kustomize/#2-create-variants-using-overlays), but also I found [another one in this "best practices" article](https://www.openanalytics.eu/blog/2021/02/23/kustomize-best-practices/) meant for a repository-based organization of Kustomize projects. I'll base the folder structures for the Kustomize projects you'll see in this and upcoming guides on what is indicated in that article.
+It is better to treat each deployment as an independent project with its own folder structure. In this regard, there is the _overlay_ model [shown in the official introduction to Kustomize](https://kubectl.docs.kubernetes.io/guides/introduction/kustomize/#2-create-variants-using-overlays), but I also found [another one in this "best practices" article](https://www.openanalytics.eu/blog/2021/02/23/kustomize-best-practices/) meant for a repository-based organization of Kustomize projects. I'll base the folder structures for the Kustomize projects you'll see in this and upcoming chapters on what is indicated in the "best practices" article.
-Therefore, create a folder structure for your MetalLB deployment files as follows.
+Therefore, begin by creating a folder structure for your MetalLB deployment files as follows:
-~~~bash
-$ mkdir -p $HOME/k8sprjs/metallb/configs
+~~~sh
+$ mkdir -p $HOME/k8sprjs/metallb/resources
~~~
Notice that I've created a structure of three folders:
-- `k8sprjs`: where the MetalLB and any future Kustomize projects can be kept.
-- `metallb`: for the MetalLB deployment Kustomize project.
-- `configs`: to hold MetalLB configuration files.
+- `k8sprjs`\
+ Where the MetalLB and any future Kustomize projects can be kept.
+
+- `metallb`\
+ For the MetalLB deployment Kustomize project.
-Also, needless to say that you could use any other base path instead of `$HOME` in your kubectl client system.
+- `resources`\
+ Holds MetalLB resources' configuration files.
-### _Setting up the configuration files_
+Needless to say, you can use any other base path instead of `$HOME` within your `kubectl` client system.
-Now you need to create the files that describe the deployment of MetalLB.
+### Setting up the configuration files
-1. MetalLB reads its configuration from a particular configuration file called `config`, so create a new empty one in the `configs` folder.
+Now you need to create the files that describe the MetalLB deployment:
- ~~~bash
- $ touch $HOME/k8sprjs/metallb/configs/config
+1. In the `resources` folder, create the files `l2-ip.l2advertisement.metallb.yaml` and `default-pool.ipaddresspool.metallb.yaml`:
+
+ ~~~sh
+ $ touch $HOME/k8sprjs/metallb/resources/{l2-ip.l2advertisement.metallb.yaml,default-pool.ipaddresspool.metallb.yaml}
~~~
-2. Edit the new `config` and put the following yaml lines in it.
+2. In `l2-ip.l2advertisement.metallb.yaml` you specify the operation mode and pool to use:
+
+ ~~~yaml
+ apiVersion: metallb.io/v1beta1
+ kind: L2Advertisement
+
+ metadata:
+ name: l2-ip
+ spec:
+ ipAddressPools:
+ - default-pool
+ ~~~
+
+ This YAML indicates to MetalLB the following:
+
+ - The kind `L2Advertisement` sets the operating mode to L2.
+
+ - The `spec.ipAddressPools` parameter points to the pools of usable IPs. In this case it is just one, named `default-pool`.
+
+3. Declare the IP address pool in `default-pool.ipaddresspool.metallb.yaml`:
~~~yaml
- address-pools:
- - name: default
- protocol: layer2
+ apiVersion: metallb.io/v1beta1
+ kind: IPAddressPool
+
+ metadata:
+ name: default-pool
+ spec:
addresses:
- - 192.168.1.41-192.168.1.80
+ - 10.7.0.0-10.7.0.20
~~~
- Above you can see how a pool of IPs named `default` is defined to operate with the `layer2` protocol and has a concrete IP range defined under the `addresses` parameter, corresponding with what you've seen detailed previously in this guide.
+ Here you have configured a simple pool of IP addresses:
+
+ - The kind `IPAddressPool` indicates that this is a MetalLB pool of IP addresses.
+
+ - The name matches the `default-pool` name referenced in the `l2-ip.l2advertisement.metallb.yaml` file.
- Alternatively, you could have an address pool that include several different IP ranges, something useful if you don't have a big continuous range of IPs available in your network. For instance, you could have configured the range in the `default` pool as:
+ - The `spec.addresses` parameter is a list of IP ranges that can be expressed in different ways. This is useful when you do not have a big continuous range of IPs available in your network. For instance, you could have configured the `default-pool` IP range as:
~~~yaml
...
- addresses:
- - 192.168.1.41-192.168.1.60
- - 192.168.1.61-192.168.1.80
+ spec:
+ addresses:
+ - 10.7.0.0-10.7.0.10
+ - 10.7.0.11-10.7.0.20
~~~
-3. Next, you need to create the `kustomization.yaml` file that describes the deployment of MetalLB in `kustomize` format.
+4. Create the `kustomization.yaml` file that describes the deployment of MetalLB in `kustomize` format:
- ~~~bash
+ ~~~sh
$ touch $HOME/k8sprjs/metallb/kustomization.yaml
~~~
-4. Edit your new `kustomization.yaml` file, filling it with the configuration lines below.
+5. Specify all the MetalLB resources declared in the previous steps in your new `kustomization.yaml` file:
~~~yaml
# MetalLB setup
@@ -102,160 +174,150 @@ Now you need to create the files that describe the deployment of MetalLB.
namespace: metallb-system
resources:
- - github.com/metallb/metallb//manifests?ref=v0.11.0
-
- configMapGenerator:
- - name: config
- files:
- - configs/config
- options:
- disableNameSuffixHash: true
+ - github.com/metallb/metallb/config/native?ref=v0.15.2
+ - resources/l2-ip.l2advertisement.metallb.yaml
+ - resources/default-pool.ipaddresspool.metallb.yaml
~~~
- There are a number of things to notice in the yaml above.
+ Things to notice in this kustomization YAML:
- The file is based on the one offered [in the official MetalLB documentation](https://metallb.org/installation/#installation-with-kustomize).
- - The `namespace` for all the MetalLB resources deployed in your K3s cluster is going to be `metallb-system`. The resources in this project that already have a `namespace` specified will get it changed to this one, and those who doesn't have one will be set to this one too.
+ - The `namespace` for all the MetalLB resources deployed in your K3s cluster is going to be `metallb-system`. The resources in this project that already have a `namespace` specified will get it changed to this one, and those that do not have one will be set to `metallb-system` too.
- - In the `resources` section you see that no manifest is called directly there, but a github url with a reference to a concrete MetalLB version: `ref=v0.11.0`.
+ - The `resources` section lists the files describing the resources used to deploy MetalLB:
- - MetalLB requires a `ConfigMap` resource with a certain IP range configuration set in it. Instead of just creating that config map with a yaml manifest, a `configMapGenerator` is used here.
- - This Kustomize functionality allows you to generate one or more Kubernetes config map resources based on particular configurations on each one of them. There's also a [`secretGenerator` functionality](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/secretgenerator/) with has the same options as the [`configMapGenerator` one](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/configmapgenerator/).
- - In this case, there's only one config map resource configured, the one required by MetalLB for running properly.
- - This config map will be named `config` and will include the contents of the `config` file you created before in the `configs` subfolder.
- - The `disableNameSuffixHash` option is for disabling the default behavior of Kustomize regarding names of config maps and secrets. It adds a suffix to the name of those resources, a hash calculated from their contents like in `config-58565bck2t`. This can be problematic because certain apps don't expect such suffix, hence cannot find their config maps or secrets. MetalLB expects the generated config map `metadata.name` to be just the `config` string, making the use of this `disableNameSuffixHash` option necessary here.
+ - The first item points to the official kustomization file of MetalLB. Notice how the URL also specifies which version of MetalLB to deploy: `ref=v0.15.2`.
- > **BEWARE!**
- > From the version `0.13.0` onwards, is **not** possible to configure MetalLB with configmaps as shown here. The config map has to be transformed into custom resources (or CRs), something indicated in this official [Backward Compatibility note](https://metallb.universe.tf/#backward-compatibility). Check the [guide G912 - Appendix 12](G912%20-%20Appendix%2012%20~%20Adapting%20MetalLB%20config%20to%20CR.md) to see how to adapt the MetalLB kustomize project you've created here.
+ - The other two items point to the local YAML files you have previously configured to define the allotted IP range for MetalLB.
-5. You can check how the final deployment would look as a manifest yaml with `kubectl`.
+6. You can check how the final deployment would look as a YAML manifest with `kubectl`:
- ~~~bash
+ ~~~sh
$ kubectl kustomize $HOME/k8sprjs/metallb/ | less
~~~
- With the `kustomize` option, `kubectl` builds the whole deployment yaml manifest resulting from processing the `kustomization.yaml` file. Since the output can be quite long, it's better to append a `| less` to the command for getting a paginated view of the yaml.
-
- The command takes a moment to finish because it has to download the MetalLB manifests first, then process and combine them with the configuration file in your client system. When you finally see the result, you'll get a quite long yaml output in which you'll find the `config` file embedded as a `ConfigMap` resource like shown below.
+ With the `kustomize` option, `kubectl` builds the whole deployment YAML manifest resulting from processing the `kustomization.yaml` file. Since the output can be quite long, it is better to append a `| less` to the command for getting a paginated view of the YAML.
- ~~~yaml
- ---
- apiVersion: v1
- data:
- config: |
- address-pools:
- - name: default
- protocol: layer2
- addresses:
- - 192.168.1.41-192.168.1.80
- kind: ConfigMap
- metadata:
- name: config
- namespace: metallb-system
- ---
- ~~~
+ The command takes a moment to finish because it has to download the MetalLB manifests first, then process and combine them with the other resource files found in your client system. When you finally see the result, you'll get a quite long YAML output that embeds all of the specified resources. Furthermore, you may notice in the resulting YAML that MetalLB is prepared to look for `L2Advertisement` resources automatically, which means that you do not have to explicitly tell MetalLB which one to use.
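+
+Additionally, since you installed `kubeconform` in your `kubectl` client back in chapter **G026**, you can pipe the built manifest through it as an extra sanity check. This is just a sketch, assuming your `kubeconform` build reads from stdin when given `-` as the file argument; the `-ignore-missing-schemas` flag is there because the MetalLB custom resources have no schemas in kubeconform's default registry:
+
+~~~sh
+$ kubectl kustomize $HOME/k8sprjs/metallb/ | kubeconform -summary -ignore-missing-schemas -
+~~~
+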
-### _Deploying MetalLB_
+### Deploying MetalLB
-Now that you have the configuration files ready, you're just one command away from deploying MetalLB.
+Now that you have your Kustomize project ready, you're just one command away from deploying MetalLB:
-~~~bash
+~~~sh
$ kubectl apply -k $HOME/k8sprjs/metallb/
~~~
-This command will look for a `kustomization.yaml` file in the folder you tell it to process. Then, it builds the whole deployment output like with the `kustomize` option but, instead of displaying it, `kubectl` takes that yaml and directly applies it on your Kubernetes cluster. In this case, the `kubectl` command will return an output like the following.
+This command will look for a `kustomization.yaml` file in the folder you specify. Then, `kubectl` builds the whole deployment output like with the `kustomize` option but, instead of displaying it, takes that YAML and directly applies it to your Kubernetes cluster. In this case, the `kubectl` command will return an output like the following.
-~~~bash
+~~~sh
namespace/metallb-system created
+customresourcedefinition.apiextensions.k8s.io/bfdprofiles.metallb.io created
+customresourcedefinition.apiextensions.k8s.io/bgpadvertisements.metallb.io created
+customresourcedefinition.apiextensions.k8s.io/bgppeers.metallb.io created
+customresourcedefinition.apiextensions.k8s.io/communities.metallb.io created
+customresourcedefinition.apiextensions.k8s.io/ipaddresspools.metallb.io created
+customresourcedefinition.apiextensions.k8s.io/l2advertisements.metallb.io created
+customresourcedefinition.apiextensions.k8s.io/servicebgpstatuses.metallb.io created
+customresourcedefinition.apiextensions.k8s.io/servicel2statuses.metallb.io created
serviceaccount/controller created
serviceaccount/speaker created
-Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
-podsecuritypolicy.policy/controller created
-podsecuritypolicy.policy/speaker created
-role.rbac.authorization.k8s.io/config-watcher created
role.rbac.authorization.k8s.io/controller created
role.rbac.authorization.k8s.io/pod-lister created
clusterrole.rbac.authorization.k8s.io/metallb-system:controller created
clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created
-rolebinding.rbac.authorization.k8s.io/config-watcher created
rolebinding.rbac.authorization.k8s.io/controller created
rolebinding.rbac.authorization.k8s.io/pod-lister created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created
-configmap/config created
+configmap/metallb-excludel2 created
+secret/metallb-webhook-cert created
+service/metallb-webhook-service created
deployment.apps/controller created
daemonset.apps/speaker created
+ipaddresspool.metallb.io/default-pool created
+l2advertisement.metallb.io/l2-ip created
+validatingwebhookconfiguration.admissionregistration.k8s.io/metallb-webhook-configuration created
~~~
-The lines are merely informative about the resources created by your deployment, or sporadic warnings about deprecated apis still used by the software you're installing in your cluster. So, if you don't see a lot of warnings or just errors, the deployment can be considered successful like in the output above.
+The lines inform you about the resources created by your deployment. They could also show sporadic warnings about deprecated APIs still used by the software you're installing in your cluster. If you don't get a lot of warnings or, worse, errors, the deployment can be considered successful, as in the output above.
+
+Give MetalLB a couple of minutes to get ready, then check with `kubectl` that it has been deployed in your cluster.
+
+~~~sh
+$ kubectl get -n metallb-system all -o wide
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod/controller-58fdf44d87-q6l7w 1/1 Running 1 (14m ago) 15m 10.42.1.5 k3sagent02
+pod/speaker-8rrkg 1/1 Running 0 15m 172.16.2.1 k3sagent01
+pod/speaker-grsdm 1/1 Running 0 15m 172.16.2.2 k3sagent02
+pod/speaker-z6dcg 1/1 Running 0 15m 172.16.1.1 k3sserver01
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+service/metallb-webhook-service ClusterIP 10.43.126.18 443/TCP 15m component=controller
-Finally, you can check out how the MetalLB service is running in your cluster.
+NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
+daemonset.apps/speaker 3 3 3 3 3 kubernetes.io/os=linux 15m speaker quay.io/metallb/speaker:v0.15.2 app=metallb,component=speaker
+
+NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
+deployment.apps/controller 1/1 1 1 15m controller quay.io/metallb/controller:v0.15.2 app=metallb,component=controller
+
+NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
+replicaset.apps/controller-58fdf44d87 1 1 1 15m controller quay.io/metallb/controller:v0.15.2 app=metallb,component=controller,pod-template-hash=58fdf44d87
-~~~bash
-$ kubectl get pods -n metallb-system
-NAME READY STATUS RESTARTS AGE
-controller-7dcc8764f4-2bc78 1/1 Running 0 9m48s
-speaker-28nbv 1/1 Running 0 9m48s
-speaker-2x2x5 1/1 Running 0 9m47s
~~~
-The MetalLB resources are all under the `metallb-system` namespace, such as its pods. On the other hand, you can already see in your services the effects of having this load balancer available.
+The MetalLB resources are all under the `metallb-system` namespace. Meanwhile, you can already see the effect that having this load balancer running in the K3s cluster has on your existing services:
-~~~bash
+~~~sh
$ kubectl get svc -A
-NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-default kubernetes ClusterIP 10.43.0.1 443/TCP 122m
-kube-system kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 122m
-kube-system traefik LoadBalancer 10.43.110.37 192.168.1.41 80:30963/TCP,443:32446/TCP 11m
+NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+default kubernetes ClusterIP 10.43.0.1 443/TCP 3d5h
+kube-system kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 3d5h
+kube-system traefik LoadBalancer 10.43.174.63 10.7.0.0 80:30512/TCP,443:32647/TCP 3d5h
+metallb-system metallb-webhook-service ClusterIP 10.43.126.18 443/TCP 12m
~~~
-From all the services you have running at this point in your K3s cluster, the `traefik` service is the one set with the `LoadBalancer` type. Now it has an `EXTERNAL-IP` from the `default` address pool set in the MetalLB configmap. In particular, it has got the very first available IP in the `default` pool.
+Of all the services you have running at this point in your K3s cluster, the `traefik` service is the only one set with the `LoadBalancer` type. Now it has an `EXTERNAL-IP` address assigned from MetalLB's `default-pool`. In particular, it got the very first available IP (`10.7.0.0`) in the pool.
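+
+If you only care about that external IP (to script around it, for instance), you can extract it from the service's status with a `jsonpath` query. A small sketch, using the `traefik` service as it stands at this point of the guide:
+
+~~~sh
+$ kubectl get svc traefik -n kube-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+10.7.0.0
+~~~
+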
-## MetalLB's Kustomize project attached to this guide series
+## MetalLB's Kustomize project attached to this guide
-You can find the Kustomize project for this MetaLB deployment in the following attached folder.
+You can find the Kustomize project for this MetalLB deployment in the following attached folder:
-- `k8sprjs/metallb`
-
-> **BEWARE!**
-> The main `kustomization.yaml` file has the configuration shown here, but also the only one valid from MetalLB `v0.13.0` onwards, although commented out. Be mindful of which one you want to deploy.
+- [`k8sprjs/metallb`](k8sprjs/metallb/)
## Relevant system paths
-### _Folders on remote kubectl client_
+### Folders on remote kubectl client
- `$HOME/k8sprjs`
- `$HOME/k8sprjs/metallb`
-- `$HOME/k8sprjs/metallb/configs`
+- `$HOME/k8sprjs/metallb/resources`
-### _Files on remote kubectl client_
+### Files on remote kubectl client
- `$HOME/k8sprjs/metallb/kustomization.yaml`
-- `$HOME/k8sprjs/metallb/configs/config`
+- `$HOME/k8sprjs/metallb/resources/default-pool.ipaddresspool.metallb.yaml`
+- `$HOME/k8sprjs/metallb/resources/l2-ip.l2advertisement.metallb.yaml`
## References
-### _MetalLB_
+### [MetalLB](https://metallb.io/)
+
+- [GitHub. MetalLB](https://github.com/metallb/metallb)
-- [MetalLB official webpage](https://metallb.universe.tf/)
-- [MetalLB Installation](https://metallb.universe.tf/installation/)
-- [MetalLB Configuration](https://metallb.universe.tf/configuration/)
-- [MetalLB on GitHub](https://github.com/metallb/metallb)
-- [Install and configure MetalLB as a load balancer for Kubernetes](https://blog.inkubate.io/install-and-configure-metallb-as-a-load-balancer-for-kubernetes/)
-- [Running metallb in Layer 2 mode](https://www.shashankv.in/kubernetes/metallb-layer2-mode/)
-- [K8S AND METALLB: A LOADBALANCER FOR ON-PREM DEPLOYMENTS](https://starkandwayne.com/blog/k8s-and-metallb-a-loadbalancer-for-on-prem-deployments/)
-- [Kubernetes Metal LB for On-Prem / BareMetal Cluster in 10 minutes](https://medium.com/@JockDaRock/kubernetes-metal-lb-for-on-prem-baremetal-cluster-in-10-minutes-c2eaeb3fe813)
-- [Configure MetalLB In Layer 2 Mode](https://docs.bitnami.com/kubernetes/infrastructure/metallb/administration/configure-layer2-mode/)
-- [K3s – lightweight kubernetes made ready for production – Part 1](https://digitalis.io/blog/kubernetes/k3s-lightweight-kubernetes-made-ready-for-production-part-1/)
-- [How to Build a Multi-Master Kubernetes Cluster on VMware with MetalLB](https://platform9.com/blog/how-to-build-a-multi-master-cluster-on-vmware-with-metallb/)
+- [Concepts](https://metallb.io/concepts/)
+ - [MetalLB in layer 2 mode](https://metallb.io/concepts/layer2/)
+ - [MetalLB in BGP mode](https://metallb.io/concepts/bgp/)
+- [Installation](https://metallb.io/installation/)
+- [MetalLB Configuration](https://metallb.io/configuration/)
-### _Kustomize_
+### [Kustomize](https://kustomize.io/)
+- [Github. Kustomize](https://github.com/kubernetes-sigs/kustomize)
- [Introduction to Kustomize](https://kubectl.docs.kubernetes.io/guides/introduction/kustomize/)
- [Declarative Management of Kubernetes Objects Using Kustomize](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/)
- [The Kustomization File](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/)
-- [Kustomize on Github](https://github.com/kubernetes-sigs/kustomize)
- [Kustomize Tutorial: Creating a Kubernetes app out of multiple pieces](https://www.mirantis.com/blog/introduction-to-kustomize-part-1-creating-a-kubernetes-app-out-of-multiple-pieces/)
- [Modify your Kubernetes manifests with Kustomize](https://opensource.com/article/21/6/kustomize-kubernetes)
- [Kustomize Best Practices](https://www.openanalytics.eu/blog/2021/02/23/kustomize-best-practices/)
diff --git a/G028 - K3s cluster setup 11 ~ Deploying the metrics-server service.md b/G028 - K3s cluster setup 11 ~ Deploying the metrics-server service.md
index c7b2c15..62105c1 100644
--- a/G028 - K3s cluster setup 11 ~ Deploying the metrics-server service.md
+++ b/G028 - K3s cluster setup 11 ~ Deploying the metrics-server service.md
@@ -1,17 +1,34 @@
# G028 - K3s cluster setup 11 ~ Deploying the metrics-server service
-Another embedded service that was disabled in the installation of your K3s cluster was the metrics-server. This service scrapes resource usage data from your cluster nodes and offers it through its API. The problem with the embedded metrics-server, and with any other embedded service included in K3s, is that you cannot change their configuration, at least not permanently (meaning manipulation through `kubectl`), beyond what's configurable through the parameters you can set to the K3s service itself.
-
-In particular, the embedded metrics-server comes with a default configuration that is not adequate for the setup of your K3s cluster. Since you cannot change the default configuration permanently, it's better to deploy the metrics-server independently in your cluster, but with the proper configuration already set in it.
+- [Deploy a metrics-server service that you can fully configure](#deploy-a-metrics-server-service-that-you-can-fully-configure)
+- [Checking the metrics-server's manifest](#checking-the-metrics-servers-manifest)
+- [Deployment of metrics-server](#deployment-of-metrics-server)
+- [Checking the metrics-server service](#checking-the-metrics-server-service)
+- [Metrics-server's Kustomize project attached to this guide](#metrics-servers-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](#relevant-system-paths)
+ - [Folders on remote kubectl client](#folders-on-remote-kubectl-client)
+ - [Files on remote kubectl client](#files-on-remote-kubectl-client)
+- [References](#references)
+ - [Kubernetes](#kubernetes)
+ - [Kubernetes Metrics Server](#kubernetes-metrics-server)
+ - [Related to Kubernetes Metrics Server](#related-to-kubernetes-metrics-server)
+- [Navigation](#navigation)
+
+## Deploy a metrics-server service that you can fully configure
+
+The other embedded service disabled in your K3s cluster deployment is the metrics-server. This service scrapes resource usage data from your cluster nodes and offers it through its API. The problem with the embedded metrics-server, and with any other embedded service included in K3s, is that you cannot change its configuration directly. You can only adjust what's configurable through the parameters you can set on the K3s service itself, or make temporary manual changes through `kubectl`.
+
+In particular, the embedded metrics-server comes with a default configuration that is not adequate for the setup of your K3s cluster. Since you cannot change the default configuration permanently, it is better to deploy the metrics-server independently in your cluster, but with the proper configuration already set in it.
## Checking the metrics-server's manifest
-First you would need to check out the manifest used for deploying the metrics-server and see where you have to apply the required change. This also means that you have to be aware of which version you're going to deploy in your cluster. K3s `v1.22.1+k3s1` comes with the `v0.5.0` release of metrics-server but, at the time of writing this, there's already a `v0.5.2` available which is the one you'll see deployed in this guide.
+First, you need to check the manifest used for deploying the metrics-server and see where you have to apply the required change. This also means that you have to be aware of which version you're going to deploy in your cluster. K3s `v1.33.4+k3s1` comes with the `v0.8.0` release of metrics-server, which is, at the time of writing this, the latest version available.
-> **BEWARE!**
-> As with any other software, each release of any service comes with its own particularities regarding compatibilities, in particular with the Kubernetes engine you have in your cluster. Always check that the release of a software you want to deploy in your cluster is compatible with the Kubernetes version running your cluster.
+> [!IMPORTANT]
+> **Ensure the service's version is compatible with your cluster's Kubernetes version**\
+> Each release of any service comes with its own particularities regarding compatibilities, in particular with your cluster's Kubernetes engine. Always check that the release of a software you want to deploy in your cluster is compatible with the Kubernetes version running your cluster.
-You'll find the yaml manifest for metrics-server `v0.5.2` in the **Assets** section found [at this Github release page](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2). It's the `components.yaml` file. Download and open it, then look for the `Deployment` object in it. It should be as the yaml below.
+Download the `components.yaml` manifest for metrics-server `v0.8.0` from the **Assets** section of [this GitHub release page](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.8.0). Open it and look for the `Deployment` object declared in it:
~~~yaml
---
@@ -37,11 +54,11 @@ spec:
containers:
- args:
- --cert-dir=/tmp
- - --secure-port=4443
+ - --secure-port=10250
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- image: k8s.gcr.io/metrics-server/metrics-server:v0.5.2
+ image: registry.k8s.io/metrics-server/metrics-server:v0.8.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -52,7 +69,7 @@ spec:
periodSeconds: 10
name: metrics-server
ports:
- - containerPort: 4443
+ - containerPort: 10250
name: https
protocol: TCP
readinessProbe:
@@ -68,9 +85,15 @@ spec:
cpu: 100m
memory: 200Mi
securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
volumeMounts:
- mountPath: /tmp
name: tmp-dir
@@ -84,35 +107,29 @@ spec:
---
~~~
-This is the object you'll need to modify to adapt metrics-server to your particular cluster setup. Some of the values will also be taken from the yaml manifest used by K3s to deploy this service, a yaml you'll find in [the K3s GitHub page](https://github.com/k3s-io/k3s/blob/master/manifests/metrics-server/metrics-server-deployment.yaml).
+This is the object you need to modify to adapt metrics-server to your particular cluster setup. You also have to take some values from the manifest K3s uses to deploy its embedded version of this service, which you can find on [the K3s GitHub page](https://github.com/k3s-io/k3s/blob/master/manifests/metrics-server/metrics-server-deployment.yaml).
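+
+If you prefer working from the shell, you can fetch the manifest and skim the relevant part of the `Deployment` without opening an editor. A small sketch using the same release URL referenced later in the Kustomize project:
+
+~~~sh
+# Download the v0.8.0 manifest locally.
+$ wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.8.0/components.yaml
+
+# Print the container arguments with a few lines of context.
+$ grep -n -A 8 'args:' components.yaml
+~~~
+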
## Deployment of metrics-server
-As you did with MetalLB in the [**G027** guide](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#deploying-metallb-on-your-k3s-cluster), you're going to use a Kustomize project to deploy the metrics-server in your cluster.
+As you did with MetalLB in the [previous **G027** chapter](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#deploying-metallb-on-your-k3s-cluster), you are going to use a Kustomize project to deploy the metrics-server in your cluster:
-1. In your kubectl client system, create a folder structure for the Kustomize project.
+1. In your `kubectl` client system, create a folder structure for the Kustomize project:
- ~~~bash
+ ~~~sh
$ mkdir -p $HOME/k8sprjs/metrics-server/patches
~~~
- In the command above you can see that, inside the metrics-server folder, I've created a `patches` one. The idea is to patch the default configuration of the service by adding a couple of parameters.
-
-2. Create a new `metrics-server.deployment.containers.args.patch.yaml` file under the `patches` folder.
-
- ~~~bash
- $ touch $HOME/k8sprjs/metrics-server/patches/metrics-server.deployment.containers.args.patch.yaml
- ~~~
+ In the command above you can see that, inside the `metrics-server` folder, I have created a `patches` one. The idea is to patch the default configuration of the service by adding a couple of parameters.
- Notice the structure of this yaml file's name. It has the pattern below.
+2. Create a new `metrics-server.deployment.patch.yaml` file under the `patches` folder:
- ~~~bash
- ..[extra_details].[...].yaml
+ ~~~sh
+ $ touch $HOME/k8sprjs/metrics-server/patches/metrics-server.deployment.patch.yaml
~~~
- You can use any other pattern that suits you, but try to keep the same one so the yaml files in your Kustomize projects have names that hint you about what's inside of them.
+ This file will contain only the patch to modify the metrics-server deployment object.
-3. Fill `metrics-server.deployment.containers.args.patch.yaml` with the following yaml.
+3. Declare in `metrics-server.deployment.patch.yaml` the patch for the metrics-server deployment:
~~~yaml
apiVersion: apps/v1
@@ -129,42 +146,61 @@ As you did with MetalLB in the [**G027** guide](G027%20-%20K3s%20cluster%20setup
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
- - key: "node-role.kubernetes.io/master"
- operator: "Exists"
- effect: "NoSchedule"
containers:
- name: metrics-server
args:
- --cert-dir=/tmp
- - --secure-port=4443
+ - --secure-port=10250
- --kubelet-preferred-address-types=InternalIP
- --kubelet-use-node-status-port
- --metric-resolution=15s
+ - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
~~~
- See how the yaml manifest contains the necessary information to identify the resource to be patched, up to the container's name, and only the values to add or modify.
+ This patch only contains the necessary information to identify the resource to be patched and the properties to add or change:
+
+ - `tolerations`\
+ This section has been taken directly [from the `Deployment` object K3s uses](https://github.com/k3s-io/k3s/blob/master/manifests/metrics-server/metrics-server-deployment.yaml) to deploy its embedded metrics-server. These [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) allow the metrics-server pod to be scheduled on nodes tainted with those keys and the `NoSchedule` effect.
- - The `tolerations` section has been taken directly [from the Deployment object K3s uses](https://github.com/k3s-io/k3s/blob/master/manifests/metrics-server/metrics-server-deployment.yaml) to deploy metrics-server. These [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) will make the metrics-server pod to be scheduled or not (`effect: "NoSchedule"`) in nodes that are tainted with those keys. For instance, remember that the server node is tainted with `"k3s-controlplane=true:NoExecute"` which restricts what pods can run on it, also excluding the metrics-server one.
+ For instance, since the server node is tainted with `"k3s-controlplane=true:NoExecute"`, a taint this patch does not tolerate, it will not run a pod for the metrics-server service (nor for other regular apps or services).
- - `--cert-dir`: apparently, a directory for certificates, although I haven't found a proper explanation for this parameter.
+ - `args`\
+ This section configures certain parameters that affect how the metrics-server service runs:
- - `--secure-port`: the https port used to connect to the metrics-server server.
+ - `--cert-dir`\
+ The directory where this service keeps its TLS certificates. Here it is set to a temporary folder, which is appropriate for a containerized service.
- - `--kubelet-preferred-address-types`: indicates the priority of node address types used when determining an address for connecting to a particular node. In your cluster's case, the only one that is really needed is the internal IP, so that's the only option specified. The possible values are `Hostname,InternalDNS,InternalIP,ExternalDNS,ExternalIP`.
+ - `--secure-port`\
+ The port on which to serve HTTPS with authentication and authorization. Here it is set to the same port number used by the kubelets (`10250`).
- - `--kubelet-use-node-status-port`: I haven't found a proper explanation for this parameter, but by the name it seems that makes the metrics-server check the status port (`10250` by default) that a kubelet process opens in the node where it runs.
+ - `--kubelet-preferred-address-types`\
+ Priority of node address types used when determining an address for connecting to a particular node. In your K3s cluster's case, the only one that is really needed is the internal IP.
- - `--metric-resolution`: the metrics-server service will scrape the resource usage stats from the kubelets every this time interval. By default, it's 60 seconds.
+ By only setting the `InternalIP` value, you ensure that metrics-server only communicates through the isolated secondary network you have in your setup.
- Notice that the parameters in the `args` list are exactly the same as in the original `components.yaml` file, except for the `kubelet-preferred-address-types`. This one is set just with the `InternalIP` value to ensure that metrics-server only communicates through the isolated secondary network you have in your setup.
+ - `--kubelet-use-node-status-port`\
+ When enabled, metrics-server scrapes each kubelet on the port reported in that node's status (`10250` by default), instead of a separately configured fixed port.
-4. Create the `kustomization.yaml` file.
+ - `--metric-resolution`\
+ The interval at which metrics-server scrapes metrics from the kubelets, which is also how long each scraped value is retained. By default, it is one minute.
- ~~~bash
+ - `--tls-cipher-suites`\
+ Comma-separated list of cipher suites allowed for the server. The list specified in the yaml snippet is the same one K3s applies when deploying its embedded metrics-server service.
+
+ > [!NOTE]
+ > [These and other metrics-server flags are explained in this help document](https://github.com/kubernetes-sigs/metrics-server/blob/master/docs/command-line-flags.txt).
+
+ > [!IMPORTANT]
+ > **Review this patch whenever you update the metrics-server!**\
+ > Every time you update the metrics-server service in your setup, do not forget to compare the patched values against the official deployment manifest of the newer version. Otherwise, you could end up with errors caused by deprecated arguments or incorrect values.
+
+4. Create the `kustomization.yaml` file:
+
+ ~~~sh
$ touch $HOME/k8sprjs/metrics-server/kustomization.yaml
~~~
-5. Put the following content in the `kustomization.yaml` file.
+5. Fill the `kustomization.yaml` file like this:
~~~yaml
# Metrics server setup
@@ -172,27 +208,25 @@ As you did with MetalLB in the [**G027** guide](G027%20-%20K3s%20cluster%20setup
kind: Kustomization
resources:
- - https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.5.2/components.yaml
+ - https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.8.0/components.yaml
- patchesStrategicMerge:
- - patches/metrics-server.deployment.containers.args.patch.yaml
+ patches:
+ - path: patches/metrics-server.deployment.patch.yaml
~~~
- Notice the following.
-
- - There's an `apiVersion` and a `kind` parameter specified in this `kustomization.yaml` file, unlike the one you used for deploying the MetalLB service. I haven't found an explanation about why these two parameters can be omitted for this kind of object.
+ Notice that:
- In the `resources` list you have the URL to the `components.yaml` file, although you could reference here the downloaded file too.
- - The `patchesStrategicMerge` section is probably the simplest way to patch in Kustomize, [read about it here](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/patchesstrategicmerge/). See how the `metrics-server.deployment.containers.args.patch.yaml` file is listed as the sole patch to apply here.
+ - The `patches` section is where you specify all the patches you want to apply to the resources deployed in the Kustomize project. This section supports different ways of declaring and applying patches on resources, [check them out in the official Kubernetes documentation](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/patches/). The method used here is probably the cleanest one, since it only requires specifying the path to the patch file.
-6. Test the Kustomize project with kubectl.
+6. Test the Kustomize project with `kubectl`:
- ~~~bash
+ ~~~sh
$ kubectl kustomize $HOME/k8sprjs/metrics-server | less
~~~
- In the output, look for the `Deployment` object. It should have the `args` parameters and the `tolerations` set as below.
+ In the output, look for the `Deployment` object and ensure that the `args` parameters and the `tolerations` section are set as expected:
~~~yaml
---
@@ -218,11 +252,12 @@ As you did with MetalLB in the [**G027** guide](G027%20-%20K3s%20cluster%20setup
containers:
- args:
- --cert-dir=/tmp
- - --secure-port=4443
+ - --secure-port=10250
- --kubelet-preferred-address-types=InternalIP
- --kubelet-use-node-status-port
- --metric-resolution=15s
- image: k8s.gcr.io/metrics-server/metrics-server:v0.5.2
+ - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
+ image: registry.k8s.io/metrics-server/metrics-server:v0.8.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -233,7 +268,7 @@ As you did with MetalLB in the [**G027** guide](G027%20-%20K3s%20cluster%20setup
periodSeconds: 10
name: metrics-server
ports:
- - containerPort: 4443
+ - containerPort: 10250
name: https
protocol: TCP
readinessProbe:
@@ -249,9 +284,15 @@ As you did with MetalLB in the [**G027** guide](G027%20-%20K3s%20cluster%20setup
cpu: 100m
memory: 200Mi
securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
volumeMounts:
- mountPath: /tmp
name: tmp-dir
@@ -265,99 +306,114 @@ As you did with MetalLB in the [**G027** guide](G027%20-%20K3s%20cluster%20setup
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- - effect: NoSchedule
- key: node-role.kubernetes.io/master
- operator: Exists
volumes:
- emptyDir: {}
name: tmp-dir
---
~~~
-7. Apply this Kustomize project to finally deploy metrics-server in your cluster.
+7. Apply the Kustomize project to finally deploy metrics-server in your cluster:
- ~~~bash
+ ~~~sh
$ kubectl apply -k $HOME/k8sprjs/metrics-server/
~~~
-8. After a minute or so, check if the metrics-server pod and service is running.
+8. After a minute or so, check if the metrics-server pod and service are running:
- ~~~bash
+ ~~~sh
$ kubectl get pods,svc -n kube-system | grep metrics
- pod/metrics-server-5b45cf8dbb-nv477 1/1 Running 0 5m12s
- service/metrics-server ClusterIP 10.43.133.41 443/TCP 5m13s
+ pod/metrics-server-5f87696c77-j7zgd 1/1 Running 0 41s
+ service/metrics-server ClusterIP 10.43.50.63 443/TCP 41s
~~~
You should get two lines regarding metrics-server. Also notice that the metrics-server is set in the `kube-system` namespace.
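+
+Beyond that quick check, a couple of additional commands can confirm that the deployment rolled out completely and that the Metrics API is registered in your cluster. A minimal sketch:
+
+~~~sh
+# Wait until the metrics-server deployment reports all replicas available.
+$ kubectl -n kube-system rollout status deployment/metrics-server
+
+# The Metrics API service should appear with AVAILABLE set to True.
+$ kubectl get apiservice v1beta1.metrics.k8s.io
+~~~
+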
## Checking the metrics-server service
-To see the resource usage values scraped by metrics-server, you have to use the `kubectl top` command. You can get values from nodes and from pods.
+To see the resource usage values scraped by metrics-server, you have to use the `kubectl top` command. You can get values both from nodes and pods:
-- Get values from nodes with `kubectl top node`.
+- Get values from nodes with `kubectl top node`:
- ~~~bash
- $ kubectl top node
- NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
- k3sagent01 200m 5% 347Mi 17%
- k3sagent02 166m 4% 335Mi 21%
- k3sserver01 351m 8% 647Mi 49%
+ ~~~sh
+ $ kubectl top node
+ NAME CPU(cores) CPU(%) MEMORY(bytes) MEMORY(%)
+ k3sagent01 64m 2% 561Mi 28%
+ k3sagent02 69m 3% 516Mi 26%
+ k3sserver01 156m 7% 785Mi 54%
~~~
-- Get values from pods with `kubectl top pods`, although always specifying a namespace (remember, pods are namespaced in Kubernetes).
+- Get values from pods with `kubectl top pods`, although always specifying a namespace (remember, pods are namespaced in Kubernetes):
- ~~~bash
+ ~~~sh
$ kubectl top pods -A
- NAMESPACE NAME CPU(cores) MEMORY(bytes)
- kube-system coredns-85cb69466-9l6ws 6m 11Mi
- kube-system local-path-provisioner-64ffb68fd-zxm2v 1m 7Mi
- kube-system metrics-server-5b45cf8dbb-nv477 15m 16Mi
- kube-system traefik-74dd4975f9-tdv42 2m 18Mi
- metallb-system controller-7dcc8764f4-gskm7 1m 6Mi
- metallb-system speaker-6rrwf 10m 10Mi
- metallb-system speaker-kntk2 16m 10Mi
+ NAMESPACE NAME CPU(cores) MEMORY(bytes)
+ kube-system coredns-64fd4b4794-phpd5 5m 14Mi
+ kube-system local-path-provisioner-774c6665dc-bzp9n 1m 8Mi
+ kube-system metrics-server-5f87696c77-j7zgd 7m 18Mi
+ kube-system traefik-c98fdf6fb-z87fh 1m 28Mi
+ metallb-system controller-58fdf44d87-kfc2f 5m 24Mi
+ metallb-system speaker-jqcxt 12m 24Mi
+ metallb-system speaker-v7qkb 12m 24Mi
+ metallb-system speaker-xfqft 13m 61Mi
~~~
- In this case the `top pod` command has a `-A` option to get pods running in all namespaces of a cluster.
+ Here the `top pod` command has a `-A` option to get the metrics from pods running in all namespaces of the cluster.
To see all the options available for both `top` commands, use the `--help` option.
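+
+Two options worth knowing are sorting the output and querying the Metrics API directly, which lets you see the raw data metrics-server serves. A short sketch; the `jq` pipe is optional and assumes you have that tool installed:
+
+~~~sh
+# Sort pods by memory usage across all namespaces.
+$ kubectl top pods -A --sort-by=memory
+
+# Ask the Metrics API directly for the latest node metrics.
+$ kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" | jq
+~~~
+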
-## Metrics-server's Kustomize project attached to this guide series
+## Metrics-server's Kustomize project attached to this guide
-You can find the Kustomize project for this metrics-server deployment in the following attached folder.
+You can find the Kustomize project for this metrics-server deployment in the following attached folder:
-- `k8sprjs/metrics-server`
+- [`k8sprjs/metrics-server`](k8sprjs/metrics-server/)
## Relevant system paths
-### _Folders on remote kubectl client_
+### Folders on remote kubectl client
- `$HOME/k8sprjs`
- `$HOME/k8sprjs/metrics-server`
- `$HOME/k8sprjs/metrics-server/patches`
-### _Files on remote kubectl client_
+### Files on remote kubectl client
- `$HOME/k8sprjs/metrics-server/kustomization.yaml`
-- `$HOME/k8sprjs/metrics-server/patches/metrics-server.deployment.containers.args.patch.yaml`
+- `$HOME/k8sprjs/metrics-server/patches/metrics-server.deployment.patch.yaml`
## References
-### _Kubernetes Metrics Server_
+### [Kubernetes](https://kubernetes.io/)
+
+- [Reference. Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/)
+ - [kustomization. patches](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/patches/)
+
+- [Kubernetes Documentation. Concepts](https://kubernetes.io/docs/concepts/)
+ - [Scheduling, Preemption and Eviction. Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
+
+- [Kubernetes Documentation. Reference](https://kubernetes.io/docs/reference/)
+ - [Well-Known Labels, Annotations and Taints](https://kubernetes.io/docs/reference/labels-annotations-taints/)
+
+### [Kubernetes Metrics Server](https://github.com/kubernetes-sigs/metrics-server)
+
+- [Kubernetes Metrics Server v0.8.0 release](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.8.0)
+- [Configuration](https://github.com/kubernetes-sigs/metrics-server#configuration)
+- [command-line-flags.txt](https://github.com/kubernetes-sigs/metrics-server/blob/master/docs/command-line-flags.txt)
+- [Metrics server issue with hostname resolution of kubelet and apiserver unable to communicate with metric-server clusterIP](https://github.com/kubernetes-sigs/metrics-server/issues/131)
+
+### Related to Kubernetes Metrics Server
+
+- [K3s v1.33.4+k3s1 release](https://github.com/k3s-io/k3s/releases/tag/v1.33.4%2Bk3s1)
+
+- [How To Install Metrics Server on a Kubernetes Cluster](https://computingforgeeks.com/how-to-deploy-metrics-server-to-kubernetes-cluster/)
-- [Kubernetes Metrics Server](https://github.com/kubernetes-sigs/metrics-server)
-- [Kubernetes Metrics Server v0.5.2 release](https://github.com/kubernetes-sigs/metrics-server/releases/tag/v0.5.2)
-- [K3s v1.22.3+k3s1 release](https://github.com/k3s-io/k3s/releases/tag/v1.22.3+k3s1)
-- [Install Metrics Server on a Kubernetes Cluster](https://computingforgeeks.com/how-to-deploy-metrics-server-to-kubernetes-cluster/)
- [How to troubleshoot metrics-server on kubeadm?](https://stackoverflow.com/questions/57137683/how-to-troubleshoot-metrics-server-on-kubeadm)
+
- [[learner] Debugging issue with metrics-server](https://www.reddit.com/r/kubernetes/comments/ktuour/learner_debugging_issue_with_metricsserver/)
-- [Metrics server issue with hostname resolution of kubelet and apiserver unable to communicate with metric-server clusterIP](https://github.com/kubernetes-sigs/metrics-server/issues/131)
+
- [The case of disappearing metrics in Kubernetes](https://dev.to/shyamala_u/the-case-of-disappearing-metrics-in-kubernetes-1kdh)
-- [Kubernetes Metrics Server. Configuration](https://github.com/kubernetes-sigs/metrics-server#configuration)
+
- [Query on kubernetes metrics-server metrics values](https://stackoverflow.com/questions/55684789/query-on-kubernetes-metrics-server-metrics-values)
-- [Kubernetes Pod Created with hostNetwork](https://docs.datadoghq.com/security_platform/default_rules/kubernetes-pod-created-with-hostnetwork/)
-- [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
## Navigation
-[<< Previous (**G027. K3s cluster setup 10**)](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G029. K3s cluster setup 12**) >>](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md)
+[<< Previous (**G027. K3s cluster setup 10**)](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G029. K3s cluster setup 12**) >>](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md)
diff --git a/G029 - K3s cluster setup 12 ~ Setting up cert-manager and self-signed CA.md b/G029 - K3s cluster setup 12 ~ Setting up cert-manager and self-signed CA.md
new file mode 100644
index 0000000..8774309
--- /dev/null
+++ b/G029 - K3s cluster setup 12 ~ Setting up cert-manager and self-signed CA.md
@@ -0,0 +1,544 @@
+# G029 - K3s cluster setup 12 ~ Setting up cert-manager and self-signed CA
+
+- [Use cert-manager to handle certificates in your cluster](#use-cert-manager-to-handle-certificates-in-your-cluster)
+- [Deploying cert-manager](#deploying-cert-manager)
+ - [Verifying the deployment of cert-manager](#verifying-the-deployment-of-cert-manager)
+ - [Installing the cert-manager plugin in your `kubectl` client system](#installing-the-cert-manager-plugin-in-your-kubectl-client-system)
+- [Setting up a self-signed CA for your cluster](#setting-up-a-self-signed-ca-for-your-cluster)
+- [Checking your certificates with the cert-manager command line tool](#checking-your-certificates-with-the-cert-manager-command-line-tool)
+- [Cert-manager's Kustomize project attached to this guide](#cert-managers-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](#relevant-system-paths)
+ - [Folders in `kubectl` client system](#folders-in-kubectl-client-system)
+ - [Files in `kubectl` client system](#files-in-kubectl-client-system)
+- [References](#references)
+ - [cert-manager](#cert-manager)
+ - [About setting up cert-manager](#about-setting-up-cert-manager)
+ - [About the self-signed CA](#about-the-self-signed-ca)
+- [Navigation](#navigation)
+
+## Use cert-manager to handle certificates in your cluster
+
+Although Traefik has some capabilities for handling certificates, it's better to use a service specialized in that task. Enter **cert-manager**, a popular certificate management service in the Kubernetes landscape.
+
+## Deploying cert-manager
+
+At the time of writing this, there is no official Kustomize method for deploying cert-manager. The closest approach is applying a YAML manifest, which you can wrap in your own Kustomize project (as you did for the `metrics-server` deployment in the [previous **G028** guide](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#deployment-of-metrics-server)).
+
+1. In your `kubectl` client system, create a folder structure for the cert-manager deployment project:
+
+ ~~~sh
+ $ mkdir -p $HOME/k8sprjs/cert-manager/deployment/
+ ~~~
+
+ The deployment project goes in its own `deployment` subfolder because, later, you will create a second project that sets up a self-signed root CA ([_Certificate Authority_](https://en.wikipedia.org/wiki/Certificate_authority)).
+
+2. Create a `kustomization.yaml` file in the `deployment` subfolder:
+
+ ~~~sh
+ $ touch $HOME/k8sprjs/cert-manager/deployment/kustomization.yaml
+ ~~~
+
+3. Declare the cert-manager setup in the `kustomization.yaml` file:
+
+ ~~~yaml
+ # cert-manager setup
+ apiVersion: kustomize.config.k8s.io/v1beta1
+ kind: Kustomization
+
+ resources:
+ - https://github.com/cert-manager/cert-manager/releases/download/v1.19.0/cert-manager.yaml
+ ~~~
+
+ > [!NOTE]
+ > **Find the URL for the newest cert-manager version [in its official installation procedure with `kubectl`](https://cert-manager.io/docs/installation/kubectl/)**\
+ > You can also find the YAML file in the assets list of [each release](https://github.com/jetstack/cert-manager/releases).
+
+4. Deploy cert-manager with `kubectl`:
+
+ ~~~sh
+ $ kubectl apply -k $HOME/k8sprjs/cert-manager/deployment/
+ ~~~
+
+ This command prints a long list of all the resources created by the cert-manager deployment.
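+
+If you want to review what this project pulls in, you can render it with Kustomize (as you did for metrics-server), either before or after applying it. A quick sketch:
+
+~~~sh
+# Render the full cert-manager manifest without applying anything.
+$ kubectl kustomize $HOME/k8sprjs/cert-manager/deployment/ | less
+~~~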
+
+### Verifying the deployment of cert-manager
+
+After the deployment has finished successfully, give it around a minute to allow cert-manager to initialize itself and start its pods. Then, you can verify that cert-manager has deployed properly just by checking if its pods are `Running`:
+
+~~~sh
+$ kubectl -n cert-manager get pods
+NAME READY STATUS RESTARTS AGE
+cert-manager-b5cc5b7c5-84x2m 1/1 Running 0 63s
+cert-manager-cainjector-7cf6557c49-2bdr2 1/1 Running 0 63s
+cert-manager-webhook-58f4cff74d-v6chb 1/1 Running 0 63s
+~~~
+
+Notice the namespace `cert-manager` specified with the `-n` option in the `kubectl` command. The cert-manager service deploys itself in its own `cert-manager` namespace.
+
+> [!NOTE]
+> **Use `kubectl` to discover the namespaces existing in your cluster**\
+> Get a list of all the existing namespaces within your K3s cluster with `kubectl` like this:
+>
+> ~~~sh
+> $ kubectl get namespaces
+> NAME STATUS AGE
+> cert-manager Active 94s
+> default Active 14d
+> kube-node-lease Active 14d
+> kube-public Active 14d
+> kube-system Active 14d
+> metallb-system Active 11d
+> ~~~
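+
+Another sign that the deployment went well is that the cert-manager CRDs (such as `Certificate` and `ClusterIssuer`) are now registered in your cluster. You can list them with a standard `kubectl` query, a minimal sketch:
+
+~~~sh
+# List every API resource belonging to the cert-manager.io API group.
+$ kubectl api-resources --api-group=cert-manager.io
+~~~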
+
+### Installing the cert-manager plugin in your `kubectl` client system
+
+To help you manage the certificates you put in your cluster, [cert-manager offers an independent command line tool](https://cert-manager.io/docs/reference/cmctl/). You have to install it in your `kubectl` client system and make it reachable through your user's `$PATH`.
+
+You can install this cert-manager command line tool in a `kubectl` client system like the one configured in the [**G026** chapter](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md) as follows:
+
+1. From the [cert-manager command line tool GitHub releases page](https://github.com/cert-manager/cmctl/releases), download the `tar.gz` file of the latest release available (v2.3.0 when writing this):
+
+ ~~~sh
+ $ wget https://github.com/cert-manager/cmctl/releases/download/v2.3.0/cmctl_linux_amd64.tar.gz -O $HOME/bin/cmctl_linux_amd64.tar.gz
+ ~~~
+
+2. Extract the content of the downloaded `cmctl_linux_amd64.tar.gz`:
+
+ ~~~sh
+ $ cd $HOME/bin
+ $ tar xf cmctl_linux_amd64.tar.gz
+ ~~~
+
+ This will extract these files:
+
+ - The command line tool as a `cmctl` binary.
+
+ - A `LICENSE` and a `README.md` file that you can remove together with the `cmctl_linux_amd64.tar.gz`:
+
+ ~~~sh
+ $ rm cmctl_linux_amd64.tar.gz LICENSE README.md
+ ~~~
+
+3. Restrict the binary's permissions:
+
+ ~~~sh
+ $ chmod 700 cmctl
+ ~~~
+
+4. Make a symbolic link to the new `cmctl` command named `kubectl-cert_manager`:
+
+ ~~~sh
+ $ ln -s cmctl kubectl-cert_manager
+ ~~~
+
+ This allows you to use the `cmctl` command as a plugin integrated with `kubectl`.
+
+5. Test `cmctl` with `kubectl` by checking its version:
+
+ ~~~sh
+ $ kubectl cert-manager version
+ Client Version: util.Version{GitVersion:"v2.3.0", GitCommit:"29b59b934c5a6f533b2d278f4541dca89d1eb288", GitTreeState:"", GoVersion:"go1.24.5", Compiler:"gc", Platform:"linux/amd64"}
+ Server Version: &versionchecker.Version{Detected:"v1.19.0", Sources:map[string]string{"crdLabelVersion":"v1.19.0"}}
+ ~~~
+
+6. You can also check if the cert-manager API is accessible:
+
+ ~~~sh
+ $ kubectl cert-manager check api
+ The cert-manager API is ready
+ ~~~
+
+Know that the cert-manager `kubectl` plugin has several other commands available; [check them out on its official page](https://cert-manager.io/docs/reference/cmctl/).
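+
+Since the symbolic link turns `cmctl` into a regular `kubectl` plugin, you can also confirm that `kubectl` actually discovers it on your `$PATH`. A quick sketch:
+
+~~~sh
+# Lists every executable that kubectl recognizes as a plugin.
+$ kubectl plugin list
+~~~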
+
+## Setting up a self-signed CA for your cluster
+
+With the tools deployed in your cluster, you can now create a self-signed CA for it. You will need this CA to issue self-signed certificates for the other services you will deploy in later chapters of this guide:
+
+1. Create a folder structure for a Kustomize project within the already existing `cert-manager` path:
+
+ ~~~sh
+ $ mkdir -p $HOME/k8sprjs/cert-manager/certificates/resources
+ ~~~
+
+2. In the `resources` directory, create five empty YAML files:
+
+ ~~~sh
+ $ touch $HOME/k8sprjs/cert-manager/certificates/resources/{homelab.cloud-root-ca-issuer-selfsigned.cluster-issuer.cert-manager.yaml,homelab.cloud-root-ca-tls.certificate.cert-manager.yaml,homelab.cloud-root-ca-issuer.cluster-issuer.cert-manager.yaml,homelab.cloud-intm-ca01-tls.certificate.cert-manager.yaml,homelab.cloud-intm-ca01-issuer.cluster-issuer.cert-manager.yaml}
+ ~~~
+
+ Each YAML will describe a particular resource required for setting up the root CA properly.
+
+3. In the `resources/homelab.cloud-root-ca-issuer-selfsigned.cluster-issuer.cert-manager.yaml` file, configure the self-signed root CA `ClusterIssuer` for your entire cluster:
+
+ ~~~yaml
+ # Self-signed cluster-wide issuer for the root CA's certificate
+ apiVersion: cert-manager.io/v1
+ kind: ClusterIssuer
+
+ metadata:
+ name: homelab.cloud-root-ca-issuer-selfsigned
+ spec:
+ selfSigned: {}
+ ~~~
+
+ This cluster issuer will be dedicated exclusively to "self-signing" your root CA's certificate:
+
+ - The `apiVersion` points to the cert-manager API, not to the Kubernetes one.
+
+ - The `kind` is `ClusterIssuer` (a cert-manager kind, not a Kubernetes one), meaning this particular issuer will be available for all the namespaces in your cluster.
+
+ - The `name` is a descriptive string, like the YAML filename.
+
+ - Within the `spec` section, you see the empty parameter `selfSigned`. This means that this issuer is of the simplest type you can have, the self-signed one. **It is not trusted by browsers**, but it is enough to generate certificates that you can use within your own local or home network.
+
+4. Declare the root CA's certificate, issued by the self-signed issuer, in `resources/homelab.cloud-root-ca-tls.certificate.cert-manager.yaml`:
+
+ ~~~yaml
+ # Certificate for root CA
+ apiVersion: cert-manager.io/v1
+ kind: Certificate
+
+ metadata:
+ name: homelab.cloud-root-ca-tls
+ namespace: cert-manager
+ spec:
+ isCA: true
+ commonName: homelab.cloud-root-ca-tls
+ secretName: homelab.cloud-root-ca-tls
+ duration: 8760h # 1 year
+ renewBefore: 720h # Certificates must be renewed some time before they expire (30 days)
+ privateKey:
+ algorithm: Ed25519
+ encoding: PKCS8
+ rotationPolicy: Always
+ issuerRef:
+ name: homelab.cloud-root-ca-issuer-selfsigned
+ kind: ClusterIssuer
+ group: cert-manager.io
+ ~~~
+
+ To know more about all the parameters shown above, check [the cert-manager v1 API documentation](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1). Still, some particularities of this certificate declaration are worth pointing out:
+
+ - Many of the parameters are optional, and there are more that are not used here.
+
+ - Here the API is also a cert-manager one. Be careful of the `apiVersion` you use. Cert-manager has several, [each with its own API documentation](https://cert-manager.io/docs/reference/api-docs/).
+
+ - The `namespace` is the same one where cert-manager has been deployed. Therefore, both this certificate and its associated secret will be kept under the `cert-manager` namespace of your cluster, which is also where cert-manager looks for certificates by default.
+
+ - This certificate is not associated with any particular domain, for security reasons. Root CA certificates are not meant to be exposed in HTTPS communications; they are only used to sign other certificates.
+
+ - The `duration` determines how long the certificate lasts. Since this particular certificate is only used to sign others, you want it to last longer than the ones generated from it. On the other hand, it is also good to refresh it as frequently as possible. Therefore, a duration of one year is a good compromise, at least for a homelab environment.
+
+ - The `renewBefore` is about when to start the renewal of the certificate. This time period should be as short as possible, but always bearing in mind that the certificates derived from this one will also have to be renewed. In a small homelab setup, this period could be even shorter than the thirty days specified in the YAML above.
+
+ - The parameter `spec.isCA` allows you to turn a certificate into a Certificate Authority. When the value is `true`, other issuers that reference this CA's secret can use it to sign the certificates they issue.
+
+ - In the `spec.privateKey` section, be careful to always have `rotationPolicy` set to `Always`. This makes cert-manager generate a new private key on each renewal rather than reusing the existing one, a policy [described in the cert-manager documentation](https://cert-manager.io/docs/usage/certificate/#configuring-private-key-rotation).
+
+ - In the `spec.issuerRef` you specify the issuer of this certificate, in this case the `homelab.cloud-root-ca-issuer-selfsigned` one you created in the previous step. Be sure to also specify its `kind`, in particular for `ClusterIssuer` types, so you know clearly what kind of issuer you are using with each certificate.
+
+5. Declare another `ClusterIssuer` in the `resources/homelab.cloud-root-ca-issuer.cluster-issuer.cert-manager.yaml` file:
+
+ ~~~yaml
+ # Cluster-wide issuer using root CA's secret
+ apiVersion: cert-manager.io/v1
+ kind: ClusterIssuer
+
+ metadata:
+ name: homelab.cloud-root-ca-issuer
+ spec:
+ ca:
+ secretName: homelab.cloud-root-ca-tls
+ ~~~
+
+ This is a different cluster-wide issuer that uses the root CA's `homelab.cloud-root-ca-tls` secret to issue and sign other certificates. In particular, you will use this issuer only to issue intermediate CA certificates.
+
+6. Declare a certificate for an intermediate CA in the `resources/homelab.cloud-intm-ca01-tls.certificate.cert-manager.yaml`:
+
+ ~~~yaml
+ # Certificate for intermediate CA 01
+ apiVersion: cert-manager.io/v1
+ kind: Certificate
+
+ metadata:
+ name: homelab.cloud-intm-ca01-tls
+ namespace: cert-manager
+ spec:
+ isCA: true
+ commonName: homelab.cloud-intm-ca01-tls
+ secretName: homelab.cloud-intm-ca01-tls
+ duration: 4380h # 6 months
+ renewBefore: 360h # Certificates must be renewed some time before they expire (15 days)
+ privateKey:
+ algorithm: Ed25519
+ encoding: PKCS8
+ rotationPolicy: Always
+ issuerRef:
+ name: homelab.cloud-root-ca-issuer
+ kind: ClusterIssuer
+ group: cert-manager.io
+ ~~~
+
+ This certificate is like the one for the root CA issuer, but it is meant for an _intermediate_ CA issuer. Since this certificate's secret will be the one used to issue and sign the certificates for the apps you will deploy in later chapters, it has shorter `duration` and `renewBefore` periods. Also notice that this certificate's name is numbered (`01`), hinting at the possibility of having more than one intermediate CA. And, like the root CA's certificate, it is not attached to any particular domain.
+
+7. Declare in `resources/homelab.cloud-intm-ca01-issuer.cluster-issuer.cert-manager.yaml` an intermediate CA cluster issuer:
+
+ ~~~yaml
+ # Cluster-wide issuer using intermediate CA 01's secret
+ apiVersion: cert-manager.io/v1
+ kind: ClusterIssuer
+
+ metadata:
+ name: homelab.cloud-intm-ca01-issuer
+ spec:
+ ca:
+ secretName: homelab.cloud-intm-ca01-tls
+ ~~~
+
+ This is the intermediate CA cluster issuer you will use to issue and sign the "leaf" certificates for the apps and services you will deploy in later chapters. In this case, this issuer uses the corresponding secret of the intermediate CA 01's certificate declared in the previous step.
+
+ > [!NOTE]
+ > **Cluster issuers can issue certificates in any namespace**\
+ > The apps and services you will deploy in later chapters of this guide are going to run in namespaces other than `cert-manager`. This makes it necessary to use a cluster issuer, able to issue certificates and their corresponding secrets in any namespace, since **secrets in Kubernetes are not shared among namespaces**.
+
+8. Next, create the `kustomization.yaml` file in the `certificates` folder:
+
+ ~~~sh
+ $ touch $HOME/k8sprjs/cert-manager/certificates/kustomization.yaml
+ ~~~
+
+9. Fill the `kustomization.yaml` file with the following YAML:
+
+ ~~~yaml
+ # Certificates deployment
+ apiVersion: kustomize.config.k8s.io/v1beta1
+ kind: Kustomization
+
+ resources:
+ - resources/homelab.cloud-root-ca-issuer-selfsigned.cluster-issuer.cert-manager.yaml
+ - resources/homelab.cloud-root-ca-tls.certificate.cert-manager.yaml
+ - resources/homelab.cloud-root-ca-issuer.cluster-issuer.cert-manager.yaml
+ - resources/homelab.cloud-intm-ca01-tls.certificate.cert-manager.yaml
+ - resources/homelab.cloud-intm-ca01-issuer.cluster-issuer.cert-manager.yaml
+ ~~~
+
+ The resources are listed in order, so the root CA's issuer and certificate are created before the intermediate CA 01's.
+
+10. Apply the Kustomize project into your cluster:
+
+ ~~~sh
+ $ kubectl apply -k $HOME/k8sprjs/cert-manager/certificates
+ ~~~
+
+11. Confirm that the resources have been deployed in the cluster:
+
+ ~~~sh
+ $ kubectl -n kube-system get clusterissuer
+ NAME READY AGE
+ homelab.cloud-intm-ca01-issuer True 24s
+ homelab.cloud-root-ca-issuer True 24s
+ homelab.cloud-root-ca-issuer-selfsigned True 24s
+
+ $ kubectl -n cert-manager get certificates
+ NAME READY SECRET AGE
+ homelab.cloud-intm-ca01-tls True homelab.cloud-intm-ca01-tls 60s
+ homelab.cloud-root-ca-tls True homelab.cloud-root-ca-tls 59s
+
+ $ kubectl -n cert-manager get secrets
+ NAME TYPE DATA AGE
+ cert-manager-webhook-ca Opaque 3 45m
+ homelab.cloud-intm-ca01-tls kubernetes.io/tls 3 73s
+ homelab.cloud-root-ca-tls kubernetes.io/tls 3 73s
+ ~~~
+
+12. As a final verification, use `kubectl` to get a detailed description of your new issuers' current status:
+
+ ~~~sh
+ $ kubectl describe ClusterIssuer
+ Name: homelab.cloud-intm-ca01-issuer
+ Namespace:
+ Labels:
+ Annotations:
+ API Version: cert-manager.io/v1
+ Kind: ClusterIssuer
+ Metadata:
+ Creation Timestamp: 2025-10-10T10:33:47Z
+ Generation: 1
+ Resource Version: 191304
+ UID: 649e52c3-2be0-4f35-8adf-5b20d45e70ee
+ Spec:
+ Ca:
+ Secret Name: homelab.cloud-intm-ca01-tls
+ Status:
+ Conditions:
+ Last Transition Time: 2025-10-10T10:33:47Z
+ Message: Signing CA verified
+ Observed Generation: 1
+ Reason: KeyPairVerified
+ Status: True
+ Type: Ready
+ Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Warning ErrGetKeyPair 2m52s (x2 over 2m52s) cert-manager-clusterissuers Error getting keypair for CA issuer: secrets "homelab.cloud-intm-ca01-tls" not found
+ Warning ErrInitIssuer 2m52s (x2 over 2m52s) cert-manager-clusterissuers Error initializing issuer: secrets "homelab.cloud-intm-ca01-tls" not found
+ Normal KeyPairVerified 2m47s (x3 over 2m52s) cert-manager-clusterissuers Signing CA verified
+
+
+ Name: homelab.cloud-root-ca-issuer
+ Namespace:
+ Labels:
+ Annotations:
+ API Version: cert-manager.io/v1
+ Kind: ClusterIssuer
+ Metadata:
+ Creation Timestamp: 2025-10-10T10:33:47Z
+ Generation: 1
+ Resource Version: 191295
+ UID: bb097533-12c0-43d5-b07c-4e59b9b68113
+ Spec:
+ Ca:
+ Secret Name: homelab.cloud-root-ca-tls
+ Status:
+ Conditions:
+ Last Transition Time: 2025-10-10T10:33:47Z
+ Message: Signing CA verified
+ Observed Generation: 1
+ Reason: KeyPairVerified
+ Status: True
+ Type: Ready
+ Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Warning ErrGetKeyPair 2m52s (x2 over 2m52s) cert-manager-clusterissuers Error getting keypair for CA issuer: secrets "homelab.cloud-root-ca-tls" not found
+ Warning ErrInitIssuer 2m52s (x2 over 2m52s) cert-manager-clusterissuers Error initializing issuer: secrets "homelab.cloud-root-ca-tls" not found
+ Normal KeyPairVerified 2m47s (x3 over 2m52s) cert-manager-clusterissuers Signing CA verified
+
+
+ Name: homelab.cloud-root-ca-issuer-selfsigned
+ Namespace:
+ Labels:
+ Annotations:
+ API Version: cert-manager.io/v1
+ Kind: ClusterIssuer
+ Metadata:
+ Creation Timestamp: 2025-10-10T10:33:47Z
+ Generation: 1
+ Resource Version: 191265
+ UID: 74f8fc02-1d86-40d9-be3f-5965670309c0
+ Spec:
+ Self Signed:
+ Status:
+ Conditions:
+ Last Transition Time: 2025-10-10T10:33:47Z
+ Observed Generation: 1
+ Reason: IsReady
+ Status: True
+ Type: Ready
+ Events:
+ ~~~
+
+ The three issuers are ready, although the `homelab.cloud-intm-ca01-issuer` and `homelab.cloud-root-ca-issuer` ones had initialization problems (reported as `Warning` events), probably due to a slight delay in the creation of the secrets they use.
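+
+Your client machines will not trust this new CA by default, so at some point you may want to export its certificate, either to inspect it or to add it to a trust store. A minimal sketch, assuming `openssl` is available on your `kubectl` client system:
+
+~~~sh
+# Dump the root CA's certificate from its secret into a local PEM file.
+$ kubectl -n cert-manager get secret homelab.cloud-root-ca-tls \
+    -o jsonpath='{.data.tls\.crt}' | base64 -d > homelab-root-ca.crt
+
+# Review the certificate's subject and validity dates.
+$ openssl x509 -in homelab-root-ca.crt -noout -subject -dates
+~~~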
+
+## Checking your certificates with the cert-manager command line tool
+
+Remember that the cert-manager command line tool can help you handle your certificates. For instance, you can execute the following command to see the status of the root CA `homelab.cloud-root-ca-tls` certificate you created before:
+
+~~~sh
+$ kubectl cert-manager status certificate -n cert-manager homelab.cloud-root-ca-tls
+Name: homelab.cloud-root-ca-tls
+Namespace: cert-manager
+Created at: 2025-10-10T12:33:47+02:00
+Conditions:
+ Ready: True, Reason: Ready, Message: Certificate is up to date and has not expired
+DNS Names:
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Issuing 5m20s cert-manager-certificates-trigger Issuing certificate as Secret does not exist
+ Normal Generated 5m20s cert-manager-certificates-key-manager Stored new private key in temporary Secret resource "homelab.cloud-root-ca-tls-bc4dl"
+ Normal Requested 5m20s cert-manager-certificates-request-manager Created new CertificateRequest resource "homelab.cloud-root-ca-tls-1"
+ Normal Issuing 5m20s cert-manager-certificates-issuing The certificate has been successfully issued
+Issuer:
+ Name: homelab.cloud-root-ca-issuer-selfsigned
+ Kind: ClusterIssuer
+ Conditions:
+ Ready: True, Reason: IsReady, Message:
+ Events:
+Secret:
+ Name: homelab.cloud-root-ca-tls
+ Issuer Country:
+ Issuer Organisation:
+ Issuer Common Name: homelab.cloud-root-ca-tls
+ Key Usage: Digital Signature, Key Encipherment, Cert Sign
+ Extended Key Usages:
+ Public Key Algorithm: Ed25519
+ Signature Algorithm: Ed25519
+ Subject Key ID: 7156a59ffd7553cec1c9d424b959ef41fc5521ed
+ Authority Key ID:
+ Serial Number: 2e656bafa35257d9aba094782966b0e10868c232
+ Events:
+Not Before: 2025-10-10T12:33:47+02:00
+Not After: 2026-10-10T12:33:47+02:00
+Renewal Time: 2026-09-10T12:33:47+02:00
+No CertificateRequest found for this Certificate
+~~~
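+
+The tool is not limited to reporting status; it can also force a manual renewal of a certificate, which is a handy way of testing that your issuers keep working as expected. A small sketch (only run it when you actually want to rotate the certificate):
+
+~~~sh
+# Mark the intermediate CA 01's certificate for immediate renewal.
+$ kubectl cert-manager renew -n cert-manager homelab.cloud-intm-ca01-tls
+~~~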
+
+## Cert-manager's Kustomize project attached to this guide
+
+You can find the Kustomize project for the cert-manager deployment in this folder:
+
+- [`k8sprjs/cert-manager`](k8sprjs/cert-manager/)
+
+## Relevant system paths
+
+### Folders in `kubectl` client system
+
+- `$HOME/bin`
+- `$HOME/k8sprjs/cert-manager`
+- `$HOME/k8sprjs/cert-manager/deployment`
+- `$HOME/k8sprjs/cert-manager/deployment/patches`
+- `$HOME/k8sprjs/cert-manager/deployment/resources`
+- `$HOME/k8sprjs/cert-manager/certificates`
+- `$HOME/k8sprjs/cert-manager/certificates/resources`
+
+### Files in `kubectl` client system
+
+- `$HOME/bin/cmctl`
+- `$HOME/bin/kubectl-cert_manager`
+- `$HOME/k8sprjs/cert-manager/deployment/kustomization.yaml`
+- `$HOME/k8sprjs/cert-manager/certificates/kustomization.yaml`
+- `$HOME/k8sprjs/cert-manager/certificates/resources/homelab.cloud-intm-ca01-issuer.cluster-issuer.cert-manager.yaml`
+- `$HOME/k8sprjs/cert-manager/certificates/resources/homelab.cloud-intm-ca01-tls.certificate.cert-manager.yaml`
+- `$HOME/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-issuer-selfsigned.cluster-issuer.cert-manager.yaml`
+- `$HOME/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-issuer.cluster-issuer.cert-manager.yaml`
+- `$HOME/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-tls.certificate.cert-manager.yaml`
+
+## References
+
+### [cert-manager](https://cert-manager.io/)
+
+- [Documentation](https://cert-manager.io/docs/)
+ - [cert-manager installation with Kubectl](https://cert-manager.io/docs/installation/kubectl/)
+ - [The cert-manager Command Line Tool (cmctl)](https://cert-manager.io/docs/reference/cmctl/)
+ - [API reference](https://cert-manager.io/docs/reference/api-docs/)
+
+- [cert-manager on GitHub](https://github.com/jetstack/cert-manager)
+
+### About setting up cert-manager
+
+- [Deep Dive into cert-manager and Cluster Issuers in Kubernetes](https://support.tools/cert-manager-deep-dive/)
+- [Setting up HTTPS with cert-manager (self-signed, LetsEncrypt) in kubernetes](https://someweb.github.io/devops/cert-manager-kubernetes/)
+- [Creating Self Signed Certificates on Kubernetes](https://tech.paulcz.net/blog/creating-self-signed-certs-on-kubernetes/)
+- [Install Certificate Manager Controller in Kubernetes](https://blog.zachinachshon.com/cert-manager/#self-signed-certificate)
+- [How to configure Traefik on Kubernetes with Cert-manager?](https://cloud.theodo.com/en/blog/traefik-kubernetes-certmanager)
+- [PKCS#1 and PKCS#8 format for RSA private key](https://stackoverflow.com/questions/48958304/pkcs1-and-pkcs8-format-for-rsa-private-key)
+
+### About the self-signed CA
+
+- [Self-signed Root CA in Kubernetes with k3s, cert-manager and traefik. Bonus howto on regular certificates](https://raymii.org/s/tutorials/Self_signed_Root_CA_in_Kubernetes_with_k3s_cert-manager_and_traefik.html)
+- [Wikipedia. Certificate authority](https://en.wikipedia.org/wiki/Certificate_authority)
+
+## Navigation
+
+[<< Previous (**G028. K3s cluster setup 11**)](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G030. K3s cluster setup 13**) >>](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md)
diff --git a/G029 - K3s cluster setup 12 ~ Setting up cert-manager and wildcard certificate.md b/G029 - K3s cluster setup 12 ~ Setting up cert-manager and wildcard certificate.md
deleted file mode 100644
index 706225d..0000000
--- a/G029 - K3s cluster setup 12 ~ Setting up cert-manager and wildcard certificate.md
+++ /dev/null
@@ -1,478 +0,0 @@
-# G029 - K3s cluster setup 12 ~ Setting up cert-manager and wildcard certificate
-
-Although Traefik has some capabilities to handle certificates, it's better to use a service specialized on such task. Enter **cert-manager**, a popular certificate management service in the Kubernetes landscape.
-
-## Warning about cert-manager performance
-
-The first time I tried to deploy cert-manager, the deployment failed because the nodes (in particular the server node) didn't had enough CPU cores to run the process properly. And when I managed to deploy cert-manager successfully, I noticed how my cluster's performance degraded severely. Eventually, I could fix these performance issues just by increasing the cores assigned as vCPUs to each VM in my K3s cluster.
-
-Therefore, be aware that, depending on how you've configured your VMs, you may need to improve their assigned hardware capabilities (CPU in particular).
-
-## Deploying cert-manager
-
-At the time of writing this, there's no official Kustomize way for deploying cert-manager. The default method is by applying a yaml manifest, but you can build your own Kustomize procedure with it (as you've done for the `metrics-server` deployment in the previous [**G028** guide](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md#deployment-of-metrics-server)).
-
-1. In your kubectl client system, create a folder structure for cert-manager.
-
- ~~~bash
- $ mkdir -p $HOME/k8sprjs/cert-manager/deployment
- ~~~
-
- The deployment project will be in its own `deployment` subfolder because, later, you'll need to create another project for creating a self-signed wildcard certificate. This second Kustomize project will require the cert-manager service already deployed in your cluster, but must be kept independent from its deployment process.
-
-2. Create a `kustomization.yaml` file in the `deployment` subfolder.
-
- ~~~bash
- $ touch $HOME/k8sprjs/cert-manager/deployment/kustomization.yaml
- ~~~
-
-3. Edit the `kustomization.yaml` file so it has the yaml content below.
-
- ~~~yaml
- # cert-manager setup
- apiVersion: kustomize.config.k8s.io/v1beta1
- kind: Kustomization
-
- resources:
- - 'https://github.com/jetstack/cert-manager/releases/download/v1.6.1/cert-manager.yaml'
- ~~~
-
- You can find the URL for the most recent version of cert-manager right [in the official documentation about the installation procedure](https://cert-manager.io/docs/installation/), although you can also find it on the assets list of [each release](https://github.com/jetstack/cert-manager/releases).
-
-4. Deploy cert-manager with `kubectl`.
-
- ~~~bash
- $ kubectl apply -k $HOME/k8sprjs/cert-manager/deployment/
- ~~~
-
- You'll get a long output of lines indicating the many resources created by the deployment.
-
-### _Verifying the deployment of cert-manager_
-
-After the deployment has finished successfully, give it a minute or so to allow cert-manager to initialize itself and start its pods. Then, you can verify that cert-manager has deployed properly just by checking if its pods are `Running`.
-
-~~~bash
-$ kubectl -n cert-manager get pods
-NAME READY STATUS RESTARTS AGE
-cert-manager-cainjector-967788869-wg8pd 1/1 Running 0 116s
-cert-manager-55658cdf68-6m5k2 1/1 Running 0 116s
-cert-manager-webhook-6668fbb57d-f5xmc 1/1 Running 0 116s
-~~~
-
-Notice the namespace `cert-manager` specified with the `-n` option in the `kubectl` command. The cert-manager service deploys itself in its own `cert-manager` namespace, as MetalLB deployed in its own `metallb-system` namespace.
-
-Remember that you can also check out the all existing namespaces in your K3s cluster with `kubectl`.
-
-~~~bash
-$ kubectl get namespaces
-NAME STATUS AGE
-default Active 2d1h
-kube-system Active 2d1h
-kube-public Active 2d1h
-kube-node-lease Active 2d1h
-metallb-system Active 47h
-cert-manager Active 2m51s
-~~~
-
-### _Installing the cert-manager plugin in your kubectl client system_
-
-To help you to manage the certificates you put in your cluster, [cert-manager has a plugin for kubectl](https://cert-manager.io/docs/usage/kubectl-plugin/). You'll have to install it in your kubectl client system and make it reachable through your user's `$PATH`. Assuming the same scenario as in the [**G026** guide](G026%20-%20K3s%20cluster%20setup%2009%20~%20Setting%20up%20a%20kubectl%20client%20for%20remote%20access.md), where you saw how to set up your kubectl client system, the installation of this cert-manager plugin requires the following steps
-
-1. From the [cert-manager GitHub releases page](https://github.com/jetstack/cert-manager/releases), download the `tar.gz` file that corresponds to the cert-manager version you installed in your cluster and to your kubectl client system. In this guide, you'll install cert-manager `v1.6.1` and the client system is assumed to be a Linux OS running on an amd64 hardware.
-
- ~~~bash
- $ wget https://github.com/jetstack/cert-manager/releases/download/v1.6.1/kubectl-cert_manager-linux-amd64.tar.gz -O $HOME/bin/kubectl-cert_manager-linux-amd64.tar.gz
- ~~~
-
-2. Extract the content of the downloaded `kubectl-cert_manager-linux-amd64.tar.gz`.
-
- ~~~bash
- $ cd $HOME/bin
- $ tar xf kubectl-cert_manager-linux-amd64.tar.gz
- ~~~
-
- This will extract two files, the `kubectl-cert_manager` binary and a `LICENSES` text file that you can remove together with the `kubectl-cert_manager-linux-amd64.tar.gz`.
-
- ~~~bash
- $ rm kubectl-cert_manager-linux-amd64.tar.gz LICENSES
- ~~~
-
-3. Restrict the binary's permissions.
-
- ~~~bash
- $ chmod 700 kubectl-cert_manager
- ~~~
-
-4. Test the cert-manager plugin with `kubectl` by checking its version.
-
- ~~~bash
- $ kubectl cert-manager version
- Client Version: util.Version{GitVersion:"v1.6.1", GitCommit:"5ecf5b5617a4813ea8115da5dcfe3cd18b8ff047", GitTreeState:"clean", GoVersion:"go1.17.1", Compiler:"gc", Platform:"linux/amd64"}
- Server Version: &versionchecker.Version{Detected:"v1.6.1", Sources:map[string]string{"crdLabelVersion":"v1.6.1", "webhookPodImageVersion":"v1.6.1", "webhookPodLabelVersion":"v1.6.1", "webhookServiceLabelVersion":"v1.6.1"}}
- ~~~
-
-5. You can also check if the cert-manager API is accessible.
-
- ~~~bash
- $ kubectl cert-manager check api
- The cert-manager API is ready
- ~~~
-
-Know that the cert-manager's kubectl plugin has other several commands available, [check them out in its official page](https://cert-manager.io/docs/usage/kubectl-plugin/).
-
-## Reflector, a solution for syncing secrets and configmaps
-
-In Kubernetes, a certificate has an associated _secret_ object which is what truly contains the encrypted key. This is a problem in the sense that **secret and configmap objects are not shared among namespaces**. For instance, if you created a certificate in the `cert-manager` namespace, you wouldn't be able to use it directly (meaning, its associated secret) in the `kube-system` namespace. You need to replicate, or sync, the secret somehow in all the namespaces you want to use it.
-
-### _Solving the problem of syncing secrets cluster wide_
-
-What can you do to sync secrets among namespaces in your K3s cluster? There are some options to solve this problem.
-
-- The most basic procedure would be to create a certificate on each namespace in which it's required. This method would force you to create those certificates configured for concrete domains, since you don't want to have several different certificates for the same wildcard domain. Doable only when dealing with a small number of certificates, hard to maintain when their number increases.
-
-- Other procedure is creating one certificate and then cloning it on each namespace. The problem is that, although initially the certificates' secrets would be the same, right after the first renovation, those secrets would change and stop being in sync. This would force you to "sync" them (that is, overwriting the cloned secrets with the one you want to have in common) manually, something you can imagine is rather cumbersome.
-
-- [In this GitHub page](https://github.com/zakkg3/ClusterSecret) there's a definition for a Kubernetes object called **ClusterSecret**. As it's name implies, this object is designed to be a secret shared cluster wide. It's a very interesting and valid option, but it has been designed with only secret objects in mind, and also implies some manual tinkering on your certificates' secrets.
-
-- The ideal option is to have an addon capable of handling certificates cluster wide properly. And no, cert-manager is not capable of doing this, but [in its documentation](https://cert-manager.io/docs/faq/kubed/) they recommend using an addon called [**kubed**](https://github.com/kubeops/kubed): "_Kubed can keep ConfigMaps and Secrets synchronized across namespaces and/or clusters_". Although it sounds that it could fit our case, this addon presents two problems:
- - Kubed doesn't handle the cert-manager certificates themselves, you would still be forced to tinker with their corresponding secrets so they can be handled properly by kubed.
- - Kubed [needs Helm to be installed](https://appscode.com/products/kubed/v0.12.0/setup/install/), no matter what. This is not a problem per se (it would only imply installing Helm in your kubectl client system), but in this guide series I want to stick with `kubectl` since it's the standard basic way of managing any Kubernetes cluster.
-
-- There's another addon, called [Reflector](https://github.com/EmberStack/kubernetes-reflector), which not only handles "mirroring" of **secrets and configmaps** among different namespaces present in a cluster, but also has an extension that explicitly manages the secrets of cert-manager certificates **automatically**. Furthermore, it has a manifest file deployable with `kubectl`. Hence why I've chosen this addon to make your domain's certificate available cluster wide.
-
-### _Deploying Reflector_
-
-Let's create a dedicated Kustomize project for Reflector and deploy it in your K3s cluster.
-
-1. Create the folder for the Kustomize project.
-
- ~~~bash
- $ mkdir -p $HOME/k8sprjs/reflector
- ~~~
-
-2. Create the `kustomization.yaml` file.
-
- ~~~bash
- $ touch $HOME/k8sprjs/reflector/kustomization.yaml
- ~~~
-
-3. Put in `kustomization.yaml` the following content.
-
- ~~~yaml
- # Reflector setup
- apiVersion: kustomize.config.k8s.io/v1beta1
- kind: Kustomization
-
- namespace: kube-system
-
- resources:
- - https://github.com/emberstack/kubernetes-reflector/releases/latest/download/reflector.yaml
- ~~~
-
- See that the link to the yaml manifest specifies "_latest_" as version which, at the time of writing this, is [the _v6.0.42_ one](https://github.com/emberstack/kubernetes-reflector/releases/tag/v6.0.42). Also notice how there's a `namespace` parameter pointing to `kube-system`, which will make Reflector be deployed in that namespace.
-
-4. Deploy the Reflector Kustomize project.
-
- ~~~bash
- $ kubectl apply -k $HOME/k8sprjs/reflector/
- ~~~
-
-5. The addon will be deployed in the `kube-system` namespace and start a pod on one of your agents. Check it out with `kubectl`.
-
- ~~~bash
- $ kubectl -n kube-system get pods
- NAME READY STATUS RESTARTS AGE
- helm-install-traefik-crd--1-bjv95 0/1 Completed 0 2d4h
- helm-install-traefik--1-zb5gb 0/1 Completed 1 2d4h
- local-path-provisioner-64ffb68fd-zxm2v 1/1 Terminated 5 (60m ago) 2d4h
- coredns-85cb69466-9l6ws 1/1 Terminated 5 (146m ago) 2d4h
- traefik-74dd4975f9-tdv42 1/1 Terminated 5 (146m ago) 2d2h
- metrics-server-5b45cf8dbb-nv477 1/1 Terminated 3 (146m ago) 21h
- reflector-5f484c4868-8wgkz 1/1 Running 0 64s
- ~~~
-
- Also notice how other pods appear with their `STATUS` as `Terminated`, although they count as `READY` and, therefore, they should have the `Running` status like your newest reflector pod. This is an example of an odd consequence of configuring the graceful shutdown on your K3s nodes, as I already warned you about in the last section of the [G025 guide](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md).
-
-## Setting up a wildcard certificate for a domain
-
-You have the tools deployed in your cluster, now you can create a wildcard certificate for a domain. In this case, I'll configure the certificate for the domain I've been using all along this guide series, `deimos.cloud`, as an example.
-
-1. Create a folder structure for a Kustomize project within the already existing `cert-manager` path.
-
- ~~~bash
- $ mkdir -p $HOME/k8sprjs/cert-manager/certificates/resources
- ~~~
-
-2. In the `resources` directory, create three empty files as follows.
-
- ~~~bash
- $ touch $HOME/k8sprjs/cert-manager/certificates/resources/{certificates.namespace.yaml,cluster-issuer-selfsigned.cluster-issuer.cert-manager.yaml,wildcard.deimos.cloud-tls.certificate.cert-manager.yaml}
- ~~~
-
- Each one will contain the yaml describing a particular resource required for setting up the certificate.
-
-3. In `certificates.namespace.yaml` put the following yaml.
-
- ~~~yaml
- apiVersion: v1
- kind: Namespace
-
- metadata:
- name: certificates
- ~~~
-
- This is the `certificates` namespace, which will help you to organize your own certificates and distinguish them from any other certificates and secrets already present in your cluster.
-
-4. In the `cluster-issuer-selfsigned.cluster-issuer.cert-manager.yaml` file, copy the yaml below.
-
- ~~~yaml
- # Generic self-signed cluster-wide issuer for certificates
- apiVersion: cert-manager.io/v1
- kind: ClusterIssuer
-
- metadata:
- name: cluster-issuer-selfsigned
- spec:
- selfSigned: {}
- ~~~
-
- This is the issuer that will sign your certificates. Notice several things in the short yaml above.
-
- - The `apiVersion` points to the cert-manager API, not to the Kubernetes one.
-
- - The `kind` is `ClusterIssuer` (a cert-manager kind, not a Kubernetes one), meaning this particular issuer will be available cluster wide.
-
- - The `name` is a descriptive string, like the yaml filename.
-
- - Within the `spec` section, you see the empty parameter `selfSigned`. This means that this issuer is of the simplest type you can have, the self signed one. It's **not trusted** by browsers, but it's enough to generate certificates that you can use within your own local or home network.
-
-5. In `wildcard.deimos.cloud-tls.certificate.cert-manager.yaml`, copy the whole yaml below.
-
- ~~~yaml
- # Wildcard certificate for deimos.cloud
- apiVersion: cert-manager.io/v1
- kind: Certificate
-
- metadata:
- name: wildcard.deimos.cloud-tls
- namespace: certificates
- spec:
- secretName: wildcard.deimos.cloud-tls
- secretTemplate:
- annotations:
- reflector.v1.k8s.emberstack.com/reflection-allowed: "true"
- reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "kube-system"
- reflector.v1.k8s.emberstack.com/reflection-auto-enabled: "true"
- reflector.v1.k8s.emberstack.com/reflection-auto-namespaces: "kube-system"
- duration: 8760h # No certificate should last more than a year
- renewBefore: 720h # Certificates must be renewed some time before they expire (30 days)
- isCA: false
- subject:
- organizations:
- - "Deimos"
- privateKey:
- algorithm: ECDSA
- size: 384
- encoding: PKCS8
- rotationPolicy: Always
- dnsNames:
- - "*.deimos.cloud"
- - "deimos.cloud"
- issuerRef:
- name: cluster-issuer-selfsigned
- kind: ClusterIssuer
- group: cert-manager.io
- ~~~
-
- To know more about all the parameters shown above, check [the cert-manager v1 api document here](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1) and also [the Reflector's documentation about its cert-manager support](https://github.com/EmberStack/kubernetes-reflector#cert-manager-support). Still, I'll explain below some particular details of this yaml file.
-
- - Many of the parameters are optional, and there are more that are not used here.
-
- - Here the API is also a cert-manager one. Be careful of the `apiVersion` you use. Cert-manager has several, [each with its own API documentation](https://cert-manager.io/docs/reference/api-docs/).
-
- - The `spec.secretTemplate` section allows you to put metadata annotations in the secret generated for this certificate, something you need to use to put the values Reflector needs to clone this secret in other namespaces. The `reflector.v1.k8s.emberstack.com` parameters are the ones that enable Reflector to manage the secret of this certificate.
- - `reflection-allowed` allows Reflector to mirror this certificate's secret in other namespaces.
- - `reflection-allowed-namespaces` contains the list of namespaces in which Reflector has to clone this certificate's secret.
- - `reflection-auto-enabled` allows Reflector to clone automatically this certificate's secret in other namespaces.
- - `reflection-auto-namespaces` is the list of namespaces in which Reflector can clone this certificate's secret automatically.
- > **BEWARE!**
- > Reflector won't notice the changes made to the annotations in the certificate resource itself. It's only aware of what's specified in the directly related secret generated from this certificate. In upcoming guides I'll show you how to deal with changes in these annotations so Reflector does its thing as expected.
-
- - The parameter `spec.isCA` allows you to turn a certificate into a Certificate Authority. When the value is `true`, you can use this certificate to sign other certificates issued by other issuers that rely on this CA's secret. In this case it's left as `false` to avoid complicating things further at this point. You can find an example of how to bootstrap an issuer with a self-signed CA [in this cert-manager page](https://cert-manager.io/docs/configuration/selfsigned/#bootstrapping-ca-issuers).
-
- - In the `spec.privateKey` section, be careful of always having `rotationPolicy` set as `Always`. This makes cert-manager regenerate the certificate's secret rather than reusing the current one. This policy about private key rotation is also [described in the cert-manager documentation](https://cert-manager.io/docs/usage/certificate/#configuring-private-key-rotation).
-
- - In the `spec.dnsNames` you can put any domain names you like, not necessarily just the ones related to a particular main domain. For instance, you can have `your.domain.com` and `another.domain.io` put in that list.
-
- - In the `spec.issuerRef` you specify the issuer of this certificate, in this case the `cluster-issuer-selfsigned` one you created in previous steps. Be careful to always specify its `kind` as well, in particular for `ClusterIssuer` types, so you know clearly what kind of issuer you've used with each certificate.
-
-6. Next, create the `kustomization.yaml` file in the `certificates` folder.
-
- ~~~bash
- $ touch $HOME/k8sprjs/cert-manager/certificates/kustomization.yaml
- ~~~
-
-7. Copy in `kustomization.yaml` the following yaml.
-
- ~~~yaml
- # Certificates deployment
- apiVersion: kustomize.config.k8s.io/v1beta1
- kind: Kustomization
-
- resources:
- - resources/certificates.namespace.yaml
- - resources/cluster-issuer-selfsigned.cluster-issuer.cert-manager.yaml
- - resources/wildcard.deimos.cloud-tls.certificate.cert-manager.yaml
- ~~~
-
-8. Apply the Kustomize project into your cluster.
-
- ~~~bash
- $ kubectl apply -k $HOME/k8sprjs/cert-manager/certificates
- ~~~
-
-9. Confirm that the resources have been deployed in the cluster.
-
- ~~~bash
- $ kubectl get namespaces
- NAME STATUS AGE
- default Active 2d6h
- kube-system Active 2d6h
- kube-public Active 2d6h
- kube-node-lease Active 2d6h
- metallb-system Active 2d4h
- cert-manager Active 4h41m
- certificates Active 87s
-
- $ kubectl -n kube-system get clusterissuer
- NAME READY AGE
- cluster-issuer-selfsigned True 3m22s
-
- $ kubectl -n certificates get certificate
- NAME READY SECRET AGE
- wildcard.deimos.cloud-tls True wildcard.deimos.cloud-tls 4m
-
- $ kubectl get secrets -A | grep wildcard
- certificates wildcard.deimos.cloud-tls kubernetes.io/tls 3 4m30s
- kube-system wildcard.deimos.cloud-tls kubernetes.io/tls 3 4m29s
- ~~~
-
- The cluster issuer shows up, and your certificate is there in the `certificates` namespace. Your certificate's secret is also in that same `certificates` namespace, but Reflector has done its job automatically and reflected the secret into the `kube-system` namespace.
-
-> **BEWARE!**
-> If you delete the certificate from the cluster, the copies of its secret won't be removed with it. You'll have to delete them manually.
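-
-For instance, to remove by hand the copy that Reflector created in the `kube-system` namespace (a sketch using this guide's secret name), you could run the following.
-
-~~~bash
-$ kubectl -n kube-system delete secret wildcard.deimos.cloud-tls
-~~~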
-
-## Checking your certificate with the `kubectl` cert-manager plugin
-
-Remember that the kubectl cert-manager plugin can help you in handling your certificates. For instance, you would execute the following command to see the status of the certificate you've created before.
-
-~~~bash
-$ kubectl cert-manager status certificate -n certificates wildcard.deimos.cloud-tls
-Name: wildcard.deimos.cloud-tls
-Namespace: certificates
-Created at: 2021-11-30T18:41:42+01:00
-Conditions:
- Ready: True, Reason: Ready, Message: Certificate is up to date and has not expired
-DNS Names:
-- *.deimos.cloud
-- deimos.cloud
-Events:
- Type Reason Age From Message
- ---- ------ ---- ---- -------
- Normal Issuing 10m cert-manager Issuing certificate as Secret does not exist
- Normal Generated 10m cert-manager Stored new private key in temporary Secret resource "wildcard.deimos.cloud-tls-6rfrc"
- Normal Requested 10m cert-manager Created new CertificateRequest resource "wildcard.deimos.cloud-tls-b4jx4"
- Normal Issuing 10m cert-manager The certificate has been successfully issued
-Issuer:
- Name: cluster-issuer-selfsigned
- Kind: ClusterIssuer
- Conditions:
- Ready: True, Reason: IsReady, Message:
- Events:
-Secret:
- Name: wildcard.deimos.cloud-tls
- Issuer Country:
- Issuer Organisation: Deimos
- Issuer Common Name:
- Key Usage: Digital Signature, Key Encipherment
- Extended Key Usages:
- Public Key Algorithm: ECDSA
- Signature Algorithm: ECDSA-SHA384
- Subject Key ID:
- Authority Key ID:
- Serial Number: efd14f0cb15d9f179b9e5c68bb6a3205
- Events:
-Not Before: 2021-11-30T18:41:43+01:00
-Not After: 2022-11-30T18:41:43+01:00
-Renewal Time: 2022-10-31T18:41:43+01:00
-No CertificateRequest found for this Certificate
-~~~
-
-## Cert-manager and Reflector's Kustomize projects attached to this guide series
-
-You can find the Kustomize projects for the cert-manager and Reflector deployments in the following attached folders.
-
-- `k8sprjs/cert-manager`
-- `k8sprjs/reflector`
-
-## Relevant system paths
-
-### _Folders in `kubectl` client system_
-
-- `$HOME/bin`
-- `$HOME/k8sprjs/cert-manager`
-- `$HOME/k8sprjs/cert-manager/deployment`
-- `$HOME/k8sprjs/cert-manager/certificates`
-- `$HOME/k8sprjs/cert-manager/certificates/resources`
-- `$HOME/k8sprjs/reflector`
-
-### _Files in `kubectl` client system_
-
-- `$HOME/bin/kubectl-cert_manager`
-- `$HOME/k8sprjs/cert-manager/deployment/kustomization.yaml`
-- `$HOME/k8sprjs/cert-manager/certificates/kustomization.yaml`
-- `$HOME/k8sprjs/cert-manager/certificates/resources/certificates.namespace.yaml`
-- `$HOME/k8sprjs/cert-manager/certificates/resources/cluster-issuer-selfsigned.cluster-issuer.cert-manager.yaml`
-- `$HOME/k8sprjs/cert-manager/certificates/resources/wildcard.deimos.cloud-tls.certificate.cert-manager.yaml`
-- `$HOME/k8sprjs/reflector/kustomization.yaml`
-
-## References
-
-### _cert-manager_
-
-- [cert-manager official site](https://cert-manager.io/)
-- [cert-manager on GitHub](https://github.com/jetstack/cert-manager)
-- [cert-manager installation with Kubectl](https://cert-manager.io/docs/installation/kubectl/)
-- [Using Kubectl's new Kustomize support for per-environment deployment of cert-manager resources](https://www.jetstack.io/blog/kustomize-cert-manager/)
-- [cert-manager compatibility with Kubernetes Platform Providers](https://cert-manager.io/docs/installation/compatibility/)
-- [cert-manager kubectl plugin](https://cert-manager.io/docs/usage/kubectl-plugin/)
-- [cert-manager API reference docs](https://cert-manager.io/docs/reference/api-docs/)
-- [cert-manager docs](https://cert-manager.io/docs/)
-- [Installing and using Cert-Manager in Kubernetes](https://headworq.eu/en/installing-and-using-cert-manager-in-kubernetes/)
-- [Use of Let's Encrypt wildcard certs in Kubernetes](https://rimusz.net/lets-encrypt-wildcard-certs-in-kubernetes)
-- [Setting up HTTPS with cert-manager (self-signed, LetsEncrypt) in kubernetes](https://someweb.github.io/devops/cert-manager-kubernetes/)
-- [Creating Self Signed Certificates on Kubernetes](https://tech.paulcz.net/blog/creating-self-signed-certs-on-kubernetes/)
-- [Install Certificate Manager Controller in Kubernetes](https://blog.zachinachshon.com/cert-manager/#self-signed-certificate)
-- [How to configure Traefik on Kubernetes with Cert-manager?](https://www.padok.fr/en/blog/traefik-kubernetes-certmanager)
-- [PKCS#1 and PKCS#8 format for RSA private key](https://stackoverflow.com/questions/48958304/pkcs1-and-pkcs8-format-for-rsa-private-key)
-- [Add to documentation: change default port of webhook when using hostNetwork and default Kubelet port settings](https://github.com/jetstack/cert-manager/issues/3472)
-- [add "webhook.hostNetwork" to helm chart](https://github.com/jetstack/cert-manager/issues/3163)
-- [Compatibility with Kubernetes Platform Providers. AWS EKS](https://cert-manager.io/docs/installation/compatibility/#aws-eks)
-
-### _Reflector_
-
-- [Reflector on GitHub](https://github.com/EmberStack/kubernetes-reflector)
-
-### _The problem of syncing secrets_
-
-- [ClusterSecret](https://github.com/zakkg3/ClusterSecret)
-- [Kubed official page](https://appscode.com/products/kubed/)
-- [Kubed on GitHub](https://github.com/kubeops/kubed)
-- [Faq cert-manager. Syncing Secrets Across Namespaces](https://cert-manager.io/docs/faq/kubed/)
-- [Sharing secret across namespaces](https://stackoverflow.com/questions/46297949/sharing-secret-across-namespaces)
-
-## Navigation
-
-[<< Previous (**G028. K3s cluster setup 11**)](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G030. K3s cluster setup 13**) >>](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md)
diff --git a/G030 - K3s cluster setup 13 ~ Deploying the Kubernetes Dashboard.md b/G030 - K3s cluster setup 13 ~ Deploying the Kubernetes Dashboard.md
deleted file mode 100644
index c2dbed2..0000000
--- a/G030 - K3s cluster setup 13 ~ Deploying the Kubernetes Dashboard.md
+++ /dev/null
@@ -1,229 +0,0 @@
-# G030 - K3s cluster setup 13 ~ Deploying the Kubernetes Dashboard
-
-To monitor what's going on in your K3s cluster in a more visual manner, you can use the Kubernetes Dashboard. It's a web UI from which you can manage the resources running in your Kubernetes cluster.
-
-> **BEWARE!**
-> To have metrics and graphics available, the Kubernetes Dashboard requires having the **metrics-server** service already running in your cluster.
-
-## Deploying Kubernetes Dashboard
-
-Deploying Kubernetes Dashboard is rather simple, although you also need to set up an administrator user for using this app. Let's do all this in a Kustomize project.
-
-1. In your kubectl client system, create the folder structure for the Kubernetes Dashboard's Kustomize project.
-
- ~~~bash
- $ mkdir -p $HOME/k8sprjs/k8sdashboard/resources
- ~~~
-
-2. Next, create two files under the `resources` folder.
-
- ~~~bash
- $ touch $HOME/k8sprjs/k8sdashboard/resources/{admin-user.clusterrolebinding.cluster-admin.clusterrole.yaml,admin-user.serviceaccount.yaml}
- ~~~
-
-3. In `admin-user.serviceaccount.yaml`, copy the content below.
-
- ~~~yaml
- apiVersion: v1
- kind: ServiceAccount
-
- metadata:
- name: admin-user
- ~~~
-
- This is the resource definition of the Kubernetes Dashboard's administrator user.
-
-4. Fill the `admin-user.clusterrolebinding.cluster-admin.clusterrole.yaml` file with the following yaml.
-
- ~~~yaml
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRoleBinding
-
- metadata:
- name: admin-user
- roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cluster-admin
- subjects:
- - kind: ServiceAccount
- name: admin-user
- ~~~
-
- This file describes a `ClusterRoleBinding` that binds the built-in `cluster-admin` cluster role to the `admin-user` service account (referenced in the `subjects` section) defined in the previous file.
-
-5. Create the `kustomization.yaml` file for the Kustomize project.
-
- ~~~bash
- $ touch $HOME/k8sprjs/k8sdashboard/kustomization.yaml
- ~~~
-
-6. Put in the `kustomization.yaml` file the following lines.
-
- ~~~yaml
- # Kubernetes Dashboard setup
- apiVersion: kustomize.config.k8s.io/v1beta1
- kind: Kustomization
-
- namespace: kubernetes-dashboard
-
- resources:
- - https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
- - resources/admin-user.clusterrolebinding.cluster-admin.clusterrole.yaml
- - resources/admin-user.serviceaccount.yaml
- ~~~
-
- Notice how the resources listed up there are a mix of a remote one and the local ones defining the cluster role binding and the administrator user. Also see how I've specified the `namespace` as `kubernetes-dashboard`. This namespace comes declared in the `recommended.yaml` file and will be applied to all resources of this deployment, except the `ClusterRoleBinding` you defined before and other non-namespaced resources.
-
-7. Apply the Kustomize project to your cluster.
-
- ~~~bash
- $ kubectl apply -k $HOME/k8sprjs/k8sdashboard
- ~~~
-
-8. Verify that the Kubernetes Dashboard's corresponding pods and services are running in the `kubernetes-dashboard` namespace.
-
- ~~~bash
- $ kubectl get pods,svc -n kubernetes-dashboard
- NAME READY STATUS RESTARTS AGE
- pod/dashboard-metrics-scraper-c45b7869d-fl9tg 1/1 Running 0 86s
- pod/kubernetes-dashboard-576cb95f94-2szjp 1/1 Running 0 86s
-
- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- service/dashboard-metrics-scraper ClusterIP 10.43.181.120 8000/TCP 86s
- service/kubernetes-dashboard ClusterIP 10.43.101.112 443/TCP 86s
- ~~~
-
-### _Getting the administrator user's secret token_
-
-To log into your Kubernetes Dashboard with the administrator user created in the deployment, you'll need to use its secret token to authenticate in the app. You can get this token with the `kubectl` command, but in a different way depending on which version of Kubernetes you're working with.
-
-#### **On v1.23 and older.**
-
-Execute the following.
-
-~~~bash
-$ kubectl -n kubernetes-dashboard describe secret admin-user-token
-~~~
-
-This will output the whole description of the `admin-user-token` secret as follows.
-
-~~~bash
-Name: admin-user-token-qb2t5
-Namespace: kubernetes-dashboard
-Labels:
-Annotations: kubernetes.io/service-account.name: admin-user
- kubernetes.io/service-account.uid: bc9adea8-4b16-37d0-8097-7c0d5068dc5a
-
-Type: kubernetes.io/service-account-token
-
-Data
-====
-ca.crt: 570 bytes
-namespace: 20 bytes
-token: eycm5ldGJpdWJlVI6ImFkbWlzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9ueyJhc3MiOiJriJSUImtpZCI6zI1NiIsrdWJlcm5ldIlJUeVVjUzN0TnloUFZTYTgxMnNYUDZYYmFoX1lzeXlTRm81bXV3QktBY3MifQ.11c2VyLYW1lc3BhY2bGciOUiOiJGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbiXR0ZDA1LTQva2VuLXdoODJxIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSuLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJhZDlhYmFhOC05ZDAtODA51kYXNoYm9hcmQ6YWNy03YzFkZTAyOGNiNWEiLCJzdWIiOiJzeXN0ZW06c2VydmljZ3ViWFjY291bnQ6aZXJuZXRlcyRtaW4tdXNlciJ9.rACsldkM7A0_Om-akoEh_5GJxfYo1_Wk6hM_TkiyLZuG-RxrtpEfDk23L3QeO1LyJmx1nQOyPSsqhL8A6d4LrDau-WMKNbI1CFYT3FMusMObi66ved7mnOz99M4MWB48ULUDeh0kAENeiMIeYpSIUs-B61J8G6LyHid0_18zWAVytRPwciksWteX8zB_2XW_oB6T0-6TICepPREke5NUwow0dYWy5kFvFx6MNeOT_iRfbdaGI1ROo4MI2_mmBFft0B7Ri6xJ4Hinfo2Q-cI95410tpJC0iX9B_t-Gt-XaDu2oX-9MZTUEJrD2XxdVx3aZM3PopmlTMb44DC149lllw
-~~~
-
-The value you need is the `token` one. Notice that it's just a long encoded string, not unlike the keys you created in previous guides for accessing your VMs through SSH. Save it in a safe place such as a password manager.
-
-To get only the token line, execute the same `kubectl` command but piped with `grep`.
-
-~~~bash
-$ kubectl -n kubernetes-dashboard describe secret admin-user-token | grep '^token'
-~~~
-
-#### **On v1.24+**
-
-Execute the command below.
-
-~~~bash
-$ kubectl -n kubernetes-dashboard create token admin-user
-eyJhbGciOiJSUzI1NiIsImtpZCI6ImtxNzh0bmk3cDAzVU4zXzFnMVgwZXVSR3c0U1FnNVZ3OUtSdDBSTkw2WmsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXFiMnQ1Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI4MjU4Mjc4ZC02YjBmLTQwZDItOTI1Yy1kMzEwMmY3MTkxYzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.PG-4qfeT3C6vFfwhdGDoXVmjDEU7TJDTftcmIa2kQO0HtWM8ZN45wDGk4ZSWUR5mO5HlXpYORiGkKHq6GNPFRr_qCo4tKIONyZbgXtV98P6OpOIrfDTJCwxjFf0aqOmEs1N3BqViFs3MgBRCLElx98rD6AXehdxPADXlAksnaypKKx6q1WFgNmOTHfC9WrpQzX-qoo8CbRRCuSyTagm3qkpa5hV5RjyKjE7IaOqQGwFOSbTqMy6eghTYSufC-uUxcOWw3OPVa9QzINOn9_tioxj7tH7rpw_eOHzUW_-Cr_HE89DygnuZAqQEsWxBLfYcrBKtnMhxn49E22SyCaJldA
-~~~
-
-See how the command outputs your admin-user's secret token string directly. Remember to copy and save it somewhere safe such as a password manager.
-
-## Testing Kubernetes Dashboard
-
-Now that you have both the app deployed and the user created, you can test the dashboard.
-
-1. First you need to enable a proxy on your kubectl client system.
-
- ~~~bash
- $ kubectl proxy
- Starting to serve on 127.0.0.1:8001
- ~~~
-
- This proxy gives you access to the services running in your cluster as if they were running locally in your own client system. The command won't return control to the bash shell, so you'll have to stop it with `Ctrl+C`. If you want to make this proxy run in the background, execute the command as follows.
-
- ~~~bash
- $ kubectl proxy &
- [1] 5151
- $ Starting to serve on 127.0.0.1:8001
- ~~~
-
- Notice the `&` at the command's end; it tells bash to execute the command as a background process. The `[1] 5151` line shows the job number and the PID of this process, so you can identify it later to stop it with the `kill` command or with `htop`. You can also use the `fg` command to bring this program back to your shell's foreground and then stop it with `Ctrl+C`.
-
-2. Open a browser in your client system and go to `http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/`. You'll see the following page.
-
- 
-
- See that it offers you two ways of signing into your dashboard, with a token or using a kubeconfig file. If you try to use the kubeconfig file you already have in your client (in the `.kube/config` path), you'll get the warning `Internal error (500): Not enough data to create auth info structure`. So, you can only use the token you got before to get into the dashboard. Enter the token and click on the `Sign In` button.
-
-3. After signing in, you'll get directly into the `Workloads` page, which will look like the snapshot below.
-
- 
-
- It's empty! But you already have several services deployed and running in your cluster! Worry not, this is just a matter of **namespaces**. Next to the `Search` bar you'll find a folded namespaces list.
-
- 
-
- Just select the `All namespaces` option to see all the workloads running in your cluster.
-
- 
-
- This is something else, right? Now you can see all the workloads running under all the namespaces present in your K3s cluster. Remember this: many Kubernetes resources are namespaced, and you'll usually end up having several different namespaces active in your cluster.
-
-4. To stop the dashboard, just `Ctrl+C` out of the `kubectl proxy` process you started before, although not before signing out of the dashboard itself.
-
- 
-
- If you made the kubectl proxy run in the background with `&`, you'll need its PID so you can tell the `kill` command what process to stop.
-
- ~~~bash
- $ kill -15 5151
- ~~~
-
- Up there I've used the `SIGTERM` (`15`) signal to stop the process but, if for some reason the process doesn't stop with it, use the `SIGKILL` (`9`) signal instead. And if you don't remember the process' PID, just use `htop` to locate and stop the process in a more visual manner.
-
-## Kubernetes Dashboard's Kustomize project attached to this guide series
-
-You can find the Kustomize project for this Kubernetes Dashboard deployment in the following attached folder.
-
-- `k8sprjs/k8sdashboard`
-
-## Relevant system paths
-
-### _Folders in `kubectl` client system_
-
-- `$HOME/k8sprjs/k8sdashboard`
-- `$HOME/k8sprjs/k8sdashboard/resources`
-
-### _Files in `kubectl` client system_
-
-- `$HOME/k8sprjs/k8sdashboard/kustomization.yaml`
-- `$HOME/k8sprjs/k8sdashboard/resources/admin-user.clusterrolebinding.cluster-admin.clusterrole.yaml`
-- `$HOME/k8sprjs/k8sdashboard/resources/admin-user.serviceaccount.yaml`
-
-## References
-
-### _Kubernetes Dashboard_
-
-- [Kubernetes Web UI (Dashboard)](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/)
-- [Kubernetes Dashboard on GitHub](https://github.com/kubernetes/dashboard)
-- [Kubernetes Dashboard on K3s](https://rancher.com/docs/k3s/latest/en/installation/kube-dashboard/)
-- [My Kubernetes Dashboard and How To Deploy Yours](https://ikarus.sg/k8s-dashboard/)
-
-## Navigation
-
-[<< Previous (**G029. K3s cluster setup 12**)](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G031. K3s cluster setup 14**) >>](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md)
diff --git a/G030 - K3s cluster setup 13 ~ Enabling the Traefik dashboard.md b/G030 - K3s cluster setup 13 ~ Enabling the Traefik dashboard.md
new file mode 100644
index 0000000..e2bdcbe
--- /dev/null
+++ b/G030 - K3s cluster setup 13 ~ Enabling the Traefik dashboard.md
@@ -0,0 +1,438 @@
+# G030 - K3s cluster setup 13 ~ Enabling the Traefik dashboard
+
+- [Traefik is the embedded ingress controller of K3s](#traefik-is-the-embedded-ingress-controller-of-k3s)
+- [Enabling access to the Traefik dashboard](#enabling-access-to-the-traefik-dashboard)
+ - [Defining a user for the Traefik dashboard](#defining-a-user-for-the-traefik-dashboard)
+ - [Kustomize project for enabling access to the Traefik dashboard](#kustomize-project-for-enabling-access-to-the-traefik-dashboard)
+- [Getting into the Traefik dashboard](#getting-into-the-traefik-dashboard)
+- [What to do if Traefik's dashboard has bad performance](#what-to-do-if-traefiks-dashboard-has-bad-performance)
+- [Traefik dashboard's Kustomize project attached to this guide](#traefik-dashboards-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](#relevant-system-paths)
+ - [Folders in `kubectl` client system](#folders-in-kubectl-client-system)
+ - [Files in `kubectl` client system](#files-in-kubectl-client-system)
+- [References](#references)
+ - [Traefik documentation](#traefik-documentation)
+ - [Traefik in K3s](#traefik-in-k3s)
+ - [Kubernetes Documentation](#kubernetes-documentation)
+ - [Traefik IngressRoute Vs Ingress](#traefik-ingressroute-vs-ingress)
+ - [Kustomize](#kustomize)
+- [Navigation](#navigation)
+
+## Traefik is the embedded ingress controller of K3s
+
+Traefik is the default ingress controller that already comes embedded in K3s. In other words, you can enable access to services running in your cluster through Traefik ingresses, instead of just assigning them external IPs directly (in particular, with the MetalLB load balancer).
+
+Traefik in K3s comes with its embedded web dashboard enabled by default, but reaching it requires a particular setup not clearly explained in the official documentation of either [Traefik](https://doc.traefik.io/traefik/reference/install-configuration/api-dashboard/) or [K3s](https://docs.k3s.io/networking/networking-services?_highlight=traefik#traefik-ingress-controller).
+
+## Enabling access to the Traefik dashboard
+
+This chapter shows you how to enable HTTPS access to your Traefik dashboard by doing the following:
+
+1. Defining a user to restrict access to the Traefik dashboard.
+2. Declaring a `Service` that exposes, on port `443`, the `websecure` port of the pod running Traefik in your cluster.
+3. Declaring an `IngressRoute` to the Traefik dashboard `Service` enabling access to Traefik's websecure port.
+
+The first step is just the execution of a command on your `kubectl` client. The other two go together in the corresponding Kustomize project.
+
+### Defining a user for the Traefik dashboard
+
+Secure the access to your Traefik dashboard by defining at least one user with a password. Traefik demands passwords hashed using MD5, SHA1, or BCrypt, and recommends using the `htpasswd` command to generate them:
+
+1. Start by installing, in your `kubectl` client system, the package providing the `htpasswd` command. The package is `apache2-utils` and, on a Debian based system, you can install it with `apt`:
+
+ ~~~sh
+ $ sudo apt install -y apache2-utils
+ ~~~
+
+2. Next, use `htpasswd` to generate a user called, for instance, `tfkuser`, with its password hashed using the BCrypt algorithm:
+
+ ~~~sh
+ $ htpasswd -nb -B -C 9 tfkuser Pu7Y0urV3ryS3cr3tP4ssw0rdH3r3
+ tfkuser:$2y$17$0mdP4WLdbj8BWj1lIJMDb.bXyYK75qR5AfRNzuunZuCamvAlqDlo.
+ ~~~
+
+ > [!IMPORTANT]
+ > **Be careful with the value you set to the `-C` option!**\
+ > This option indicates the computing time used by the BCrypt algorithm for hashing and, if you set it too high, the Traefik dashboard could end up not loading at all. The value you can type here must be between 4 and 17, and the default is 5.
+
+Keep the output of `htpasswd` at hand; you will use that hashed string in the next procedure.
+
+### Kustomize project for enabling access to the Traefik dashboard
+
+The next steps set up and deploy the Kustomize project enabling access to your Traefik dashboard:
+
+1. Create the Kustomize project's folder structure:
+
+ ~~~sh
+ $ mkdir -p $HOME/k8sprjs/traefik-dashboard/{resources,secrets}
+ ~~~
+
+ This project has one `resources` subfolder for storing Kubernetes resource declarations, and a `secrets` one to keep a file with the secret string describing the user for accessing the Traefik dashboard.
+
+2. Create the following files within the Kustomize project:
+
+ ~~~sh
+ $ touch $HOME/k8sprjs/traefik-dashboard/resources/{traefik-dashboard-basicauth.middleware.traefik.yaml,traefik-dashboard.ingressroute.traefik.yaml,traefik-dashboard.service.yaml} $HOME/k8sprjs/traefik-dashboard/secrets/users
+ ~~~
+
+3. In `resources/traefik-dashboard-basicauth.middleware.traefik.yaml` declare the authentication method that will be used for logging into the Traefik dashboard:
+
+ ~~~yaml
+ # Basic authentication method for Traefik dashboard
+ apiVersion: traefik.io/v1alpha1
+ kind: Middleware
+
+ metadata:
+ name: traefik-dashboard-basicauth
+ namespace: kube-system
+ spec:
+ basicAuth:
+ secret: traefik-dashboard-basicauth-secret
+ ~~~
+
+ A `Middleware` is a custom Traefik resource, used in this case for configuring a basic authentication method (a user and password login system). In the `spec.basicAuth.secret` parameter, this middleware invokes a `secret` resource which you'll declare in a later step of this procedure.
+
+4. Using `kubectl`, see the current external IP MetalLB has assigned to the services running in your cluster:
+
+ ~~~sh
+ $ kubectl get svc -A
+ NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ cert-manager cert-manager ClusterIP 10.43.113.216 9402/TCP 13d
+ cert-manager cert-manager-cainjector ClusterIP 10.43.206.23 9402/TCP 13d
+ cert-manager cert-manager-webhook ClusterIP 10.43.118.166 443/TCP,9402/TCP 13d
+ default kubernetes ClusterIP 10.43.0.1 443/TCP 28d
+ kube-system kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 28d
+ kube-system metrics-server ClusterIP 10.43.50.63 443/TCP 15d
+ kube-system traefik LoadBalancer 10.43.174.63 10.7.0.0 80:30512/TCP,443:32647/TCP 28d
+ metallb-system metallb-webhook-service ClusterIP 10.43.126.18 443/TCP 25d
+ ~~~
+
+ At this point, the only service with an external IP assigned is Traefik, but you cannot use that address even for accessing its own dashboard. You have to pick the next IP available [in the address pool you enabled in MetalLB](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#choosing-the-ip-ranges-for-metallb), which for this guide's setup is `10.7.0.1`.
+
+ > [!NOTE]
+ > **The Traefik dashboard is not accessible through the existing `traefik` service**\
+ > It is not possible to reach the Traefik dashboard through the already present `traefik` `Service` object in the K3s setup. You need to create a different service with its own IP address to access the dashboard, as declared in the next step.
+
+5. Declare the `Service` object for the Traefik dashboard in `resources/traefik-dashboard.service.yaml`:
+
+ ~~~yaml
+ # Traefik dashboard service
+ apiVersion: v1
+ kind: Service
+
+ metadata:
+ name: traefik-dashboard
+ namespace: kube-system
+ labels:
+ app.kubernetes.io/instance: traefik-kube-system
+ app.kubernetes.io/name: traefik-dashboard
+ spec:
+ type: LoadBalancer
+ loadBalancerIP: 10.7.0.1
+ ports:
+ - name: websecure
+ port: 443
+ targetPort: websecure
+ protocol: TCP
+ selector:
+ app.kubernetes.io/instance: traefik-kube-system
+ app.kubernetes.io/name: traefik
+ ~~~
+
+ This `Service` has the following particularities:
+
+ - In its `metadata` section there are `labels`:
+
+ - `app.kubernetes.io/instance`\
+ The `traefik-kube-system` value groups this service in the same instance as the pod and service already existing in your cluster.
+
+ - `app.kubernetes.io/name`\
+ The `traefik-dashboard` string identifies this service within the Traefik instance.
+
+ - The `spec.type` makes this `Service` managed by your cluster's load balancer (MetalLB). This is required to be able to use the `spec.loadBalancerIP` property to specify the IP you want for this `Service` from those provided by MetalLB.
+
+ - The `ports` configuration exposes the `websecure` port of the Traefik pod through port `443` (HTTPS).
+
+ > [!NOTE]
+ > **Traefik's pod has four named ports opened**\
+ > To check those ports out, first discover the name of the `Running` Traefik pod:
+ >
+ > ~~~sh
+ > $ kubectl -n kube-system get pods | grep traefik
+ > traefik-c98fdf6fb-ndqbk 1/1 Running 0 25m
+ > traefik-c98fdf6fb-t8bkp 0/1 Completed 0 44h
+ > traefik-c98fdf6fb-vdwbf 0/1 Completed 0 43h
+ > ~~~
+ >
+ > You can have several pods listed for Traefik, but most of them are just references to old pods that you can remove from your cluster with the `kubectl -n kube-system delete pods` command. Copy the name for the `Running` Traefik pod, then extract the ports information from the pod's description:
+ >
+ > ~~~sh
+ > $ kubectl -n kube-system describe pod traefik-c98fdf6fb-ndqbk | grep Ports
+ > Ports: 9100/TCP (metrics), 8080/TCP (traefik), 8000/TCP (web), 8443/TCP (websecure)
+ > Host Ports: 0/TCP (metrics), 0/TCP (traefik), 0/TCP (web), 0/TCP (websecure)
+ > ~~~
+ >
+ > You could also get this same information from older `Completed` pods, but it is better to get the most up-to-date details from a currently `Running` pod.
+
+ - The `selector` links this `Service` object with the running Traefik pod that has been labeled with the same specified tags.
+
+ > [!NOTE]
+ > **See the labels applied to the running Traefik pod**\
+ > Check out the [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) applied to the running Traefik pod with `kubectl`:
+ >
+ > ~~~sh
+ > $ kubectl -n kube-system describe pods traefik-c98fdf6fb-ndqbk
+ > Name: traefik-c98fdf6fb-ndqbk
+ > Namespace: kube-system
+ > Priority: 2000000000
+ > Priority Class Name: system-cluster-critical
+ > Service Account: traefik
+ > Node: k3sagent02/172.16.2.2
+ > Start Time: Wed, 08 Oct 2025 09:00:04 +0200
+ > Labels: app.kubernetes.io/instance=traefik-kube-system
+ > app.kubernetes.io/managed-by=Helm
+ > app.kubernetes.io/name=traefik
+ > helm.sh/chart=traefik-34.2.1_up34.2.0
+ > pod-template-hash=c98fdf6fb
+ > ...
+ > ~~~
+ >
+ > In the output, look at the `Labels` section to find there the labels `app.kubernetes.io/instance` and `app.kubernetes.io/name`. Also notice that the labels applied are [equality-based ones](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#equality-based-requirement).
+
+6. Declare in `resources/traefik-dashboard.ingressroute.traefik.yaml` the `IngressRoute` resource for enabling access to the Traefik dashboard:
+
+ ~~~yaml
+ # Ingress for Traefik's dashboard
+ apiVersion: traefik.io/v1alpha1
+ kind: IngressRoute
+
+ metadata:
+ name: traefik-dashboard
+ namespace: kube-system
+ spec:
+ entryPoints:
+ - websecure
+ routes:
+ - kind: Rule
+ match: Host(`10.7.0.1`) || Host(`traefik.homelab.cloud`) || Host(`tfk.homelab.cloud`)
+ services:
+ - name: api@internal
+ kind: TraefikService
+ middlewares:
+ - name: traefik-dashboard-basicauth
+ ~~~
+
+ This is a Traefik `IngressRoute` resource defining the route and the authentication method to access your Traefik dashboard:
+
+ > [!IMPORTANT]
+ > **The `IngressRoute` is NOT a standard Kubernetes resource**\
+ > It is a customized alternative to the standard `Ingress` Kubernetes object **offered only by Traefik**. Other Ingress controllers may have their own alternatives to the standard Kubernetes Ingress object.
+
+ - In the `spec.entryPoints` there is only the `websecure` option enabled. This means that only the `443` port is enabled as entry point to this route.
+
+ - The `spec.routes.match` parameter indicates to Traefik the valid URL patterns reachable through this `IngressRoute`:
+
+ - The external IP of the `traefik-dashboard` service is added as a possible `Host` that can appear in the route. If you do not add it, you will not be able to access this route with that address.
+
+ - Two subdomains are set up as possible `Host` values. This way, you can add any number of alternative subdomains that lead to the same web resource.
+
+ > [!NOTE]
+ > **The domains or subdomains you set up as `Host` values will not work just by being put there**\
+ > You have to enable them in your network's router or gateway, in your local DNS, or associate them with their corresponding IP in the `hosts` file of any client systems on your network that need to resolve those domains or subdomains.
+
+ - Do not forget any of the backtick characters ( \` ) enclosing the strings in the `Host` directives.
+
+ - The `spec.routes.services` specifies which service receives the requests matched by this route.
+
+ > [!NOTE]
+ > **This is invoking an `api@internal` `TraefikService` resource, not the `traefik-dashboard` `Service` object!**\
+ > The `api@internal` name refers to the internal service Traefik itself provides to expose its API and dashboard, so the route has to point at it rather than at a regular Kubernetes `Service`.
+
+ - The `spec.routes.middlewares` list only invokes the basic authentication middleware declared earlier.
+
+7. In the `secrets/users` file, just paste the hashed string you got from the `htpasswd` command earlier:
+
+ ~~~sh
+ tfkuser:$2y$17$0mdP4WLdbj8BWj1lIJMDb.bXyYK75qR5AfRNzuunZuCamvAlqDlo.
+ ~~~
+
+8. Generate a `kustomization.yaml` file at the root folder of this Kustomization project:
+
+ ~~~sh
+ $ touch $HOME/k8sprjs/traefik-dashboard/kustomization.yaml
+ ~~~
+
+9. In the `kustomization.yaml` file declare your Kustomization project for enabling the Traefik dashboard:
+
+ ~~~yaml
+ # Traefik dashboard setup
+ apiVersion: kustomize.config.k8s.io/v1beta1
+ kind: Kustomization
+
+ resources:
+ - resources/traefik-dashboard-basicauth.middleware.traefik.yaml
+ - resources/traefik-dashboard.service.yaml
+ - resources/traefik-dashboard.ingressroute.traefik.yaml
+
+ secretGenerator:
+ - name: traefik-dashboard-basicauth-secret
+ namespace: kube-system
+ files:
+ - secrets/users
+ options:
+ disableNameSuffixHash: true
+ ~~~
+
+ See that there is a `secretGenerator` block in this Kustomization declaration:
+
+ - This is a Kustomize feature that generates `Secret` objects in a Kubernetes cluster from a given configuration.
+
+ - The secret is configured with a concrete `name` and `namespace`. It also has under `files` a reference to the `users` file you created previously under the `secrets` subfolder.
+
+ - The `disableNameSuffixHash` option is required to be `true`. Otherwise, Kustomize will add a hash suffix to the secret's name and your `Middleware` will not be able to find it in the cluster.
+
+ > [!NOTE]
+ > **This is an issue between Traefik and Kubernetes Kustomize**\
+ > The suffix problem happens because the `Middleware` declares the `Secret`'s name in a non-Kubernetes-standard parameter which Kustomize does not recognize. Therefore, Kustomize cannot replace the name with its hashed version in the `spec.basicAuth.secret` parameter.
+
+ To see how the `Secret` object would look, you can review it with `kubectl kustomize`:
+
+ ~~~sh
+ $ kubectl kustomize $HOME/k8sprjs/traefik-dashboard | less
+ ~~~
+
+ Look for the `Secret` object in the resulting paginated yaml; it should look like the one below:
+
+ ~~~yaml
+ apiVersion: v1
+ data:
+ users: |
+ dGZrdXNlcjokMnkkMTckMG1kUDRXTGRiajhCV2oxbElKTURiLmJYeVlLNzVxUjVBZlJOenV1blp1
+ Q2FtdkFscURsby4K
+ kind: Secret
+ metadata:
+ name: traefik-dashboard-basicauth-secret
+ namespace: kube-system
+ type: Opaque
+ ~~~
+
+ Notice the following details in this `Secret` object:
+
+ - In the `data.users` parameter there is an odd looking string. This is the content of the `secrets/users` file referenced in the `secretGenerator` block, automatically encoded by Kustomize in base64. You can check that it is the same string as in the file by decoding it with `base64 -d` as follows.
+
+ ~~~sh
+ $ echo dGZrdXNlcjokMnkkMTckMG1kUDRXTGRiajhCV2oxbElKTURiLmJYeVlLNzVxUjVBZlJOenV1blp1Q2FtdkFscURsby4K | base64 -d
+ tfkuser:$2y$17$0mdP4WLdbj8BWj1lIJMDb.bXyYK75qR5AfRNzuunZuCamvAlqDlo.
+ ~~~
+
+ See that I've entered the base64 string on one line, while it appears split across two lines in the `Secret` object.
+
+ - The `metadata.name` and `metadata.namespace` are exactly as specified in the `kustomization.yaml` file.
+
+ - The `Opaque` type means that the secret holds arbitrary user-defined data. Keep in mind that the values under `data` are always stored base64-encoded, regardless of the secret's type.
+
+10. At last, apply this Kustomization project:
+
+ ~~~sh
+ $ kubectl apply -k $HOME/k8sprjs/traefik-dashboard
+ ~~~
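+
+Once the project is applied, you can verify that the new resources exist in the cluster. The commands below are only a sketch (they assume the `traefik.io` CRD group used in the manifests above):
+
+~~~sh
+$ kubectl -n kube-system get ingressroutes.traefik.io,middlewares.traefik.io
+$ kubectl -n kube-system get service traefik-dashboard
+$ kubectl -n kube-system get secret traefik-dashboard-basicauth-secret
+~~~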
+
+## Getting into the Traefik dashboard
+
+Let's suppose you do not have the subdomains you've defined as `Host` enabled in your network. You will have to access the dashboard using the `traefik-dashboard` service's external IP. In this case, that external IP happens to be `10.7.0.1`, so browse to `https://10.7.0.1/`.
+
+1. The first thing you'll probably see is a warning from your browser telling you that the connection is not secure because the certificate isn't trusted. If you check the certificate's information, you will see that it is one self-generated by Traefik itself ("verified" by `CN=TRAEFIK DEFAULT CERT`).
+
+2. Right after accepting "the risk" in the security warning, a generic login window will pop up in your browser:
+
+ 
+
+3. Type your user and password, press on `Sign in` and you will be redirected to the Traefik dashboard's main page available under the `/dashboard/#/` path:
+
+ 
+
+Finally, when you have the subdomain or subdomains for your Traefik dashboard's external IP set up in your network, or in the `hosts` file of your client systems, try accessing the Traefik dashboard using them.
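+
+For example, on a Linux client you could map those subdomains to the dashboard service's external IP in its `/etc/hosts` file. This is only a sketch, using this guide's IP and example subdomains:
+
+~~~sh
+$ echo '10.7.0.1  traefik.homelab.cloud tfk.homelab.cloud' | sudo tee -a /etc/hosts
+~~~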
+
+## What to do if Traefik's dashboard has bad performance
+
+If your Traefik dashboard loads extremely slowly, or just returns a blank page, it could be that you set the `-C` value in the `htpasswd` command too high. A high BCrypt cost makes every password verification computationally expensive, which degrades the Traefik dashboard's performance and puts a heavy load on the node where the traefik pod is running. So, if this is happening to you, try the following.
+
+1. Generate a new user string with `htpasswd` as you saw previously, but with a lower `-C` value than the one you used in the first place. Then replace the string you already have in the `secrets/users` file with the new one.
+
+2. Delete and then reapply the Kustomize project again.
+
+ ~~~sh
+ $ kubectl delete -k $HOME/k8sprjs/traefik-dashboard
+ $ kubectl apply -k $HOME/k8sprjs/traefik-dashboard
+ ~~~
+
+ The `delete` command is there to make sure that the generated `Secret` and the other resources are recreated with the change applied.
+
+3. Try to access your Traefik dashboard and see how it runs now.
+
+## Traefik dashboard's Kustomize project attached to this guide
+
+Find the Kustomize project for this Traefik dashboard deployment in the following attached folder:
+
+- [`k8sprjs/traefik-dashboard`](k8sprjs/traefik-dashboard/)
+
+## Relevant system paths
+
+### Folders in `kubectl` client system
+
+- `$HOME/k8sprjs/traefik-dashboard`
+- `$HOME/k8sprjs/traefik-dashboard/resources`
+- `$HOME/k8sprjs/traefik-dashboard/secrets`
+
+### Files in `kubectl` client system
+
+- `$HOME/k8sprjs/traefik-dashboard/kustomization.yaml`
+- `$HOME/k8sprjs/traefik-dashboard/resources/traefik-dashboard.ingressroute.traefik.yaml`
+- `$HOME/k8sprjs/traefik-dashboard/resources/traefik-dashboard.service.yaml`
+- `$HOME/k8sprjs/traefik-dashboard/resources/traefik-dashboard-basicauth.middleware.traefik.yaml`
+- `$HOME/k8sprjs/traefik-dashboard/secrets/users`
+
+## References
+
+### [Traefik documentation](https://doc.traefik.io/)
+
+- [Reference. Install Configuration. API & Dashboard](https://doc.traefik.io/traefik/reference/install-configuration/api-dashboard/)
+- [Reference. Routing Configuration. Kubernetes. Ingress](https://doc.traefik.io/traefik/reference/routing-configuration/kubernetes/ingress/)
+- [Reference. Routing Configuration. Kubernetes. Kubernetes CRD. HTTP. IngressRoute](https://doc.traefik.io/traefik/reference/routing-configuration/kubernetes/crd/http/ingressroute/)
+- [Reference. Routing Configuration. Kubernetes. Kubernetes CRD. HTTP. Middleware](https://doc.traefik.io/traefik/reference/routing-configuration/kubernetes/crd/http/middleware/)
+
+### Traefik in K3s
+
+- [K3s docs. Networking](https://docs.k3s.io/networking)
+ - [Traefik Ingress Controller](https://docs.k3s.io/networking/networking-services?_highlight=traefik#traefik-ingress-controller)
+
+- [K3s Rocks. Traefik dashboard](https://k3s.rocks/traefik-dashboard/)
+- [Exposing services in a K3S/K3D cluster with Traefik](https://www.ivankrizsan.se/2024/07/12/exposing-services-in-a-k3s-k3d-cluster-with-traefik/)
+
+### [Kubernetes Documentation](https://kubernetes.io/docs/home/)
+
+- [Concepts. Overview. Objects In Kubernetes](https://kubernetes.io/docs/concepts/overview/working-with-objects/)
+ - [Labels and Selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors)
+ - [Equality-based requirement](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#equality-based-requirement)
+
+- [Concepts. Services, Load Balancing, and Networking](https://kubernetes.io/docs/concepts/services-networking/)
+ - [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/)
+
+### Traefik IngressRoute Vs Ingress
+
+- [What is the difference between a Kubernetes Ingress and a IngressRoute?](https://stackoverflow.com/questions/60177488/what-is-the-difference-between-a-kubernetes-ingress-and-a-ingressroute)
+- [Steps to expose services using Kubernetes Ingress](https://www.golinuxcloud.com/steps-to-expose-services-using-kubernetes-ingress/)
+- [Directing Kubernetes traffic with Traefik](https://opensource.com/article/20/3/kubernetes-traefik)
+- [Ingress with Traefik on K3s](https://itnext.io/ingress-with-treafik-on-k3s-53db6e751ed3)
+- [K3s issue. Documentation on ingress](https://github.com/k3s-io/k3s/issues/436)
+
+### [Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/)
+
+- [configMapGenerator](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/configmapgenerator/)
+- [secretGenerator](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/secretgenerator/)
+- [generatorOptions](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/generatoroptions/)
+
+## Navigation
+
+[<< Previous (**G029. K3s cluster setup 12**)](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G031. K3s cluster setup 14**) >>](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md)
diff --git a/G031 - K3s cluster setup 14 ~ Deploying the Headlamp dashboard.md b/G031 - K3s cluster setup 14 ~ Deploying the Headlamp dashboard.md
new file mode 100644
index 0000000..32418b4
--- /dev/null
+++ b/G031 - K3s cluster setup 14 ~ Deploying the Headlamp dashboard.md
@@ -0,0 +1,351 @@
+# G031 - K3s cluster setup 14 ~ Deploying the Headlamp dashboard
+
+- [Headlamp is an alternative to the Kubernetes Dashboard](#headlamp-is-an-alternative-to-the-kubernetes-dashboard)
+- [Deploying Headlamp](#deploying-headlamp)
+ - [Getting the administrator user's service account token](#getting-the-administrator-users-service-account-token)
+- [Testing Headlamp](#testing-headlamp)
+- [Headlamp's Kustomize project attached to this guide](#headlamps-kustomize-project-attached-to-this-guide)
+- [Relevant system paths](#relevant-system-paths)
+ - [Folders in `kubectl` client system](#folders-in-kubectl-client-system)
+ - [Files in `kubectl` client system](#files-in-kubectl-client-system)
+- [References](#references)
+ - [Headlamp](#headlamp)
+ - [Kubernetes Documentation](#kubernetes-documentation)
+ - [Traefik Reference](#traefik-reference)
+ - [Cert-manager](#cert-manager)
+- [Navigation](#navigation)
+
+## Headlamp is an alternative to the Kubernetes Dashboard
+
+To monitor what's going on in your K3s cluster in a more visual manner, [Kubernetes offers its own native web-based dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/). The problem is that, at the time of writing this, [the Kubernetes Dashboard can only be deployed with Helm charts](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#deploying-the-dashboard-ui). Since this guide sticks to the Kustomize way of deploying apps in the K3s cluster, I picked [Headlamp](https://headlamp.dev/) as an alternative dashboard since it can be deployed with `kubectl`.
+
+> [!WARNING]
+> **Ensure having the metrics-server service running in your cluster first!**\
+> To be able to show stats from your K3s cluster, Headlamp (like the Kubernetes Dashboard) requires having [the **metrics-server** service already running in your cluster](G028%20-%20K3s%20cluster%20setup%2011%20~%20Deploying%20the%20metrics-server%20service.md).
+
+## Deploying Headlamp
+
+For deploying [Headlamp v0.36.0](https://github.com/kubernetes-sigs/headlamp/releases/tag/v0.36.0) (the _latest_ version at the time of writing this) in your homelab cluster, you need:
+
+- A user with the cluster administrator role.
+
+- A patch to set the Headlamp service with a static IP picked from the IP range provided by MetalLB.
+
+- A Traefik IngressRoute resource that will handle the ingress to Headlamp through HTTPS.
+
+All the components will be part of the same Kustomize project for deploying Headlamp:
+
+1. In your `kubectl` client system, create the folder structure for the Headlamp Kustomize project:
+
+ ~~~sh
+ $ mkdir -p $HOME/k8sprjs/headlamp/{patches,resources}
+ ~~~
+
+2. Create the necessary files under the `patches` and `resources` folders:
+
+ ~~~sh
+ $ touch $HOME/k8sprjs/headlamp/patches/headlamp.service.patch.yaml $HOME/k8sprjs/headlamp/resources/{headlamp-admin.serviceaccount.yaml,cluster-admin-users.clusterrolebinding.yaml,headlamp.homelab.cloud-tls.certificate.cert-manager.yaml,headlamp.ingressroute.traefik.yaml}
+ ~~~
+
+3. Check out with `kubectl` which external IPs your cluster's services are using at this point:
+
+ ~~~sh
+ $ kubectl get services -A
+ NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ cert-manager cert-manager ClusterIP 10.43.153.243 9402/TCP 3d23h
+ cert-manager cert-manager-cainjector ClusterIP 10.43.131.203 9402/TCP 3d23h
+ cert-manager cert-manager-webhook ClusterIP 10.43.118.87 443/TCP,9402/TCP 3d23h
+ default kubernetes ClusterIP 10.43.0.1 443/TCP 33d
+ kube-system kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP,9153/TCP 33d
+ kube-system metrics-server ClusterIP 10.43.50.63 443/TCP 20d
+ kube-system traefik LoadBalancer 10.43.174.63 10.7.0.0 80:30512/TCP,443:32647/TCP 33d
+ kube-system traefik-dashboard LoadBalancer 10.43.216.2 10.7.0.1 443:31622/TCP 5d22h
+ metallb-system metallb-webhook-service ClusterIP 10.43.126.18 443/TCP 30d
+ ~~~
+
+ After enabling the Traefik dashboard in the [previous chapter **G030**](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md), you have two Traefik-related services with external IPs assigned. Pick the next IP available in MetalLB's `default-pool` IP range for Headlamp, which for this guide's setup is `10.7.0.2`.
+
+4. Create a patch to specify the static IP for the Headlamp service in the `patches/headlamp.service.patch.yaml` file:
+
+ ~~~yaml
+ # Headlamp service patch
+ kind: Service
+ apiVersion: v1
+
+ metadata:
+ name: headlamp
+ namespace: kube-system
+
+ spec:
+ type: LoadBalancer
+ loadBalancerIP: 10.7.0.2
+ ~~~
+
+ This patch ensures that the Headlamp service uses the static IP `10.7.0.2` provided by the MetalLB load balancer.
+
+5. In `resources/headlamp-admin.serviceaccount.yaml`, declare the `headlamp-admin` service account:
+
+ ~~~yaml
+ # Headlamp administrator user
+ apiVersion: v1
+ kind: ServiceAccount
+
+ metadata:
+ name: headlamp-admin
+ namespace: kube-system
+ ~~~
+
+ This service account will be your administrator user for Headlamp:
+
+ - The name `headlamp-admin` is the one expected in the official Headlamp installation. Its deployment YAML already points to a secret token for a service account called `headlamp-admin`.
+
+ - The `kube-system` namespace is where the Headlamp service is going to be deployed. The service account must be in the same namespace as Headlamp to authorize this service to access the cluster information it needs to work.
+
+ - This declaration only creates the `headlamp-admin` service account without any special privileges in the cluster. The account needs to be bound to a cluster role to be authorized to access your K3s cluster information, something you will declare in the next step.
+
+ > [!IMPORTANT]
+ > **Service accounts are not meant for regular users**\
+ > For Kubernetes, [user accounts are for humans and service accounts are for application processes](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#user-accounts-versus-service-accounts). Still, [Headlamp's official installation documentation explicitly **recommends** (at the time of writing this) using a service account](https://headlamp.dev/docs/latest/installation/).
+
+6. In `resources/cluster-admin-users.clusterrolebinding.yaml`, bind the `headlamp-admin` service account to the `cluster-admin` cluster role:
+
+ ~~~yaml
+ # Administrator cluster role bindings
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+
+ metadata:
+ name: cluster-admin-users
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+ subjects:
+ - kind: ServiceAccount
+ name: headlamp-admin
+ namespace: kube-system
+ ~~~
+
+ With this declaration, you make the `headlamp-admin` service account of the `kube-system` namespace an administrator of your whole cluster:
+
+ - The `roleRef` attribute specifies that the cluster role to be bound is `cluster-admin`.
+
+ - In `subjects` you list all the users you want bound to the role indicated in `roleRef`. In this case, there is only the `headlamp-admin` service account.
+
+7. Declare a self-signed "leaf" certificate in `resources/headlamp.homelab.cloud-tls.certificate.cert-manager.yaml`:
+
+ ~~~yaml
+ # Certificate for Headlamp
+ apiVersion: cert-manager.io/v1
+ kind: Certificate
+
+ metadata:
+ name: headlamp.homelab.cloud-tls
+ namespace: kube-system
+ spec:
+ isCA: false
+ secretName: headlamp.homelab.cloud-tls
+ duration: 2190h # 3 months
+ renewBefore: 168h # Certificates must be renewed some time before they expire (7 days)
+ dnsNames:
+ - headlamp.homelab.cloud
+ - hdl.homelab.cloud
+ ipAddresses:
+ - 10.7.0.2
+ privateKey:
+ algorithm: Ed25519
+ encoding: PKCS8
+ rotationPolicy: Always
+ issuerRef:
+ name: homelab.cloud-intm-ca01-issuer
+ kind: ClusterIssuer
+ group: cert-manager.io
+ ~~~
+
+ This certificate is similar to the ones created for the [self-signed CA issuers already explained in the chapter **G029**](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20self-signed%20CA.md), except for:
+
+ - The namespace is `kube-system`, meaning that this certificate and its associated secret will be created in the same namespace where the Headlamp service will be deployed and run. This is necessary to ensure that the secret is accessible by the ingress route resource you will create in the next step.
+
+ - The `spec.isCA` parameter set to `false` makes this certificate a "leaf" one that cannot be used to issue other certificates.
+
+ - There is no `spec.commonName` because the official cert-manager documentation recommends setting this attribute only in CA certificates, and leaving it unset in "leaf" certificates.
+
+ - Its `duration` and `renewBefore` values are half of the ones set to the certificate of the intermediate CA issuing this Headlamp certificate.
+
+ - The list in `spec.dnsNames` specifies which domains this certificate corresponds to.
+
+ - The list in `spec.ipAddresses` indicates the IP addresses this certificate corresponds to.
+
+ - The `spec.issuerRef` invokes the intermediate CA already available in your cluster as issuer of this Headlamp certificate.
+
+8. In `resources/headlamp.ingressroute.traefik.yaml`, specify the Traefik IngressRoute resource that routes external requests to Headlamp:
+
+ ~~~yaml
+ # Traefik IngressRoute for Headlamp
+ apiVersion: traefik.io/v1alpha1
+ kind: IngressRoute
+
+ metadata:
+ name: headlamp
+ namespace: kube-system
+ spec:
+ entryPoints:
+ - web
+ routes:
+ - match: Host(`10.7.0.2`) || Host(`headlamp.homelab.cloud`) || Host(`hdl.homelab.cloud`)
+ kind: Rule
+ services:
+ - name: headlamp
+ kind: Service
+ port: 80
+ tls:
+ secretName: headlamp.homelab.cloud-tls
+ ~~~
+
+ Details to highlight in this Traefik IngressRoute declaration are:
+
+ - The `spec.entryPoints` is set to `web` only, which corresponds to HTTP.
+
+ - The `match` rule under `spec.routes` tells Traefik which host names, including the service's static IP, can be used to reach Headlamp.
+
+ > [!NOTE]
+ > **The domain names must be enabled in your local network**\
+ > Your equivalents of the host names seen in the YAML above will not be reachable unless you enable them in a DNS service (your local network's router could provide one), or add them to the `hosts` file of the client systems you want to connect from.
+
+ - The `spec.routes.services` only has an entry for the Headlamp service, linking it to this IngressRoute.
+
+ - The `spec.tls.secretName` points to the secret associated with the Headlamp certificate declared in the previous step. This enables TLS termination at the ingress level, while the communication between Traefik and Headlamp still happens over plain HTTP.
+
+9. Create the `kustomization.yaml` file for the Kustomize project:
+
+ ~~~sh
+ $ touch $HOME/k8sprjs/headlamp/kustomization.yaml
+ ~~~
+
+10. Put in the `kustomization.yaml` file the following lines:
+
+ ~~~yaml
+ # Headlamp setup
+ apiVersion: kustomize.config.k8s.io/v1beta1
+ kind: Kustomization
+
+ resources:
+ - resources/headlamp-admin.serviceaccount.yaml
+ - resources/cluster-admin-users.clusterrolebinding.yaml
+ - resources/headlamp.homelab.cloud-tls.certificate.cert-manager.yaml
+ - resources/headlamp.ingressroute.traefik.yaml
+ - https://raw.githubusercontent.com/kubernetes-sigs/headlamp/main/kubernetes-headlamp.yaml
+
+ patches:
+ - path: patches/headlamp.service.patch.yaml
+ ~~~
+
+11. Apply the Kustomize project to your cluster:
+
+ ~~~sh
+ $ kubectl apply -k $HOME/k8sprjs/headlamp
+ ~~~
+
+12. Give Headlamp about a minute to boot up, then verify that its corresponding pod and service are running in the `kube-system` namespace:
+
+ ~~~sh
+ $ kubectl get pods,svc -n kube-system | grep headlamp
+ pod/headlamp-747b5f4d5-g92rf 1/1 Running 0 107s
+ service/headlamp LoadBalancer 10.43.119.9 10.7.0.2 80:31146/TCP 108s
+ ~~~
+
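+Optionally, you can also confirm that the certificate and the IngressRoute declared in this Kustomize project now exist in the `kube-system` namespace. A quick sketch with `kubectl`, using the resource names declared earlier in this chapter:
+
+~~~sh
+$ kubectl -n kube-system get certificate headlamp.homelab.cloud-tls
+$ kubectl -n kube-system get ingressroute headlamp
+~~~
+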
+### Getting the administrator user's service account token
+
+To log in to Headlamp with the `headlamp-admin` user you created in its deployment, you need to authenticate with a service account token associated with that account. This is a secret token you can create with `kubectl`:
+
+~~~sh
+$ kubectl -n kube-system create token headlamp-admin --duration=8760h
+eyJhbGciOiJSUzI1NiIsImtpZCI6ImtxNzh0bmk3cDAzVU4zXzFnMVgwZXVSR3c0U1FnNVZ3OUtSdDBSTkw2WmsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXFiMnQ1Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI4MjU4Mjc4ZC02YjBmLTQwZDItOTI1Yy1kMzEwMmY3MTkxYzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.PG-4qfeT3C6vFfwhdGDoXVmjDEU7TJDTftcmIa2kQO0HtWM8ZN45wDGk4ZSWUR5mO5HlXpYORiGkKHq6GNPFRr_qCo4tKIONyZbgXtV98P6OpOIrfDTJCwxjFf0aqOmEs1N3BqViFs3MgBRCLElx98rD6AXehdxPADXlAksnaypKKx6q1WFgNmOTHfC9WrpQzX-qoo8CbRRCuSyTagm3qkpa5hV5RjyKjE7IaOqQGwFOSbTqMy6eghTYSufC-uUxcOWw3OPVa9QzINOn9_tioxj7tH7rpw_eOHzUW_-Cr_HE89DygnuZAqQEsWxBLfYcrBKtnMhxn49E22SyCaJldA
+~~~
+
+Be aware of the following:
+
+- This service account token is associated with the `headlamp-admin` service account, which exists in the `kube-system` namespace.
+
+- **The command outputs your `headlamp-admin`'s secret token string directly in your shell**. Remember to copy and save it somewhere safe such as a password manager.
+
+- The `--duration=8760h` makes this token last for 365 days, although you may prefer it to expire sooner for security reasons.
+  [By default, a service account token expires after one hour, but it can also expire when the associated pod is deleted](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#bound-service-account-token-volume). By setting a specific duration, the token will last for the time period set with `--duration`, regardless of what happens to the pod.
+
+ > [!IMPORTANT]
+ > **Use the `kubectl create token` command again for refreshing the service account token**\
+ > Whenever your current secret token for your `headlamp-admin` service account becomes invalid, generate a new service account token with the same `kubectl` command.
+
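+Besides, if you want to double-check that the `headlamp-admin` service account really received the `cluster-admin` role before using its token, you can ask your cluster through impersonation. A quick sketch, assuming you run it with the same administrator `kubectl` credentials you have been using so far; the expected answer is `yes`:
+
+~~~sh
+$ kubectl auth can-i '*' '*' --as=system:serviceaccount:kube-system:headlamp-admin
+yes
+~~~
+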
+## Testing Headlamp
+
+Now that you have Headlamp deployed, you can test it:
+
+1. Open a browser in your client system and go to `http://headlamp.homelab.cloud/` (or whatever URL or static IP you may have configured). The browser will warn you about the connection being insecure. Accept the warning and you'll reach Headlamp's authentication form:
+
+ 
+
+ This form requests an authentication token for signing in to Headlamp. Use the `headlamp-admin` service account's token you generated previously.
+
+2. After authenticating, you get directly into Headlamp's `Clusters` page:
+
+ 
+
+ This is the main page of Headlamp. It provides you with a summarized view of your cluster's status, including statistics about resource usage and a listing of the events that have happened in your cluster. If you happen to have warning events, this page will automatically enable the "Only warnings" mode to show you just those events.
+
+ Try out the other views Headlamp offers to get familiar with this tool. In particular, you may like the `Map` view:
+
+ 
+
+ This view provides a zoomable map that can show your cluster components grouped under three different criteria: by namespace, by instance, or by node. This feature will help you locate any component deployed in your K3s cluster more easily.
+
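+On the other hand, if your browser cannot resolve host names like `headlamp.homelab.cloud`, remember the note from the deployment procedure: those names must be known to your client systems. A quick workaround for a single Linux client, sketched here with the IP and domains used in this chapter, is appending them to the client's `hosts` file:
+
+~~~sh
+$ echo '10.7.0.2  headlamp.homelab.cloud hdl.homelab.cloud' | sudo tee -a /etc/hosts
+~~~
+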
+## Headlamp's Kustomize project attached to this guide
+
+You can find the Kustomize project for this Headlamp deployment in this attached folder:
+
+- [`k8sprjs/headlamp`](k8sprjs/headlamp/)
+
+## Relevant system paths
+
+### Folders in `kubectl` client system
+
+- `$HOME/k8sprjs/headlamp`
+- `$HOME/k8sprjs/headlamp/patches`
+- `$HOME/k8sprjs/headlamp/resources`
+
+### Files in `kubectl` client system
+
+- `$HOME/k8sprjs/headlamp/kustomization.yaml`
+- `$HOME/k8sprjs/headlamp/patches/headlamp.service.patch.yaml`
+- `$HOME/k8sprjs/headlamp/resources/cluster-admin-users.clusterrolebinding.yaml`
+- `$HOME/k8sprjs/headlamp/resources/headlamp-admin.serviceaccount.yaml`
+- `$HOME/k8sprjs/headlamp/resources/headlamp.homelab.cloud-tls.certificate.cert-manager.yaml`
+- `$HOME/k8sprjs/headlamp/resources/headlamp.ingressroute.traefik.yaml`
+
+## References
+
+### [Headlamp](https://headlamp.dev/)
+
+- [Installation](https://headlamp.dev/docs/latest/installation/)
+
+### [Kubernetes Documentation](https://kubernetes.io/docs/)
+
+- [Reference. API Access Control. Managing Service Accounts](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/)
+ - [User accounts versus service accounts](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#user-accounts-versus-service-accounts)
+ - [Bound service account token volume mechanism](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#bound-service-account-token-volume)
+
+- [Reference. Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/)
+ - [kubectl reference. kubectl create. kubectl create token](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_create/kubectl_create_token/)
+
+### [Traefik Reference](https://doc.traefik.io/traefik/reference/)
+
+- [Routing Configuration](https://doc.traefik.io/traefik/reference/routing-configuration/)
+ - [Kubernetes. Kubernetes CRD. HTTP. IngressRoute](https://doc.traefik.io/traefik/reference/routing-configuration/kubernetes/crd/http/ingressroute/)
+
+### [Cert-manager](https://cert-manager.io/docs/)
+
+- [Requesting Certificates. Certificate resource](https://cert-manager.io/docs/usage/certificate/)
+- [Reference. API Reference. cert-manager.io/v1](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1)
+ - [CertificateSpec](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec)
+
+## Navigation
+
+[<< Previous (**G030. K3s cluster setup 13**)](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G032. Deploying services 01**) >>](G032%20-%20Deploying%20services%2001%20~%20Considerations.md)
diff --git a/G031 - K3s cluster setup 14 ~ Enabling the Traefik dashboard.md b/G031 - K3s cluster setup 14 ~ Enabling the Traefik dashboard.md
deleted file mode 100644
index d7cf172..0000000
--- a/G031 - K3s cluster setup 14 ~ Enabling the Traefik dashboard.md
+++ /dev/null
@@ -1,313 +0,0 @@
-# G031 - K3s cluster setup 14 ~ Enabling the Traefik dashboard
-
-Traefik is the ingress controller you already have running in your K3s cluster. In other words, you can give access to services running in your cluster through Traefik ingresses, instead of just assigning them external IPs directly (with MetalLB load balancer for instance). Traefik in K3s comes with its embedded web dashboard enabled by default, but you have to configure an `IngressRoute` to access it.
-
-## Creating an `IngressRoute` for Traefik dashboard
-
-Creating the `IngressRoute` implies three procedures: creation of a user to secure the route, declaring the `IngressRoute`, and enabling the Traefik service's IP on the Proxmox VE firewall.
-
-### _Creating a user for the Traefik dashboard_
-
-To secure your access to the Traefik dashboard, you'll need to define at least one user with a password. Traefik demands passwords hashed using MD5, SHA1, or BCrypt, and recommends using the `htpasswd` command to generate them.
-
-1. Start by installing, in your `kubectl` client system, the package providing the `htpasswd` command. The package is `apache2-utils` and, on a Debian based system, you can install it with `apt`.
-
- ~~~bash
- $ sudo apt install -y apache2-utils
- ~~~
-
-2. Next, use `htpasswd` to generate a user called, for instance, `tfkuser` with the password hashed with the BCrypt encryption.
-
- ~~~bash
- $ htpasswd -nb -B -C 9 tfkuser Pu7Y0urV3ryS3cr3tP4ssw0rdH3r3
- tfkuser:$2y$17$0mdP4WLdbj8BWj1lIJMDb.bXyYK75qR5AfRNzuunZuCamvAlqDlo.
- ~~~
-
- > **BEWARE!**
- > Be careful with the value you set to the `-C` option. This option indicates the computing time used by the BCrypt algorithm for hashing and, if you set it too high, the Traefik dashboard could end not loading at all. The value you can type here must be between 4 and 17, and the default is 5.
-
-Keep the `htpasswd`'s output at hand, you'll use that string in the next procedure.
-
-### _Enabling the `IngressRoute`_
-
-You need to setup a Kustomize project to deploy all the resources necessary for an `IngressRoute` but, before that, you also have to recover some information from your K3s cluster.
-
-1. Recover the name of the secret resource related to the self-signed certificate you created back in the [**G029** guide](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#setting-up-a-wildcard-certificate-for-a-domain). Remember that that certificate's secret was mirrored automatically, by the Reflector addon, into the `kube-system` namespace. Assuming the certificate secret's name started with the string "`wildcard`", you could narrow the search with `grep`.
-
- ~~~bash
- $ kubectl -n kube-system get secrets | grep wildcard
- wildcard.deimos.cloud-tls kubernetes.io/tls 3 22h
- ~~~
-
- In this case, there's only one secret with such "`wildcard`" string in its name.
-
-2. You'll also need to get the external IP assigned by MetalLB to the Traefik service.
-
- ~~~bash
- $ kubectl -n kube-system get svc traefik
- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- traefik LoadBalancer 10.43.187.212 192.168.1.41 80:30963/TCP,443:32446/TCP 3d3h
- ~~~
-
-With those two pieces of data recovered, now you can start working on the Traefik Dashboard's `IngressRoute` Kustomize project.
-
-1. Create the Kustomize project's folder tree as follows.
-
- ~~~bash
- $ mkdir -p $HOME/k8sprjs/traefik_dashboard-ingressroute/{resources,secrets}
- ~~~
-
- As you above, there's a subfolder for `resources` and now there's also another for `secrets`.
-
-2. Create the following files within the `resources` folder.
-
- ~~~bash
- $ touch $HOME/k8sprjs/traefik_dashboard-ingressroute/resources/{traefik-auth.middleware.basicauth.yaml,traefik-dashboard.ingressroute.yaml}
- ~~~
-
-3. Copy in `traefik-dashboard.ingressroute.yaml` the following lines.
-
- ~~~yaml
- apiVersion: traefik.containo.us/v1alpha1
- kind: IngressRoute
-
- metadata:
- name: traefik-dashboard
- namespace: kube-system
- spec:
- entryPoints:
- - websecure
- tls:
- secretName: wildcard.deimos.cloud-tls
- routes:
- - match: (Host(`192.168.1.41`) || Host(`traefik.deimos.cloud`) || Host(`tfk.deimos.cloud`)) && (PathPrefix(`/dashboard`) || PathPrefix(`/api`))
- kind: Rule
- services:
- - name: api@internal
- kind: TraefikService
- middlewares:
- - name: traefik-auth
- ~~~
-
- This is a Traefik `IngressRoute`, defining the route and the authentication method to access your Traefik Dashboard.
-
- > **BEWARE!**
- > An `IngressRoute` object is **not** a standard Kubernetes resource, it's a customized alternative to the standard `Ingress` Kubernetes object **used only by Traefik**. Other Ingress controllers may have their own alternatives to the standard Ingress object.
-
- - In the `spec.entryPoints` there's only the `websecure` option enabled. This means that only the _https_ (`443` port) is enabled as entry point to this route.
-
- - Your certificate secret's name goes in the `spec.tls.secretname` parameter.
- > **BEWARE!**
- > When applying a certificate in a Traefik `IngressRoute`, this route can have enabled **only** the `websecure` entry point. If you also need an alternative standard _http_ entry point to the same route, you'll need to create a different `IngressRoute` resource with the `web` entry point option set and no `tls` section, while keeping the rest the same as in the `websecure` version of the route.
-
- - The `spec.routes.match` parameter indicates to Traefik the valid URL pattern to reach this ingress route.
- - The pattern is `Host/PathPrefix`, where `Host` and `PathPrefix` are placeholders for their possible values. Examples of valid paths admitted by the configuration above are:
- - `https://192.168.1.41/dashboard/`
- - `https://tfk.deimos.cloud/dashboard/`
- - `https://traefik.deimos.cloud/api/`
- - See how I added the external IP of the Traefik service as a possible `Host` that can appear in the route. If you don't add it, you won't be able to access this route with that IP.
- - Also see how I've set up two subdomains as possible `Host` values. This way, you can put any number of alternative subdomains that can lead to the same web resource.
- > **BEWARE!**
- > Remember that the domains or subdomains you set up as `Host` values won't work on your network just by being put here. You'll have to enable them in your network's router or gateway, local DNS or associate them with their corresponding IP in the `hosts` file of any client systems connected to your network that require to know the correct IP for those domains or subdomains.
- - Be very careful of not forgetting any of those backticks characters ( \` ) enclosing the strings in the `Host` and `PathPrefix` directives.
-
-4. Add to `traefik-auth.middleware.basicauth.yaml` the yaml next.
-
- ~~~yaml
- apiVersion: traefik.containo.us/v1alpha1
- kind: Middleware
-
- metadata:
- name: traefik-auth
- namespace: kube-system
- spec:
- basicAuth:
- secret: traefik-auth-secret
- ~~~
-
- A `Middleware` is another custom Traefik resource, used in this case for configuring a basic authentication method (user and password login system). In the the `spec.basicAuth.secret` parameter this middleware invokes a `Secret` resource which you'll define later in this procedure.
-
-5. Create a new `users` file under the `secrets` subfolder.
-
- ~~~bash
- $ touch $HOME/k8sprjs/traefik_dashboard-ingressroute/secrets/users
- ~~~
-
-6. In the new `users` file, paste the string you got from the `htpasswd` command earlier.
-
- ~~~bash
- tfkuser:$2y$17$0mdP4WLdbj8BWj1lIJMDb.bXyYK75qR5AfRNzuunZuCamvAlqDlo.
- ~~~
-
-7. Generate a `kustomization.yaml` file at the root folder of this Kustomization project.
-
- ~~~bash
- $ touch $HOME/k8sprjs/traefik_dashboard-ingressroute/kustomization.yaml
- ~~~
-
-8. In the `kustomization.yaml` file put the following yaml content.
-
- ~~~yaml
- # Traefik Dashboard IngressRoute setup
- apiVersion: kustomize.config.k8s.io/v1beta1
- kind: Kustomization
-
- resources:
- - resources/traefik-auth.middleware.basicauth.yaml
- - resources/traefik-dashboard.ingressroute.yaml
-
- secretGenerator:
- - name: traefik-auth-secret
- namespace: kube-system
- files:
- - secrets/users
- options:
- disableNameSuffixHash: true
- ~~~
-
- See that there's a `secretGenerator` block in the yaml above.
-
- - This is a Kustomize feature that generates `Secret` objects in a Kubernetes cluster from a given configuration.
-
- - See how the secret is configured with a concrete `name` and `namespace`, and that has a reference to the `users` file you created previously in the `secrets` subfolder.
-
- - The `disableNameSuffixHash` option is required to be `true`, otherwise Kustomize will add a hash suffix to the secret's name and your `Middleware` won't be able to find it in the cluster.
- - This is because the `Middleware` declares the `Secret`'s name in a non-Kubernetes-standard parameter which Kustomize doesn't recognize. Thus, Kustomize can't replace the name with its hashed version in the `spec.basicAuth.secret` parameter.
- - This is a situation equivalent to what happened in the deployment of MetalLB, back in the [**G027** guide](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#setting-up-the-configuration-files), where there was a `configMapGenerator` in which this option has also to be enabled to avoid a similar issue with a `ConfigMap` resource.
- > **BEWARE!**
- > Since the `secretGenerator` and `configMapGenerator` feature are Kustomize generators, both have [exactly the same generator options available](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/generatoroptions/), such as this `disableNameSuffixHash` feature.
-
- To see how the secret object would look in this case, just check it out with `kubectl kustomize`.
-
- ~~~bash
- $ kubectl kustomize $HOME/k8sprjs/traefik_dashboard-ingressroute | less
- ~~~
-
- Look for the `Secret` object in the resulting paginated yaml, it should look like below.
-
- ~~~yaml
- apiVersion: v1
- data:
- users: |
- dGZrdXNlcjokMnkkMTckMG1kUDRXTGRiajhCV2oxbElKTURiLmJYeVlLNzVxUjVBZlJOenV1blp1
- Q2FtdkFscURsby4K
- kind: Secret
- metadata:
- name: traefik-auth-secret
- namespace: kube-system
- type: Opaque
- ~~~
-
- Notice the following in this `Secret`.
-
- - In the `data.users` section there's a odd looking string. This is the content of the `secrets/user` file referenced in the `secretGenerator` block, automatically encoded by Kustomize in base64. You can check that its the same string on the file by decoding it with `base64 -d` as follows.
-
- ~~~bash
- $ echo dGZrdXNlcjokMnkkMTckMG1kUDRXTGRiajhCV2oxbElKTURiLmJYeVlLNzVxUjVBZlJOenV1blp1Q2FtdkFscURsby4K | base64 -d
- tfkuser:$2y$17$0mdP4WLdbj8BWj1lIJMDb.bXyYK75qR5AfRNzuunZuCamvAlqDlo.
- ~~~
-
- Notice that I've put the base64 string in one line, while it's splitted in two in the `Secret` object.
-
- - The `metadata.name` and `metadata.namespace` are exactly as specified in the `kustomization.yaml` file.
-
- - The `type` `Opaque` means that the content under `data` is base64-encoded.
-
-9. At last, apply this Kustomization project.
-
- ~~~bash
- $ kubectl apply -k $HOME/k8sprjs/traefik_dashboard-ingressroute
- ~~~
-
-## Getting into the dashboard
-
-Let's suppose that you don't have the subdomains you've defined as `Host` enabled in your network. This means that you'll have to access the dashboard using traefik service's external IP. In this case, the external IP happens to be `192.168.1.41` so you should open a browser and go to `https://192.168.1.41/dashboard/`.
-
-1. The first thing you'll probably see is a warning by your browser telling you that the connection is not secure because the certificate isn't either. But, if you check the information of the certificate, you'll see that it's **not** the one you generated back in the [**G029** guide](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#setting-up-a-wildcard-certificate-for-a-domain), but another self-generated by Traefik itself ("verified" by `CN=TRAEFIK DEFAULT CERT`). Remember that the certificate in that guide was configured for a wildcard main domain, not for any concrete IP.
-
-2. After the security warning, a generic sign-in window will pop up in your browser.
-
- 
-
-3. Type your user and password, sign in and you'll reach the Traefik dashboard's main page.
-
- 
-
-Finally, when you have the subdomain or subdomains for your traefik's external IP ready in your network, or in the `hosts` file of your client systems, try to access the dashboard using them. Then check the certificate information and you'll see that it corresponds to the one that you created.
-
-## Traefik dashboard has bad performance
-
-If your Traefik dashboard seems to load extremely slowly, or just returning a blank page, it could be that you set the `-C` value in the `htpasswd` command too high. For some reason this affects the Traefik dashboard's performance, hitting badly the node were the traefik service is being executed. So, if this is happening to you, try the following.
-
-1. Generate a new user string with `htpasswd` as you saw previously, but with a lower `-C` value than the one you used in the first place. Then replace the string you already have in the `secrets/users` file with the new one.
-
-2. Delete and then reapply the Kustomize project again.
-
- ~~~bash
- $ kubectl delete -k $HOME/k8sprjs/traefik_dashboard-ingressroute
- $ kubectl apply -k $HOME/k8sprjs/traefik_dashboard-ingressroute
- ~~~
-
- The delete is to make sure that the `IngressRoute` resource is regenerated with the change applied.
-
-3. Try to access the Traefik Dashboard and see how it runs now.
-
-## Traefik dashboard's Kustomize project attached to this guide series
-
-You can find the Kustomize project for this Traefik dashboard deployment in the following attached folder.
-
-- `k8sprjs/traefik_dashboard-ingressroute`
-
-## Relevant system paths
-
-### _Folders in `kubectl` client system_
-
-- `$HOME/k8sprjs/traefik-ingressroute`
-- `$HOME/k8sprjs/traefik-ingressroute/resources`
-- `$HOME/k8sprjs/traefik-ingressroute/secrets`
-
-### _Files in `kubectl` client system_
-
-- `$HOME/k8sprjs/traefik-ingressroute/kustomization.yaml`
-- `$HOME/k8sprjs/traefik-ingressroute/resources/traefik-auth.middleware.basicauth.yaml`
-- `$HOME/k8sprjs/traefik-ingressroute/resources/traefik-dashboard.ingressroute.yaml`
-- `$HOME/k8sprjs/traefik-ingressroute/secrets/users`
-
-## References
-
-### _Traefik dashboard_
-
-- [Traefik Dashboard](https://doc.traefik.io/traefik/operations/dashboard/)
-- [Traefik Middleware Basic Auth](https://doc.traefik.io/traefik/middlewares/basicauth/)
-- [Install Traefik Ingress Controller in Kubernetes](https://blog.zachinachshon.com/traefik-ingress/)
-- [Install the Traefik Ingress Controller on k0s](https://docs.k0sproject.io/v1.21.2+k0s.0/examples/traefik-ingress/)
-- [Deploy Traefik on Kubernetes with Wildcard TLS Certs](https://ikarus.sg/deploy-traefik-v2/)
-
-### _Traefik IngressRoute Vs Ingress_
-
-- [What is the difference between a Kubernetes Ingress and a IngressRoute?](https://stackoverflow.com/questions/60177488/what-is-the-difference-between-a-kubernetes-ingress-and-a-ingressroute)
-- [The Kubernetes Ingress Controller, The Custom Resource Way.](https://doc.traefik.io/traefik/providers/kubernetes-crd/)
-- [The Kubernetes Ingress Controller.](https://doc.traefik.io/traefik/providers/kubernetes-ingress/)
-- [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/)
-- [Steps to expose services using Kubernetes Ingress](https://www.golinuxcloud.com/steps-to-expose-services-using-kubernetes-ingress/)
-- [Directing Kubernetes traffic with Traefik](https://opensource.com/article/20/3/kubernetes-traefik)
-- [Ingress with Traefik on K3s](https://itnext.io/ingress-with-treafik-on-k3s-53db6e751ed3)
-- [K3s issue. Documentation on ingress](https://github.com/k3s-io/k3s/issues/436)
-
-### _Traefik with cert-manager_
-
-- [How to configure Traefik on Kubernetes with Cert-manager?](https://www.padok.fr/en/blog/traefik-kubernetes-certmanager)
-- [Use Traefik and cert-manager to serve a secured website](https://community.hetzner.com/tutorials/howto-k8s-traefik-certmanager)
-- [How to use TLS in k8s IngressRoute](https://community.traefik.io/t/how-to-use-tls-in-k8s-ingressroute/7529)
-- [IngressRoute with “secretName” field still serves with default certificate](https://community.traefik.io/t/ingressroute-with-secretname-field-still-serves-with-default-certificate/991)
-- [Securing Ingress Resources with cert-manager](https://cert-manager.io/docs/usage/ingress/)
-
-### _Kustomize_
-
-- [configMapGenerator](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/configmapgenerator/)
-- [secretGenerator](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/secretgenerator/)
-- [generatorOptions](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/generatoroptions/)
-
-## Navigation
-
-[<< Previous (**G030. K3s cluster setup 13**)](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G032. Deploying services 01**) >>](G032%20-%20Deploying%20services%2001%20~%20Considerations.md)
diff --git a/G032 - Deploying services 01 ~ Considerations.md b/G032 - Deploying services 01 ~ Considerations.md
index 83e0134..ed4b548 100644
--- a/G032 - Deploying services 01 ~ Considerations.md
+++ b/G032 - Deploying services 01 ~ Considerations.md
@@ -1,21 +1,36 @@
# G032 - Deploying services 01 ~ Considerations
-In the upcoming guides I'll show you how to deploy a number of services in your K3s cluster. These services are the ones listed in the [**G018** guide](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#requirements-for-the-k3s-cluster-and-the-services-to-deploy-in-it), together with their requirements. Of course, you might want to deploy other apps, so consider the ones deployed in the following guides as examples of how you could do it in other deployments. But before you jump into the next guides, ponder the following points.
+- [Upcoming chapters are about deploying services in your K3s cluster](#upcoming-chapters-are-about-deploying-services-in-your-k3s-cluster)
+- [Be watchful of your system's resources usage](#be-watchful-of-your-systems-resources-usage)
+- [Do not fill your cluster up to the brim](#do-not-fill-your-cluster-up-to-the-brim)
+- [Navigation](#navigation)
+
+## Upcoming chapters are about deploying services in your K3s cluster
+
+The next chapters of this guide will show you how to deploy in your K3s cluster the services listed in the [chapter **G018**](G018%20-%20K3s%20cluster%20setup%2001%20~%20Requirements%20and%20arrangement.md#requirements-for-the-k3s-cluster-and-the-services-to-deploy-in-it). Of course, you might want to deploy other apps, so consider the ones deployed in the following chapters as examples of how you could do it for other deployments.
## Be watchful of your system's resources usage
-Your K3s Kubernetes cluster is not running "empty", it already has a fair number of services running which already eat up a good chunk of your hardware's resources. Be always aware of the current resources usage in your setup before you deploy any new app or service in your cluster. Remember that you can see the usages in three ways at least.
+Your K3s Kubernetes cluster is not running "empty" at this point: it already has a fair number of services eating up a good chunk of your hardware's resources. Always be aware of the current resource usage in your setup before you deploy any new app or service in your cluster.
+
+Remember that you can get the resource usages from your setup in these ways:
+
+- **The Proxmox VE's web console has a `Summary` view on every level**\
+ Datacenter, node and VMs have all a `Summary` page in which you can see the current resource usages.
-- The Proxmox VE's web console has a `Summary` view on every level. Datacenter, node and VMs have all a `Summary` page in which you can see the current resource usages.
+- **From the OS point of view, using shell commands like `htop`, `free` or `df`**\
+ It is important that you also check the usage values from within your Proxmox VE host and VMs, because they are more fine-grained and give you a better indication of how resources are being used.
-- From the OS point of view, using shell commands like `htop`, `free` or `df`. It's important that you also see the usage values from within your Proxmox VE host and VMs, because they are more fine grained and can indicate you better how resources are being used.
+- **The `kubectl top` command**\
+ This will give you the view on resources usage from within your K3s cluster.
-- From within the K3s cluster, with the `kubectl top` command.
+- **Headlamp**\
+ The Headlamp dashboard you deployed in the [previous chapter **G031**](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md) offers a resources usage overview in its main `Clusters` page.
-## Don't fill your setup up to the brim
+## Do not fill your cluster up to the brim
-Just because you still have free RAM or a not so high CPU usage, it doesn't mean that you can keep on deploying more services in your setup. You must leave some room for possible usage spikes, and for the underlying platforms running everything (Proxmox VE and K3s) which also need resources to run. This way you can also run sporadic tasks like backup jobs or updates when required.
+Just because you still have free RAM or your CPU usage is not that high, it does not mean that you can keep on deploying more services in your setup. You must leave some room for possible usage spikes, and for the underlying platforms running everything (Proxmox VE and K3s), which also need resources of their own. This way you can also run sporadic tasks like backup jobs or updates when required.
## Navigation
-[<< Previous (**G031. K3s cluster setup 14**)](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G033. Deploying services 02. Nextcloud Part 1**) >>](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md)
+[<< Previous (**G031. K3s cluster setup 14**)](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G033. Deploying services 02. Nextcloud Part 1**) >>](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%201%20-%20Outlining%20setup%2C%20arranging%20storage%20and%20choosing%20service%20IPs.md)
diff --git a/G035 - Deploying services 04 ~ Monitoring stack - Part 4 - Prometheus server.md b/G035 - Deploying services 04 ~ Monitoring stack - Part 4 - Prometheus server.md
index 011478a..89302eb 100644
--- a/G035 - Deploying services 04 ~ Monitoring stack - Part 4 - Prometheus server.md
+++ b/G035 - Deploying services 04 ~ Monitoring stack - Part 4 - Prometheus server.md
@@ -426,7 +426,7 @@ To make the Prometheus server's service reachable only through HTTPS from outsid
scheme: http
~~~
- The yaml above is like the one describing the access to the Traefik Dashboard, back in the [**G031** guide](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#enabling-the-ingressroute).
+ The yaml above is like the one describing the access to the Traefik dashboard, back in the [**G030** guide](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#kustomize-project-for-enabling-access-to-the-traefik-dashboard).
- The `IngressRoute` points (`tls.secretName` parameter) to the secret of your wildcard certificate, encrypting the traffic coming and going to the `server-prometheus` service.
diff --git a/G035 - Deploying services 04 ~ Monitoring stack - Part 6 - Complete monitoring stack setup.md b/G035 - Deploying services 04 ~ Monitoring stack - Part 6 - Complete monitoring stack setup.md
index 9977ec5..deb9ee6 100644
--- a/G035 - Deploying services 04 ~ Monitoring stack - Part 6 - Complete monitoring stack setup.md
+++ b/G035 - Deploying services 04 ~ Monitoring stack - Part 6 - Complete monitoring stack setup.md
@@ -1341,7 +1341,7 @@ Since it's not the intention of this guide series to go as deep as explaining ho
### _On Prometheus_
-As you've seen, a basic installation of Prometheus doesn't have any kind of security. If you want to enforce login with a user, you can do as you already did when you configured the access to the Traefik web dashboard [in the **G031** guide](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#enabling-the-ingressroute): by enabling a basic auth login directly in the IngressRoute of your Prometheus server.
+As you've seen, a basic installation of Prometheus doesn't have any kind of security. If you want to enforce login with a user, you can do as you already did when you configured the access to the Traefik web dashboard [in the **G030** guide](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#kustomize-project-for-enabling-access-to-the-traefik-dashboard): by enabling a basic auth login directly in the IngressRoute of your Prometheus server.
This will protect a bit the external accesses to your Prometheus dashboard, while it won't affect the connections through the internal networking of your cluster. To enforce more advanced security methods, you'll have to check out [the official Prometheus documentation](https://prometheus.io/docs/prometheus/2.35/configuration/https/) and see what security options are available.
diff --git a/G036 - Host and K3s cluster ~ Monitoring and diagnosis.md b/G036 - Host and K3s cluster ~ Monitoring and diagnosis.md
index b58899f..3c61d3b 100644
--- a/G036 - Host and K3s cluster ~ Monitoring and diagnosis.md
+++ b/G036 - Host and K3s cluster ~ Monitoring and diagnosis.md
@@ -23,7 +23,7 @@ I'll remind you here all the tools you have to monitor the usage of resources in
Notice that `top pod` requires specyfing the namespace, while `top node` doesn't (cluster nodes are not namespaced).
- - Use the Kubernetes Dashboard, which you deployed and browsed into in the [**G030** guide](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md).
+ - Use the Headlamp dashboard you deployed and browsed into [back in the **G031** guide](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md).
## Checking the logs
diff --git a/G045 - System update 04 ~ Updating K3s and deployed apps.md b/G045 - System update 04 ~ Updating K3s and deployed apps.md
index a9b56b7..447aa79 100644
--- a/G045 - System update 04 ~ Updating K3s and deployed apps.md
+++ b/G045 - System update 04 ~ Updating K3s and deployed apps.md
@@ -216,8 +216,8 @@ You know what steps to follow when updating any app, but in what order do you ha
2. **Reflector**: deployment procedure found in [concrete section of **G029** guide](G029%20-%20K3s%20cluster%20setup%2012%20~%20Setting%20up%20cert-manager%20and%20wildcard%20certificate.md#deploying-reflector).
-3. **K8sdashboard**: deployment procedure found in [**G030** guide](G030%20-%20K3s%20cluster%20setup%2013%20~%20Deploying%20the%20Kubernetes%20Dashboard.md).
- - For this one, you created two standard Kubernetes resources to enable an administrator user. Usually, you won't need to change them but nevertheless be on the lookout for changes in k8sdashboard that may affect how this app uses them.
+3. **Headlamp**: deployment procedure found in [**G031** guide](G031%20-%20K3s%20cluster%20setup%2014%20~%20Deploying%20the%20Headlamp%20dashboard.md).
+ - For this one, you created two standard Kubernetes resources to enable an administrator user. Usually, you won't need to change them but nevertheless be on the lookout for changes in Headlamp that may affect how this app uses them.
4. **MetalLB**: deployment procedure found in [**G027** guide](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md).
> **BEWARE!**
diff --git a/G902 - Appendix 02 ~ Vim vimrc configuration.md b/G902 - Appendix 02 ~ Vim vimrc configuration.md
index 2da41f2..9534ace 100644
--- a/G902 - Appendix 02 ~ Vim vimrc configuration.md
+++ b/G902 - Appendix 02 ~ Vim vimrc configuration.md
@@ -15,6 +15,8 @@ set expandtab
" Show line numbers
set nu
+" Disable autocomment of next line (bothersome when pasting texts with comments)
+autocmd FileType * set formatoptions-=cro
~~~
Just create a `.vimrc` file, with the lines above, in any user's $HOME folder.
diff --git a/G906 - Appendix 06 ~ Handling VM or VM template volumes.md b/G906 - Appendix 06 ~ Handling VM or VM template volumes.md
index 54d6611..f6dc8db 100644
--- a/G906 - Appendix 06 ~ Handling VM or VM template volumes.md
+++ b/G906 - Appendix 06 ~ Handling VM or VM template volumes.md
@@ -28,7 +28,7 @@ So, where in your system is the VM disk image of your VM template?
1. In the Proxmox VE web console, go to your VM template's `Hardware` view, and read the `Hard Disk` line.
- 
+ 
Remember the `ssd_disks:base-100-disk-0` string: it's the name of the hard disk volume within your Proxmox VE node.
diff --git a/G908 - Appendix 08 ~ K3s cluster with two or more server nodes.md b/G908 - Appendix 08 ~ K3s cluster with two or more server nodes.md
index 4b34490..ad4d835 100644
--- a/G908 - Appendix 08 ~ K3s cluster with two or more server nodes.md
+++ b/G908 - Appendix 08 ~ K3s cluster with two or more server nodes.md
@@ -1,6 +1,6 @@
# G908 - Appendix 08 ~ K3s cluster with two or more server nodes
-In the [**G025** guide](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md) you've seen how to create a K3s Kubernetes cluster with just one server node. This works fine and suits the constrained scenario set in this guide series. But if you want a more complete Kubernetes experience, you'll need to know how to set up two or more server nodes in your cluster.
+In the [**G025** guide](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md) you have seen how to create a K3s Kubernetes cluster with just one server node. This works fine and suits the constrained scenario set in this guide series. But if you want a more complete Kubernetes experience, you'll need to know how to set up two or more server nodes in your cluster.
In this supplementary guide I'll summarize you what to add or just do differently on the procedures explained in the [**G025** guide](G025%20-%20K3s%20cluster%20setup%2008%20~%20K3s%20Kubernetes%20cluster%20setup.md), with the goal of creating a K3s cluster with two server nodes.
diff --git a/G911 - Appendix 11 ~ Alternative Nextcloud web server setups.md b/G911 - Appendix 11 ~ Alternative Nextcloud web server setups.md
index fc12efe..14d5630 100644
--- a/G911 - Appendix 11 ~ Alternative Nextcloud web server setups.md
+++ b/G911 - Appendix 11 ~ Alternative Nextcloud web server setups.md
@@ -259,7 +259,7 @@ The Nextcloud server you configured in the guide is directly reachable through a
port: 443
~~~
- This yaml might look familiar to you, since it's very similar to the one you created for accessing the Traefik Dashboard, back in the [**G031** guide](G031%20-%20K3s%20cluster%20setup%2014%20~%20Enabling%20the%20Traefik%20dashboard.md#enabling-the-ingressroute).
+ This yaml might look familiar to you, since it's very similar to the one you created for accessing the Traefik dashboard, back in the [**G030** guide](G030%20-%20K3s%20cluster%20setup%2013%20~%20Enabling%20the%20Traefik%20dashboard.md#kustomize-project-for-enabling-access-to-the-traefik-dashboard).
- This `IngressRoute` references the secret of your wildcard certificate, encrypting the traffic with the referenced `server-apache-nextcloud` service.
@@ -1603,4 +1603,4 @@ You have to make the final changes at the `kustomization.yaml` file of this Next
## Navigation
-[<< Previous (**G910. Appendix 10**)](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G912. Appendix 12**) >>](G912%20-%20Appendix%2012%20~%20Adapting%20MetalLB%20config%20to%20CR.md)
+[<< Previous (**G910. Appendix 10**)](G910%20-%20Appendix%2010%20~%20Setting%20up%20virtual%20network%20with%20Open%20vSwitch.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G912. Appendix 12**) >>](G912%20-%20Appendix%2012%20~%20Checking%20the%20K8s%20API%20endpoints%20status.md)
diff --git a/G912 - Appendix 12 ~ Adapting MetalLB config to CR.md b/G912 - Appendix 12 ~ Adapting MetalLB config to CR.md
deleted file mode 100644
index 5508e06..0000000
--- a/G912 - Appendix 12 ~ Adapting MetalLB config to CR.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# G912 - Appendix 12 ~ Adapting MetalLB config to CR
-
-The MetalLB software, from its `0.13.0` version onwards, has stopped supporting configmaps as a valid method of configuration. You can see the announcement in this [Backward Compatibility note on the official MetalLB page](https://metallb.universe.tf/#backward-compatibility). This means that the configmap-based configuration set in the [**G027** guide](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md) is now completely invalid for MetalLB version `0.13.0` and beyond. In the note, they offer a convertion tool for transforming configmaps to CRs (Custom Resources), which are now the only supported way of configuring MetalLB. But this approach presents with a couple of problems for the setup used in this guide series.
-
-- The tool is executed with docker, but that's a tool not contemplated in this guide.
-- There are issues with the conversion tool, [as reported in this issue thread](https://github.com/metallb/metallb/issues/1473) or [this other one](https://github.com/metallb/metallb/issues/1495).
-
-Thankfully, someone else used an equivalent metallb setup and shared [in this article](https://tech.aufomm.com/convert-metallb-configinline-to-crs-for-layer-2-protocol/) the converted configuration. Based on this, I've prepared the following modification to the MetalLB kustomize project created in the [**G027** guide](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md).
-
-1. Create a `resources` folder within your MetalLB kustomize project.
-
- ~~~bash
- $ mkdir $HOME/k8sprjs/metallb/resources
- ~~~
-
-2. In the `resources` folder, create the files `l2-ip.l2advertisement.yaml` and `default-pool.ipaddresspool.yaml`.
-
- ~~~bash
- $ touch $HOME/k8sprjs/metallb/resources/{l2-ip.l2advertisement.yaml,default-pool.ipaddresspool.yaml}
- ~~~
-
-3. In the file `l2-ip.l2advertisement.yaml` copy the following yaml.
-
- ~~~yaml
- apiVersion: metallb.io/v1beta1
- kind: L2Advertisement
-
- metadata:
- name: l2-ip
- spec:
- ipAddressPools:
- - default-pool
- ~~~
-
- This indicates to MetalLB the following.
- - The kind `L2Advertisement` sets the **protocol** used as L2. This is the equivalent of the `protocol` parameter in the `config` file you used for configuring MetalLB.
- - The `spec.ipAddressPool` parameter points to the pools of ips to be used, in this case just one named `default-pool`.
-
-4. Edit now the `default-pool.ipaddresspool.yaml` file, so it has the content below.
-
- ~~~yaml
- apiVersion: metallb.io/v1beta1
- kind: IPAddressPool
-
- metadata:
- name: default-pool
- spec:
- addresses:
- - 192.168.1.41-192.168.1.80
- ~~~
-
- Here you've configured a simple pool of ips.
- - The kind `IPAddressPool` indicates that this is just a MetalLB pool of IP addresses.
- - The name is the same `default-pool` one indicated in the `l2-ip.l2advertisement.yaml`.
- - The `spec.addresses` list is the equivalent to the `addresses` parameter you had in the `config` file used previously as a configmap.
-
-5. Modify the `kustomization.yaml` file of your MetalLB kustomize project so it looks like below.
-
- ~~~yaml
- # MetalLB setup
- apiVersion: kustomize.config.k8s.io/v1beta1
- kind: Kustomization
-
- namespace: metallb-system
-
- resources:
- - github.com/metallb/metallb/config/native?ref=v0.13.3
- - resources/l2-ip.l2advertisement.yaml
- - resources/default-pool.ipaddresspool.yaml
- ~~~
-
- In this `kustomization.yaml` file there's no longer a `configMapGenerator` section, as it was configured in the [**G027** guide](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md#setting-up-the-configuration-files). Now there are only resources: the one for deploying the MetalLB service, and the other two you've just created for configuring the IP pool to use with MetalLB.
-
-6. Check the output of this kustomize project as usual.
-
- ~~~bash
- $ kubectl kustomize $HOME/k8sprjs/metallb/ | less
- ~~~
-
- The resources you manually created will be inside the yaml, just look for them by their metadata `name`. You'll may notice that MetalLB is "wired" to look for `L2Advertisement` resources automatically, which means that you don't have to explicitly tell MetalLB which one to use.
-
-7. Apply the kustomize project to your cluster.
-
- ~~~bash
- $ kubectl apply -k $HOME/k8sprjs/metallb
- ~~~
-
-8. Give MetalLB a couple of minutes or so to get ready, then check with `kubectl` that it's been deployed in your cluster.
-
- ~~~bash
- $ kubectl get -n metallb-system all -o wide
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
- pod/controller-55cd7dbf96-vrx29 1/1 Running 0 20m 10.42.1.214 k3sagent01
- pod/speaker-lllm5 1/1 Running 0 20m 10.0.0.11 k3sagent01
- pod/speaker-2nxf7 1/1 Running 0 19m 10.0.0.12 k3sagent02
-
- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
- service/webhook-service ClusterIP 10.43.137.211 443/TCP 20m component=controller
-
- NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
- daemonset.apps/speaker 2 2 2 2 2 kubernetes.io/os=linux 232d speaker quay.io/metallb/speaker:v0.13.3 app=metallb,component=speaker
-
- NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
- deployment.apps/controller 1/1 1 1 232d controller quay.io/metallb/controller:v0.13.3 app=metallb,component=controller
-
- NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
- replicaset.apps/controller-55cd7dbf96 1 1 1 20m controller quay.io/metallb/controller:v0.13.3 app=metallb,component=controller,pod-template-hash=55cd7dbf96
- replicaset.apps/controller-7dcc8764f4 0 0 0 232d controller quay.io/metallb/controller:v0.11.0 app=metallb,component=controller,pod-template-hash=7dcc8764f4
- ~~~
-
- Notice that, at the end of the output above, there's an older `0.11.0` MetalLB controller still listed but not really running since its not ready (it has `0` containers in `READY` state).
-
-9. If you had created a config map as I explained in the [**G027** guide](G027%20-%20K3s%20cluster%20setup%2010%20~%20Deploying%20the%20MetalLB%20load%20balancer.md)), you'll have to remove it manually from your cluster with `kubectl`.
-
- - First, confirm that it exists within the `metallb-system` namespace.
-
- ~~~bash
- $ kubectl get -n metallb-system cm
- NAME DATA AGE
- kube-root-ca.crt 1 232d
- config 1 232d
- ~~~
-
- It's the resource named `config`. Remember that you can see its contents with `kubectl` too.
-
- ~~~bash
- $ kubectl get -n metallb-system cm config -o yaml
- apiVersion: v1
- data:
- config: |
- address-pools:
- - name: default
- protocol: layer2
- addresses:
- - 192.168.1.41-192.168.1.80
- kind: ConfigMap
- metadata:
- annotations:
- kubectl.kubernetes.io/last-applied-configuration: |
- {"apiVersion":"v1","data":{"config":"address-pools:\n- name: default\n protocol: layer2\n addresses:\n - 192.168.1.41-192.168.1.80\n"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"config","namespace":"metallb-system"}}
- creationTimestamp: "2021-11-28T13:24:03Z"
- name: config
- namespace: metallb-system
- resourceVersion: "2334"
- uid: 07eeb0be-ec96-440c-ad46-25da43f6b04c
- ~~~
-
- - Remove it from your cluster.
-
- ~~~bash
- $ kubectl delete -n metallb-system cm config
- configmap "config" deleted
- ~~~
-
- Confirm that's gone.
-
- ~~~bash
- $ kubectl get -n metallb-system cm
- NAME DATA AGE
- kube-root-ca.crt 1 232d
- ~~~
-
-## References
-
-### _MetalLB configmap conversion to CRs_
-
-- [Backward Compatibility note](https://metallb.universe.tf/#backward-compatibility)
-- [Convert Metallb configInline to CRs for Layer 2 Protocol](https://tech.aufomm.com/convert-metallb-configinline-to-crs-for-layer-2-protocol/)
-- [Heads up: breaking changes in 0.13.2](https://github.com/metallb/metallb/issues/1473)
-- [CR Converstion tool, failed to generate resources: invalid aggregation length 24](https://github.com/metallb/metallb/issues/1495)
-- [Problem with Kustomize installation resource for Metallb 0.13.z native bgp implementation](https://github.com/metallb/metallb/issues/1524)
-
-## Navigation
-
-[<< Previous (**G911. Appendix 11**)](G911%20-%20Appendix%2011%20~%20Alternative%20Nextcloud%20web%20server%20setups.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G913. Appendix 13**) >>](G913%20-%20Appendix%2013%20~%20Checking%20the%20K8s%20API%20endpoints%20status.md)
diff --git a/G913 - Appendix 13 ~ Checking the K8s API endpoints status.md b/G912 - Appendix 12 ~ Checking the K8s API endpoints status.md
similarity index 91%
rename from G913 - Appendix 13 ~ Checking the K8s API endpoints status.md
rename to G912 - Appendix 12 ~ Checking the K8s API endpoints status.md
index a0aae63..ddd135a 100644
--- a/G913 - Appendix 13 ~ Checking the K8s API endpoints status.md
+++ b/G912 - Appendix 12 ~ Checking the K8s API endpoints status.md
@@ -1,4 +1,4 @@
-# G913 - Appendix 13 ~ Checking the K8s API endpoints' status
+# G912 - Appendix 12 ~ Checking the K8s API endpoints' status
If you want or need to know the status of your Kubernetes cluster's API endpoints, you can do it with the `kubectl` command. The trick is about invoking directly certain URLs active in your cluster with the `get` action and the `--raw` flag.
@@ -129,4 +129,4 @@ Notice the deprecation notice in the commands output, and also that is not reall
## Navigation
-[<< Previous (**G912. Appendix 12**)](G912%20-%20Appendix%2012%20~%20Adapting%20MetalLB%20config%20to%20CR.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G914. Appendix 14**) >>](G914%20-%20Appendix%2014%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md)
+[<< Previous (**G911. Appendix 11**)](G911%20-%20Appendix%2011%20~%20Alternative%20Nextcloud%20web%20server%20setups.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G913. Appendix 13**) >>](G913%20-%20Appendix%2013%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md)
diff --git a/G914 - Appendix 14 ~ Post-update manual maintenance tasks for Nextcloud.md b/G913 - Appendix 13 ~ Post-update manual maintenance tasks for Nextcloud.md
similarity index 95%
rename from G914 - Appendix 14 ~ Post-update manual maintenance tasks for Nextcloud.md
rename to G913 - Appendix 13 ~ Post-update manual maintenance tasks for Nextcloud.md
index 46f498d..09a52b6 100644
--- a/G914 - Appendix 14 ~ Post-update manual maintenance tasks for Nextcloud.md
+++ b/G913 - Appendix 13 ~ Post-update manual maintenance tasks for Nextcloud.md
@@ -1,4 +1,4 @@
-# G914 - Appendix 14 ~ Post-update manual maintenance tasks for Nextcloud
+# G913 - Appendix 13 ~ Post-update manual maintenance tasks for Nextcloud
When you update your Nextcloud instance to a new minor or major version, the update can also come with changes that affect Nextcloud's database structure. Nextcloud will warn you of this in the `Administration settings > Overview` page.
@@ -132,7 +132,7 @@ Next, I'll show you the procedure to execute the `occ` command in the container
### _Nextcloud_
- [Basil's Tech Diary ~ Nextcloud: Using occ in a FreeNAS jail](https://blog.udance.com.au/2021/02/25/nextcloud-using-occ-in-a-freenas-jail/)
-- [How2itsec ~ Nextcloud repairing missing indexes in database ](https://how2itsec.blogspot.com/2021/12/nextcloud-repairing-missing-indexes-in.html)
+- [How2itsec ~ Nextcloud repairing missing indexes in database](https://how2itsec.blogspot.com/2021/12/nextcloud-repairing-missing-indexes-in.html)
- [Some indices are missing in the database! How to add them manually](https://help.nextcloud.com/t/some-indices-are-missing-in-the-database-how-to-add-them-manually/37852)
- [Help with occ db:add-missing-indices](https://help.nextcloud.com/t/help-with-occ-db-add-missing-indices/90696)
@@ -144,4 +144,4 @@ Next, I'll show you the procedure to execute the `occ` command in the container
## Navigation
-[<< Previous (**G913. Appendix 13**)](G913%20-%20Appendix%2013%20~%20Checking%20the%20K8s%20API%20endpoints%20status.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G915. Appendix 15**) >>](G915%20-%20Appendix%2015%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md)
+[<< Previous (**G912. Appendix 12**)](G912%20-%20Appendix%2012%20~%20Checking%20the%20K8s%20API%20endpoints%20status.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G914. Appendix 14**) >>](G914%20-%20Appendix%2014%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md)
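For context on the `occ` maintenance tasks this renamed appendix deals with, the missing-indices fix amounts to running `occ db:add-missing-indices` inside the Nextcloud container as the web server user. A rough sketch, assuming a hypothetical `nextcloud` namespace, an `app=nextcloud-server` pod label, and an image that runs its web server as `www-data` (adjust all three to your actual deployment):

~~~bash
# Hypothetical namespace and label selector; adjust to your own deployment.
NC_POD=$(kubectl get pod -n nextcloud -l app=nextcloud-server \
  -o jsonpath='{.items[0].metadata.name}')

# Run occ as the web server user inside the container.
kubectl exec -n nextcloud "$NC_POD" -- \
  su -s /bin/sh -c 'php occ db:add-missing-indices' www-data
~~~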
diff --git a/G915 - Appendix 15 ~ Updating MariaDB to a newer major version.md b/G914 - Appendix 14 ~ Updating MariaDB to a newer major version.md
similarity index 95%
rename from G915 - Appendix 15 ~ Updating MariaDB to a newer major version.md
rename to G914 - Appendix 14 ~ Updating MariaDB to a newer major version.md
index 6f87b5b..37cdc5f 100644
--- a/G915 - Appendix 15 ~ Updating MariaDB to a newer major version.md
+++ b/G914 - Appendix 14 ~ Updating MariaDB to a newer major version.md
@@ -1,4 +1,4 @@
-# G915 - Appendix 15 ~ Updating MariaDB to a newer major version
+# G914 - Appendix 14 ~ Updating MariaDB to a newer major version
 MariaDB has been designed to be easily upgraded, something especially helpful when it has been containerized. The standard procedure is explained in [this official documentation page](https://mariadb.com/kb/en/upgrading-between-major-mariadb-versions/), but you won't need to do any of it since there's a much easier way available for containerized MariaDB instances such as the one you deployed in your [Nextcloud setup](G033%20-%20Deploying%20services%2002%20~%20Nextcloud%20-%20Part%203%20-%20MariaDB%20database%20server.md).
@@ -65,4 +65,4 @@ Rather than executing the update (or upgrade) process yourself, you'll just enab
## Navigation
-[<< Previous (**G914. Appendix 14**)](G914%20-%20Appendix%2014%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G916. Appendix 16**) >>](G916%20-%20Appendix%2016%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md)
+[<< Previous (**G913. Appendix 13**)](G913%20-%20Appendix%2013%20~%20Post-update%20manual%20maintenance%20tasks%20for%20Nextcloud.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md) | [Next (**G915. Appendix 15**) >>](G915%20-%20Appendix%2015%20~%20Updating%20PostgreSQL%20to%20a%20newer%20major%20version.md)
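The "much easier way" this renamed appendix refers to for containerized MariaDB is presumably the official image's self-upgrade switch: the `MARIADB_AUTO_UPGRADE` environment variable makes the container run `mariadb-upgrade` at startup whenever it finds a data directory created by an older version. A hedged sketch follows; the workload name and namespace are hypothetical.

~~~bash
# Hypothetical workload name and namespace; adjust to your Nextcloud setup.
kubectl set env statefulset/nextcloud-db -n nextcloud MARIADB_AUTO_UPGRADE=1

# Restart the pods so the new major-version image starts and upgrades the data directory.
kubectl rollout restart statefulset/nextcloud-db -n nextcloud
~~~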
diff --git a/G916 - Appendix 16 ~ Updating PostgreSQL to a newer major version.md b/G915 - Appendix 15 ~ Updating PostgreSQL to a newer major version.md
similarity index 97%
rename from G916 - Appendix 16 ~ Updating PostgreSQL to a newer major version.md
rename to G915 - Appendix 15 ~ Updating PostgreSQL to a newer major version.md
index ef8d7b3..0a110d5 100644
--- a/G916 - Appendix 16 ~ Updating PostgreSQL to a newer major version.md
+++ b/G915 - Appendix 15 ~ Updating PostgreSQL to a newer major version.md
@@ -1,4 +1,4 @@
-# G916 - Appendix 16 ~ Updating PostgreSQL to a newer major version
+# G915 - Appendix 15 ~ Updating PostgreSQL to a newer major version
 PostgreSQL runs well as a containerized instance and can be upgraded easily to new minor or bugfix versions. However, updating it to a new _MAJOR_ version is not a straightforward affair. Still, there's a way that simplifies this process a bit, and I'll show it to you in this guide.
@@ -1031,4 +1031,4 @@ You can find the Kustomize project meant **only for updating PostgreSQL database
## Navigation
-[<< Previous (**G915. Appendix 15**)](G915%20-%20Appendix%2015%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md)
\ No newline at end of file
+[<< Previous (**G914. Appendix 14**)](G914%20-%20Appendix%2014%20~%20Updating%20MariaDB%20to%20a%20newer%20major%20version.md) | [+Table Of Contents+](G000%20-%20Table%20Of%20Contents.md)
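Since this renamed appendix stresses that PostgreSQL major upgrades are not straightforward in containers, the usual fallback when not using the dedicated Kustomize project it mentions is a dump-and-restore between the old and the new instance. A rough sketch with hypothetical pod names and namespace, assuming passwordless access as the `postgres` user inside the pods:

~~~bash
# Hypothetical pod names and namespace; adjust to your setup.
# 1. Dump every database from the old major-version instance.
kubectl exec -n postgresql old-postgres-0 -- pg_dumpall -U postgres > cluster-dump.sql

# 2. Feed the dump into the new major-version instance.
kubectl exec -i -n postgresql new-postgres-0 -- psql -U postgres < cluster-dump.sql
~~~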
diff --git a/README.md b/README.md
index 6e5a826..2d567fb 100644
--- a/README.md
+++ b/README.md
@@ -1,45 +1,62 @@
# Small homelab K8s cluster on Proxmox VE
-Would you like to practice with K8s/Kubernetes? Do you happen to have one spare computer at hand? Then this guide series may be right for you! In it I explain how to configure a server able to run a small Kubernetes cluster, set up with just a few virtual machines.
+- [A complete guide for building a virtualized Kubernetes homelab](#a-complete-guide-for-building-a-virtualized-kubernetes-homelab)
+- [Main concepts](#main-concepts)
+- [Intended audience](#intended-audience)
+- [Goal of this guide](#goal-of-this-guide)
+- [Software used](#software-used)
+- [Table of contents](#table-of-contents)
+- [References](#references)
+ - [Software](#software)
+- [Navigation](#navigation)
-The title says "small homelab", meaning that I've written the guides having in mind the sole low-end consumer computer I had (and still have) available for it. Don't get me wrong, by the way. The hardware contemplated is limited but capable for the proposed task. You'll see what I mean in the very first [**G001** guide](G001%20-%20Hardware%20setup.md), in which I explain my hardware setup in detail.
+## A complete guide for building a virtualized Kubernetes homelab
-You might be wondering, aren't already out there guides explaining how to build such a server? Well, not exactly. Most of the guides I've seen expect you to have a number of computers (Raspberry PIs usually) available to use as nodes of your K3s-based Kubernetes cluster. What I had was one basic computer, nothing more, but I could surmount my lack of extra computers with virtual machines.
+Would you like to practice with Kubernetes? Do you happen to have one spare computer at hand? Then this guide may be right for you! It explains how to configure a virtualization server able to run a small Kubernetes cluster with a few virtual machines.
-On the other hand, most of those guides you'll find on internet use alternative tools (**k3sup** and **helm** come to mind) to handle the installation and configuration of those nodes. I wanted to go down the hard path first, building a Kubernetes cluster from scratch as close to the standard `kubectl` way as possible, so using those tools was out of the question. Still, some of those guides served me as reference in some cases, and you'll find some of them linked as references at the bottom of some of my guides.
+The title says "_small homelab_", meaning that I've written the guide with the single low-end consumer-grade computer I have available for it in mind. Don't get me wrong, by the way: the hardware contemplated is limited, but affordable and capable enough for the proposed task. You'll see what I mean in the very first [**G001** chapter](G001%20-%20Hardware%20setup.md), in which I explain my hardware setup in detail.
-Beyond those two previous considerations, there's also the fact that the information of the things I wanted, or needed, to do in my homelab is quite scattered on the internet. I knew that it would be very convenient for me to put in one place all the things I've done and the references I've followed. I also realized that, since my build is rather generic, I could go the extra mile and format the guides so they could be useful for anyone with a spare computer like mine (or a better one even).
+You might be wondering, aren't there already guides out there explaining how to build such a server? Well, not exactly. Back when I researched this matter, most of the guides I found expected you to have a number of computers (usually Raspberry Pis) available to use as nodes of your K3s-based Kubernetes cluster. What I have is one basic computer, nothing more, but I can make up for the lack of extra computers with virtual machines.
-So, in this guide series I offer you, **in one place**, a collection of procedures to run a small Kubernetes cluster with virtual machines in a single computer.
+On the other hand, most of the guides you'll find on the internet use alternative tools (**k3sup**, **terraform**, or **helm** come to mind) to handle the installation and configuration of those nodes. I wanted to go down the hard path first, building a Kubernetes cluster from scratch as close to the "`kubectl` way" as possible, so using those tools was out of the question. Still, some of those guides served me as references in some cases, and you will find them linked here.
-## Description of contents
+Beyond those two considerations, there is also the fact that the information about the things I wanted, or needed, to do in my homelab is quite scattered on the internet. I knew it would be very convenient for me to put in one place all the things I've done and the references I've followed. I also realized that, since my build is rather generic, I could go the extra mile and format the guide so it could be useful for anyone with a spare computer like mine (or even a better one).
-The procedures explained in this guide series deal mainly with three concepts:
+In short, this guide offers you, **in one place**, a complete set of procedures to build and run a small Kubernetes cluster with virtual machines on a single consumer-grade computer.
-- How to install and configure a virtualization platform.
+## Main concepts
+
+The procedures explained in this guide deal mainly with three concepts:
+
+- How to install and configure the Proxmox VE virtualization platform.
- How to setup a small Kubernetes cluster with VMs.
- How to deploy applications on the Kubernetes cluster.
-Within those main concepts, I've also covered (up to a point) things like hardening, firewalling, optimizations, backups and a few other things that came up while I was working on my server's setup.
+Within those main concepts, I've also covered (up to a point) things like hardening, firewalling, optimizations, backups and a few other things that came up while I was working on my homelab's setup.
-Each guide in the series is detailed and explanatory, only omitting things when they've been done in a previous step or guide, or is understood that the reader should know about them already.
+Each chapter in the guide is detailed and explanatory, only omitting things when they've been done in a previous step or chapter, or when it's understood that the reader should already know about them. Also, each chapter usually deals with one main concept or procedure, and the guide's setup serves as the example scenario illustrating how to implement it.
-Also, since the whole series is about building a concrete setup, the guides are related to each other, so they're more like chapters than independent guides. Still, each guide is usually about one main concept or procedure, and the setup serves as an example of how to implement it.
+> [!NOTE]
+> **This guide is written in GitHub-flavored Markdown**\
+> All the chapters of this guide are Markdown documents, best viewed rendered as HTML either directly on GitHub or in compatible Markdown viewers or editors.
## Intended audience
-In general, anyone with some background in Linux and virtual machines that also has an interest in Kubernetes. And, more broadly, anyone with the need or the curiosity to run Kubernetes on a single capable-enough consumer-grade computer.
+Anyone with some background in Linux and virtual machines and an interest in Kubernetes. Also, anyone with the need or the curiosity to run Kubernetes on a single capable-enough consumer-grade computer.
+
+## Goal of this guide
+
+The main goal of the build explained in this guide is to turn a rather low-end consumer-grade computer into a small Kubernetes homelab.
-## Goals
+## Software used
-The main goal, for the build explained in this guide series, is to turn a rather low-end consumer computer into a small Kubernetes homelab.
+The core software used in this guide to build the homelab is:
-The core platforms I use in this guide series to build the homelab are:
+- [Proxmox Virtual Environment](https://www.proxmox.com/en/) in a standalone node as the virtualization platform of choice.
-- Virtualization platform: [Proxmox Virtual Environment](https://www.proxmox.com/en/) in a standalone node.
- [Rancher K3s](https://k3s.io/) [Kubernetes](https://kubernetes.io/) distribution for building the small Kubernetes cluster with KVM virtual machines run by the Proxmox VE standalone node.
-After setting up the Kubernetes cluster, the idea is to deploy in it the following.
+After setting up the Kubernetes cluster, the idea is to deploy the following software in it:
- File cloud: [Nextcloud](https://nextcloud.com/).
- Lightweight git server: [Gitea](https://gitea.io/).
@@ -49,7 +66,19 @@ Also, the whole system will have some backup procedures applied to it.
## Table of contents
-All the guides and their main sections are easily accessible through the [Table Of Contents](G000%20-%20Table%20Of%20Contents.md) of this guide series.
+All the chapters and their main sections are easily accessible through the [Table Of Contents](G000%20-%20Table%20Of%20Contents.md) of this guide.
+
+## References
+
+### Software
+
+- [Proxmox Virtual Environment](https://www.proxmox.com/en/)
+- [Rancher K3s](https://k3s.io/)
+- [Kubernetes](https://kubernetes.io/)
+- [Nextcloud](https://nextcloud.com/)
+- [Gitea](https://gitea.io/)
+- [Prometheus](https://prometheus.io/)
+- [Grafana](https://grafana.com/grafana/)
## Navigation
diff --git a/Small homelab K8s cluster on Proxmox VE.code-workspace b/Small homelab K8s cluster on Proxmox VE.code-workspace
index 517e0b2..568c440 100644
--- a/Small homelab K8s cluster on Proxmox VE.code-workspace
+++ b/Small homelab K8s cluster on Proxmox VE.code-workspace
@@ -4,5 +4,9 @@
"path": "."
}
],
- "settings": {}
+ "settings": {
+ "cSpell.words": [
+ "hugepages"
+ ]
+ }
}
\ No newline at end of file
diff --git a/images/g002/Installer-01_initial_screen.png b/images/g002/Installer-01_initial_screen.png
deleted file mode 100644
index 3078b21..0000000
Binary files a/images/g002/Installer-01_initial_screen.png and /dev/null differ
diff --git a/images/g002/Installer-02_virtualization_support_warning.png b/images/g002/Installer-02_virtualization_support_warning.png
deleted file mode 100644
index 7e1f2a4..0000000
Binary files a/images/g002/Installer-02_virtualization_support_warning.png and /dev/null differ
diff --git a/images/g002/Installer-03_EULA_screen.png b/images/g002/Installer-03_EULA_screen.png
deleted file mode 100644
index bbae63b..0000000
Binary files a/images/g002/Installer-03_EULA_screen.png and /dev/null differ
diff --git a/images/g002/Installer-04_target_harddisk.png b/images/g002/Installer-04_target_harddisk.png
deleted file mode 100644
index 703d3f2..0000000
Binary files a/images/g002/Installer-04_target_harddisk.png and /dev/null differ
diff --git a/images/g002/Installer-04_target_harddisk_list.png b/images/g002/Installer-04_target_harddisk_list.png
deleted file mode 100644
index 56615bd..0000000
Binary files a/images/g002/Installer-04_target_harddisk_list.png and /dev/null differ
diff --git a/images/g002/Installer-05_target_harddisk_options.png b/images/g002/Installer-05_target_harddisk_options.png
deleted file mode 100644
index ba69b13..0000000
Binary files a/images/g002/Installer-05_target_harddisk_options.png and /dev/null differ
diff --git a/images/g002/Installer-05_target_harddisk_options_adjusted.png b/images/g002/Installer-05_target_harddisk_options_adjusted.png
deleted file mode 100644
index 890ada0..0000000
Binary files a/images/g002/Installer-05_target_harddisk_options_adjusted.png and /dev/null differ
diff --git a/images/g002/Installer-06_localization_time_zone.png b/images/g002/Installer-06_localization_time_zone.png
deleted file mode 100644
index 41f382b..0000000
Binary files a/images/g002/Installer-06_localization_time_zone.png and /dev/null differ
diff --git a/images/g002/Installer-07_root_password.png b/images/g002/Installer-07_root_password.png
deleted file mode 100644
index a10b858..0000000
Binary files a/images/g002/Installer-07_root_password.png and /dev/null differ
diff --git a/images/g002/Installer-08_root_mail_valid.png b/images/g002/Installer-08_root_mail_valid.png
deleted file mode 100644
index 6fa1fa1..0000000
Binary files a/images/g002/Installer-08_root_mail_valid.png and /dev/null differ
diff --git a/images/g002/Installer-09_network_configuration.png b/images/g002/Installer-09_network_configuration.png
deleted file mode 100644
index 3bff596..0000000
Binary files a/images/g002/Installer-09_network_configuration.png and /dev/null differ
diff --git a/images/g002/Installer-10_summary.png b/images/g002/Installer-10_summary.png
deleted file mode 100644
index dc16ba7..0000000
Binary files a/images/g002/Installer-10_summary.png and /dev/null differ
diff --git a/images/g002/Installer-11_progress_screen.png b/images/g002/Installer-11_progress_screen.png
deleted file mode 100644
index 418603e..0000000
Binary files a/images/g002/Installer-11_progress_screen.png and /dev/null differ
diff --git a/images/g002/Installer-12_progress_screen_installing.png b/images/g002/Installer-12_progress_screen_installing.png
deleted file mode 100644
index f60d626..0000000
Binary files a/images/g002/Installer-12_progress_screen_installing.png and /dev/null differ
diff --git a/images/g002/Installer-EFI_boot_setup_error.webp b/images/g002/Installer-EFI_boot_setup_error.webp
new file mode 100644
index 0000000..8fc157d
Binary files /dev/null and b/images/g002/Installer-EFI_boot_setup_error.webp differ
diff --git a/images/g002/Installer-EULA_screen.webp b/images/g002/Installer-EULA_screen.webp
new file mode 100644
index 0000000..53b1761
Binary files /dev/null and b/images/g002/Installer-EULA_screen.webp differ
diff --git a/images/g002/Installer-initial_screen.webp b/images/g002/Installer-initial_screen.webp
new file mode 100644
index 0000000..8cb2e28
Binary files /dev/null and b/images/g002/Installer-initial_screen.webp differ
diff --git a/images/g002/Installer-initial_shell.webp b/images/g002/Installer-initial_shell.webp
new file mode 100644
index 0000000..8462265
Binary files /dev/null and b/images/g002/Installer-initial_shell.webp differ
diff --git a/images/g002/Installer-localization_time_zone.webp b/images/g002/Installer-localization_time_zone.webp
new file mode 100644
index 0000000..288ed75
Binary files /dev/null and b/images/g002/Installer-localization_time_zone.webp differ
diff --git a/images/g002/Installer-network_configuration.webp b/images/g002/Installer-network_configuration.webp
new file mode 100644
index 0000000..4b1aad0
Binary files /dev/null and b/images/g002/Installer-network_configuration.webp differ
diff --git a/images/g002/Installer-progress_screen.webp b/images/g002/Installer-progress_screen.webp
new file mode 100644
index 0000000..4bf30b7
Binary files /dev/null and b/images/g002/Installer-progress_screen.webp differ
diff --git a/images/g002/Installer-root_password.webp b/images/g002/Installer-root_password.webp
new file mode 100644
index 0000000..a262ea4
Binary files /dev/null and b/images/g002/Installer-root_password.webp differ
diff --git a/images/g002/Installer-success_screen.webp b/images/g002/Installer-success_screen.webp
new file mode 100644
index 0000000..b9fc48a
Binary files /dev/null and b/images/g002/Installer-success_screen.webp differ
diff --git a/images/g002/Installer-summary.webp b/images/g002/Installer-summary.webp
new file mode 100644
index 0000000..de8c8f5
Binary files /dev/null and b/images/g002/Installer-summary.webp differ
diff --git a/images/g002/Installer-target_harddisk.webp b/images/g002/Installer-target_harddisk.webp
new file mode 100644
index 0000000..1e42f51
Binary files /dev/null and b/images/g002/Installer-target_harddisk.webp differ
diff --git a/images/g002/Installer-target_harddisk_list.webp b/images/g002/Installer-target_harddisk_list.webp
new file mode 100644
index 0000000..d59b333
Binary files /dev/null and b/images/g002/Installer-target_harddisk_list.webp differ
diff --git a/images/g002/Installer-target_harddisk_options.webp b/images/g002/Installer-target_harddisk_options.webp
new file mode 100644
index 0000000..baee695
Binary files /dev/null and b/images/g002/Installer-target_harddisk_options.webp differ
diff --git a/images/g002/Installer-target_harddisk_options_adjusted.webp b/images/g002/Installer-target_harddisk_options_adjusted.webp
new file mode 100644
index 0000000..b3b8894
Binary files /dev/null and b/images/g002/Installer-target_harddisk_options_adjusted.webp differ
diff --git a/images/g002/Installer-virtualization_support_warning.webp b/images/g002/Installer-virtualization_support_warning.webp
new file mode 100644
index 0000000..7f5e937
Binary files /dev/null and b/images/g002/Installer-virtualization_support_warning.webp differ
diff --git a/images/g002/proxmox_ve_web_console_login.png b/images/g002/proxmox_ve_web_console_login.png
deleted file mode 100644
index 20053f0..0000000
Binary files a/images/g002/proxmox_ve_web_console_login.png and /dev/null differ
diff --git a/images/g002/proxmox_ve_web_console_login.webp b/images/g002/proxmox_ve_web_console_login.webp
new file mode 100644
index 0000000..d6e4093
Binary files /dev/null and b/images/g002/proxmox_ve_web_console_login.webp differ
diff --git a/images/g002/rufus_data_warning.png b/images/g002/rufus_data_warning.png
deleted file mode 100644
index 45a4d31..0000000
Binary files a/images/g002/rufus_data_warning.png and /dev/null differ
diff --git a/images/g002/rufus_main_window.png b/images/g002/rufus_main_window.png
deleted file mode 100644
index 2bff018..0000000
Binary files a/images/g002/rufus_main_window.png and /dev/null differ
diff --git a/images/g002/rufus_write_mode.png b/images/g002/rufus_write_mode.png
deleted file mode 100644
index 6cbba3e..0000000
Binary files a/images/g002/rufus_write_mode.png and /dev/null differ
diff --git a/images/g002/rufus_writing_iso.png b/images/g002/rufus_writing_iso.png
deleted file mode 100644
index 68a89d2..0000000
Binary files a/images/g002/rufus_writing_iso.png and /dev/null differ
diff --git a/images/g002/rufus_writing_iso_finished.png b/images/g002/rufus_writing_iso_finished.png
deleted file mode 100644
index f5ec966..0000000
Binary files a/images/g002/rufus_writing_iso_finished.png and /dev/null differ
diff --git a/images/g003/pve_node_reboot_button.png b/images/g003/pve_node_reboot_button.png
deleted file mode 100644
index dc26ac9..0000000
Binary files a/images/g003/pve_node_reboot_button.png and /dev/null differ
diff --git a/images/g003/pve_node_reboot_button.webp b/images/g003/pve_node_reboot_button.webp
new file mode 100644
index 0000000..12aa70b
Binary files /dev/null and b/images/g003/pve_node_reboot_button.webp differ
diff --git a/images/g003/pve_node_reboot_confirmation.png b/images/g003/pve_node_reboot_confirmation.png
deleted file mode 100644
index c8192ef..0000000
Binary files a/images/g003/pve_node_reboot_confirmation.png and /dev/null differ
diff --git a/images/g003/pve_node_reboot_confirmation.webp b/images/g003/pve_node_reboot_confirmation.webp
new file mode 100644
index 0000000..5490d2a
Binary files /dev/null and b/images/g003/pve_node_reboot_confirmation.webp differ
diff --git a/images/g003/pve_node_shell_options.png b/images/g003/pve_node_shell_options.png
deleted file mode 100644
index 6ad44f1..0000000
Binary files a/images/g003/pve_node_shell_options.png and /dev/null differ
diff --git a/images/g003/pve_node_shell_options.webp b/images/g003/pve_node_shell_options.webp
new file mode 100644
index 0000000..7373fbb
Binary files /dev/null and b/images/g003/pve_node_shell_options.webp differ
diff --git a/images/g003/pve_node_shell_s-tui.png b/images/g003/pve_node_shell_s-tui.png
deleted file mode 100644
index ff271a3..0000000
Binary files a/images/g003/pve_node_shell_s-tui.png and /dev/null differ
diff --git a/images/g003/pve_node_shell_s-tui.webp b/images/g003/pve_node_shell_s-tui.webp
new file mode 100644
index 0000000..93306bc
Binary files /dev/null and b/images/g003/pve_node_shell_s-tui.webp differ
diff --git a/images/g003/pve_node_updates_empty_list.png b/images/g003/pve_node_updates_empty_list.png
deleted file mode 100644
index eab9762..0000000
Binary files a/images/g003/pve_node_updates_empty_list.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_empty_list.webp b/images/g003/pve_node_updates_empty_list.webp
new file mode 100644
index 0000000..0ee5f96
Binary files /dev/null and b/images/g003/pve_node_updates_empty_list.webp differ
diff --git a/images/g003/pve_node_updates_noVNC_shell.png b/images/g003/pve_node_updates_noVNC_shell.png
deleted file mode 100644
index 7f41c80..0000000
Binary files a/images/g003/pve_node_updates_noVNC_shell.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_noVNC_shell.webp b/images/g003/pve_node_updates_noVNC_shell.webp
new file mode 100644
index 0000000..b671070
Binary files /dev/null and b/images/g003/pve_node_updates_noVNC_shell.webp differ
diff --git a/images/g003/pve_node_updates_noVNC_shell_apt_ended.png b/images/g003/pve_node_updates_noVNC_shell_apt_ended.png
deleted file mode 100644
index 316b944..0000000
Binary files a/images/g003/pve_node_updates_noVNC_shell_apt_ended.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_noVNC_shell_apt_ended.webp b/images/g003/pve_node_updates_noVNC_shell_apt_ended.webp
new file mode 100644
index 0000000..ccfed33
Binary files /dev/null and b/images/g003/pve_node_updates_noVNC_shell_apt_ended.webp differ
diff --git a/images/g003/pve_node_updates_noVNC_shell_exiting.png b/images/g003/pve_node_updates_noVNC_shell_exiting.png
deleted file mode 100644
index a562d45..0000000
Binary files a/images/g003/pve_node_updates_noVNC_shell_exiting.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_noVNC_shell_logout.webp b/images/g003/pve_node_updates_noVNC_shell_logout.webp
new file mode 100644
index 0000000..04a8a15
Binary files /dev/null and b/images/g003/pve_node_updates_noVNC_shell_logout.webp differ
diff --git a/images/g003/pve_node_updates_pending.png b/images/g003/pve_node_updates_pending.png
deleted file mode 100644
index f2c08fe..0000000
Binary files a/images/g003/pve_node_updates_pending.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_pending.webp b/images/g003/pve_node_updates_pending.webp
new file mode 100644
index 0000000..0d28c05
Binary files /dev/null and b/images/g003/pve_node_updates_pending.webp differ
diff --git a/images/g003/pve_node_updates_refresh_button.png b/images/g003/pve_node_updates_refresh_button.png
deleted file mode 100644
index afc789e..0000000
Binary files a/images/g003/pve_node_updates_refresh_button.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_refresh_button.webp b/images/g003/pve_node_updates_refresh_button.webp
new file mode 100644
index 0000000..c4b7eb5
Binary files /dev/null and b/images/g003/pve_node_updates_refresh_button.webp differ
diff --git a/images/g003/pve_node_updates_refresh_button_after_update.png b/images/g003/pve_node_updates_refresh_button_after_update.png
deleted file mode 100644
index 5eb0b50..0000000
Binary files a/images/g003/pve_node_updates_refresh_button_after_update.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_refresh_button_after_update.webp b/images/g003/pve_node_updates_refresh_button_after_update.webp
new file mode 100644
index 0000000..b0b11d0
Binary files /dev/null and b/images/g003/pve_node_updates_refresh_button_after_update.webp differ
diff --git a/images/g003/pve_node_updates_repositories_section.png b/images/g003/pve_node_updates_repositories_section.png
deleted file mode 100644
index 56cecca..0000000
Binary files a/images/g003/pve_node_updates_repositories_section.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_repositories_section.webp b/images/g003/pve_node_updates_repositories_section.webp
new file mode 100644
index 0000000..192d08a
Binary files /dev/null and b/images/g003/pve_node_updates_repositories_section.webp differ
diff --git a/images/g003/pve_node_updates_repositories_section_add_repository_no_subscription.png b/images/g003/pve_node_updates_repositories_section_add_repository_no_subscription.png
deleted file mode 100644
index a1036e2..0000000
Binary files a/images/g003/pve_node_updates_repositories_section_add_repository_no_subscription.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_repositories_section_add_repository_no_subscription.webp b/images/g003/pve_node_updates_repositories_section_add_repository_no_subscription.webp
new file mode 100644
index 0000000..310aab3
Binary files /dev/null and b/images/g003/pve_node_updates_repositories_section_add_repository_no_subscription.webp differ
diff --git a/images/g003/pve_node_updates_repositories_section_add_repository_window.png b/images/g003/pve_node_updates_repositories_section_add_repository_window.png
deleted file mode 100644
index 31a2536..0000000
Binary files a/images/g003/pve_node_updates_repositories_section_add_repository_window.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_repositories_section_add_repository_window.webp b/images/g003/pve_node_updates_repositories_section_add_repository_window.webp
new file mode 100644
index 0000000..0ba07dc
Binary files /dev/null and b/images/g003/pve_node_updates_repositories_section_add_repository_window.webp differ
diff --git a/images/g003/pve_node_updates_repositories_section_disable_enterprise.png b/images/g003/pve_node_updates_repositories_section_disable_enterprise.png
deleted file mode 100644
index e02463d..0000000
Binary files a/images/g003/pve_node_updates_repositories_section_disable_enterprise.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_repositories_section_disable_enterprise_ceph.webp b/images/g003/pve_node_updates_repositories_section_disable_enterprise_ceph.webp
new file mode 100644
index 0000000..4f7250c
Binary files /dev/null and b/images/g003/pve_node_updates_repositories_section_disable_enterprise_ceph.webp differ
diff --git a/images/g003/pve_node_updates_repositories_section_disable_enterprise_pve.webp b/images/g003/pve_node_updates_repositories_section_disable_enterprise_pve.webp
new file mode 100644
index 0000000..1049323
Binary files /dev/null and b/images/g003/pve_node_updates_repositories_section_disable_enterprise_pve.webp differ
diff --git a/images/g003/pve_node_updates_repositories_section_warning_no_repository.png b/images/g003/pve_node_updates_repositories_section_warning_no_repository.png
deleted file mode 100644
index 9e99035..0000000
Binary files a/images/g003/pve_node_updates_repositories_section_warning_no_repository.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_repositories_section_warning_no_repository.webp b/images/g003/pve_node_updates_repositories_section_warning_no_repository.webp
new file mode 100644
index 0000000..8e185c5
Binary files /dev/null and b/images/g003/pve_node_updates_repositories_section_warning_no_repository.webp differ
diff --git a/images/g003/pve_node_updates_repositories_section_warning_no_subscription.png b/images/g003/pve_node_updates_repositories_section_warning_no_subscription.png
deleted file mode 100644
index 3bc4188..0000000
Binary files a/images/g003/pve_node_updates_repositories_section_warning_no_subscription.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_repositories_section_warning_no_subscription.webp b/images/g003/pve_node_updates_repositories_section_warning_no_subscription.webp
new file mode 100644
index 0000000..dc91bc9
Binary files /dev/null and b/images/g003/pve_node_updates_repositories_section_warning_no_subscription.webp differ
diff --git a/images/g003/pve_node_updates_repositories_updated.png b/images/g003/pve_node_updates_repositories_updated.png
deleted file mode 100644
index 1405c0b..0000000
Binary files a/images/g003/pve_node_updates_repositories_updated.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_repositories_updated.webp b/images/g003/pve_node_updates_repositories_updated.webp
new file mode 100644
index 0000000..b7e7c09
Binary files /dev/null and b/images/g003/pve_node_updates_repositories_updated.webp differ
diff --git a/images/g003/pve_node_updates_task_window.png b/images/g003/pve_node_updates_task_window.png
deleted file mode 100644
index d9c0fea..0000000
Binary files a/images/g003/pve_node_updates_task_window.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_task_window.webp b/images/g003/pve_node_updates_task_window.webp
new file mode 100644
index 0000000..ec474ab
Binary files /dev/null and b/images/g003/pve_node_updates_task_window.webp differ
diff --git a/images/g003/pve_node_updates_upgrade_button.png b/images/g003/pve_node_updates_upgrade_button.png
deleted file mode 100644
index 54a2018..0000000
Binary files a/images/g003/pve_node_updates_upgrade_button.png and /dev/null differ
diff --git a/images/g003/pve_node_updates_upgrade_button.webp b/images/g003/pve_node_updates_upgrade_button.webp
new file mode 100644
index 0000000..aa004e8
Binary files /dev/null and b/images/g003/pve_node_updates_upgrade_button.webp differ
diff --git a/images/g005/datacenter_storage_initial_configuration.png b/images/g005/datacenter_storage_initial_configuration.png
deleted file mode 100644
index c88a24d..0000000
Binary files a/images/g005/datacenter_storage_initial_configuration.png and /dev/null differ
diff --git a/images/g005/datacenter_storage_initial_configuration.webp b/images/g005/datacenter_storage_initial_configuration.webp
new file mode 100644
index 0000000..ff01109
Binary files /dev/null and b/images/g005/datacenter_storage_initial_configuration.webp differ
diff --git a/images/g005/datacenter_storage_local_lvm_thin_remove_button.png b/images/g005/datacenter_storage_local_lvm_thin_remove_button.png
deleted file mode 100644
index d046b29..0000000
Binary files a/images/g005/datacenter_storage_local_lvm_thin_remove_button.png and /dev/null differ
diff --git a/images/g005/datacenter_storage_local_lvm_thin_remove_button.webp b/images/g005/datacenter_storage_local_lvm_thin_remove_button.webp
new file mode 100644
index 0000000..40f721b
Binary files /dev/null and b/images/g005/datacenter_storage_local_lvm_thin_remove_button.webp differ
diff --git a/images/g005/datacenter_storage_local_lvm_thin_remove_confirmation.png b/images/g005/datacenter_storage_local_lvm_thin_remove_confirmation.png
deleted file mode 100644
index ce111c8..0000000
Binary files a/images/g005/datacenter_storage_local_lvm_thin_remove_confirmation.png and /dev/null differ
diff --git a/images/g005/datacenter_storage_local_lvm_thin_remove_confirmation.webp b/images/g005/datacenter_storage_local_lvm_thin_remove_confirmation.webp
new file mode 100644
index 0000000..ac17fa7
Binary files /dev/null and b/images/g005/datacenter_storage_local_lvm_thin_remove_confirmation.webp differ
diff --git a/images/g005/datacenter_storage_local_lvm_thin_removed.png b/images/g005/datacenter_storage_local_lvm_thin_removed.png
deleted file mode 100644
index d449eb2..0000000
Binary files a/images/g005/datacenter_storage_local_lvm_thin_removed.png and /dev/null differ
diff --git a/images/g005/datacenter_storage_local_lvm_thin_removed.webp b/images/g005/datacenter_storage_local_lvm_thin_removed.webp
new file mode 100644
index 0000000..d5cb23b
Binary files /dev/null and b/images/g005/datacenter_storage_local_lvm_thin_removed.webp differ
diff --git a/images/g005/pve_node_disks_devices_lvm_updated_screen.png b/images/g005/pve_node_disks_devices_lvm_updated_screen.png
deleted file mode 100644
index c175edc..0000000
Binary files a/images/g005/pve_node_disks_devices_lvm_updated_screen.png and /dev/null differ
diff --git a/images/g005/pve_node_disks_devices_lvm_updated_screen.webp b/images/g005/pve_node_disks_devices_lvm_updated_screen.webp
new file mode 100644
index 0000000..e3dd6c1
Binary files /dev/null and b/images/g005/pve_node_disks_devices_lvm_updated_screen.webp differ
diff --git a/images/g005/pve_node_disks_lvm-thin_screen.png b/images/g005/pve_node_disks_lvm-thin_screen.png
deleted file mode 100644
index 3619b67..0000000
Binary files a/images/g005/pve_node_disks_lvm-thin_screen.png and /dev/null differ
diff --git a/images/g005/pve_node_disks_lvm-thin_screen.webp b/images/g005/pve_node_disks_lvm-thin_screen.webp
new file mode 100644
index 0000000..fd384f5
Binary files /dev/null and b/images/g005/pve_node_disks_lvm-thin_screen.webp differ
diff --git a/images/g005/pve_node_disks_lvm-thin_screen_empty.png b/images/g005/pve_node_disks_lvm-thin_screen_empty.png
deleted file mode 100644
index 16e1d91..0000000
Binary files a/images/g005/pve_node_disks_lvm-thin_screen_empty.png and /dev/null differ
diff --git a/images/g005/pve_node_disks_lvm-thin_screen_empty.webp b/images/g005/pve_node_disks_lvm-thin_screen_empty.webp
new file mode 100644
index 0000000..3293fcc
Binary files /dev/null and b/images/g005/pve_node_disks_lvm-thin_screen_empty.webp differ
diff --git a/images/g005/pve_node_disks_lvm_screen.png b/images/g005/pve_node_disks_lvm_screen.png
deleted file mode 100644
index ece587c..0000000
Binary files a/images/g005/pve_node_disks_lvm_screen.png and /dev/null differ
diff --git a/images/g005/pve_node_disks_lvm_screen.webp b/images/g005/pve_node_disks_lvm_screen.webp
new file mode 100644
index 0000000..d844f7d
Binary files /dev/null and b/images/g005/pve_node_disks_lvm_screen.webp differ
diff --git a/images/g005/pve_node_disks_lvm_screen_new_vgs.png b/images/g005/pve_node_disks_lvm_screen_new_vgs.png
deleted file mode 100644
index baab0f5..0000000
Binary files a/images/g005/pve_node_disks_lvm_screen_new_vgs.png and /dev/null differ
diff --git a/images/g005/pve_node_disks_lvm_screen_new_vgs.webp b/images/g005/pve_node_disks_lvm_screen_new_vgs.webp
new file mode 100644
index 0000000..825a909
Binary files /dev/null and b/images/g005/pve_node_disks_lvm_screen_new_vgs.webp differ
diff --git a/images/g005/pve_node_disks_screen.png b/images/g005/pve_node_disks_screen.png
deleted file mode 100644
index 5f2bdad..0000000
Binary files a/images/g005/pve_node_disks_screen.png and /dev/null differ
diff --git a/images/g005/pve_node_disks_screen.webp b/images/g005/pve_node_disks_screen.webp
new file mode 100644
index 0000000..c0b00ff
Binary files /dev/null and b/images/g005/pve_node_disks_screen.webp differ
diff --git a/images/g005/pve_node_storage_local-lvm_initial_screen.png b/images/g005/pve_node_storage_local-lvm_initial_screen.png
deleted file mode 100644
index 14203b4..0000000
Binary files a/images/g005/pve_node_storage_local-lvm_initial_screen.png and /dev/null differ
diff --git a/images/g005/pve_node_storage_local-lvm_initial_screen.webp b/images/g005/pve_node_storage_local-lvm_initial_screen.webp
new file mode 100644
index 0000000..781d370
Binary files /dev/null and b/images/g005/pve_node_storage_local-lvm_initial_screen.webp differ
diff --git a/images/g005/pve_node_storage_local_initial_screen.png b/images/g005/pve_node_storage_local_initial_screen.png
deleted file mode 100644
index d22e335..0000000
Binary files a/images/g005/pve_node_storage_local_initial_screen.png and /dev/null differ
diff --git a/images/g005/pve_node_storage_local_initial_screen.webp b/images/g005/pve_node_storage_local_initial_screen.webp
new file mode 100644
index 0000000..7951c79
Binary files /dev/null and b/images/g005/pve_node_storage_local_initial_screen.webp differ
diff --git a/images/g006/proxmox_ve_subscription_warning.png b/images/g006/proxmox_ve_subscription_warning.png
deleted file mode 100644
index 24caf1b..0000000
Binary files a/images/g006/proxmox_ve_subscription_warning.png and /dev/null differ
diff --git a/images/g006/proxmox_ve_subscription_warning.webp b/images/g006/proxmox_ve_subscription_warning.webp
new file mode 100644
index 0000000..46e1d7b
Binary files /dev/null and b/images/g006/proxmox_ve_subscription_warning.webp differ
diff --git a/images/g007/2FA_option_root_user_menu.png b/images/g007/2FA_option_root_user_menu.png
deleted file mode 100644
index 0281085..0000000
Binary files a/images/g007/2FA_option_root_user_menu.png and /dev/null differ
diff --git a/images/g007/datacenter-authentication-realm-edit-window-request-tfa-chosen.png b/images/g007/datacenter-authentication-realm-edit-window-request-tfa-chosen.png
deleted file mode 100644
index 43f968c..0000000
Binary files a/images/g007/datacenter-authentication-realm-edit-window-request-tfa-chosen.png and /dev/null differ
diff --git a/images/g007/datacenter-authentication-realm-edit-window-request-tfa-list.png b/images/g007/datacenter-authentication-realm-edit-window-request-tfa-list.png
deleted file mode 100644
index 8c62864..0000000
Binary files a/images/g007/datacenter-authentication-realm-edit-window-request-tfa-list.png and /dev/null differ
diff --git a/images/g007/datacenter-authentication-realm-edit-window.png b/images/g007/datacenter-authentication-realm-edit-window.png
deleted file mode 100644
index 6598ffc..0000000
Binary files a/images/g007/datacenter-authentication-realm-edit-window.png and /dev/null differ
diff --git a/images/g007/datacenter-authentication-realms-pam-totp-oath.png b/images/g007/datacenter-authentication-realms-pam-totp-oath.png
deleted file mode 100644
index 801da1f..0000000
Binary files a/images/g007/datacenter-authentication-realms-pam-totp-oath.png and /dev/null differ
diff --git a/images/g007/datacenter-authentication-realms.png b/images/g007/datacenter-authentication-realms.png
deleted file mode 100644
index 23272dd..0000000
Binary files a/images/g007/datacenter-authentication-realms.png and /dev/null differ
diff --git a/images/g007/datacenter-permissions-realms-pam-edit-button.webp b/images/g007/datacenter-permissions-realms-pam-edit-button.webp
new file mode 100644
index 0000000..12a1a3c
Binary files /dev/null and b/images/g007/datacenter-permissions-realms-pam-edit-button.webp differ
diff --git a/images/g007/datacenter-permissions-realms-pam-edit-tfa-options-list.webp b/images/g007/datacenter-permissions-realms-pam-edit-tfa-options-list.webp
new file mode 100644
index 0000000..ebc78ae
Binary files /dev/null and b/images/g007/datacenter-permissions-realms-pam-edit-tfa-options-list.webp differ
diff --git a/images/g007/datacenter-permissions-realms-pam-edit-tfa-totp-chosen.webp b/images/g007/datacenter-permissions-realms-pam-edit-tfa-totp-chosen.webp
new file mode 100644
index 0000000..68b85d8
Binary files /dev/null and b/images/g007/datacenter-permissions-realms-pam-edit-tfa-totp-chosen.webp differ
diff --git a/images/g007/datacenter-permissions-realms-pam-edit.webp b/images/g007/datacenter-permissions-realms-pam-edit.webp
new file mode 100644
index 0000000..05ab827
Binary files /dev/null and b/images/g007/datacenter-permissions-realms-pam-edit.webp differ
diff --git a/images/g007/datacenter-permissions-realms-pam-oath-totp-enabled.webp b/images/g007/datacenter-permissions-realms-pam-oath-totp-enabled.webp
new file mode 100644
index 0000000..0462485
Binary files /dev/null and b/images/g007/datacenter-permissions-realms-pam-oath-totp-enabled.webp differ
diff --git a/images/g007/datacenter-permissions-realms.webp b/images/g007/datacenter-permissions-realms.webp
new file mode 100644
index 0000000..d46d771
Binary files /dev/null and b/images/g007/datacenter-permissions-realms.webp differ
diff --git a/images/g007/datacenter-permissions-two_factor-add_totp_window.webp b/images/g007/datacenter-permissions-two_factor-add_totp_window.webp
new file mode 100644
index 0000000..c31e639
Binary files /dev/null and b/images/g007/datacenter-permissions-two_factor-add_totp_window.webp differ
diff --git a/images/g007/datacenter-permissions-two_factor-totp_added.webp b/images/g007/datacenter-permissions-two_factor-totp_added.webp
new file mode 100644
index 0000000..6deac1f
Binary files /dev/null and b/images/g007/datacenter-permissions-two_factor-totp_added.webp differ
diff --git a/images/g007/datacenter-permissions-two_factor-totp_option.webp b/images/g007/datacenter-permissions-two_factor-totp_option.webp
new file mode 100644
index 0000000..99accb7
Binary files /dev/null and b/images/g007/datacenter-permissions-two_factor-totp_option.webp differ
diff --git a/images/g007/datacenter-permissions-two_factor.webp b/images/g007/datacenter-permissions-two_factor.webp
new file mode 100644
index 0000000..e4c6558
Binary files /dev/null and b/images/g007/datacenter-permissions-two_factor.webp differ
diff --git a/images/g007/datacenter-realms-v7.2-z.png b/images/g007/datacenter-realms-v7.2-z.png
deleted file mode 100644
index 837e423..0000000
Binary files a/images/g007/datacenter-realms-v7.2-z.png and /dev/null differ
diff --git a/images/g007/gui-datacenter-users-tfa.png b/images/g007/gui-datacenter-users-tfa.png
deleted file mode 100644
index aa735fb..0000000
Binary files a/images/g007/gui-datacenter-users-tfa.png and /dev/null differ
diff --git a/images/g007/pve-web-console-login-failed.png b/images/g007/pve-web-console-login-failed.png
deleted file mode 100644
index 99b14dc..0000000
Binary files a/images/g007/pve-web-console-login-failed.png and /dev/null differ
diff --git a/images/g007/pve-web-console-login-failed.webp b/images/g007/pve-web-console-login-failed.webp
new file mode 100644
index 0000000..e0afdcb
Binary files /dev/null and b/images/g007/pve-web-console-login-failed.webp differ
diff --git a/images/g007/pve-web-console-login-processing.png b/images/g007/pve-web-console-login-processing.png
deleted file mode 100644
index 6c0ddb0..0000000
Binary files a/images/g007/pve-web-console-login-processing.png and /dev/null differ
diff --git a/images/g007/pve-web-console-login-processing.webp b/images/g007/pve-web-console-login-processing.webp
new file mode 100644
index 0000000..95c0476
Binary files /dev/null and b/images/g007/pve-web-console-login-processing.webp differ
diff --git a/images/g007/pve_web_console_username.png b/images/g007/pve_web_console_username.png
deleted file mode 100644
index 3091be0..0000000
Binary files a/images/g007/pve_web_console_username.png and /dev/null differ
diff --git a/images/g007/pve_web_console_username.webp b/images/g007/pve_web_console_username.webp
new file mode 100644
index 0000000..56cb3e3
Binary files /dev/null and b/images/g007/pve_web_console_username.webp differ
diff --git a/images/g007/tfa_option_root_user_menu.webp b/images/g007/tfa_option_root_user_menu.webp
new file mode 100644
index 0000000..af758af
Binary files /dev/null and b/images/g007/tfa_option_root_user_menu.webp differ
diff --git a/images/g008/datacenter-perms-tfa-add-totp-option.webp b/images/g008/datacenter-perms-tfa-add-totp-option.webp
new file mode 100644
index 0000000..76db0f9
Binary files /dev/null and b/images/g008/datacenter-perms-tfa-add-totp-option.webp differ
diff --git a/images/g008/datacenter_permissions_two_factor_section_v7.2-z.png b/images/g008/datacenter_permissions_two_factor_section_v7.2-z.png
deleted file mode 100644
index 555ae93..0000000
Binary files a/images/g008/datacenter_permissions_two_factor_section_v7.2-z.png and /dev/null differ
diff --git a/images/g008/new_pve_platform_managers_group_on_web_console.png b/images/g008/new_pve_platform_managers_group_on_web_console.png
deleted file mode 100644
index 9499933..0000000
Binary files a/images/g008/new_pve_platform_managers_group_on_web_console.png and /dev/null differ
diff --git a/images/g008/new_pve_platform_managers_group_on_web_console.webp b/images/g008/new_pve_platform_managers_group_on_web_console.webp
new file mode 100644
index 0000000..869c765
Binary files /dev/null and b/images/g008/new_pve_platform_managers_group_on_web_console.webp differ
diff --git a/images/g008/new_user_on_user_management_screen.png b/images/g008/new_user_on_user_management_screen.png
deleted file mode 100644
index b3b6266..0000000
Binary files a/images/g008/new_user_on_user_management_screen.png and /dev/null differ
diff --git a/images/g008/new_user_on_user_management_screen.webp b/images/g008/new_user_on_user_management_screen.webp
new file mode 100644
index 0000000..b660906
Binary files /dev/null and b/images/g008/new_user_on_user_management_screen.webp differ
diff --git a/images/g008/new_user_on_user_management_screen_tfa_button.png b/images/g008/new_user_on_user_management_screen_tfa_button.png
deleted file mode 100644
index b3dac86..0000000
Binary files a/images/g008/new_user_on_user_management_screen_tfa_button.png and /dev/null differ
diff --git a/images/g008/new_user_totp_screen.png b/images/g008/new_user_totp_screen.png
deleted file mode 100644
index c1bf891..0000000
Binary files a/images/g008/new_user_totp_screen.png and /dev/null differ
diff --git a/images/g008/new_user_totp_screen.webp b/images/g008/new_user_totp_screen.webp
new file mode 100644
index 0000000..718c224
Binary files /dev/null and b/images/g008/new_user_totp_screen.webp differ
diff --git a/images/g008/pve_new_user_login_web_console.png b/images/g008/pve_new_user_login_web_console.png
deleted file mode 100644
index c94a13d..0000000
Binary files a/images/g008/pve_new_user_login_web_console.png and /dev/null differ
diff --git a/images/g008/pve_new_user_login_web_console.webp b/images/g008/pve_new_user_login_web_console.webp
new file mode 100644
index 0000000..47b8c99
Binary files /dev/null and b/images/g008/pve_new_user_login_web_console.webp differ
diff --git a/images/g008/pve_user_management_screen.png b/images/g008/pve_user_management_screen.png
deleted file mode 100644
index 1e1138f..0000000
Binary files a/images/g008/pve_user_management_screen.png and /dev/null differ
diff --git a/images/g008/pve_user_management_screen.webp b/images/g008/pve_user_management_screen.webp
new file mode 100644
index 0000000..4bdb58c
Binary files /dev/null and b/images/g008/pve_user_management_screen.webp differ
diff --git a/images/g011/htop_command.png b/images/g011/htop_command.png
deleted file mode 100644
index 0d67da1..0000000
Binary files a/images/g011/htop_command.png and /dev/null differ
diff --git a/images/g011/htop_command.webp b/images/g011/htop_command.webp
new file mode 100644
index 0000000..72214ed
Binary files /dev/null and b/images/g011/htop_command.webp differ
diff --git a/images/g011/pve_web_console_shell_options.png b/images/g011/pve_web_console_shell_options.png
deleted file mode 100644
index 5696df0..0000000
Binary files a/images/g011/pve_web_console_shell_options.png and /dev/null differ
diff --git a/images/g011/pve_web_console_shell_options.webp b/images/g011/pve_web_console_shell_options.webp
new file mode 100644
index 0000000..972a61d
Binary files /dev/null and b/images/g011/pve_web_console_shell_options.webp differ
diff --git a/images/g011/pve_web_console_system_services_view.png b/images/g011/pve_web_console_system_services_view.png
deleted file mode 100644
index 0328b71..0000000
Binary files a/images/g011/pve_web_console_system_services_view.png and /dev/null differ
diff --git a/images/g011/pve_web_console_system_services_view.webp b/images/g011/pve_web_console_system_services_view.webp
new file mode 100644
index 0000000..ad88641
Binary files /dev/null and b/images/g011/pve_web_console_system_services_view.webp differ
diff --git a/images/g012/pve_firewall_options_sysctl_parameters.png b/images/g012/pve_firewall_options_sysctl_parameters.png
deleted file mode 100644
index 89f49b7..0000000
Binary files a/images/g012/pve_firewall_options_sysctl_parameters.png and /dev/null differ
diff --git a/images/g012/pve_firewall_options_sysctl_parameters.webp b/images/g012/pve_firewall_options_sysctl_parameters.webp
new file mode 100644
index 0000000..e5a3f12
Binary files /dev/null and b/images/g012/pve_firewall_options_sysctl_parameters.webp differ
diff --git a/images/g014/pve_firewall_disabled_datacenter_tier.png b/images/g014/pve_firewall_disabled_datacenter_tier.png
deleted file mode 100644
index 7b01fb7..0000000
Binary files a/images/g014/pve_firewall_disabled_datacenter_tier.png and /dev/null differ
diff --git a/images/g014/pve_firewall_disabled_datacenter_tier.webp b/images/g014/pve_firewall_disabled_datacenter_tier.webp
new file mode 100644
index 0000000..29789fc
Binary files /dev/null and b/images/g014/pve_firewall_disabled_datacenter_tier.webp differ
diff --git a/images/g014/pve_firewall_ebtables_option.png b/images/g014/pve_firewall_ebtables_option.png
deleted file mode 100644
index 1c5a959..0000000
Binary files a/images/g014/pve_firewall_ebtables_option.png and /dev/null differ
diff --git a/images/g014/pve_firewall_ebtables_option.webp b/images/g014/pve_firewall_ebtables_option.webp
new file mode 100644
index 0000000..f454145
Binary files /dev/null and b/images/g014/pve_firewall_ebtables_option.webp differ
diff --git a/images/g014/pve_firewall_enabled_datacenter_tier.png b/images/g014/pve_firewall_enabled_datacenter_tier.png
deleted file mode 100644
index 92a04aa..0000000
Binary files a/images/g014/pve_firewall_enabled_datacenter_tier.png and /dev/null differ
diff --git a/images/g014/pve_firewall_enabled_datacenter_tier.webp b/images/g014/pve_firewall_enabled_datacenter_tier.webp
new file mode 100644
index 0000000..52b0d70
Binary files /dev/null and b/images/g014/pve_firewall_enabled_datacenter_tier.webp differ
diff --git a/images/g014/pve_firewall_enabled_node_level.png b/images/g014/pve_firewall_enabled_node_level.png
deleted file mode 100644
index 5f1066e..0000000
Binary files a/images/g014/pve_firewall_enabled_node_level.png and /dev/null differ
diff --git a/images/g014/pve_firewall_enabled_node_level.webp b/images/g014/pve_firewall_enabled_node_level.webp
new file mode 100644
index 0000000..15ad3f0
Binary files /dev/null and b/images/g014/pve_firewall_enabled_node_level.webp differ
diff --git a/images/g014/pve_firewall_enabling_datacenter_tier.png b/images/g014/pve_firewall_enabling_datacenter_tier.png
deleted file mode 100644
index 8cdf3ea..0000000
Binary files a/images/g014/pve_firewall_enabling_datacenter_tier.png and /dev/null differ
diff --git a/images/g014/pve_firewall_enabling_datacenter_tier.webp b/images/g014/pve_firewall_enabling_datacenter_tier.webp
new file mode 100644
index 0000000..5873735
Binary files /dev/null and b/images/g014/pve_firewall_enabling_datacenter_tier.webp differ
diff --git a/images/g014/pve_firewall_enabling_datacenter_tier_checking_option.png b/images/g014/pve_firewall_enabling_datacenter_tier_checking_option.png
deleted file mode 100644
index d265af4..0000000
Binary files a/images/g014/pve_firewall_enabling_datacenter_tier_checking_option.png and /dev/null differ
diff --git a/images/g014/pve_firewall_enabling_datacenter_tier_checking_option.webp b/images/g014/pve_firewall_enabling_datacenter_tier_checking_option.webp
new file mode 100644
index 0000000..487c32c
Binary files /dev/null and b/images/g014/pve_firewall_enabling_datacenter_tier_checking_option.webp differ
diff --git a/images/g014/pve_firewall_log_level_values.png b/images/g014/pve_firewall_log_level_values.png
deleted file mode 100644
index f123ffe..0000000
Binary files a/images/g014/pve_firewall_log_level_values.png and /dev/null differ
diff --git a/images/g014/pve_firewall_log_level_values.webp b/images/g014/pve_firewall_log_level_values.webp
new file mode 100644
index 0000000..092f3c8
Binary files /dev/null and b/images/g014/pve_firewall_log_level_values.webp differ
diff --git a/images/g014/pve_firewall_no_rules_node_level.png b/images/g014/pve_firewall_no_rules_node_level.png
deleted file mode 100644
index 1ba18a0..0000000
Binary files a/images/g014/pve_firewall_no_rules_node_level.png and /dev/null differ
diff --git a/images/g014/pve_firewall_no_rules_node_level.webp b/images/g014/pve_firewall_no_rules_node_level.webp
new file mode 100644
index 0000000..f732fe0
Binary files /dev/null and b/images/g014/pve_firewall_no_rules_node_level.webp differ
diff --git a/images/g014/pve_firewall_node_level_options.png b/images/g014/pve_firewall_node_level_options.png
deleted file mode 100644
index 8e2a319..0000000
Binary files a/images/g014/pve_firewall_node_level_options.png and /dev/null differ
diff --git a/images/g014/pve_firewall_node_level_options.webp b/images/g014/pve_firewall_node_level_options.webp
new file mode 100644
index 0000000..3b040ea
Binary files /dev/null and b/images/g014/pve_firewall_node_level_options.webp differ
diff --git a/images/g014/pve_node_firewall_log_view.png b/images/g014/pve_node_firewall_log_view.png
deleted file mode 100644
index 7b275a0..0000000
Binary files a/images/g014/pve_node_firewall_log_view.png and /dev/null differ
diff --git a/images/g014/pve_node_firewall_log_view.webp b/images/g014/pve_node_firewall_log_view.webp
new file mode 100644
index 0000000..6ee0869
Binary files /dev/null and b/images/g014/pve_node_firewall_log_view.webp differ
diff --git a/images/g014/pve_node_system_network_bridge.png b/images/g014/pve_node_system_network_bridge.png
deleted file mode 100644
index 2da0bd8..0000000
Binary files a/images/g014/pve_node_system_network_bridge.png and /dev/null differ
diff --git a/images/g014/pve_node_system_network_bridge.webp b/images/g014/pve_node_system_network_bridge.webp
new file mode 100644
index 0000000..d13ba02
Binary files /dev/null and b/images/g014/pve_node_system_network_bridge.webp differ
diff --git a/images/g017/pve_node_system_network_create_linux_bridge.png b/images/g017/pve_node_system_network_create_linux_bridge.png
deleted file mode 100644
index 10caff8..0000000
Binary files a/images/g017/pve_node_system_network_create_linux_bridge.png and /dev/null differ
diff --git a/images/g017/pve_node_system_network_create_linux_bridge.webp b/images/g017/pve_node_system_network_create_linux_bridge.webp
new file mode 100644
index 0000000..1f3029d
Binary files /dev/null and b/images/g017/pve_node_system_network_create_linux_bridge.webp differ
diff --git a/images/g017/pve_node_system_network_create_options_list.png b/images/g017/pve_node_system_network_create_options_list.png
deleted file mode 100644
index baf35cc..0000000
Binary files a/images/g017/pve_node_system_network_create_options_list.png and /dev/null differ
diff --git a/images/g017/pve_node_system_network_create_options_list.webp b/images/g017/pve_node_system_network_create_options_list.webp
new file mode 100644
index 0000000..2b3a538
Binary files /dev/null and b/images/g017/pve_node_system_network_create_options_list.webp differ
diff --git a/images/g017/pve_node_system_network_initial_setup.png b/images/g017/pve_node_system_network_initial_setup.png
deleted file mode 100644
index bee5eaf..0000000
Binary files a/images/g017/pve_node_system_network_initial_setup.png and /dev/null differ
diff --git a/images/g017/pve_node_system_network_initial_setup.webp b/images/g017/pve_node_system_network_initial_setup.webp
new file mode 100644
index 0000000..f75f817
Binary files /dev/null and b/images/g017/pve_node_system_network_initial_setup.webp differ
diff --git a/images/g017/pve_node_system_network_linux_bridge_active.png b/images/g017/pve_node_system_network_linux_bridge_active.png
deleted file mode 100644
index a595bfc..0000000
Binary files a/images/g017/pve_node_system_network_linux_bridge_active.png and /dev/null differ
diff --git a/images/g017/pve_node_system_network_linux_bridge_active.webp b/images/g017/pve_node_system_network_linux_bridge_active.webp
new file mode 100644
index 0000000..c4b8d0c
Binary files /dev/null and b/images/g017/pve_node_system_network_linux_bridge_active.webp differ
diff --git a/images/g017/pve_node_system_network_linux_bridge_apply_config_confirm.png b/images/g017/pve_node_system_network_linux_bridge_apply_config_confirm.png
deleted file mode 100644
index 318a869..0000000
Binary files a/images/g017/pve_node_system_network_linux_bridge_apply_config_confirm.png and /dev/null differ
diff --git a/images/g017/pve_node_system_network_linux_bridge_apply_config_confirm.webp b/images/g017/pve_node_system_network_linux_bridge_apply_config_confirm.webp
new file mode 100644
index 0000000..64f028a
Binary files /dev/null and b/images/g017/pve_node_system_network_linux_bridge_apply_config_confirm.webp differ
diff --git a/images/g017/pve_node_system_network_linux_bridge_apply_config_progress.png b/images/g017/pve_node_system_network_linux_bridge_apply_config_progress.png
deleted file mode 100644
index 442e821..0000000
Binary files a/images/g017/pve_node_system_network_linux_bridge_apply_config_progress.png and /dev/null differ
diff --git a/images/g017/pve_node_system_network_linux_bridge_apply_config_progress.webp b/images/g017/pve_node_system_network_linux_bridge_apply_config_progress.webp
new file mode 100644
index 0000000..f15c280
Binary files /dev/null and b/images/g017/pve_node_system_network_linux_bridge_apply_config_progress.webp differ
diff --git a/images/g017/pve_node_system_network_linux_bridge_created_pending_changes.png b/images/g017/pve_node_system_network_linux_bridge_created_pending_changes.png
deleted file mode 100644
index 9323b57..0000000
Binary files a/images/g017/pve_node_system_network_linux_bridge_created_pending_changes.png and /dev/null differ
diff --git a/images/g017/pve_node_system_network_linux_bridge_created_pending_changes.webp b/images/g017/pve_node_system_network_linux_bridge_created_pending_changes.webp
new file mode 100644
index 0000000..81d5579
Binary files /dev/null and b/images/g017/pve_node_system_network_linux_bridge_created_pending_changes.webp differ
diff --git a/images/g019/pve_datacenter_storage_all_directories_added.png b/images/g019/pve_datacenter_storage_all_directories_added.png
deleted file mode 100644
index c8d2c7e..0000000
Binary files a/images/g019/pve_datacenter_storage_all_directories_added.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_all_directories_added.webp b/images/g019/pve_datacenter_storage_all_directories_added.webp
new file mode 100644
index 0000000..47f8bba
Binary files /dev/null and b/images/g019/pve_datacenter_storage_all_directories_added.webp differ
diff --git a/images/g019/pve_datacenter_storage_choosing_directory.webp b/images/g019/pve_datacenter_storage_choosing_directory.webp
new file mode 100644
index 0000000..8ef7c71
Binary files /dev/null and b/images/g019/pve_datacenter_storage_choosing_directory.webp differ
diff --git a/images/g019/pve_datacenter_storage_choosing_lvm_thin.png b/images/g019/pve_datacenter_storage_choosing_lvm_thin.png
deleted file mode 100644
index 568dc60..0000000
Binary files a/images/g019/pve_datacenter_storage_choosing_lvm_thin.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_choosing_lvm_thin.webp b/images/g019/pve_datacenter_storage_choosing_lvm_thin.webp
new file mode 100644
index 0000000..a49905e
Binary files /dev/null and b/images/g019/pve_datacenter_storage_choosing_lvm_thin.webp differ
diff --git a/images/g019/pve_datacenter_storage_choosing_storage.png b/images/g019/pve_datacenter_storage_choosing_storage.png
deleted file mode 100644
index 67115a7..0000000
Binary files a/images/g019/pve_datacenter_storage_choosing_storage.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_creating_lvm_thin.png b/images/g019/pve_datacenter_storage_creating_lvm_thin.png
deleted file mode 100644
index 50dc6cd..0000000
Binary files a/images/g019/pve_datacenter_storage_creating_lvm_thin.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_creating_lvm_thin.webp b/images/g019/pve_datacenter_storage_creating_lvm_thin.webp
new file mode 100644
index 0000000..e30fc47
Binary files /dev/null and b/images/g019/pve_datacenter_storage_creating_lvm_thin.webp differ
diff --git a/images/g019/pve_datacenter_storage_creating_lvm_thin_backup_retention_disabled.png b/images/g019/pve_datacenter_storage_creating_lvm_thin_backup_retention_disabled.png
deleted file mode 100644
index aa4a76c..0000000
Binary files a/images/g019/pve_datacenter_storage_creating_lvm_thin_backup_retention_disabled.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_creating_lvm_thin_backup_retention_disabled.webp b/images/g019/pve_datacenter_storage_creating_lvm_thin_backup_retention_disabled.webp
new file mode 100644
index 0000000..a3182aa
Binary files /dev/null and b/images/g019/pve_datacenter_storage_creating_lvm_thin_backup_retention_disabled.webp differ
diff --git a/images/g019/pve_datacenter_storage_creating_lvm_thin_form_filled.png b/images/g019/pve_datacenter_storage_creating_lvm_thin_form_filled.png
deleted file mode 100644
index 8834e31..0000000
Binary files a/images/g019/pve_datacenter_storage_creating_lvm_thin_form_filled.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_creating_lvm_thin_form_filled.webp b/images/g019/pve_datacenter_storage_creating_lvm_thin_form_filled.webp
new file mode 100644
index 0000000..7f7aeb5
Binary files /dev/null and b/images/g019/pve_datacenter_storage_creating_lvm_thin_form_filled.webp differ
diff --git a/images/g019/pve_datacenter_storage_creating_vzdumps_directory.png b/images/g019/pve_datacenter_storage_creating_vzdumps_directory.png
deleted file mode 100644
index 08aa7e2..0000000
Binary files a/images/g019/pve_datacenter_storage_creating_vzdumps_directory.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_creating_vzdumps_directory.webp b/images/g019/pve_datacenter_storage_creating_vzdumps_directory.webp
new file mode 100644
index 0000000..abe9e4b
Binary files /dev/null and b/images/g019/pve_datacenter_storage_creating_vzdumps_directory.webp differ
diff --git a/images/g019/pve_datacenter_storage_current_directory.png b/images/g019/pve_datacenter_storage_current_directory.png
deleted file mode 100644
index 3b1db4b..0000000
Binary files a/images/g019/pve_datacenter_storage_current_directory.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_current_directory.webp b/images/g019/pve_datacenter_storage_current_directory.webp
new file mode 100644
index 0000000..cd8809d
Binary files /dev/null and b/images/g019/pve_datacenter_storage_current_directory.webp differ
diff --git a/images/g019/pve_datacenter_storage_directory_creation_window.png b/images/g019/pve_datacenter_storage_directory_creation_window.png
deleted file mode 100644
index cd66d94..0000000
Binary files a/images/g019/pve_datacenter_storage_directory_creation_window.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_directory_creation_window.webp b/images/g019/pve_datacenter_storage_directory_creation_window.webp
new file mode 100644
index 0000000..726366a
Binary files /dev/null and b/images/g019/pve_datacenter_storage_directory_creation_window.webp differ
diff --git a/images/g019/pve_datacenter_storage_directory_creation_window_advanced.png b/images/g019/pve_datacenter_storage_directory_creation_window_advanced.png
deleted file mode 100644
index 4731d1a..0000000
Binary files a/images/g019/pve_datacenter_storage_directory_creation_window_advanced.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_directory_creation_window_advanced.webp b/images/g019/pve_datacenter_storage_directory_creation_window_advanced.webp
new file mode 100644
index 0000000..34f5e58
Binary files /dev/null and b/images/g019/pve_datacenter_storage_directory_creation_window_advanced.webp differ
diff --git a/images/g019/pve_datacenter_storage_directory_creation_window_backup_retention.png b/images/g019/pve_datacenter_storage_directory_creation_window_backup_retention.png
deleted file mode 100644
index b8821f2..0000000
Binary files a/images/g019/pve_datacenter_storage_directory_creation_window_backup_retention.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_directory_creation_window_backup_retention.webp b/images/g019/pve_datacenter_storage_directory_creation_window_backup_retention.webp
new file mode 100644
index 0000000..85a1e21
Binary files /dev/null and b/images/g019/pve_datacenter_storage_directory_creation_window_backup_retention.webp differ
diff --git a/images/g019/pve_datacenter_storage_local_directory_disabled.png b/images/g019/pve_datacenter_storage_local_directory_disabled.png
deleted file mode 100644
index 1c55e07..0000000
Binary files a/images/g019/pve_datacenter_storage_local_directory_disabled.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_local_directory_disabled.webp b/images/g019/pve_datacenter_storage_local_directory_disabled.webp
new file mode 100644
index 0000000..ebcf0d2
Binary files /dev/null and b/images/g019/pve_datacenter_storage_local_directory_disabled.webp differ
diff --git a/images/g019/pve_datacenter_storage_local_directory_edit.png b/images/g019/pve_datacenter_storage_local_directory_edit.png
deleted file mode 100644
index a1a029e..0000000
Binary files a/images/g019/pve_datacenter_storage_local_directory_edit.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_local_directory_edit.webp b/images/g019/pve_datacenter_storage_local_directory_edit.webp
new file mode 100644
index 0000000..b955714
Binary files /dev/null and b/images/g019/pve_datacenter_storage_local_directory_edit.webp differ
diff --git a/images/g019/pve_datacenter_storage_local_directory_edit_disabling.png b/images/g019/pve_datacenter_storage_local_directory_edit_disabling.png
deleted file mode 100644
index 4616ea1..0000000
Binary files a/images/g019/pve_datacenter_storage_local_directory_edit_disabling.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_local_directory_edit_disabling.webp b/images/g019/pve_datacenter_storage_local_directory_edit_disabling.webp
new file mode 100644
index 0000000..ef1a7f1
Binary files /dev/null and b/images/g019/pve_datacenter_storage_local_directory_edit_disabling.webp differ
diff --git a/images/g019/pve_datacenter_storage_lvm_thin_added.png b/images/g019/pve_datacenter_storage_lvm_thin_added.png
deleted file mode 100644
index 58e66e8..0000000
Binary files a/images/g019/pve_datacenter_storage_lvm_thin_added.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_lvm_thin_added.webp b/images/g019/pve_datacenter_storage_lvm_thin_added.webp
new file mode 100644
index 0000000..a99ede5
Binary files /dev/null and b/images/g019/pve_datacenter_storage_lvm_thin_added.webp differ
diff --git a/images/g019/pve_datacenter_storage_vzdumps_directory_added.png b/images/g019/pve_datacenter_storage_vzdumps_directory_added.png
deleted file mode 100644
index a74aed0..0000000
Binary files a/images/g019/pve_datacenter_storage_vzdumps_directory_added.png and /dev/null differ
diff --git a/images/g019/pve_datacenter_storage_vzdumps_directory_added.webp b/images/g019/pve_datacenter_storage_vzdumps_directory_added.webp
new file mode 100644
index 0000000..04539bf
Binary files /dev/null and b/images/g019/pve_datacenter_storage_vzdumps_directory_added.webp differ
diff --git a/images/g019/pve_node_disks_new_lvm_thin_volumes.png b/images/g019/pve_node_disks_new_lvm_thin_volumes.png
deleted file mode 100644
index 1a5fb25..0000000
Binary files a/images/g019/pve_node_disks_new_lvm_thin_volumes.png and /dev/null differ
diff --git a/images/g019/pve_node_disks_new_lvm_thin_volumes.webp b/images/g019/pve_node_disks_new_lvm_thin_volumes.webp
new file mode 100644
index 0000000..0fc6dd8
Binary files /dev/null and b/images/g019/pve_node_disks_new_lvm_thin_volumes.webp differ
diff --git a/images/g020/debian_netinstall_iso_download.png b/images/g020/debian_netinstall_iso_download.png
deleted file mode 100644
index a17ee11..0000000
Binary files a/images/g020/debian_netinstall_iso_download.png and /dev/null differ
diff --git a/images/g020/debian_netinstall_iso_download.webp b/images/g020/debian_netinstall_iso_download.webp
new file mode 100644
index 0000000..86d2e4f
Binary files /dev/null and b/images/g020/debian_netinstall_iso_download.webp differ
diff --git a/images/g020/debian_vm_adding_network_device.png b/images/g020/debian_vm_adding_network_device.png
deleted file mode 100644
index 79a00b0..0000000
Binary files a/images/g020/debian_vm_adding_network_device.png and /dev/null differ
diff --git a/images/g020/debian_vm_adding_network_device.webp b/images/g020/debian_vm_adding_network_device.webp
new file mode 100644
index 0000000..e1caa80
Binary files /dev/null and b/images/g020/debian_vm_adding_network_device.webp differ
diff --git a/images/g020/debian_vm_adding_network_device_changing_bridge.png b/images/g020/debian_vm_adding_network_device_changing_bridge.png
deleted file mode 100644
index 4a3d539..0000000
Binary files a/images/g020/debian_vm_adding_network_device_changing_bridge.png and /dev/null differ
diff --git a/images/g020/debian_vm_adding_network_device_changing_bridge.webp b/images/g020/debian_vm_adding_network_device_changing_bridge.webp
new file mode 100644
index 0000000..d2badd0
Binary files /dev/null and b/images/g020/debian_vm_adding_network_device_changing_bridge.webp differ
diff --git a/images/g020/debian_vm_adding_network_device_choose_network_device.png b/images/g020/debian_vm_adding_network_device_choose_network_device.png
deleted file mode 100644
index b69f4df..0000000
Binary files a/images/g020/debian_vm_adding_network_device_choose_network_device.png and /dev/null differ
diff --git a/images/g020/debian_vm_adding_network_device_choose_network_device.webp b/images/g020/debian_vm_adding_network_device_choose_network_device.webp
new file mode 100644
index 0000000..83d5f2d
Binary files /dev/null and b/images/g020/debian_vm_adding_network_device_choose_network_device.webp differ
diff --git a/images/g020/debian_vm_adding_network_device_hardware_tab.png b/images/g020/debian_vm_adding_network_device_hardware_tab.png
deleted file mode 100644
index a1faa1d..0000000
Binary files a/images/g020/debian_vm_adding_network_device_hardware_tab.png and /dev/null differ
diff --git a/images/g020/debian_vm_adding_network_device_hardware_tab.webp b/images/g020/debian_vm_adding_network_device_hardware_tab.webp
new file mode 100644
index 0000000..0594fb2
Binary files /dev/null and b/images/g020/debian_vm_adding_network_device_hardware_tab.webp differ
diff --git a/images/g020/debian_vm_adding_network_device_hardware_updated.png b/images/g020/debian_vm_adding_network_device_hardware_updated.png
deleted file mode 100644
index 8464533..0000000
Binary files a/images/g020/debian_vm_adding_network_device_hardware_updated.png and /dev/null differ
diff --git a/images/g020/debian_vm_adding_network_device_hardware_updated.webp b/images/g020/debian_vm_adding_network_device_hardware_updated.webp
new file mode 100644
index 0000000..fffcfed
Binary files /dev/null and b/images/g020/debian_vm_adding_network_device_hardware_updated.webp differ
diff --git a/images/g020/debian_vm_create_vm_button.png b/images/g020/debian_vm_create_vm_button.png
deleted file mode 100644
index 6590239..0000000
Binary files a/images/g020/debian_vm_create_vm_button.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_button.webp b/images/g020/debian_vm_create_vm_button.webp
new file mode 100644
index 0000000..c13ce47
Binary files /dev/null and b/images/g020/debian_vm_create_vm_button.webp differ
diff --git a/images/g020/debian_vm_create_vm_confirm.png b/images/g020/debian_vm_create_vm_confirm.png
deleted file mode 100644
index 4c20a73..0000000
Binary files a/images/g020/debian_vm_create_vm_confirm.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_confirm.webp b/images/g020/debian_vm_create_vm_confirm.webp
new file mode 100644
index 0000000..4ffa4c8
Binary files /dev/null and b/images/g020/debian_vm_create_vm_confirm.webp differ
diff --git a/images/g020/debian_vm_create_vm_cpu_filled.png b/images/g020/debian_vm_create_vm_cpu_filled.png
deleted file mode 100644
index a41833c..0000000
Binary files a/images/g020/debian_vm_create_vm_cpu_filled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_cpu_filled.webp b/images/g020/debian_vm_create_vm_cpu_filled.webp
new file mode 100644
index 0000000..2bf5b0e
Binary files /dev/null and b/images/g020/debian_vm_create_vm_cpu_filled.webp differ
diff --git a/images/g020/debian_vm_create_vm_cpu_unfilled.png b/images/g020/debian_vm_create_vm_cpu_unfilled.png
deleted file mode 100644
index cfd2abf..0000000
Binary files a/images/g020/debian_vm_create_vm_cpu_unfilled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_cpu_unfilled.webp b/images/g020/debian_vm_create_vm_cpu_unfilled.webp
new file mode 100644
index 0000000..7174089
Binary files /dev/null and b/images/g020/debian_vm_create_vm_cpu_unfilled.webp differ
diff --git a/images/g020/debian_vm_create_vm_disk_filled.png b/images/g020/debian_vm_create_vm_disk_filled.png
deleted file mode 100644
index b11907e..0000000
Binary files a/images/g020/debian_vm_create_vm_disk_filled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_disk_filled.webp b/images/g020/debian_vm_create_vm_disk_filled.webp
new file mode 100644
index 0000000..78260bb
Binary files /dev/null and b/images/g020/debian_vm_create_vm_disk_filled.webp differ
diff --git a/images/g020/debian_vm_create_vm_disk_unfilled.png b/images/g020/debian_vm_create_vm_disk_unfilled.png
deleted file mode 100644
index 207960a..0000000
Binary files a/images/g020/debian_vm_create_vm_disk_unfilled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_disk_unfilled.webp b/images/g020/debian_vm_create_vm_disk_unfilled.webp
new file mode 100644
index 0000000..f67d4f1
Binary files /dev/null and b/images/g020/debian_vm_create_vm_disk_unfilled.webp differ
diff --git a/images/g020/debian_vm_create_vm_disk_unfilled_advanced.png b/images/g020/debian_vm_create_vm_disk_unfilled_advanced.png
deleted file mode 100644
index bdb1196..0000000
Binary files a/images/g020/debian_vm_create_vm_disk_unfilled_advanced.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_disk_unfilled_advanced.webp b/images/g020/debian_vm_create_vm_disk_unfilled_advanced.webp
new file mode 100644
index 0000000..6258569
Binary files /dev/null and b/images/g020/debian_vm_create_vm_disk_unfilled_advanced.webp differ
diff --git a/images/g020/debian_vm_create_vm_general_filled.png b/images/g020/debian_vm_create_vm_general_filled.png
deleted file mode 100644
index 2ddee5a..0000000
Binary files a/images/g020/debian_vm_create_vm_general_filled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_general_filled.webp b/images/g020/debian_vm_create_vm_general_filled.webp
new file mode 100644
index 0000000..96b145b
Binary files /dev/null and b/images/g020/debian_vm_create_vm_general_filled.webp differ
diff --git a/images/g020/debian_vm_create_vm_general_unfilled.png b/images/g020/debian_vm_create_vm_general_unfilled.png
deleted file mode 100644
index 38f4c11..0000000
Binary files a/images/g020/debian_vm_create_vm_general_unfilled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_general_unfilled.webp b/images/g020/debian_vm_create_vm_general_unfilled.webp
new file mode 100644
index 0000000..e4b3fd7
Binary files /dev/null and b/images/g020/debian_vm_create_vm_general_unfilled.webp differ
diff --git a/images/g020/debian_vm_create_vm_memory_filled.png b/images/g020/debian_vm_create_vm_memory_filled.png
deleted file mode 100644
index e94ae8a..0000000
Binary files a/images/g020/debian_vm_create_vm_memory_filled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_memory_filled.webp b/images/g020/debian_vm_create_vm_memory_filled.webp
new file mode 100644
index 0000000..2fa7266
Binary files /dev/null and b/images/g020/debian_vm_create_vm_memory_filled.webp differ
diff --git a/images/g020/debian_vm_create_vm_memory_unfilled.png b/images/g020/debian_vm_create_vm_memory_unfilled.png
deleted file mode 100644
index e8006c8..0000000
Binary files a/images/g020/debian_vm_create_vm_memory_unfilled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_memory_unfilled.webp b/images/g020/debian_vm_create_vm_memory_unfilled.webp
new file mode 100644
index 0000000..a8eb6d2
Binary files /dev/null and b/images/g020/debian_vm_create_vm_memory_unfilled.webp differ
diff --git a/images/g020/debian_vm_create_vm_network_unfilled.png b/images/g020/debian_vm_create_vm_network_unfilled.png
deleted file mode 100644
index 3e369b1..0000000
Binary files a/images/g020/debian_vm_create_vm_network_unfilled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_network_unfilled.webp b/images/g020/debian_vm_create_vm_network_unfilled.webp
new file mode 100644
index 0000000..dc93e1b
Binary files /dev/null and b/images/g020/debian_vm_create_vm_network_unfilled.webp differ
diff --git a/images/g020/debian_vm_create_vm_os_filled.png b/images/g020/debian_vm_create_vm_os_filled.png
deleted file mode 100644
index 85b33ec..0000000
Binary files a/images/g020/debian_vm_create_vm_os_filled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_os_filled.webp b/images/g020/debian_vm_create_vm_os_filled.webp
new file mode 100644
index 0000000..6ae8379
Binary files /dev/null and b/images/g020/debian_vm_create_vm_os_filled.webp differ
diff --git a/images/g020/debian_vm_create_vm_os_unfilled.png b/images/g020/debian_vm_create_vm_os_unfilled.png
deleted file mode 100644
index 63303ba..0000000
Binary files a/images/g020/debian_vm_create_vm_os_unfilled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_os_unfilled.webp b/images/g020/debian_vm_create_vm_os_unfilled.webp
new file mode 100644
index 0000000..0cc5beb
Binary files /dev/null and b/images/g020/debian_vm_create_vm_os_unfilled.webp differ
diff --git a/images/g020/debian_vm_create_vm_system_filled.png b/images/g020/debian_vm_create_vm_system_filled.png
deleted file mode 100644
index b8c1e3c..0000000
Binary files a/images/g020/debian_vm_create_vm_system_filled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_system_filled.webp b/images/g020/debian_vm_create_vm_system_filled.webp
new file mode 100644
index 0000000..87e012b
Binary files /dev/null and b/images/g020/debian_vm_create_vm_system_filled.webp differ
diff --git a/images/g020/debian_vm_create_vm_system_unfilled.png b/images/g020/debian_vm_create_vm_system_unfilled.png
deleted file mode 100644
index 10e9072..0000000
Binary files a/images/g020/debian_vm_create_vm_system_unfilled.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_system_unfilled.webp b/images/g020/debian_vm_create_vm_system_unfilled.webp
new file mode 100644
index 0000000..b645f3d
Binary files /dev/null and b/images/g020/debian_vm_create_vm_system_unfilled.webp differ
diff --git a/images/g020/debian_vm_create_vm_task_done.png b/images/g020/debian_vm_create_vm_task_done.png
deleted file mode 100644
index 6e6d335..0000000
Binary files a/images/g020/debian_vm_create_vm_task_done.png and /dev/null differ
diff --git a/images/g020/debian_vm_create_vm_task_done.webp b/images/g020/debian_vm_create_vm_task_done.webp
new file mode 100644
index 0000000..719a1e2
Binary files /dev/null and b/images/g020/debian_vm_create_vm_task_done.webp differ
diff --git a/images/g020/debian_vm_install_os_debian_login.png b/images/g020/debian_vm_install_os_debian_login.png
deleted file mode 100644
index b3163e9..0000000
Binary files a/images/g020/debian_vm_install_os_debian_login.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_debian_login.webp b/images/g020/debian_vm_install_os_debian_login.webp
new file mode 100644
index 0000000..614e82f
Binary files /dev/null and b/images/g020/debian_vm_install_os_debian_login.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_additional_media.png b/images/g020/debian_vm_install_os_installer_additional_media.png
deleted file mode 100644
index ce884a0..0000000
Binary files a/images/g020/debian_vm_install_os_installer_additional_media.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_additional_media.webp b/images/g020/debian_vm_install_os_installer_additional_media.webp
new file mode 100644
index 0000000..479b463
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_additional_media.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_base_system_installation.png b/images/g020/debian_vm_install_os_installer_base_system_installation.png
deleted file mode 100644
index 5c2b981..0000000
Binary files a/images/g020/debian_vm_install_os_installer_base_system_installation.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_base_system_installation.webp b/images/g020/debian_vm_install_os_installer_base_system_installation.webp
new file mode 100644
index 0000000..0ad59b9
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_base_system_installation.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_choosing_language.png b/images/g020/debian_vm_install_os_installer_choosing_language.png
deleted file mode 100644
index 65046db..0000000
Binary files a/images/g020/debian_vm_install_os_installer_choosing_language.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_choosing_language.webp b/images/g020/debian_vm_install_os_installer_choosing_language.webp
new file mode 100644
index 0000000..56eadda
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_choosing_language.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_choosing_locales.png b/images/g020/debian_vm_install_os_installer_choosing_locales.png
deleted file mode 100644
index adb7247..0000000
Binary files a/images/g020/debian_vm_install_os_installer_choosing_locales.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_choosing_locales.webp b/images/g020/debian_vm_install_os_installer_choosing_locales.webp
new file mode 100644
index 0000000..22fb7f3
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_choosing_locales.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_choosing_location.png b/images/g020/debian_vm_install_os_installer_choosing_location.png
deleted file mode 100644
index 67f4430..0000000
Binary files a/images/g020/debian_vm_install_os_installer_choosing_location.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_choosing_location.webp b/images/g020/debian_vm_install_os_installer_choosing_location.webp
new file mode 100644
index 0000000..910d2ba
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_choosing_location.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_choosing_timezone.png b/images/g020/debian_vm_install_os_installer_choosing_timezone.png
deleted file mode 100644
index 4107eb3..0000000
Binary files a/images/g020/debian_vm_install_os_installer_choosing_timezone.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_choosing_timezone.webp b/images/g020/debian_vm_install_os_installer_choosing_timezone.webp
new file mode 100644
index 0000000..971b751
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_choosing_timezone.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_configuring_keyboard.png b/images/g020/debian_vm_install_os_installer_configuring_keyboard.png
deleted file mode 100644
index a885fad..0000000
Binary files a/images/g020/debian_vm_install_os_installer_configuring_keyboard.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_configuring_keyboard.webp b/images/g020/debian_vm_install_os_installer_configuring_keyboard.webp
new file mode 100644
index 0000000..f51f975
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_configuring_keyboard.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_configuring_network_choosing_network_device.png b/images/g020/debian_vm_install_os_installer_configuring_network_choosing_network_device.png
deleted file mode 100644
index 411c67d..0000000
Binary files a/images/g020/debian_vm_install_os_installer_configuring_network_choosing_network_device.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_configuring_network_choosing_network_device.webp b/images/g020/debian_vm_install_os_installer_configuring_network_choosing_network_device.webp
new file mode 100644
index 0000000..a28b87f
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_configuring_network_choosing_network_device.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_configuring_network_with_dhcp.png b/images/g020/debian_vm_install_os_installer_configuring_network_with_dhcp.png
deleted file mode 100644
index cb166c9..0000000
Binary files a/images/g020/debian_vm_install_os_installer_configuring_network_with_dhcp.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_configuring_network_with_dhcp.webp b/images/g020/debian_vm_install_os_installer_configuring_network_with_dhcp.webp
new file mode 100644
index 0000000..37db670
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_configuring_network_with_dhcp.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_full_name.png b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_full_name.png
deleted file mode 100644
index 35f97f9..0000000
Binary files a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_full_name.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_full_name.webp b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_full_name.webp
new file mode 100644
index 0000000..5eb1b67
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_full_name.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password.png b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password.png
deleted file mode 100644
index 9fac850..0000000
Binary files a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password.webp b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password.webp
new file mode 100644
index 0000000..269d81f
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password_confirmation.png b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password_confirmation.png
deleted file mode 100644
index 25ffb66..0000000
Binary files a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password_confirmation.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password_confirmation.webp b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password_confirmation.webp
new file mode 100644
index 0000000..4282a0a
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_password_confirmation.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_username.png b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_username.png
deleted file mode 100644
index b369c74..0000000
Binary files a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_username.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_username.webp b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_username.webp
new file mode 100644
index 0000000..90b26e2
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_creating_admin_user_setting_username.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning.png b/images/g020/debian_vm_install_os_installer_disk_partitioning.png
deleted file mode 100644
index 6682fdb..0000000
Binary files a/images/g020/debian_vm_install_os_installer_disk_partitioning.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning.webp b/images/g020/debian_vm_install_os_installer_disk_partitioning.webp
new file mode 100644
index 0000000..c9417dd
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_disk_partitioning.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_choose_drive.png b/images/g020/debian_vm_install_os_installer_disk_partitioning_choose_drive.png
deleted file mode 100644
index faa111c..0000000
Binary files a/images/g020/debian_vm_install_os_installer_disk_partitioning_choose_drive.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_choose_drive.webp b/images/g020/debian_vm_install_os_installer_disk_partitioning_choose_drive.webp
new file mode 100644
index 0000000..9b987b3
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_disk_partitioning_choose_drive.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_confirmation.png b/images/g020/debian_vm_install_os_installer_disk_partitioning_confirmation.png
deleted file mode 100644
index edec01d..0000000
Binary files a/images/g020/debian_vm_install_os_installer_disk_partitioning_confirmation.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_confirmation.webp b/images/g020/debian_vm_install_os_installer_disk_partitioning_confirmation.webp
new file mode 100644
index 0000000..0c08b97
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_disk_partitioning_confirmation.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_final_confirmation.png b/images/g020/debian_vm_install_os_installer_disk_partitioning_final_confirmation.png
deleted file mode 100644
index 6857b2b..0000000
Binary files a/images/g020/debian_vm_install_os_installer_disk_partitioning_final_confirmation.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_final_confirmation.webp b/images/g020/debian_vm_install_os_installer_disk_partitioning_final_confirmation.webp
new file mode 100644
index 0000000..9045c4b
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_disk_partitioning_final_confirmation.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_home_setup.png b/images/g020/debian_vm_install_os_installer_disk_partitioning_home_setup.png
deleted file mode 100644
index 2c71641..0000000
Binary files a/images/g020/debian_vm_install_os_installer_disk_partitioning_home_setup.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_lvm_group_size.png b/images/g020/debian_vm_install_os_installer_disk_partitioning_lvm_group_size.png
deleted file mode 100644
index 1f2386d..0000000
Binary files a/images/g020/debian_vm_install_os_installer_disk_partitioning_lvm_group_size.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_lvm_group_size.webp b/images/g020/debian_vm_install_os_installer_disk_partitioning_lvm_group_size.webp
new file mode 100644
index 0000000..c28ebc9
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_disk_partitioning_lvm_group_size.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_schema_setup.webp b/images/g020/debian_vm_install_os_installer_disk_partitioning_schema_setup.webp
new file mode 100644
index 0000000..05bd6e9
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_disk_partitioning_schema_setup.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_second_option_chosen.png b/images/g020/debian_vm_install_os_installer_disk_partitioning_second_option_chosen.png
deleted file mode 100644
index 46c1e15..0000000
Binary files a/images/g020/debian_vm_install_os_installer_disk_partitioning_second_option_chosen.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_disk_partitioning_second_option_chosen.webp b/images/g020/debian_vm_install_os_installer_disk_partitioning_second_option_chosen.webp
new file mode 100644
index 0000000..ead2ce5
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_disk_partitioning_second_option_chosen.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_domain_name_input.png b/images/g020/debian_vm_install_os_installer_domain_name_input.png
deleted file mode 100644
index 0d01f79..0000000
Binary files a/images/g020/debian_vm_install_os_installer_domain_name_input.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_domain_name_input.webp b/images/g020/debian_vm_install_os_installer_domain_name_input.webp
new file mode 100644
index 0000000..a59e86e
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_domain_name_input.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_executing_installation.png b/images/g020/debian_vm_install_os_installer_executing_installation.png
deleted file mode 100644
index 26f5f6d..0000000
Binary files a/images/g020/debian_vm_install_os_installer_executing_installation.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_executing_installation.webp b/images/g020/debian_vm_install_os_installer_executing_installation.webp
new file mode 100644
index 0000000..b7f911e
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_executing_installation.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_finishing_installation.png b/images/g020/debian_vm_install_os_installer_finishing_installation.png
deleted file mode 100644
index 6e40ccc..0000000
Binary files a/images/g020/debian_vm_install_os_installer_finishing_installation.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_finishing_installation.webp b/images/g020/debian_vm_install_os_installer_finishing_installation.webp
new file mode 100644
index 0000000..465aedf
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_finishing_installation.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_grub_boot_loader.png b/images/g020/debian_vm_install_os_installer_grub_boot_loader.png
deleted file mode 100644
index fabadc3..0000000
Binary files a/images/g020/debian_vm_install_os_installer_grub_boot_loader.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_grub_boot_loader.webp b/images/g020/debian_vm_install_os_installer_grub_boot_loader.webp
new file mode 100644
index 0000000..8142f43
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_grub_boot_loader.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk.png b/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk.png
deleted file mode 100644
index 2be3fab..0000000
Binary files a/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk.webp b/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk.webp
new file mode 100644
index 0000000..b878a77
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk_sda_chosen.png b/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk_sda_chosen.png
deleted file mode 100644
index 8a089bc..0000000
Binary files a/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk_sda_chosen.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk_sda_chosen.webp b/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk_sda_chosen.webp
new file mode 100644
index 0000000..5f24d86
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_grub_boot_loader_disk_sda_chosen.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_hostname_input.png b/images/g020/debian_vm_install_os_installer_hostname_input.png
deleted file mode 100644
index d605947..0000000
Binary files a/images/g020/debian_vm_install_os_installer_hostname_input.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_hostname_input.webp b/images/g020/debian_vm_install_os_installer_hostname_input.webp
new file mode 100644
index 0000000..507a636
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_hostname_input.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_install_option.png b/images/g020/debian_vm_install_os_installer_install_option.png
deleted file mode 100644
index acc3487..0000000
Binary files a/images/g020/debian_vm_install_os_installer_install_option.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_install_option.webp b/images/g020/debian_vm_install_os_installer_install_option.webp
new file mode 100644
index 0000000..e323a11
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_install_option.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_loading_components.png b/images/g020/debian_vm_install_os_installer_loading_components.png
deleted file mode 100644
index ba461cc..0000000
Binary files a/images/g020/debian_vm_install_os_installer_loading_components.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_loading_components.webp b/images/g020/debian_vm_install_os_installer_loading_components.webp
new file mode 100644
index 0000000..abe3d19
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_loading_components.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_menu.png b/images/g020/debian_vm_install_os_installer_menu.png
deleted file mode 100644
index ad301f9..0000000
Binary files a/images/g020/debian_vm_install_os_installer_menu.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_menu.webp b/images/g020/debian_vm_install_os_installer_menu.webp
new file mode 100644
index 0000000..64b055b
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_menu.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_autoconfiguring_apt.png b/images/g020/debian_vm_install_os_installer_package_manager_autoconfiguring_apt.png
deleted file mode 100644
index 398b1e2..0000000
Binary files a/images/g020/debian_vm_install_os_installer_package_manager_autoconfiguring_apt.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_autoconfiguring_apt.webp b/images/g020/debian_vm_install_os_installer_package_manager_autoconfiguring_apt.webp
new file mode 100644
index 0000000..833b381
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_package_manager_autoconfiguring_apt.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_choosing_mirror_server.png b/images/g020/debian_vm_install_os_installer_package_manager_choosing_mirror_server.png
deleted file mode 100644
index 71086ae..0000000
Binary files a/images/g020/debian_vm_install_os_installer_package_manager_choosing_mirror_server.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_choosing_mirror_server.webp b/images/g020/debian_vm_install_os_installer_package_manager_choosing_mirror_server.webp
new file mode 100644
index 0000000..354d9cd
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_package_manager_choosing_mirror_server.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_mirrors_location.png b/images/g020/debian_vm_install_os_installer_package_manager_mirrors_location.png
deleted file mode 100644
index 48e2459..0000000
Binary files a/images/g020/debian_vm_install_os_installer_package_manager_mirrors_location.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_mirrors_location.webp b/images/g020/debian_vm_install_os_installer_package_manager_mirrors_location.webp
new file mode 100644
index 0000000..19475ca
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_package_manager_mirrors_location.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_popularity_contest.png b/images/g020/debian_vm_install_os_installer_package_manager_popularity_contest.png
deleted file mode 100644
index cf28f08..0000000
Binary files a/images/g020/debian_vm_install_os_installer_package_manager_popularity_contest.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_popularity_contest.webp b/images/g020/debian_vm_install_os_installer_package_manager_popularity_contest.webp
new file mode 100644
index 0000000..37d3a7d
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_package_manager_popularity_contest.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_proxy.png b/images/g020/debian_vm_install_os_installer_package_manager_proxy.png
deleted file mode 100644
index 83fe4e6..0000000
Binary files a/images/g020/debian_vm_install_os_installer_package_manager_proxy.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_package_manager_proxy.webp b/images/g020/debian_vm_install_os_installer_package_manager_proxy.webp
new file mode 100644
index 0000000..30e64b9
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_package_manager_proxy.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_remove_media.png b/images/g020/debian_vm_install_os_installer_remove_media.png
deleted file mode 100644
index f0478eb..0000000
Binary files a/images/g020/debian_vm_install_os_installer_remove_media.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_remove_media.webp b/images/g020/debian_vm_install_os_installer_remove_media.webp
new file mode 100644
index 0000000..651c362
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_remove_media.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_setting_root_password.png b/images/g020/debian_vm_install_os_installer_setting_root_password.png
deleted file mode 100644
index e90a1c8..0000000
Binary files a/images/g020/debian_vm_install_os_installer_setting_root_password.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_setting_root_password.webp b/images/g020/debian_vm_install_os_installer_setting_root_password.webp
new file mode 100644
index 0000000..acee85f
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_setting_root_password.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_setting_root_password_confirmation.png b/images/g020/debian_vm_install_os_installer_setting_root_password_confirmation.png
deleted file mode 100644
index ba5d708..0000000
Binary files a/images/g020/debian_vm_install_os_installer_setting_root_password_confirmation.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_setting_root_password_confirmation.webp b/images/g020/debian_vm_install_os_installer_setting_root_password_confirmation.webp
new file mode 100644
index 0000000..3dfc13f
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_setting_root_password_confirmation.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_software_selection.png b/images/g020/debian_vm_install_os_installer_software_selection.png
deleted file mode 100644
index 910b260..0000000
Binary files a/images/g020/debian_vm_install_os_installer_software_selection.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_software_selection.webp b/images/g020/debian_vm_install_os_installer_software_selection.webp
new file mode 100644
index 0000000..077d97f
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_software_selection.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_software_selection_changed.png b/images/g020/debian_vm_install_os_installer_software_selection_changed.png
deleted file mode 100644
index 334b54a..0000000
Binary files a/images/g020/debian_vm_install_os_installer_software_selection_changed.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_installer_software_selection_changed.webp b/images/g020/debian_vm_install_os_installer_software_selection_changed.webp
new file mode 100644
index 0000000..0baf6d1
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_software_selection_changed.webp differ
diff --git a/images/g020/debian_vm_install_os_installer_time_server.webp b/images/g020/debian_vm_install_os_installer_time_server.webp
new file mode 100644
index 0000000..457bc87
Binary files /dev/null and b/images/g020/debian_vm_install_os_installer_time_server.webp differ
diff --git a/images/g020/debian_vm_install_os_start_button.png b/images/g020/debian_vm_install_os_start_button.png
deleted file mode 100644
index 7019bec..0000000
Binary files a/images/g020/debian_vm_install_os_start_button.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_start_button.webp b/images/g020/debian_vm_install_os_start_button.webp
new file mode 100644
index 0000000..548af1c
Binary files /dev/null and b/images/g020/debian_vm_install_os_start_button.webp differ
diff --git a/images/g020/debian_vm_install_os_vm_hardware_tab_cdrom_updated.png b/images/g020/debian_vm_install_os_vm_hardware_tab_cdrom_updated.png
deleted file mode 100644
index f7d4244..0000000
Binary files a/images/g020/debian_vm_install_os_vm_hardware_tab_cdrom_updated.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_vm_hardware_tab_cdrom_updated.webp b/images/g020/debian_vm_install_os_vm_hardware_tab_cdrom_updated.webp
new file mode 100644
index 0000000..b671996
Binary files /dev/null and b/images/g020/debian_vm_install_os_vm_hardware_tab_cdrom_updated.webp differ
diff --git a/images/g020/debian_vm_install_os_vm_hardware_tab_edit_button.png b/images/g020/debian_vm_install_os_vm_hardware_tab_edit_button.png
deleted file mode 100644
index 865ca50..0000000
Binary files a/images/g020/debian_vm_install_os_vm_hardware_tab_edit_button.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_vm_hardware_tab_edit_button.webp b/images/g020/debian_vm_install_os_vm_hardware_tab_edit_button.webp
new file mode 100644
index 0000000..aae4f87
Binary files /dev/null and b/images/g020/debian_vm_install_os_vm_hardware_tab_edit_button.webp differ
diff --git a/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window.png b/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window.png
deleted file mode 100644
index 60e5575..0000000
Binary files a/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window.webp b/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window.webp
new file mode 100644
index 0000000..6f3e023
Binary files /dev/null and b/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window.webp differ
diff --git a/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window_changed.png b/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window_changed.png
deleted file mode 100644
index 0aa5dec..0000000
Binary files a/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window_changed.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window_changed.webp b/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window_changed.webp
new file mode 100644
index 0000000..0887c12
Binary files /dev/null and b/images/g020/debian_vm_install_os_vm_hardware_tab_edit_window_changed.webp differ
diff --git a/images/g020/debian_vm_install_os_vm_rebooted_grub.png b/images/g020/debian_vm_install_os_vm_rebooted_grub.png
deleted file mode 100644
index 87f5644..0000000
Binary files a/images/g020/debian_vm_install_os_vm_rebooted_grub.png and /dev/null differ
diff --git a/images/g020/debian_vm_install_os_vm_rebooted_grub.webp b/images/g020/debian_vm_install_os_vm_rebooted_grub.webp
new file mode 100644
index 0000000..e579310
Binary files /dev/null and b/images/g020/debian_vm_install_os_vm_rebooted_grub.webp differ
diff --git a/images/g020/debian_vm_note_option_boot_order_edit_window.png b/images/g020/debian_vm_note_option_boot_order_edit_window.png
deleted file mode 100644
index 35b7ae5..0000000
Binary files a/images/g020/debian_vm_note_option_boot_order_edit_window.png and /dev/null differ
diff --git a/images/g020/debian_vm_note_option_boot_order_edit_window.webp b/images/g020/debian_vm_note_option_boot_order_edit_window.webp
new file mode 100644
index 0000000..5350b08
Binary files /dev/null and b/images/g020/debian_vm_note_option_boot_order_edit_window.webp differ
diff --git a/images/g020/debian_vm_note_option_boot_order_options_tab.png b/images/g020/debian_vm_note_option_boot_order_options_tab.png
deleted file mode 100644
index 5b95e13..0000000
Binary files a/images/g020/debian_vm_note_option_boot_order_options_tab.png and /dev/null differ
diff --git a/images/g020/debian_vm_note_option_boot_order_options_tab.webp b/images/g020/debian_vm_note_option_boot_order_options_tab.webp
new file mode 100644
index 0000000..aa8b4b0
Binary files /dev/null and b/images/g020/debian_vm_note_option_boot_order_options_tab.webp differ
diff --git a/images/g020/pve_templates_storage_images_list_empty.png b/images/g020/pve_templates_storage_images_list_empty.png
deleted file mode 100644
index 4d60ca4..0000000
Binary files a/images/g020/pve_templates_storage_images_list_empty.png and /dev/null differ
diff --git a/images/g020/pve_templates_storage_images_list_empty.webp b/images/g020/pve_templates_storage_images_list_empty.webp
new file mode 100644
index 0000000..6f82738
Binary files /dev/null and b/images/g020/pve_templates_storage_images_list_empty.webp differ
diff --git a/images/g020/pve_templates_storage_images_list_updated.png b/images/g020/pve_templates_storage_images_list_updated.png
deleted file mode 100644
index ca3bd6a..0000000
Binary files a/images/g020/pve_templates_storage_images_list_updated.png and /dev/null differ
diff --git a/images/g020/pve_templates_storage_images_list_updated.webp b/images/g020/pve_templates_storage_images_list_updated.webp
new file mode 100644
index 0000000..96df6cd
Binary files /dev/null and b/images/g020/pve_templates_storage_images_list_updated.webp differ
diff --git a/images/g020/pve_templates_storage_iso_upload_copy_task_ok.webp b/images/g020/pve_templates_storage_iso_upload_copy_task_ok.webp
new file mode 100644
index 0000000..ea90393
Binary files /dev/null and b/images/g020/pve_templates_storage_iso_upload_copy_task_ok.webp differ
diff --git a/images/g020/pve_templates_storage_iso_upload_dialog.png b/images/g020/pve_templates_storage_iso_upload_dialog.png
deleted file mode 100644
index 41ff899..0000000
Binary files a/images/g020/pve_templates_storage_iso_upload_dialog.png and /dev/null differ
diff --git a/images/g020/pve_templates_storage_iso_upload_dialog.webp b/images/g020/pve_templates_storage_iso_upload_dialog.webp
new file mode 100644
index 0000000..a271973
Binary files /dev/null and b/images/g020/pve_templates_storage_iso_upload_dialog.webp differ
diff --git a/images/g020/pve_templates_storage_iso_upload_dialog_in_progress.png b/images/g020/pve_templates_storage_iso_upload_dialog_in_progress.png
deleted file mode 100644
index e423e56..0000000
Binary files a/images/g020/pve_templates_storage_iso_upload_dialog_in_progress.png and /dev/null differ
diff --git a/images/g020/pve_templates_storage_iso_upload_dialog_in_progress.webp b/images/g020/pve_templates_storage_iso_upload_dialog_in_progress.webp
new file mode 100644
index 0000000..5189e74
Binary files /dev/null and b/images/g020/pve_templates_storage_iso_upload_dialog_in_progress.webp differ
diff --git a/images/g020/pve_templates_storage_summary.png b/images/g020/pve_templates_storage_summary.png
deleted file mode 100644
index 7f21484..0000000
Binary files a/images/g020/pve_templates_storage_summary.png and /dev/null differ
diff --git a/images/g020/pve_templates_storage_summary.webp b/images/g020/pve_templates_storage_summary.webp
new file mode 100644
index 0000000..6ebafba
Binary files /dev/null and b/images/g020/pve_templates_storage_summary.webp differ
diff --git a/images/g020/pve_templates_storage_summary_updated.png b/images/g020/pve_templates_storage_summary_updated.png
deleted file mode 100644
index e03b715..0000000
Binary files a/images/g020/pve_templates_storage_summary_updated.png and /dev/null differ
diff --git a/images/g020/pve_templates_storage_summary_updated.webp b/images/g020/pve_templates_storage_summary_updated.webp
new file mode 100644
index 0000000..5571e5e
Binary files /dev/null and b/images/g020/pve_templates_storage_summary_updated.webp differ
diff --git a/images/g021/pve_qemu_guest_agent_web_console_connects.png b/images/g021/pve_qemu_guest_agent_web_console_connects.png
deleted file mode 100644
index f555cfd..0000000
Binary files a/images/g021/pve_qemu_guest_agent_web_console_connects.png and /dev/null differ
diff --git a/images/g021/pve_qemu_guest_agent_web_console_connects.webp b/images/g021/pve_qemu_guest_agent_web_console_connects.webp
new file mode 100644
index 0000000..aa191ca
Binary files /dev/null and b/images/g021/pve_qemu_guest_agent_web_console_connects.webp differ
diff --git a/images/g021/pve_qemu_guest_agent_web_console_connects_show_more.png b/images/g021/pve_qemu_guest_agent_web_console_connects_show_more.png
deleted file mode 100644
index a613b86..0000000
Binary files a/images/g021/pve_qemu_guest_agent_web_console_connects_show_more.png and /dev/null differ
diff --git a/images/g021/pve_qemu_guest_agent_web_console_connects_show_more.webp b/images/g021/pve_qemu_guest_agent_web_console_connects_show_more.webp
new file mode 100644
index 0000000..960c1c1
Binary files /dev/null and b/images/g021/pve_qemu_guest_agent_web_console_connects_show_more.webp differ
diff --git a/images/g022/pve_datacenter_firewall_alias_empty.png b/images/g022/pve_datacenter_firewall_alias_empty.png
deleted file mode 100644
index 854d7b5..0000000
Binary files a/images/g022/pve_datacenter_firewall_alias_empty.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_alias_empty.webp b/images/g022/pve_datacenter_firewall_alias_empty.webp
new file mode 100644
index 0000000..fe1c35b
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_alias_empty.webp differ
diff --git a/images/g022/pve_datacenter_firewall_alias_form_empty.png b/images/g022/pve_datacenter_firewall_alias_form_empty.png
deleted file mode 100644
index adad31e..0000000
Binary files a/images/g022/pve_datacenter_firewall_alias_form_empty.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_alias_form_empty.webp b/images/g022/pve_datacenter_firewall_alias_form_empty.webp
new file mode 100644
index 0000000..cdd4609
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_alias_form_empty.webp differ
diff --git a/images/g022/pve_datacenter_firewall_alias_form_filled.png b/images/g022/pve_datacenter_firewall_alias_form_filled.png
deleted file mode 100644
index e7f7b1b..0000000
Binary files a/images/g022/pve_datacenter_firewall_alias_form_filled.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_alias_form_filled.webp b/images/g022/pve_datacenter_firewall_alias_form_filled.webp
new file mode 100644
index 0000000..f07129e
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_alias_form_filled.webp differ
diff --git a/images/g022/pve_datacenter_firewall_alias_updated_list.png b/images/g022/pve_datacenter_firewall_alias_updated_list.png
deleted file mode 100644
index fd56a0d..0000000
Binary files a/images/g022/pve_datacenter_firewall_alias_updated_list.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_alias_updated_list.webp b/images/g022/pve_datacenter_firewall_alias_updated_list.webp
new file mode 100644
index 0000000..48d6e46
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_alias_updated_list.webp differ
diff --git a/images/g022/pve_datacenter_firewall_empty.png b/images/g022/pve_datacenter_firewall_empty.png
deleted file mode 100644
index 9505b3b..0000000
Binary files a/images/g022/pve_datacenter_firewall_empty.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_empty.webp b/images/g022/pve_datacenter_firewall_empty.webp
new file mode 100644
index 0000000..63cf915
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_empty.webp differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_add_ip_form.png b/images/g022/pve_datacenter_firewall_ipset_add_ip_form.png
deleted file mode 100644
index 570690a..0000000
Binary files a/images/g022/pve_datacenter_firewall_ipset_add_ip_form.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_add_ip_form.webp b/images/g022/pve_datacenter_firewall_ipset_add_ip_form.webp
new file mode 100644
index 0000000..6dad0cf
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_ipset_add_ip_form.webp differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_add_ip_form_filled.png b/images/g022/pve_datacenter_firewall_ipset_add_ip_form_filled.png
deleted file mode 100644
index f616b36..0000000
Binary files a/images/g022/pve_datacenter_firewall_ipset_add_ip_form_filled.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_add_ip_form_filled.webp b/images/g022/pve_datacenter_firewall_ipset_add_ip_form_filled.webp
new file mode 100644
index 0000000..c353776
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_ipset_add_ip_form_filled.webp differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_create_form.webp b/images/g022/pve_datacenter_firewall_ipset_create_form.webp
new file mode 100644
index 0000000..4cfb4c9
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_ipset_create_form.webp differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_create_form_filled.webp b/images/g022/pve_datacenter_firewall_ipset_create_form_filled.webp
new file mode 100644
index 0000000..a213ece
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_ipset_create_form_filled.webp differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_create_window.png b/images/g022/pve_datacenter_firewall_ipset_create_window.png
deleted file mode 100644
index 4b7f278..0000000
Binary files a/images/g022/pve_datacenter_firewall_ipset_create_window.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_create_window_filled.png b/images/g022/pve_datacenter_firewall_ipset_create_window_filled.png
deleted file mode 100644
index 6efc1a3..0000000
Binary files a/images/g022/pve_datacenter_firewall_ipset_create_window_filled.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_created.png b/images/g022/pve_datacenter_firewall_ipset_created.png
deleted file mode 100644
index be76463..0000000
Binary files a/images/g022/pve_datacenter_firewall_ipset_created.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_created.webp b/images/g022/pve_datacenter_firewall_ipset_created.webp
new file mode 100644
index 0000000..6351a21
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_ipset_created.webp differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_empty.png b/images/g022/pve_datacenter_firewall_ipset_empty.png
deleted file mode 100644
index 44c88d0..0000000
Binary files a/images/g022/pve_datacenter_firewall_ipset_empty.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_empty.webp b/images/g022/pve_datacenter_firewall_ipset_empty.webp
new file mode 100644
index 0000000..aaf7e32
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_ipset_empty.webp differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_new_ip_listed.png b/images/g022/pve_datacenter_firewall_ipset_new_ip_listed.png
deleted file mode 100644
index b713470..0000000
Binary files a/images/g022/pve_datacenter_firewall_ipset_new_ip_listed.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_ipset_new_ip_listed.webp b/images/g022/pve_datacenter_firewall_ipset_new_ip_listed.webp
new file mode 100644
index 0000000..c6c0e47
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_ipset_new_ip_listed.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_editor.png b/images/g022/pve_datacenter_firewall_security_group_editor.png
deleted file mode 100644
index 52f0e61..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_editor.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_editor.webp b/images/g022/pve_datacenter_firewall_security_group_editor.webp
new file mode 100644
index 0000000..df604f2
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_editor.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_editor_filled.png b/images/g022/pve_datacenter_firewall_security_group_editor_filled.png
deleted file mode 100644
index c994b1e..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_editor_filled.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_editor_filled.webp b/images/g022/pve_datacenter_firewall_security_group_editor_filled.webp
new file mode 100644
index 0000000..b83f611
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_editor_filled.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_empty.png b/images/g022/pve_datacenter_firewall_security_group_empty.png
deleted file mode 100644
index a2ac2bc..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_empty.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_empty.webp b/images/g022/pve_datacenter_firewall_security_group_empty.webp
new file mode 100644
index 0000000..98a6913
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_empty.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_disabled.png b/images/g022/pve_datacenter_firewall_security_group_rule_disabled.png
deleted file mode 100644
index 719686d..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_rule_disabled.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_disabled.webp b/images/g022/pve_datacenter_firewall_security_group_rule_disabled.webp
new file mode 100644
index 0000000..0f349ba
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_rule_disabled.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_editor.png b/images/g022/pve_datacenter_firewall_security_group_rule_editor.png
deleted file mode 100644
index 3e555c4..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_rule_editor.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_editor.webp b/images/g022/pve_datacenter_firewall_security_group_rule_editor.webp
new file mode 100644
index 0000000..bcc1b15
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_rule_editor.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_editor_filled.png b/images/g022/pve_datacenter_firewall_security_group_rule_editor_filled.png
deleted file mode 100644
index b5d17d8..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_rule_editor_filled.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_editor_filled.webp b/images/g022/pve_datacenter_firewall_security_group_rule_editor_filled.webp
new file mode 100644
index 0000000..0a813d7
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_rule_editor_filled.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_editor_source_list.png b/images/g022/pve_datacenter_firewall_security_group_rule_editor_source_list.png
deleted file mode 100644
index 5f17a26..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_rule_editor_source_list.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_editor_source_list.webp b/images/g022/pve_datacenter_firewall_security_group_rule_editor_source_list.webp
new file mode 100644
index 0000000..c1c0026
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_rule_editor_source_list.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_enabled.png b/images/g022/pve_datacenter_firewall_security_group_rule_enabled.png
deleted file mode 100644
index 50b2391..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_rule_enabled.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_enabled.webp b/images/g022/pve_datacenter_firewall_security_group_rule_enabled.webp
new file mode 100644
index 0000000..7ee64fa
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_rule_enabled.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_list_updated.png b/images/g022/pve_datacenter_firewall_security_group_rule_list_updated.png
deleted file mode 100644
index e943cc1..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_rule_list_updated.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_rule_list_updated.webp b/images/g022/pve_datacenter_firewall_security_group_rule_list_updated.webp
new file mode 100644
index 0000000..10ca909
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_rule_list_updated.webp differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_updated_list.png b/images/g022/pve_datacenter_firewall_security_group_updated_list.png
deleted file mode 100644
index 2eae732..0000000
Binary files a/images/g022/pve_datacenter_firewall_security_group_updated_list.png and /dev/null differ
diff --git a/images/g022/pve_datacenter_firewall_security_group_updated_list.webp b/images/g022/pve_datacenter_firewall_security_group_updated_list.webp
new file mode 100644
index 0000000..76bbb42
Binary files /dev/null and b/images/g022/pve_datacenter_firewall_security_group_updated_list.webp differ
diff --git a/images/g022/pve_node_firewall_rule_security_group_editor.png b/images/g022/pve_node_firewall_rule_security_group_editor.png
deleted file mode 100644
index 618b1ca..0000000
Binary files a/images/g022/pve_node_firewall_rule_security_group_editor.png and /dev/null differ
diff --git a/images/g022/pve_node_firewall_rule_security_group_editor.webp b/images/g022/pve_node_firewall_rule_security_group_editor.webp
new file mode 100644
index 0000000..10a5df0
Binary files /dev/null and b/images/g022/pve_node_firewall_rule_security_group_editor.webp differ
diff --git a/images/g022/pve_node_firewall_rule_security_group_editor_filled.png b/images/g022/pve_node_firewall_rule_security_group_editor_filled.png
deleted file mode 100644
index a160d23..0000000
Binary files a/images/g022/pve_node_firewall_rule_security_group_editor_filled.png and /dev/null differ
diff --git a/images/g022/pve_node_firewall_rule_security_group_editor_filled.webp b/images/g022/pve_node_firewall_rule_security_group_editor_filled.webp
new file mode 100644
index 0000000..b29b2ce
Binary files /dev/null and b/images/g022/pve_node_firewall_rule_security_group_editor_filled.webp differ
diff --git a/images/g022/pve_node_firewall_ruleless.png b/images/g022/pve_node_firewall_ruleless.png
deleted file mode 100644
index efc5bfc..0000000
Binary files a/images/g022/pve_node_firewall_ruleless.png and /dev/null differ
diff --git a/images/g022/pve_node_firewall_ruleless.webp b/images/g022/pve_node_firewall_ruleless.webp
new file mode 100644
index 0000000..8cced11
Binary files /dev/null and b/images/g022/pve_node_firewall_ruleless.webp differ
diff --git a/images/g022/pve_node_firewall_rules_updated.png b/images/g022/pve_node_firewall_rules_updated.png
deleted file mode 100644
index 7d795c8..0000000
Binary files a/images/g022/pve_node_firewall_rules_updated.png and /dev/null differ
diff --git a/images/g022/pve_node_firewall_rules_updated.webp b/images/g022/pve_node_firewall_rules_updated.webp
new file mode 100644
index 0000000..673465a
Binary files /dev/null and b/images/g022/pve_node_firewall_rules_updated.webp differ
diff --git a/images/g022/pve_vm_summary_status_ip.png b/images/g022/pve_vm_summary_status_ip.png
deleted file mode 100644
index 8c87588..0000000
Binary files a/images/g022/pve_vm_summary_status_ip.png and /dev/null differ
diff --git a/images/g022/pve_vm_summary_status_ip.webp b/images/g022/pve_vm_summary_status_ip.webp
new file mode 100644
index 0000000..336b729
Binary files /dev/null and b/images/g022/pve_vm_summary_status_ip.webp differ
diff --git a/images/g023/pve_vm_more_convert_to_template_confirmation.png b/images/g023/pve_vm_more_convert_to_template_confirmation.png
deleted file mode 100644
index 17ead15..0000000
Binary files a/images/g023/pve_vm_more_convert_to_template_confirmation.png and /dev/null differ
diff --git a/images/g023/pve_vm_more_convert_to_template_confirmation.webp b/images/g023/pve_vm_more_convert_to_template_confirmation.webp
new file mode 100644
index 0000000..113e6a0
Binary files /dev/null and b/images/g023/pve_vm_more_convert_to_template_confirmation.webp differ
diff --git a/images/g023/pve_vm_more_convert_to_template_option.png b/images/g023/pve_vm_more_convert_to_template_option.png
deleted file mode 100644
index 6a56e87..0000000
Binary files a/images/g023/pve_vm_more_convert_to_template_option.png and /dev/null differ
diff --git a/images/g023/pve_vm_more_convert_to_template_option.webp b/images/g023/pve_vm_more_convert_to_template_option.webp
new file mode 100644
index 0000000..1ca084f
Binary files /dev/null and b/images/g023/pve_vm_more_convert_to_template_option.webp differ
diff --git a/images/g023/pve_vm_shutdown_button.png b/images/g023/pve_vm_shutdown_button.png
deleted file mode 100644
index 1f8b724..0000000
Binary files a/images/g023/pve_vm_shutdown_button.png and /dev/null differ
diff --git a/images/g023/pve_vm_shutdown_button.webp b/images/g023/pve_vm_shutdown_button.webp
new file mode 100644
index 0000000..dbd56d1
Binary files /dev/null and b/images/g023/pve_vm_shutdown_button.webp differ
diff --git a/images/g023/pve_vm_shutdown_completed.png b/images/g023/pve_vm_shutdown_completed.png
deleted file mode 100644
index 348955c..0000000
Binary files a/images/g023/pve_vm_shutdown_completed.png and /dev/null differ
diff --git a/images/g023/pve_vm_shutdown_completed.webp b/images/g023/pve_vm_shutdown_completed.webp
new file mode 100644
index 0000000..de78ba7
Binary files /dev/null and b/images/g023/pve_vm_shutdown_completed.webp differ
diff --git a/images/g023/pve_vm_shutdown_confirmation.png b/images/g023/pve_vm_shutdown_confirmation.png
deleted file mode 100644
index 706e4cc..0000000
Binary files a/images/g023/pve_vm_shutdown_confirmation.png and /dev/null differ
diff --git a/images/g023/pve_vm_shutdown_confirmation.webp b/images/g023/pve_vm_shutdown_confirmation.webp
new file mode 100644
index 0000000..3515eef
Binary files /dev/null and b/images/g023/pve_vm_shutdown_confirmation.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_buttons_enabled.png b/images/g023/pve_vm_template_backup_view_bkp_buttons_enabled.png
deleted file mode 100644
index 4f4787b..0000000
Binary files a/images/g023/pve_vm_template_backup_view_bkp_buttons_enabled.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_buttons_enabled.webp b/images/g023/pve_vm_template_backup_view_bkp_buttons_enabled.webp
new file mode 100644
index 0000000..f1a565a
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_bkp_buttons_enabled.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_restore_vm.png b/images/g023/pve_vm_template_backup_view_bkp_restore_vm.png
deleted file mode 100644
index 1f8f6b8..0000000
Binary files a/images/g023/pve_vm_template_backup_view_bkp_restore_vm.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_restore_vm.webp b/images/g023/pve_vm_template_backup_view_bkp_restore_vm.webp
new file mode 100644
index 0000000..bc7b43f
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_bkp_restore_vm.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_restore_vm_confirmation.png b/images/g023/pve_vm_template_backup_view_bkp_restore_vm_confirmation.png
deleted file mode 100644
index 1b6194d..0000000
Binary files a/images/g023/pve_vm_template_backup_view_bkp_restore_vm_confirmation.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_restore_vm_confirmation.webp b/images/g023/pve_vm_template_backup_view_bkp_restore_vm_confirmation.webp
new file mode 100644
index 0000000..2b6f0dc
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_bkp_restore_vm_confirmation.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_restore_vm_progress.png b/images/g023/pve_vm_template_backup_view_bkp_restore_vm_progress.png
deleted file mode 100644
index 99947cf..0000000
Binary files a/images/g023/pve_vm_template_backup_view_bkp_restore_vm_progress.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_restore_vm_progress.webp b/images/g023/pve_vm_template_backup_view_bkp_restore_vm_progress.webp
new file mode 100644
index 0000000..12ae1d5
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_bkp_restore_vm_progress.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_show_configuration.png b/images/g023/pve_vm_template_backup_view_bkp_show_configuration.png
deleted file mode 100644
index 31d99ff..0000000
Binary files a/images/g023/pve_vm_template_backup_view_bkp_show_configuration.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_bkp_show_configuration.webp b/images/g023/pve_vm_template_backup_view_bkp_show_configuration.webp
new file mode 100644
index 0000000..1e4e0c7
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_bkp_show_configuration.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_empty.png b/images/g023/pve_vm_template_backup_view_empty.png
deleted file mode 100644
index 3178294..0000000
Binary files a/images/g023/pve_vm_template_backup_view_empty.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_empty.webp b/images/g023/pve_vm_template_backup_view_empty.webp
new file mode 100644
index 0000000..f2562d9
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_empty.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_new_bkp.png b/images/g023/pve_vm_template_backup_view_new_bkp.png
deleted file mode 100644
index 96ba2c8..0000000
Binary files a/images/g023/pve_vm_template_backup_view_new_bkp.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_new_bkp.webp b/images/g023/pve_vm_template_backup_view_new_bkp.webp
new file mode 100644
index 0000000..feecc28
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_new_bkp.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_new_bkp_done.png b/images/g023/pve_vm_template_backup_view_new_bkp_done.png
deleted file mode 100644
index 1310545..0000000
Binary files a/images/g023/pve_vm_template_backup_view_new_bkp_done.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_new_bkp_done.webp b/images/g023/pve_vm_template_backup_view_new_bkp_done.webp
new file mode 100644
index 0000000..84e7210
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_new_bkp_done.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_new_bkp_mode_stop.png b/images/g023/pve_vm_template_backup_view_new_bkp_mode_stop.png
deleted file mode 100644
index fcd9c6e..0000000
Binary files a/images/g023/pve_vm_template_backup_view_new_bkp_mode_stop.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_new_bkp_mode_stop.webp b/images/g023/pve_vm_template_backup_view_new_bkp_mode_stop.webp
new file mode 100644
index 0000000..0611b53
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_new_bkp_mode_stop.webp differ
diff --git a/images/g023/pve_vm_template_backup_view_new_bkp_progress.png b/images/g023/pve_vm_template_backup_view_new_bkp_progress.png
deleted file mode 100644
index 620d5ac..0000000
Binary files a/images/g023/pve_vm_template_backup_view_new_bkp_progress.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_backup_view_new_bkp_progress.webp b/images/g023/pve_vm_template_backup_view_new_bkp_progress.webp
new file mode 100644
index 0000000..6c5743b
Binary files /dev/null and b/images/g023/pve_vm_template_backup_view_new_bkp_progress.webp differ
diff --git a/images/g023/pve_vm_template_summary_notes_editor_window.png b/images/g023/pve_vm_template_summary_notes_editor_window.png
deleted file mode 100644
index 211453a..0000000
Binary files a/images/g023/pve_vm_template_summary_notes_editor_window.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_summary_notes_editor_window.webp b/images/g023/pve_vm_template_summary_notes_editor_window.webp
new file mode 100644
index 0000000..914c936
Binary files /dev/null and b/images/g023/pve_vm_template_summary_notes_editor_window.webp differ
diff --git a/images/g023/pve_vm_template_summary_notes_gear_icon.png b/images/g023/pve_vm_template_summary_notes_gear_icon.png
deleted file mode 100644
index 43a703d..0000000
Binary files a/images/g023/pve_vm_template_summary_notes_gear_icon.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_summary_notes_gear_icon.webp b/images/g023/pve_vm_template_summary_notes_gear_icon.webp
new file mode 100644
index 0000000..4e777ab
Binary files /dev/null and b/images/g023/pve_vm_template_summary_notes_gear_icon.webp differ
diff --git a/images/g023/pve_vm_template_summary_view.png b/images/g023/pve_vm_template_summary_view.png
deleted file mode 100644
index c313db9..0000000
Binary files a/images/g023/pve_vm_template_summary_view.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_summary_view.webp b/images/g023/pve_vm_template_summary_view.webp
new file mode 100644
index 0000000..4b65111
Binary files /dev/null and b/images/g023/pve_vm_template_summary_view.webp differ
diff --git a/images/g023/pve_vm_template_summary_view_notes_updated.png b/images/g023/pve_vm_template_summary_view_notes_updated.png
deleted file mode 100644
index 0b8e2e6..0000000
Binary files a/images/g023/pve_vm_template_summary_view_notes_updated.png and /dev/null differ
diff --git a/images/g023/pve_vm_template_summary_view_notes_updated.webp b/images/g023/pve_vm_template_summary_view_notes_updated.webp
new file mode 100644
index 0000000..e48a789
Binary files /dev/null and b/images/g023/pve_vm_template_summary_view_notes_updated.webp differ
diff --git a/images/g024/pve_node_k3snodetpl_ips_highlighted.png b/images/g024/pve_node_k3snodetpl_ips_highlighted.png
deleted file mode 100644
index ad2f92f..0000000
Binary files a/images/g024/pve_node_k3snodetpl_ips_highlighted.png and /dev/null differ
diff --git a/images/g024/pve_node_k3snodetpl_ips_highlighted.webp b/images/g024/pve_node_k3snodetpl_ips_highlighted.webp
new file mode 100644
index 0000000..405eb23
Binary files /dev/null and b/images/g024/pve_node_k3snodetpl_ips_highlighted.webp differ
diff --git a/images/g024/pve_node_new_vm_network_devices.png b/images/g024/pve_node_new_vm_network_devices.png
deleted file mode 100644
index b93eb91..0000000
Binary files a/images/g024/pve_node_new_vm_network_devices.png and /dev/null differ
diff --git a/images/g024/pve_node_new_vm_network_devices.webp b/images/g024/pve_node_new_vm_network_devices.webp
new file mode 100644
index 0000000..c99ff8c
Binary files /dev/null and b/images/g024/pve_node_new_vm_network_devices.webp differ
diff --git a/images/g024/pve_node_template_backup_button.png b/images/g024/pve_node_template_backup_button.png
deleted file mode 100644
index f891692..0000000
Binary files a/images/g024/pve_node_template_backup_button.png and /dev/null differ
diff --git a/images/g024/pve_node_template_backup_button.webp b/images/g024/pve_node_template_backup_button.webp
new file mode 100644
index 0000000..9ea82d1
Binary files /dev/null and b/images/g024/pve_node_template_backup_button.webp differ
diff --git a/images/g024/pve_node_template_clone_window.png b/images/g024/pve_node_template_clone_window.png
deleted file mode 100644
index 7362370..0000000
Binary files a/images/g024/pve_node_template_clone_window.png and /dev/null differ
diff --git a/images/g024/pve_node_template_clone_window.webp b/images/g024/pve_node_template_clone_window.webp
new file mode 100644
index 0000000..db8ea2f
Binary files /dev/null and b/images/g024/pve_node_template_clone_window.webp differ
diff --git a/images/g024/pve_node_template_clone_window_filled.png b/images/g024/pve_node_template_clone_window_filled.png
deleted file mode 100644
index 83e065e..0000000
Binary files a/images/g024/pve_node_template_clone_window_filled.png and /dev/null differ
diff --git a/images/g024/pve_node_template_clone_window_filled.webp b/images/g024/pve_node_template_clone_window_filled.webp
new file mode 100644
index 0000000..80895f3
Binary files /dev/null and b/images/g024/pve_node_template_clone_window_filled.webp differ
diff --git a/images/g024/pve_node_template_full_clone_done.png b/images/g024/pve_node_template_full_clone_done.png
deleted file mode 100644
index cd7634e..0000000
Binary files a/images/g024/pve_node_template_full_clone_done.png and /dev/null differ
diff --git a/images/g024/pve_node_template_full_clone_done.webp b/images/g024/pve_node_template_full_clone_done.webp
new file mode 100644
index 0000000..06f824d
Binary files /dev/null and b/images/g024/pve_node_template_full_clone_done.webp differ
diff --git a/images/g024/pve_node_template_full_clone_progress.png b/images/g024/pve_node_template_full_clone_progress.png
deleted file mode 100644
index cd33d68..0000000
Binary files a/images/g024/pve_node_template_full_clone_progress.png and /dev/null differ
diff --git a/images/g024/pve_node_template_full_clone_progress.webp b/images/g024/pve_node_template_full_clone_progress.webp
new file mode 100644
index 0000000..fef5c8f
Binary files /dev/null and b/images/g024/pve_node_template_full_clone_progress.webp differ
diff --git a/images/g024/pve_node_template_more_clone_option.png b/images/g024/pve_node_template_more_clone_option.png
deleted file mode 100644
index 7f82f89..0000000
Binary files a/images/g024/pve_node_template_more_clone_option.png and /dev/null differ
diff --git a/images/g024/pve_node_template_more_clone_option.webp b/images/g024/pve_node_template_more_clone_option.webp
new file mode 100644
index 0000000..1286d2d
Binary files /dev/null and b/images/g024/pve_node_template_more_clone_option.webp differ
diff --git a/images/g024/pve_node_template_more_convert_to_template_option.png b/images/g024/pve_node_template_more_convert_to_template_option.png
deleted file mode 100644
index 4d8588a..0000000
Binary files a/images/g024/pve_node_template_more_convert_to_template_option.png and /dev/null differ
diff --git a/images/g024/pve_node_template_more_convert_to_template_option.webp b/images/g024/pve_node_template_more_convert_to_template_option.webp
new file mode 100644
index 0000000..355d3f6
Binary files /dev/null and b/images/g024/pve_node_template_more_convert_to_template_option.webp differ
diff --git a/images/g024/pve_vm_options_protection.png b/images/g024/pve_vm_options_protection.png
deleted file mode 100644
index a26de01..0000000
Binary files a/images/g024/pve_vm_options_protection.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_alias_updated.png b/images/g025/pve_datacenter_firewall_alias_updated.png
deleted file mode 100644
index 51b1d64..0000000
Binary files a/images/g025/pve_datacenter_firewall_alias_updated.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_alias_updated.webp b/images/g025/pve_datacenter_firewall_alias_updated.webp
new file mode 100644
index 0000000..abf1f71
Binary files /dev/null and b/images/g025/pve_datacenter_firewall_alias_updated.webp differ
diff --git a/images/g025/pve_datacenter_firewall_alias_updated_local_network.png b/images/g025/pve_datacenter_firewall_alias_updated_local_network.png
deleted file mode 100644
index 1301208..0000000
Binary files a/images/g025/pve_datacenter_firewall_alias_updated_local_network.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_alias_updated_local_network.webp b/images/g025/pve_datacenter_firewall_alias_updated_local_network.webp
new file mode 100644
index 0000000..6c3704e
Binary files /dev/null and b/images/g025/pve_datacenter_firewall_alias_updated_local_network.webp differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal.png b/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal.png
deleted file mode 100644
index de14449..0000000
Binary files a/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal.webp b/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal.webp
new file mode 100644
index 0000000..6807e16
Binary files /dev/null and b/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal.webp differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal_confirm.png b/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal_confirm.png
deleted file mode 100644
index 8161ddc..0000000
Binary files a/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal_confirm.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal_confirm.webp b/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal_confirm.webp
new file mode 100644
index 0000000..1c93014
Binary files /dev/null and b/images/g025/pve_datacenter_firewall_ipset_debiantpl_net0_removal_confirm.webp differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_edited.png b/images/g025/pve_datacenter_firewall_ipset_edited.png
deleted file mode 100644
index 414178b..0000000
Binary files a/images/g025/pve_datacenter_firewall_ipset_edited.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_edited.webp b/images/g025/pve_datacenter_firewall_ipset_edited.webp
new file mode 100644
index 0000000..ada70e7
Binary files /dev/null and b/images/g025/pve_datacenter_firewall_ipset_edited.webp differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_initial.png b/images/g025/pve_datacenter_firewall_ipset_initial.png
deleted file mode 100644
index 4c40a82..0000000
Binary files a/images/g025/pve_datacenter_firewall_ipset_initial.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_initial.webp b/images/g025/pve_datacenter_firewall_ipset_initial.webp
new file mode 100644
index 0000000..4521d4b
Binary files /dev/null and b/images/g025/pve_datacenter_firewall_ipset_initial.webp differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_local_network.png b/images/g025/pve_datacenter_firewall_ipset_local_network.png
deleted file mode 100644
index e9b6bd9..0000000
Binary files a/images/g025/pve_datacenter_firewall_ipset_local_network.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_ipset_local_network.webp b/images/g025/pve_datacenter_firewall_ipset_local_network.webp
new file mode 100644
index 0000000..7496b68
Binary files /dev/null and b/images/g025/pve_datacenter_firewall_ipset_local_network.webp differ
diff --git a/images/g025/pve_datacenter_firewall_security_group_k3s_agents_accept_in_net0.png b/images/g025/pve_datacenter_firewall_security_group_k3s_agents_accept_in_net0.png
deleted file mode 100644
index e9d1cc3..0000000
Binary files a/images/g025/pve_datacenter_firewall_security_group_k3s_agents_accept_in_net0.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_security_group_k3s_agents_accept_in_net0.webp b/images/g025/pve_datacenter_firewall_security_group_k3s_agents_accept_in_net0.webp
new file mode 100644
index 0000000..e3b2c41
Binary files /dev/null and b/images/g025/pve_datacenter_firewall_security_group_k3s_agents_accept_in_net0.webp differ
diff --git a/images/g025/pve_datacenter_firewall_security_group_k3s_servers_accept_in_net0.png b/images/g025/pve_datacenter_firewall_security_group_k3s_servers_accept_in_net0.png
deleted file mode 100644
index 079f682..0000000
Binary files a/images/g025/pve_datacenter_firewall_security_group_k3s_servers_accept_in_net0.png and /dev/null differ
diff --git a/images/g025/pve_datacenter_firewall_security_group_k3s_servers_accept_in_net0.webp b/images/g025/pve_datacenter_firewall_security_group_k3s_servers_accept_in_net0.webp
new file mode 100644
index 0000000..0163a2e
Binary files /dev/null and b/images/g025/pve_datacenter_firewall_security_group_k3s_servers_accept_in_net0.webp differ
diff --git a/images/g025/pve_k3server01_firewall_inserted_security_group_net0.png b/images/g025/pve_k3server01_firewall_inserted_security_group_net0.png
deleted file mode 100644
index 6e48634..0000000
Binary files a/images/g025/pve_k3server01_firewall_inserted_security_group_net0.png and /dev/null differ
diff --git a/images/g025/pve_k3server01_firewall_inserted_security_group_net0.webp b/images/g025/pve_k3server01_firewall_inserted_security_group_net0.webp
new file mode 100644
index 0000000..ea36456
Binary files /dev/null and b/images/g025/pve_k3server01_firewall_inserted_security_group_net0.webp differ
diff --git a/images/g025/pve_k3server01_firewall_ipset_ipfilter_net0.png b/images/g025/pve_k3server01_firewall_ipset_ipfilter_net0.png
deleted file mode 100644
index d8aad2b..0000000
Binary files a/images/g025/pve_k3server01_firewall_ipset_ipfilter_net0.png and /dev/null differ
diff --git a/images/g025/pve_k3server01_firewall_ipset_ipfilter_net0.webp b/images/g025/pve_k3server01_firewall_ipset_ipfilter_net0.webp
new file mode 100644
index 0000000..0ed85c9
Binary files /dev/null and b/images/g025/pve_k3server01_firewall_ipset_ipfilter_net0.webp differ
diff --git a/images/g025/pve_k3server01_firewall_options_enabled_ipfilter.png b/images/g025/pve_k3server01_firewall_options_enabled_ipfilter.png
deleted file mode 100644
index 6fe800f..0000000
Binary files a/images/g025/pve_k3server01_firewall_options_enabled_ipfilter.png and /dev/null differ
diff --git a/images/g025/pve_k3server01_firewall_options_enabled_ipfilter.webp b/images/g025/pve_k3server01_firewall_options_enabled_ipfilter.webp
new file mode 100644
index 0000000..0500e25
Binary files /dev/null and b/images/g025/pve_k3server01_firewall_options_enabled_ipfilter.webp differ
diff --git a/images/g025/pve_node_summary_reboot_button.png b/images/g025/pve_node_summary_reboot_button.png
deleted file mode 100644
index d17df58..0000000
Binary files a/images/g025/pve_node_summary_reboot_button.png and /dev/null differ
diff --git a/images/g025/pve_node_summary_reboot_button.webp b/images/g025/pve_node_summary_reboot_button.webp
new file mode 100644
index 0000000..b19e399
Binary files /dev/null and b/images/g025/pve_node_summary_reboot_button.webp differ
diff --git a/images/g025/pve_node_template_link_cloning_k3snodetpl.png b/images/g025/pve_node_template_link_cloning_k3snodetpl.png
deleted file mode 100644
index 9d080e5..0000000
Binary files a/images/g025/pve_node_template_link_cloning_k3snodetpl.png and /dev/null differ
diff --git a/images/g025/pve_node_template_link_cloning_k3snodetpl.webp b/images/g025/pve_node_template_link_cloning_k3snodetpl.webp
new file mode 100644
index 0000000..39805fb
Binary files /dev/null and b/images/g025/pve_node_template_link_cloning_k3snodetpl.webp differ
diff --git a/images/g025/pve_tasks_log_start_shutdown_vms.png b/images/g025/pve_tasks_log_start_shutdown_vms.png
deleted file mode 100644
index 80e57f6..0000000
Binary files a/images/g025/pve_tasks_log_start_shutdown_vms.png and /dev/null differ
diff --git a/images/g025/pve_tasks_log_start_shutdown_vms.webp b/images/g025/pve_tasks_log_start_shutdown_vms.webp
new file mode 100644
index 0000000..b66d9fa
Binary files /dev/null and b/images/g025/pve_tasks_log_start_shutdown_vms.webp differ
diff --git a/images/g025/pve_vm_hardware_tab.png b/images/g025/pve_vm_hardware_tab.png
deleted file mode 100644
index 273f805..0000000
Binary files a/images/g025/pve_vm_hardware_tab.png and /dev/null differ
diff --git a/images/g025/pve_vm_hardware_tab.webp b/images/g025/pve_vm_hardware_tab.webp
new file mode 100644
index 0000000..3c6ec26
Binary files /dev/null and b/images/g025/pve_vm_hardware_tab.webp differ
diff --git a/images/g025/pve_vm_options_start_at_boot_edit_window.png b/images/g025/pve_vm_options_start_at_boot_edit_window.png
deleted file mode 100644
index ad6a9ed..0000000
Binary files a/images/g025/pve_vm_options_start_at_boot_edit_window.png and /dev/null differ
diff --git a/images/g025/pve_vm_options_start_at_boot_edit_window.webp b/images/g025/pve_vm_options_start_at_boot_edit_window.webp
new file mode 100644
index 0000000..a83b6cb
Binary files /dev/null and b/images/g025/pve_vm_options_start_at_boot_edit_window.webp differ
diff --git a/images/g025/pve_vm_options_start_shutdown_edited.png b/images/g025/pve_vm_options_start_shutdown_edited.png
deleted file mode 100644
index c0710c3..0000000
Binary files a/images/g025/pve_vm_options_start_shutdown_edited.png and /dev/null differ
diff --git a/images/g025/pve_vm_options_start_shutdown_edited.webp b/images/g025/pve_vm_options_start_shutdown_edited.webp
new file mode 100644
index 0000000..48122ee
Binary files /dev/null and b/images/g025/pve_vm_options_start_shutdown_edited.webp differ
diff --git a/images/g025/pve_vm_options_start_shutdown_edited_k3sagent01.png b/images/g025/pve_vm_options_start_shutdown_edited_k3sagent01.png
deleted file mode 100644
index 378fe8b..0000000
Binary files a/images/g025/pve_vm_options_start_shutdown_edited_k3sagent01.png and /dev/null differ
diff --git a/images/g025/pve_vm_options_start_shutdown_edited_k3sagent01.webp b/images/g025/pve_vm_options_start_shutdown_edited_k3sagent01.webp
new file mode 100644
index 0000000..68ccb11
Binary files /dev/null and b/images/g025/pve_vm_options_start_shutdown_edited_k3sagent01.webp differ
diff --git a/images/g025/pve_vm_options_start_shutdown_highlighted.webp b/images/g025/pve_vm_options_start_shutdown_highlighted.webp
new file mode 100644
index 0000000..e0b53a3
Binary files /dev/null and b/images/g025/pve_vm_options_start_shutdown_highlighted.webp differ
diff --git a/images/g025/pve_vm_options_start_shutdown_hihglighted.png b/images/g025/pve_vm_options_start_shutdown_hihglighted.png
deleted file mode 100644
index 5a6193f..0000000
Binary files a/images/g025/pve_vm_options_start_shutdown_hihglighted.png and /dev/null differ
diff --git a/images/g025/pve_vm_options_start_shutdown_order_edit_window.png b/images/g025/pve_vm_options_start_shutdown_order_edit_window.png
deleted file mode 100644
index 6bd1fa3..0000000
Binary files a/images/g025/pve_vm_options_start_shutdown_order_edit_window.png and /dev/null differ
diff --git a/images/g025/pve_vm_options_start_shutdown_order_edit_window.webp b/images/g025/pve_vm_options_start_shutdown_order_edit_window.webp
new file mode 100644
index 0000000..bd04bad
Binary files /dev/null and b/images/g025/pve_vm_options_start_shutdown_order_edit_window.webp differ
diff --git a/images/g026/pve_datacenter_firewall_security_group_rule_added.png b/images/g026/pve_datacenter_firewall_security_group_rule_added.png
deleted file mode 100644
index 1e35df4..0000000
Binary files a/images/g026/pve_datacenter_firewall_security_group_rule_added.png and /dev/null differ
diff --git a/images/g026/pve_datacenter_firewall_security_group_rule_added.webp b/images/g026/pve_datacenter_firewall_security_group_rule_added.webp
new file mode 100644
index 0000000..cd8db53
Binary files /dev/null and b/images/g026/pve_datacenter_firewall_security_group_rule_added.webp differ
diff --git a/images/g030/kubernetes-dashboard_namespaces_list.png b/images/g030/kubernetes-dashboard_namespaces_list.png
deleted file mode 100644
index 1c987c0..0000000
Binary files a/images/g030/kubernetes-dashboard_namespaces_list.png and /dev/null differ
diff --git a/images/g030/kubernetes-dashboard_sign-in.png b/images/g030/kubernetes-dashboard_sign-in.png
deleted file mode 100644
index aee7a74..0000000
Binary files a/images/g030/kubernetes-dashboard_sign-in.png and /dev/null differ
diff --git a/images/g030/kubernetes-dashboard_sign-out.png b/images/g030/kubernetes-dashboard_sign-out.png
deleted file mode 100644
index 17c9dcb..0000000
Binary files a/images/g030/kubernetes-dashboard_sign-out.png and /dev/null differ
diff --git a/images/g030/kubernetes-dashboard_workloads_all_namespaces.png b/images/g030/kubernetes-dashboard_workloads_all_namespaces.png
deleted file mode 100644
index 5c9a40a..0000000
Binary files a/images/g030/kubernetes-dashboard_workloads_all_namespaces.png and /dev/null differ
diff --git a/images/g030/kubernetes-dashboard_workloads_default_namespace.png b/images/g030/kubernetes-dashboard_workloads_default_namespace.png
deleted file mode 100644
index 57bfdf0..0000000
Binary files a/images/g030/kubernetes-dashboard_workloads_default_namespace.png and /dev/null differ
diff --git a/images/g030/traefik-dashboard_main-page.webp b/images/g030/traefik-dashboard_main-page.webp
new file mode 100644
index 0000000..ae65c6f
Binary files /dev/null and b/images/g030/traefik-dashboard_main-page.webp differ
diff --git a/images/g030/traefik-dashboard_sign-in.webp b/images/g030/traefik-dashboard_sign-in.webp
new file mode 100644
index 0000000..31bb32d
Binary files /dev/null and b/images/g030/traefik-dashboard_sign-in.webp differ
diff --git a/images/g031/headlamp-authentication-token.webp b/images/g031/headlamp-authentication-token.webp
new file mode 100644
index 0000000..dbedbf6
Binary files /dev/null and b/images/g031/headlamp-authentication-token.webp differ
diff --git a/images/g031/headlamp-clusters-view.webp b/images/g031/headlamp-clusters-view.webp
new file mode 100644
index 0000000..784f0b0
Binary files /dev/null and b/images/g031/headlamp-clusters-view.webp differ
diff --git a/images/g031/headlamp-map-view.webp b/images/g031/headlamp-map-view.webp
new file mode 100644
index 0000000..bf1124c
Binary files /dev/null and b/images/g031/headlamp-map-view.webp differ
diff --git a/images/g031/traefik-dashboard_main-page.png b/images/g031/traefik-dashboard_main-page.png
deleted file mode 100644
index 5850af7..0000000
Binary files a/images/g031/traefik-dashboard_main-page.png and /dev/null differ
diff --git a/images/g031/traefik-dashboard_sign-in.png b/images/g031/traefik-dashboard_sign-in.png
deleted file mode 100644
index c80ce13..0000000
Binary files a/images/g031/traefik-dashboard_sign-in.png and /dev/null differ
diff --git a/images/g021/pve_vm_template_hardware_hard_disk_remarked.png b/images/g906/pve_vm_template_hardware_hard_disk_remarked.png
similarity index 100%
rename from images/g021/pve_vm_template_hardware_hard_disk_remarked.png
rename to images/g906/pve_vm_template_hardware_hard_disk_remarked.png
diff --git a/k8sprjs/cert-manager/certificates/kustomization.yaml b/k8sprjs/cert-manager/certificates/kustomization.yaml
index a1c2724..08abd5f 100644
--- a/k8sprjs/cert-manager/certificates/kustomization.yaml
+++ b/k8sprjs/cert-manager/certificates/kustomization.yaml
@@ -3,16 +3,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
-- resources/certificates.namespace.yaml
-- resources/cluster-issuer-selfsigned.cluster-issuer.cert-manager.yaml
-- resources/wildcard.deimos.cloud-tls.certificate.cert-manager.yaml
-
-patches:
-- path: patches/wildcard.deimos.cloud-tls.certificate.cert-manager.reflector.namespaces.yaml
- target:
- group: cert-manager.io
- version: v1
- kind: Certificate
- namespace: certificates
- name: wildcard.deimos.cloud-tls
-
+- resources/homelab.cloud-root-ca-issuer-selfsigned.cluster-issuer.cert-manager.yaml
+- resources/homelab.cloud-root-ca-tls.certificate.cert-manager.yaml
+- resources/homelab.cloud-root-ca-issuer.cluster-issuer.cert-manager.yaml
+- resources/homelab.cloud-intm-ca01-tls.certificate.cert-manager.yaml
+- resources/homelab.cloud-intm-ca01-issuer.cluster-issuer.cert-manager.yaml
\ No newline at end of file
diff --git a/k8sprjs/cert-manager/certificates/patches/wildcard.deimos.cloud-tls.certificate.cert-manager.reflector.namespaces.yaml b/k8sprjs/cert-manager/certificates/patches/wildcard.deimos.cloud-tls.certificate.cert-manager.reflector.namespaces.yaml
deleted file mode 100644
index 017ff59..0000000
--- a/k8sprjs/cert-manager/certificates/patches/wildcard.deimos.cloud-tls.certificate.cert-manager.reflector.namespaces.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-# Certificate wildcard.deimos.cloud-tls patch for Reflector-managed namespaces
-apiVersion: cert-manager.io/v1
-kind: Certificate
-
-metadata:
- name: wildcard.deimos.cloud-tls
- namespace: certificates
-spec:
- secretTemplate:
- annotations:
- reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "kube-system,nextcloud,gitea,monitoring"
- reflector.v1.k8s.emberstack.com/reflection-auto-namespaces: "kube-system,nextcloud,gitea,monitoring"
diff --git a/k8sprjs/cert-manager/certificates/resources/certificates.namespace.yaml b/k8sprjs/cert-manager/certificates/resources/certificates.namespace.yaml
deleted file mode 100644
index 4efbaba..0000000
--- a/k8sprjs/cert-manager/certificates/resources/certificates.namespace.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: v1
-kind: Namespace
-
-metadata:
- name: certificates
diff --git a/k8sprjs/cert-manager/certificates/resources/cluster-issuer-selfsigned.cluster-issuer.cert-manager.yaml b/k8sprjs/cert-manager/certificates/resources/cluster-issuer-selfsigned.cluster-issuer.cert-manager.yaml
deleted file mode 100644
index 85deea4..0000000
--- a/k8sprjs/cert-manager/certificates/resources/cluster-issuer-selfsigned.cluster-issuer.cert-manager.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Generic self-signed cluster-wide issuer for certificates
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-
-metadata:
- name: cluster-issuer-selfsigned
-spec:
- selfSigned: {}
diff --git a/k8sprjs/cert-manager/certificates/resources/homelab.cloud-intm-ca01-issuer.cluster-issuer.cert-manager.yaml b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-intm-ca01-issuer.cluster-issuer.cert-manager.yaml
new file mode 100644
index 0000000..191bc46
--- /dev/null
+++ b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-intm-ca01-issuer.cluster-issuer.cert-manager.yaml
@@ -0,0 +1,9 @@
+# Cluster-wide issuer using intermediate CA 01's secret
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+
+metadata:
+ name: homelab.cloud-intm-ca01-issuer
+spec:
+ ca:
+ secretName: homelab.cloud-intm-ca01-tls
\ No newline at end of file
diff --git a/k8sprjs/cert-manager/certificates/resources/homelab.cloud-intm-ca01-tls.certificate.cert-manager.yaml b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-intm-ca01-tls.certificate.cert-manager.yaml
new file mode 100644
index 0000000..ff729f5
--- /dev/null
+++ b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-intm-ca01-tls.certificate.cert-manager.yaml
@@ -0,0 +1,21 @@
+# Certificate for intermediate CA 01
+apiVersion: cert-manager.io/v1
+kind: Certificate
+
+metadata:
+ name: homelab.cloud-intm-ca01-tls
+ namespace: cert-manager
+spec:
+ isCA: true
+ commonName: homelab.cloud-intm-ca01-tls
+ secretName: homelab.cloud-intm-ca01-tls
+ duration: 4380h # 6 months
+ renewBefore: 360h # Certificates must be renewed some time before they expire (15 days)
+ privateKey:
+ algorithm: Ed25519
+ encoding: PKCS8
+ rotationPolicy: Always
+ issuerRef:
+ name: homelab.cloud-root-ca-issuer
+ kind: ClusterIssuer
+ group: cert-manager.io
\ No newline at end of file
diff --git a/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-issuer-selfsigned.cluster-issuer.cert-manager.yaml b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-issuer-selfsigned.cluster-issuer.cert-manager.yaml
new file mode 100644
index 0000000..da7829d
--- /dev/null
+++ b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-issuer-selfsigned.cluster-issuer.cert-manager.yaml
@@ -0,0 +1,8 @@
+# Self-signed cluster-wide issuer for the root CA's certificate
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+
+metadata:
+ name: homelab.cloud-root-ca-issuer-selfsigned
+spec:
+ selfSigned: {}
\ No newline at end of file
diff --git a/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-issuer.cluster-issuer.cert-manager.yaml b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-issuer.cluster-issuer.cert-manager.yaml
new file mode 100644
index 0000000..cad582c
--- /dev/null
+++ b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-issuer.cluster-issuer.cert-manager.yaml
@@ -0,0 +1,9 @@
+# Cluster-wide issuer using root CA's secret
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+
+metadata:
+ name: homelab.cloud-root-ca-issuer
+spec:
+ ca:
+ secretName: homelab.cloud-root-ca-tls
\ No newline at end of file
diff --git a/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-tls.certificate.cert-manager.yaml b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-tls.certificate.cert-manager.yaml
new file mode 100644
index 0000000..db0006b
--- /dev/null
+++ b/k8sprjs/cert-manager/certificates/resources/homelab.cloud-root-ca-tls.certificate.cert-manager.yaml
@@ -0,0 +1,21 @@
+# Certificate for root CA
+apiVersion: cert-manager.io/v1
+kind: Certificate
+
+metadata:
+ name: homelab.cloud-root-ca-tls
+ namespace: cert-manager
+spec:
+ isCA: true
+ commonName: homelab.cloud-root-ca-tls
+ secretName: homelab.cloud-root-ca-tls
+ duration: 8760h # 1 year
+ renewBefore: 720h # Certificates must be renewed some time before they expire (30 days)
+ privateKey:
+ algorithm: Ed25519
+ encoding: PKCS8
+ rotationPolicy: Always
+ issuerRef:
+ name: homelab.cloud-root-ca-issuer-selfsigned
+ kind: ClusterIssuer
+ group: cert-manager.io
\ No newline at end of file
diff --git a/k8sprjs/cert-manager/certificates/resources/wildcard.deimos.cloud-tls.certificate.cert-manager.yaml b/k8sprjs/cert-manager/certificates/resources/wildcard.deimos.cloud-tls.certificate.cert-manager.yaml
deleted file mode 100644
index 2054644..0000000
--- a/k8sprjs/cert-manager/certificates/resources/wildcard.deimos.cloud-tls.certificate.cert-manager.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Wilcard certificate for deimos.cloud
-apiVersion: cert-manager.io/v1
-kind: Certificate
-
-metadata:
- name: wildcard.deimos.cloud-tls
- namespace: certificates
-spec:
- secretName: wildcard.deimos.cloud-tls
- secretTemplate:
- annotations:
- reflector.v1.k8s.emberstack.com/reflection-allowed: "true"
- reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "kube-system"
- reflector.v1.k8s.emberstack.com/reflection-auto-enabled: "true"
- reflector.v1.k8s.emberstack.com/reflection-auto-namespaces: "kube-system"
- duration: 8760h # No certificate should last more than a year
- renewBefore: 720h # Certificates must be renewed some time before they expire (30 days)
- isCA: false
- subject:
- organizations:
- - "Deimos"
- privateKey:
- algorithm: ECDSA
- size: 384
- encoding: PKCS8
- rotationPolicy: Always
- dnsNames:
- - "*.deimos.cloud"
- - "deimos.cloud"
- issuerRef:
- name: cluster-issuer-selfsigned
- kind: ClusterIssuer
- group: cert-manager.io
diff --git a/k8sprjs/cert-manager/deployment/kustomization.yaml b/k8sprjs/cert-manager/deployment/kustomization.yaml
index e07bb43..59a1c51 100644
--- a/k8sprjs/cert-manager/deployment/kustomization.yaml
+++ b/k8sprjs/cert-manager/deployment/kustomization.yaml
@@ -3,4 +3,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
-- 'https://github.com/jetstack/cert-manager/releases/download/v1.6.1/cert-manager.yaml'
\ No newline at end of file
+- https://github.com/cert-manager/cert-manager/releases/download/v1.19.0/cert-manager.yaml
\ No newline at end of file
diff --git a/k8sprjs/headlamp/kustomization.yaml b/k8sprjs/headlamp/kustomization.yaml
new file mode 100644
index 0000000..080d341
--- /dev/null
+++ b/k8sprjs/headlamp/kustomization.yaml
@@ -0,0 +1,13 @@
+# Headlamp setup
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- resources/headlamp-admin.serviceaccount.yaml
+- resources/cluster-admin-users.clusterrolebinding.yaml
+- resources/headlamp.homelab.cloud-tls.certificate.cert-manager.yaml
+- resources/headlamp.ingressroute.traefik.yaml
+- https://raw.githubusercontent.com/kubernetes-sigs/headlamp/main/kubernetes-headlamp.yaml
+
+patches:
+- path: patches/headlamp.service.patch.yaml
\ No newline at end of file
diff --git a/k8sprjs/headlamp/patches/headlamp.service.patch.yaml b/k8sprjs/headlamp/patches/headlamp.service.patch.yaml
new file mode 100644
index 0000000..0714baf
--- /dev/null
+++ b/k8sprjs/headlamp/patches/headlamp.service.patch.yaml
@@ -0,0 +1,11 @@
+# Headlamp service patch
+kind: Service
+apiVersion: v1
+
+metadata:
+ name: headlamp
+ namespace: kube-system
+
+spec:
+ type: LoadBalancer
+ loadBalancerIP: 10.7.0.2
\ No newline at end of file
diff --git a/k8sprjs/k8sdashboard/resources/admin-user.clusterrolebinding.cluster-admin.clusterrole.yaml b/k8sprjs/headlamp/resources/cluster-admin-users.clusterrolebinding.yaml
similarity index 63%
rename from k8sprjs/k8sdashboard/resources/admin-user.clusterrolebinding.cluster-admin.clusterrole.yaml
rename to k8sprjs/headlamp/resources/cluster-admin-users.clusterrolebinding.yaml
index 8d3b129..9136f9e 100644
--- a/k8sprjs/k8sdashboard/resources/admin-user.clusterrolebinding.cluster-admin.clusterrole.yaml
+++ b/k8sprjs/headlamp/resources/cluster-admin-users.clusterrolebinding.yaml
@@ -1,12 +1,14 @@
+# Administrator cluster role bindings
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: admin-user
+ name: cluster-admin-users
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
- name: admin-user
+ name: headlamp-admin
+ namespace: kube-system
\ No newline at end of file
diff --git a/k8sprjs/headlamp/resources/headlamp-admin.serviceaccount.yaml b/k8sprjs/headlamp/resources/headlamp-admin.serviceaccount.yaml
new file mode 100644
index 0000000..60388e0
--- /dev/null
+++ b/k8sprjs/headlamp/resources/headlamp-admin.serviceaccount.yaml
@@ -0,0 +1,7 @@
+# Headlamp administrator user
+apiVersion: v1
+kind: ServiceAccount
+
+metadata:
+ name: headlamp-admin
+ namespace: kube-system
\ No newline at end of file
diff --git a/k8sprjs/headlamp/resources/headlamp.homelab.cloud-tls.certificate.cert-manager.yaml b/k8sprjs/headlamp/resources/headlamp.homelab.cloud-tls.certificate.cert-manager.yaml
new file mode 100644
index 0000000..77338c3
--- /dev/null
+++ b/k8sprjs/headlamp/resources/headlamp.homelab.cloud-tls.certificate.cert-manager.yaml
@@ -0,0 +1,25 @@
+# Certificate for Headlamp
+apiVersion: cert-manager.io/v1
+kind: Certificate
+
+metadata:
+ name: headlamp.homelab.cloud-tls
+ namespace: kube-system
+spec:
+ isCA: false
+ secretName: headlamp.homelab.cloud-tls
+ duration: 2190h # 3 months
+ renewBefore: 168h # Certificates must be renewed some time before they expire (7 days)
+ dnsNames:
+ - headlamp.homelab.cloud
+ - hdl.homelab.cloud
+ ipAddresses:
+ - 10.7.0.2
+ privateKey:
+ algorithm: Ed25519
+ encoding: PKCS8
+ rotationPolicy: Always
+ issuerRef:
+ name: homelab.cloud-intm-ca01-issuer
+ kind: ClusterIssuer
+ group: cert-manager.io
\ No newline at end of file
diff --git a/k8sprjs/headlamp/resources/headlamp.ingressroute.traefik.yaml b/k8sprjs/headlamp/resources/headlamp.ingressroute.traefik.yaml
new file mode 100644
index 0000000..ef21472
--- /dev/null
+++ b/k8sprjs/headlamp/resources/headlamp.ingressroute.traefik.yaml
@@ -0,0 +1,19 @@
+# Traefik IngressRoute for Headlamp
+apiVersion: traefik.io/v1alpha1
+kind: IngressRoute
+
+metadata:
+ name: headlamp
+ namespace: kube-system
+spec:
+ entryPoints:
+  - websecure
+ routes:
+ - match: Host(`10.7.0.2`) || Host(`headlamp.homelab.cloud`) || Host(`hdl.homelab.cloud`)
+ kind: Rule
+ services:
+ - name: headlamp
+ kind: Service
+ port: 80
+ tls:
+ secretName: headlamp.homelab.cloud-tls
\ No newline at end of file
diff --git a/k8sprjs/k8sdashboard/kustomization.yaml b/k8sprjs/k8sdashboard/kustomization.yaml
deleted file mode 100644
index 50c9c2d..0000000
--- a/k8sprjs/k8sdashboard/kustomization.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# Kubernetes Dashboard setup
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-namespace: kubernetes-dashboard
-
-resources:
-- https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
-- resources/admin-user.clusterrolebinding.cluster-admin.clusterrole.yaml
-- resources/admin-user.serviceaccount.yaml
diff --git a/k8sprjs/k8sdashboard/resources/admin-user.serviceaccount.yaml b/k8sprjs/k8sdashboard/resources/admin-user.serviceaccount.yaml
deleted file mode 100644
index 6812a4c..0000000
--- a/k8sprjs/k8sdashboard/resources/admin-user.serviceaccount.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-
-metadata:
- name: admin-user
diff --git a/k8sprjs/metallb/configs/config b/k8sprjs/metallb/configs/config
deleted file mode 100644
index 988b56b..0000000
--- a/k8sprjs/metallb/configs/config
+++ /dev/null
@@ -1,5 +0,0 @@
-address-pools:
-- name: default
- protocol: layer2
- addresses:
- - 192.168.1.41-192.168.1.80
diff --git a/k8sprjs/metallb/kustomization.yaml b/k8sprjs/metallb/kustomization.yaml
index 1ac7596..cf68175 100644
--- a/k8sprjs/metallb/kustomization.yaml
+++ b/k8sprjs/metallb/kustomization.yaml
@@ -4,19 +4,7 @@ kind: Kustomization
namespace: metallb-system
-# This is the configuration valid for MetalLB v0.13.0 and onwards.
-# resources:
-# - github.com/metallb/metallb/config/native?ref=v0.13.3
-# - resources/l2-ip.l2advertisement.yaml
-# - resources/default-pool.ipaddresspool.yaml
-
-# WARNING! The configuration below works ONLY with MetalLB v0.12.z or EARLIER versions!!!
resources:
-- github.com/metallb/metallb//manifests?ref=v0.11.0
-
-configMapGenerator:
-- name: config
- files:
- - configs/config
- options:
- disableNameSuffixHash: true
\ No newline at end of file
+- github.com/metallb/metallb/config/native?ref=v0.15.2
+- resources/l2-ip.l2advertisement.metallb.yaml
+- resources/default-pool.ipaddresspool.metallb.yaml
\ No newline at end of file
diff --git a/k8sprjs/metallb/resources/default-pool.ipaddresspool.yaml b/k8sprjs/metallb/resources/default-pool.ipaddresspool.metallb.yaml
similarity index 77%
rename from k8sprjs/metallb/resources/default-pool.ipaddresspool.yaml
rename to k8sprjs/metallb/resources/default-pool.ipaddresspool.metallb.yaml
index 8dbab59..8f1fea8 100644
--- a/k8sprjs/metallb/resources/default-pool.ipaddresspool.yaml
+++ b/k8sprjs/metallb/resources/default-pool.ipaddresspool.metallb.yaml
@@ -5,4 +5,4 @@ metadata:
name: default-pool
spec:
addresses:
- - 192.168.1.41-192.168.1.80
\ No newline at end of file
+ - 10.7.0.0-10.7.0.20
\ No newline at end of file
diff --git a/k8sprjs/metallb/resources/l2-ip.l2advertisement.yaml b/k8sprjs/metallb/resources/l2-ip.l2advertisement.metallb.yaml
similarity index 100%
rename from k8sprjs/metallb/resources/l2-ip.l2advertisement.yaml
rename to k8sprjs/metallb/resources/l2-ip.l2advertisement.metallb.yaml
diff --git a/k8sprjs/metrics-server/kustomization.yaml b/k8sprjs/metrics-server/kustomization.yaml
index 17344bd..beb58c4 100644
--- a/k8sprjs/metrics-server/kustomization.yaml
+++ b/k8sprjs/metrics-server/kustomization.yaml
@@ -3,7 +3,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
-- https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.5.2/components.yaml
+- https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.8.0/components.yaml
-patchesStrategicMerge:
-- patches/metrics-server.deployment.containers.args.patch.yaml
+patches:
+- path: patches/metrics-server.deployment.patch.yaml
\ No newline at end of file
diff --git a/k8sprjs/metrics-server/patches/metrics-server.deployment.containers.args.patch.yaml b/k8sprjs/metrics-server/patches/metrics-server.deployment.patch.yaml
similarity index 64%
rename from k8sprjs/metrics-server/patches/metrics-server.deployment.containers.args.patch.yaml
rename to k8sprjs/metrics-server/patches/metrics-server.deployment.patch.yaml
index d119f13..80f8eca 100644
--- a/k8sprjs/metrics-server/patches/metrics-server.deployment.containers.args.patch.yaml
+++ b/k8sprjs/metrics-server/patches/metrics-server.deployment.patch.yaml
@@ -12,14 +12,12 @@ spec:
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
- - key: "node-role.kubernetes.io/master"
- operator: "Exists"
- effect: "NoSchedule"
containers:
- name: metrics-server
args:
- --cert-dir=/tmp
- - --secure-port=4443
+ - --secure-port=10250
- --kubelet-preferred-address-types=InternalIP
- --kubelet-use-node-status-port
- --metric-resolution=15s
+ - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
\ No newline at end of file
diff --git a/k8sprjs/reflector/kustomization.yaml b/k8sprjs/reflector/kustomization.yaml
deleted file mode 100644
index a9173ea..0000000
--- a/k8sprjs/reflector/kustomization.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# Reflector setup
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-namespace: kube-system
-
-resources:
-- https://github.com/emberstack/kubernetes-reflector/releases/latest/download/reflector.yaml
diff --git a/k8sprjs/traefik-dashboard/kustomization.yaml b/k8sprjs/traefik-dashboard/kustomization.yaml
new file mode 100644
index 0000000..459d4a2
--- /dev/null
+++ b/k8sprjs/traefik-dashboard/kustomization.yaml
@@ -0,0 +1,16 @@
+# Traefik dashboard setup
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- resources/traefik-dashboard-basicauth.middleware.traefik.yaml
+- resources/traefik-dashboard.service.yaml
+- resources/traefik-dashboard.ingressroute.traefik.yaml
+
+secretGenerator:
+- name: traefik-dashboard-basicauth-secret
+ namespace: kube-system
+ files:
+ - secrets/users
+ options:
+ disableNameSuffixHash: true
\ No newline at end of file
diff --git a/k8sprjs/traefik-dashboard/resources/traefik-dashboard-basicauth.middleware.traefik.yaml b/k8sprjs/traefik-dashboard/resources/traefik-dashboard-basicauth.middleware.traefik.yaml
new file mode 100644
index 0000000..e0d5510
--- /dev/null
+++ b/k8sprjs/traefik-dashboard/resources/traefik-dashboard-basicauth.middleware.traefik.yaml
@@ -0,0 +1,10 @@
+# Basic authentication middleware for the Traefik dashboard
+apiVersion: traefik.io/v1alpha1
+kind: Middleware
+
+metadata:
+ name: traefik-dashboard-basicauth
+ namespace: kube-system
+spec:
+ basicAuth:
+ secret: traefik-dashboard-basicauth-secret
\ No newline at end of file
diff --git a/k8sprjs/traefik-dashboard/resources/traefik-dashboard.ingressroute.traefik.yaml b/k8sprjs/traefik-dashboard/resources/traefik-dashboard.ingressroute.traefik.yaml
new file mode 100644
index 0000000..404d224
--- /dev/null
+++ b/k8sprjs/traefik-dashboard/resources/traefik-dashboard.ingressroute.traefik.yaml
@@ -0,0 +1,18 @@
+# IngressRoute for the Traefik dashboard
+apiVersion: traefik.io/v1alpha1
+kind: IngressRoute
+
+metadata:
+ name: traefik-dashboard
+ namespace: kube-system
+spec:
+ entryPoints:
+ - websecure
+ routes:
+ - kind: Rule
+ match: Host(`10.7.0.1`) || Host(`traefik.homelab.cloud`) || Host(`tfk.homelab.cloud`)
+ services:
+ - name: api@internal
+ kind: TraefikService
+ middlewares:
+ - name: traefik-dashboard-basicauth
\ No newline at end of file
diff --git a/k8sprjs/traefik-dashboard/resources/traefik-dashboard.service.yaml b/k8sprjs/traefik-dashboard/resources/traefik-dashboard.service.yaml
new file mode 100644
index 0000000..406c0f1
--- /dev/null
+++ b/k8sprjs/traefik-dashboard/resources/traefik-dashboard.service.yaml
@@ -0,0 +1,21 @@
+# Traefik dashboard service
+apiVersion: v1
+kind: Service
+
+metadata:
+ name: traefik-dashboard
+ namespace: kube-system
+ labels:
+ app.kubernetes.io/instance: traefik-kube-system
+ app.kubernetes.io/name: traefik-dashboard
+spec:
+ type: LoadBalancer
+ loadBalancerIP: 10.7.0.1
+ ports:
+ - name: websecure
+ port: 443
+ targetPort: websecure
+ protocol: TCP
+ selector:
+ app.kubernetes.io/instance: traefik-kube-system
+ app.kubernetes.io/name: traefik
\ No newline at end of file
diff --git a/k8sprjs/traefik-dashboard/secrets/users b/k8sprjs/traefik-dashboard/secrets/users
new file mode 100644
index 0000000..b68cbeb
--- /dev/null
+++ b/k8sprjs/traefik-dashboard/secrets/users
@@ -0,0 +1 @@
+tfkuser:$2y$17$0mdP4WLdbj8BWj1lIJMDb.bXyYK75qR5AfRNzuunZuCamvAlqDlo.
\ No newline at end of file
diff --git a/k8sprjs/traefik_dashboard-ingressroute/kustomization.yaml b/k8sprjs/traefik_dashboard-ingressroute/kustomization.yaml
deleted file mode 100644
index 6d2dbec..0000000
--- a/k8sprjs/traefik_dashboard-ingressroute/kustomization.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-# Traefik Dashboard IngressRoute setup
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-resources:
-- resources/traefik-auth.middleware.basicauth.yaml
-- resources/traefik-dashboard.ingressroute.yaml
-
-secretGenerator:
-- name: traefik-auth-secret
- namespace: kube-system
- files:
- - secrets/users
- options:
- disableNameSuffixHash: true
diff --git a/k8sprjs/traefik_dashboard-ingressroute/resources/traefik-auth.middleware.basicauth.yaml b/k8sprjs/traefik_dashboard-ingressroute/resources/traefik-auth.middleware.basicauth.yaml
deleted file mode 100644
index b097a47..0000000
--- a/k8sprjs/traefik_dashboard-ingressroute/resources/traefik-auth.middleware.basicauth.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: traefik.containo.us/v1alpha1
-kind: Middleware
-
-metadata:
- name: traefik-auth
- namespace: kube-system
-spec:
- basicAuth:
- secret: traefik-auth-secret
diff --git a/k8sprjs/traefik_dashboard-ingressroute/resources/traefik-dashboard.ingressroute.yaml b/k8sprjs/traefik_dashboard-ingressroute/resources/traefik-dashboard.ingressroute.yaml
deleted file mode 100644
index 3582555..0000000
--- a/k8sprjs/traefik_dashboard-ingressroute/resources/traefik-dashboard.ingressroute.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-
-metadata:
- name: traefik-dashboard
- namespace: kube-system
-spec:
- entryPoints:
- - websecure
- tls:
- secretName: wildcard.deimos.cloud-tls
- routes:
- - match: (Host(`192.168.1.41`) || Host(`traefik.deimos.cloud`) || Host(`tfk.deimos.cloud`)) && (PathPrefix(`/dashboard`) || PathPrefix(`/api`))
- kind: Rule
- services:
- - name: api@internal
- kind: TraefikService
- middlewares:
- - name: traefik-auth
diff --git a/k8sprjs/traefik_dashboard-ingressroute/secrets/users b/k8sprjs/traefik_dashboard-ingressroute/secrets/users
deleted file mode 100644
index e94ae4d..0000000
--- a/k8sprjs/traefik_dashboard-ingressroute/secrets/users
+++ /dev/null
@@ -1 +0,0 @@
-tfkuser:$2y$06$7jmZf3zMoZStQwJlj7o3pOErocCtXrWtS2Uon/OZ6MA0i2egom77K