commit 74412c51a420fb05a944d92d56261a9137e423a6
Author: Gregor Michels
Date:   Thu Oct 26 16:45:29 2023 +0200

    initial commit

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..81da068
--- /dev/null
+++ b/README.md
@@ -0,0 +1,42 @@
+# Proxmox Management
+
+This repo contains scripts used to manage a multi-tenant Proxmox environment for the [Reudnetz w.V.]().
+These scripts were created because Ansible currently lacks modules to configure users and groups.
+
+**Please take note of all the orgas, users and VMs you've created -- these scripts do not keep track of that.**
+
+## Overview
+
+We use users, groups, resource pools, and roles to build a multi-tenant Proxmox instance.
+
+The idea is that virtual machines/containers are created by an admin and moved into a resource pool that the organisation's group is allowed to access.
+
+## Requirements
+
+* ZFS storage pool for customer compartments:
+  * _change in `create_organisation`_
+
+* storage pool for customer VM disks:
+  * `zfs create rpool/customer-disks`
+  * `pvesm add zfspool customer-disks --pool rpool/customer-disks --content images`
+  * _change in `create_vm`_
+
+* bridge for customer VMs:
+  * _change in `create_vm`_
+  * needs to be VLAN (tag) aware
+  * VMs tag their traffic with their VMID
+
+## Create a new VM
+
+Information you need to get from the customer beforehand:
+- organisation
+- username
+- mail
+
+**step 1: create an organisation**
+* (if one does not already exist)
+* `./create_organisation <orga>`
+
+**step 2: create the user account**
+* `./create_user <username> <mail> <orga>`
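+
+**example: full run**
+
+A complete provisioning run might look like this (hypothetical names; `100` is a placeholder VMID, which also becomes the VLAN tag of the VM's traffic):
+
+```sh
+./create_organisation acme
+./create_user alice alice@example.org acme   # the account is created as 'alice@pve'
+./create_vm 100 acme-web acme
+```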
diff --git a/create_organisation b/create_organisation
new file mode 100755
index 0000000..bc81ba0
--- /dev/null
+++ b/create_organisation
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+set -e
+
+ZFS_PARENT_PATH=rpool/customer
+
+ORGA=$1
+
+usage() {
+    printf "usage: %s <orga>\n" "$0"
+}
+
+# check if we were called correctly
+[ $# != 1 ] && usage && exit 1
+
+# create group for organisation
+pveum group add "${ORGA}" --comment "group for the organisation '${ORGA}'"
+
+# create resource pool for the organisation
+pveum pool add "${ORGA}" --comment "pool for the organisation '${ORGA}'"
+
+# allow group to access resource pool
+# (RDNTZVMSnapshot is a custom role and has to exist beforehand)
+pveum acl modify "/pool/${ORGA}/" --roles PVEVMUser,PVEDatastoreAdmin,RDNTZVMSnapshot --groups "${ORGA}"
+
+# create zfs filesystem for isos, backups and stuff
+zfs create -o quota=150G -p "${ZFS_PARENT_PATH}/${ORGA}-images"
+
+# create proxmox storage on top of zfs filesystem
+pvesm add dir "${ORGA}-images" --path "/${ZFS_PARENT_PATH}/${ORGA}-images" --content vztmpl,iso,backup
+
+# add storage into storage pool
+pveum pool modify "${ORGA}" --storage "${ORGA}-images"
diff --git a/create_user b/create_user
new file mode 100755
index 0000000..5af0b71
--- /dev/null
+++ b/create_user
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+set -e
+
+USER="$1@pve"
+MAIL="$2"
+ORGA="$3"
+PASS="$(openssl rand -base64 24)"
+
+usage() {
+    printf "usage: %s <username> <mail> <orga>\n" "$0"
+}
+
+# check usage
+[ $# != 3 ] && usage && exit 1
+
+# create user (the realm is hardcoded to 'pve')
+pveum user add "${USER}" --comment "user account for '${USER}'" --email "${MAIL}" --groups "${ORGA}" --password "${PASS}"
+
+# print the generated password so it can be handed over to the user
+echo "${PASS}"
diff --git a/create_vm b/create_vm
new file mode 100755
index 0000000..5d3e0b5
--- /dev/null
+++ b/create_vm
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+set -e
+
+CPUS=1
+MEMORY=512
+BRIDGE=vmbr0
+STORAGE=customer-disks
+OS_TYPE=l26
+DISK_IMAGE=/root/images/debian-12-genericcloud-amd64.qcow2
+
+ID=$1
+NAME=$2
+ORGA=$3
+
+usage() {
+    printf "usage: %s <id> <name> <orga>\n" "$0"
+}
+
+[ $# != 3 ] && usage && exit 1
+
+# create vm (its traffic is tagged with the vmid)
+qm create "${ID}" \
+    --cpu x86-64-v3 \
+    --cores ${CPUS} \
+    --memory ${MEMORY} \
+    --name "${NAME}" \
+    --net0 "bridge=${BRIDGE},model=virtio,tag=${ID}" \
+    --onboot 1 \
+    --ostype ${OS_TYPE} \
+    --pool "${ORGA}" \
+    --scsihw virtio-scsi-pci \
+    --sata0 media=cdrom,file=none \
+    --ide2 ${STORAGE}:cloudinit
+
+# import debian cloud image
+qm set "${ID}" --virtio0 ${STORAGE}:0,import-from=${DISK_IMAGE}
+
+# resize imported disk to 50G
+qm disk resize "${ID}" virtio0 50G
+
+# change boot device to debian cloud image
+qm set "${ID}" --boot order=virtio0
diff --git a/delete_organisation b/delete_organisation
new file mode 100755
index 0000000..d54a77e
--- /dev/null
+++ b/delete_organisation
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+ZFS_PARENT_PATH=rpool/customer
+
+ORGA=$1
+
+usage() {
+    printf "usage: %s <orga>\n" "$0"
+}
+
+# check if we were called correctly
+[ $# != 1 ] && usage && exit 1
+
+# remove storage from storage pool
+pveum pool modify "${ORGA}" --storage "${ORGA}-images" --delete
+
+# remove proxmox storage object
+pvesm remove "${ORGA}-images"
+
+# nuke zfs filesystem with isos and backups
+zfs destroy "${ZFS_PARENT_PATH}/${ORGA}-images"
+
+# delete resource pool for the organisation
+pveum pool delete "${ORGA}"
+
+# delete group
+pveum group delete "${ORGA}"
diff --git a/delete_user b/delete_user
new file mode 100755
index 0000000..fa0a80f
--- /dev/null
+++ b/delete_user
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+USER=$1
+
+usage() {
+    printf "usage: %s <user>\n" "$0"
+}
+
+[ $# != 1 ] && usage && exit 1
+
+# expects the full userid including the realm, e.g. 'alice@pve'
+pveum user delete "${USER}"
diff --git a/delete_vm b/delete_vm
new file mode 100755
index 0000000..99f0976
--- /dev/null
+++ b/delete_vm
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+ID=$1
+
+usage() {
+    printf "usage: %s <id>\n" "$0"
+}
+
+[ $# != 1 ] && usage && exit 1
+
+qm destroy "${ID}"
diff --git a/get_linklocal_for_vm b/get_linklocal_for_vm
new file mode 100755
index 0000000..35f0ac0
--- /dev/null
+++ b/get_linklocal_for_vm
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+# derive the EUI-64 based ipv6 link-local address from a mac address:
+# flip the universal/local bit of the first octet and insert ff:fe
+# between the third and fourth octet
+mac_to_ipv6_ll() {
+    IFS=':'; set $1; unset IFS
+    echo "fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6"
+}
+
+# use the first mac address found in the vm config
+mac_to_ipv6_ll $(qm config "$1" | grep -oE '..:..:..:..:..:..' | tr '[:upper:]' '[:lower:]')
diff --git a/manage_autostart b/manage_autostart
new file mode 100755
index 0000000..8a77f6b
--- /dev/null
+++ b/manage_autostart
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+PARAM=$1
+
+usage() {
+    cat << EOF
+usage: manage autostart behaviour of vms on node
+
+    $0        show autostart state for all vms on node
+    $0 <0|1>  disable or enable autostart for all vms on node
+    $0 help   this help
+EOF
+}
+
+# validate the argument before touching any vm
+case $PARAM in
+    0|1|"") ;;
+    *) usage; exit 1 ;;
+esac
+
+# iterate over all vmids on this node
+qm list | tail -n +2 | awk '{ print $1 }' | while read -r vmid
+do
+    case $PARAM in
+        0|1) qm set "$vmid" --onboot "$PARAM" ;;
+        "")  qm config "$vmid" | grep -e onboot: -e name: ;;
+    esac
+done
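+
+# examples:
+#   ./manage_autostart     # show name and onboot flag for every vm
+#   ./manage_autostart 1   # enable autostart for all vms on this node
+#   ./manage_autostart 0   # disable autostart for all vms on this node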