initial commit

Gregor Michels 2023-10-26 16:45:29 +02:00
commit 74412c51a4
9 changed files with 229 additions and 0 deletions

README.md (Normal file, 42 lines)

@@ -0,0 +1,42 @@
# Proxmox Management
This repo contains scripts used to manage a multi-tenant Proxmox environment for the [Reudnetz w.V.]().
These scripts were created because Ansible currently lacks modules to configure Proxmox users and groups.
**Please take note of all the orgas, users and vms you've created -- this collection does not keep track of them.**
## Overview
We use users, groups, resource pools, and roles to build a multi-tenant Proxmox instance.
The idea is that virtual machines/containers are created by an admin and then moved into a resource pool that the organisation's group has access to.
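The ACL set up in `create_organisation` references a custom role `RDNTZVMSnapshot` that these scripts do not create themselves. A minimal sketch of how such a role could be defined beforehand (the privilege set is an assumption, not taken from this repo):

```sh
# hypothetical privilege set for the custom snapshot role used in create_organisation
pveum role add RDNTZVMSnapshot --privs "VM.Snapshot,VM.Snapshot.Rollback"
```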
## Requirements
* zfs storage pool for customer compartments:
  * _change in `create_organisation`_
* storage pool for customer vm disks:
  * `zfs create rpool/customer-disks`
  * `pvesm add zfspool customer-disks --pool rpool/customer-disks --content images`
  * _change in `create_vm`_
* bridge for customer vms:
  * _change in `create_vm`_
  * needs to be tag aware, since vms tag their traffic with their vmid (see the example bridge config below)
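For reference, a tag-aware bridge is usually a VLAN-aware bridge. A minimal sketch of such a bridge in `/etc/network/interfaces` (the physical port `eno1` is an assumption; `vmbr0` matches the default in `create_vm`):

```
auto vmbr0
iface vmbr0 inet manual
	# physical uplink of the bridge (assumption: eno1)
	bridge-ports eno1
	bridge-stp off
	bridge-fd 0
	# make the bridge VLAN aware so the per-vm tags from create_vm take effect
	bridge-vlan-aware yes
	bridge-vids 2-4094
```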
## Create a new VM
Information you need to get beforehand from the customer:
- organisation
- username
- mail
**step 1: create an organisation** (if one does not already exist)
* `./create_organisation <name>`

**step 2: create the user account**
* `./create_user <username> <mail> <organisation>`

create_organisation (Executable file, 32 lines)

@@ -0,0 +1,32 @@
#!/bin/sh
set -e

ZFS_PARENT_PATH=rpool/customer
ORGA=$1

usage() {
	printf "usage: %s <organisation_name>\n" "$0"
}

# check if we were called correctly
[ $# != 1 ] && usage && exit 1

# create group for the organisation
pveum group add "${ORGA}" --comment "group for the organisation '${ORGA}'"

# create resource pool for the organisation
pveum pool add "${ORGA}" --comment "pool for the organisation '${ORGA}'"

# allow the group to access the resource pool
pveum acl modify "/pool/${ORGA}/" --roles PVEVMUser,PVEDatastoreAdmin,RDNTZVMSnapshot --groups "${ORGA}"

# create zfs filesystem for isos, backups and the like
zfs create -o quota=150G -p "${ZFS_PARENT_PATH}/${ORGA}-images"

# create proxmox storage on top of the zfs filesystem
pvesm add dir "${ORGA}-images" --path "/${ZFS_PARENT_PATH}/${ORGA}-images" --content vztmpl,iso,backup

# add the storage to the resource pool
pveum pool modify "${ORGA}" --storage "${ORGA}-images"
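A quick way to sanity-check the result (the organisation name `example-org` is made up):

```sh
./create_organisation example-org
pveum group list                        # the new group should show up here
pvesh get /pools                        # ... and the new resource pool here
pvesm status | grep example-org-images  # ... and the directory storage here
```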

create_user (Executable file, 21 lines)

@@ -0,0 +1,21 @@
#!/bin/sh
set -e

USER="$1@pve"
MAIL="$2"
ORGA="$3"
PASS="$(openssl rand -base64 24)"

usage() {
	printf "usage: %s <username> <mail> <organisation>\n" "$0"
}

# check usage
[ $# != 3 ] && usage && exit 1

# create the user in the pve realm
pveum user add "${USER}" --comment "user account for '${USER}'" --email "${MAIL}" --groups "${ORGA}" --password "${PASS}"

# print the generated password for the user
echo "${PASS}"
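An example run (all values hypothetical); the generated password is printed to stdout and has to be handed to the customer out of band:

```sh
./create_user alice alice@example.org example-org
```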

create_vm (Executable file, 43 lines)

@@ -0,0 +1,43 @@
#!/bin/sh
set -e
CPUS=1
MEMORY=512
BRIDGE=vmbr0
STORAGE=customer-disks
OS_TYPE=l26
DISK_IMAGE=/root/images/debian-12-genericcloud-amd64.qcow2
ID=$1
NAME=$2
ORGA=$3
usage() {
	printf "usage: %s <id> <name> <organisation>\n" "$0"
}
[ $# != 3 ] && usage && exit 1
# create vm
qm create "${ID}" \
--cpu x86-64-v3 \
--cores ${CPUS} \
--memory ${MEMORY} \
--name "${NAME}" \
--net0 "bridge=${BRIDGE},model=virtio,tag=${ID}" \
--onboot 1 \
--ostype ${OS_TYPE} \
--pool "${ORGA}" \
--scsihw virtio-scsi-pci \
--sata0 media=cdrom,file=none \
--ide2 ${STORAGE}:cloudinit
# import the debian cloud image as the vm's first disk
qm set "${ID}" --virtio0 "${STORAGE}:0,import-from=${DISK_IMAGE}"
# resize the imported disk to 50G
qm disk resize "${ID}" virtio0 50G
# boot from the imported cloud image
qm set "${ID}" --boot order=virtio0
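`create_vm` attaches a cloud-init drive but sets no cloud-init options, so the vm would boot without any login credentials. A minimal follow-up sketch (vmid, user and key path are assumptions):

```sh
# hypothetical: inject login data via cloud-init, then boot
qm set 100 --ciuser debian --sshkeys /root/keys/customer.pub --ipconfig0 ip=dhcp
qm start 100
```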

delete_organisation (Executable file, 27 lines)

@@ -0,0 +1,27 @@
#!/bin/sh
set -e

ZFS_PARENT_PATH=rpool/customer
ORGA=$1

usage() {
	printf "usage: %s <organisation_name>\n" "$0"
}

# check if we were called correctly
[ $# != 1 ] && usage && exit 1

# remove the storage from the resource pool
pveum pool modify "${ORGA}" --storage "${ORGA}-images" --delete

# remove the proxmox storage object
pvesm remove "${ORGA}-images"

# nuke the zfs filesystem with isos and backups
zfs destroy "${ZFS_PARENT_PATH}/${ORGA}-images"

# delete the resource pool of the organisation
pveum pool delete "${ORGA}"

# delete the group
pveum group delete "${ORGA}"
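Before running this, it can be worth checking whether any vms are still assigned to the pool, since those are not touched by this script (pool name hypothetical):

```sh
pvesh get /pools/example-org
```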

delete_user (Executable file, 13 lines)

@@ -0,0 +1,13 @@
#!/bin/sh
set -e

USER=$1

usage() {
	printf "usage: %s <username>\n" "$0"
}

[ $# != 1 ] && usage && exit 1

# note: create_user creates accounts in the pve realm, so the full userid (e.g. 'alice@pve') is expected here
pveum user delete "${USER}"

delete_vm (Executable file, 13 lines)

@@ -0,0 +1,13 @@
#!/bin/sh
set -e
ID=$1
usage() {
printf "usage: %s <id>\n" "$0"
}
[ $# != 1 ] && usage && exit 1
qm destroy "${ID}"

get_linklocal_for_vm (Executable file, 11 lines)

@@ -0,0 +1,11 @@
#!/bin/sh
set -e

# derive the EUI-64 based ipv6 link-local address from a mac address,
# e.g. 52:54:00:12:34:56 -> fe80::5054:00ff:fe12:3456
mac_to_ipv6_ll() {
	# split the mac into its six octets
	IFS=':'; set $1; unset IFS
	# flip the universal/local bit of the first octet and insert ff:fe in the middle
	echo "fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6"
}

# extract the mac of the vm's first nic from its config
mac_to_ipv6_ll $(qm config "$1" | grep -oE '..:..:..:..:..:..' | tr '[:upper:]' '[:lower:]')
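Since link-local addresses are only valid per interface, a zone identifier is needed when using the output (vmid `100` and bridge `vmbr0` are assumptions):

```sh
ping -6 "$(./get_linklocal_for_vm 100)%vmbr0"
```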

manage_autostart (Executable file, 27 lines)

@@ -0,0 +1,27 @@
#!/bin/sh
set -e

PARAM=$1

usage() {
cat << EOF
usage: manage the autostart behaviour of all vms on this node
	$0		show the autostart state of all vms on this node
	$0 <0|1>	disable or enable autostart for all vms on this node
	$0 help		this help
EOF
}

# validate the argument once, before touching any vm
case $PARAM in
	0|1|"") ;;
	*) usage; exit 1 ;;
esac

# iterate over all vmids on this node ('tail -n +2' skips the header of 'qm list')
qm list | tail -n +2 | awk '{ print $1 }' | while read -r vmid
do
	if [ -n "$PARAM" ]
	then
		qm set "$vmid" --onboot "$PARAM"
	else
		qm config "$vmid" | grep -e onboot: -e name:
	fi
done