diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f8835d9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +opennebula-vm-etcd/config-and-secrets.conf + +*.pyc + +.idea +.vscode diff --git a/alpine-rebuild-initramfs.sh b/alpine-rebuild-initramfs.sh index 643cc3f..b56454b 100755 --- a/alpine-rebuild-initramfs.sh +++ b/alpine-rebuild-initramfs.sh @@ -3,8 +3,8 @@ set -e set -x -MAJOR_VERSION=3.10 -MINOR_VERSION=3 +MAJOR_VERSION=3.11 +MINOR_VERSION=2 IMAGE=alpine-minirootfs-$MAJOR_VERSION.$MINOR_VERSION-x86_64.tar.gz SSH_KEYS=$(cat ~/.ssh/id_rsa.pub) RESOLVCONF=/etc/resolv.conf diff --git a/build-alpine-chroot.sh b/build-alpine-chroot.sh old mode 100644 new mode 100755 diff --git a/ceph-osd-create-start-alpine b/ceph-osd-create-start-alpine new file mode 100755 index 0000000..a19e1da --- /dev/null +++ b/ceph-osd-create-start-alpine @@ -0,0 +1,107 @@ +#!/bin/sh +# 17:19, 2018-02-09 +# Nico Schottelius + +# Based on ceph-disk -v prepare --bluestore /dev/sdc --osd-id ${ID} --osd-uuid $(uuidgen) --crush-device-class "ssd" + +# Create: +# - block -> link to partuuid +# - block_uuid -e> uuid if the block +# - ceph_fsid -> get from ceph-conf +# crush_device_class -> ssd, hdd +# fsid -> uuidgen! +# magic -> string "ceph osd volume v026" +# type -> bluestore + +fsid=$(ceph-conf --cluster=ceph --name=osd. --lookup fsid) +fs_uuid=$(uuidgen) +magic="ceph osd volume v026" + +set -x +set -e + +if [ $# -lt 2 ]; then + echo "$0 disk class [osdweight]" + echo "class = hdd or ssd" + exit 1 +fi + +export DEV=$1;shift +export CLASS=$1; shift + + +uuid_metadata=$(uuidgen) +uuid_block=$(uuidgen) + +osd_id=$(ceph osd create) + +dev_metadata="/dev/disk/by-partuuid/$uuid_metadata" +dev_block="/dev/disk/by-partuuid/$uuid_block" + +/usr/bin/sgdisk --new=0:0:+100M --change-name="0:ceph data" \ + --partition-guid="0:$uuid_metadata" \ + --typecode=0:4fbd7e29-9d25-41b8-afd0-062c0ceff05d \ + --mbrtogpt -- $DEV +/sbin/udevadm settle --timeout=600 + +# Using gdisk --largest-new does not change the name or set guid; +# So use 2 steps instead +/usr/bin/sgdisk --largest-new=0 --mbrtogpt -- $DEV +/sbin/udevadm settle --timeout=600 + + +lastpart=$(gdisk -l $DEV | tail -n1 | awk '{ print $1 }') +/usr/bin/sgdisk --change-name="${lastpart}:ceph block" \ + --partition-guid="${lastpart}:$uuid_block" \ + --typecode="${lastpart}:cafecafe-9b03-4f30-b4c6-b4b80ceff106" \ + --mbrtogpt -- $DEV +/sbin/udevadm settle --timeout=600 + +#echo $1 +#echo $(blkid | grep $1"2") + +#cblock=$(blkid | grep $1"2" | cut -d'"' -f4) +#echo $cblock + +/sbin/mkfs -t xfs -f -i size=2048 -- "$dev_metadata" + +mountpath=/var/lib/ceph/osd/ceph-${osd_id} + +mkdir -p "$mountpath" +mount "$dev_metadata" "$mountpath" + +ln -s $dev_block "$mountpath/block" + +echo "$uuid_block" > "$mountpath/block_uuid" +echo "$fsid" > "$mountpath/ceph_fsid" +echo "$magic" > "$mountpath/magic" +echo "$CLASS" > "$mountpath/crush_device_class" +echo $(echo $dev_block | cut -c23-) > "$mountpath/fsid" + + +# Important, otherwise --mkfs later will try to create filestore +echo bluestore > "$mountpath/type" + +ceph auth get-or-create "osd.${osd_id}" osd \ + 'allow *' mon 'allow profile osd' > $mountpath/keyring + +echo ${osd_id} > "$mountpath/whoami" +touch "$mountpath/openrc" + +ceph-osd --cluster ceph -i "${osd_id}" --mkfs +chown -R ceph:ceph "$mountpath" + +if [ $# -eq 1 ]; then + WEIGHT=$1; shift +else + devname=$(readlink -f $dev_block) + nodev=$(echo $devname | sed 's,/dev/,,') + WEIGHT=$(lsblk -l -b | awk "/^$nodev/ { print \$4/(1024^4) }") +fi + +ceph osd crush 
add osd.${osd_id} ${WEIGHT} host=$(hostname) + +echo "$metadata_dev /var/lib/ceph/osd/ceph-${osd_id} xfs noatime 0 0" >> /etc/fstab + +# Starting with monit, if available +ceph-osd -i ${osd_id} diff --git a/create-guacamole-session-ldap-DB b/create-guacamole-session-ldap-DB new file mode 100644 index 0000000..ce1e5cd --- /dev/null +++ b/create-guacamole-session-ldap-DB @@ -0,0 +1,41 @@ +#!/bin/bash +#option $1 is vm_list file name +#option $2 id DB location +#option $3 is DB user +#option $4 is DB name + +#host='localhost' + +user_arr=( $(cat $1 | awk '{print $1}' )) +vmid_arr=( $(cat $1 | awk '{print $2}' )) +port_arr=( $(cat $1 | awk '{print $3}' )) +place_arr=( $(cat $1 | awk '{print $4}' )) + +for ((i=0; i<${#user_arr[@]}; i++)) do + #create user + psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_entity (name, type) VALUES ('${user_arr[i]}','USER');" + en_id=$(psql -h $2 -U $3 -d $4 -tAc "SELECT entity_id FROM guacamole_entity WHERE name = '${user_arr[i]}';") + psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('$en_id', '\x74657374', now());" + + #create connection + cn=${user_arr[i]}${vmid_arr[i]} + echo $cn + if [ 0 -eq $(psql -h $2 -U $3 -d $4 -tAc "SELECT connection_id FROM guacamole_connection WHERE connection_name = '$cn';" | wc -l) ]; then + psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection (connection_name, protocol) VALUES ('$cn', 'vnc');" + cn_id=$(psql -h $2 -U $3 -d $4 -tAc "SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;") + + psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','hostname','${place_arr[i]}');" + psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','port','${port_arr[i]}');" + + #connection permission + psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_permission(entity_id, connection_id, permission) VALUES ('$en_id', '$cn_id', 'READ');" + #clipboard-encoding + psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','clipboard-encoding','UTF-8');" + + else + cn_id=$(psql -h $2 -U $3 -d $4 -tAc "SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;") + psql -h $2 -U $3 -d $4 -tAc "UPDATE guacamole_connection_parameter SET parameter_value='${place_arr[i]}' where connection_id='$cn_id' and parameter_name='hostname';" + psql -h $2 -U $3 -d $4 -tAc "UPDATE guacamole_connection_parameter SET parameter_value='${port_arr[i]}' where connection_id='$cn_id' and parameter_name='port';" + fi + +done \ No newline at end of file diff --git a/create-guacamole-session-ldap-file b/create-guacamole-session-ldap-file new file mode 100644 index 0000000..c11b4bc --- /dev/null +++ b/create-guacamole-session-ldap-file @@ -0,0 +1,38 @@ +#!/bin/bash +#option $1 is vm_list file name +#option $2 is DB name +#this script should be run on guacamole server + + +host='localhost' +user_arr=( $(cat $1 | awk '{print $1}' )) +vmid_arr=( $(cat $1 | awk '{print $2}' )) +port_arr=( $(cat $1 | awk '{print $3}' )) +place_arr=( $(cat $1 | awk '{print $4}' )) + +for ((i=0; i<${#user_arr[@]}; i++)) do + #create user + su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_entity (name, type) VALUES ('${user_arr[i]}','USER');\"" + en_id=$(su - postgres -c "psql postgres -d $2 -tAc \"SELECT entity_id FROM guacamole_entity WHERE name = '${user_arr[i]}';\"") + su - postgres -c "psql postgres -d $2 -tAc 
\"INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('$en_id', '\x74657374', now());\"" + + #create connection + cn=${user_arr[i]}${vmid_arr[i]} + + if [ 0 -eq $(su - postgres -c "psql postgres -d $2 -tAc \"SELECT connection_id FROM guacamole_connection WHERE connection_name = '$cn';\"" | wc -l) ]; then + su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection (connection_name, protocol) VALUES ('$cn', 'vnc');\"" + cn_id=$(su - postgres -c "psql postgres -d $2 -tAc \"SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;\"") + + su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','hostname','$host');\"" + su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','port','${port_arr[i]}');\"" + + #connection permission + su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection_permission(entity_id, connection_id, permission) VALUES ('$en_id', '$cn_id', 'READ');\"" + + else + cn_id=$(su - postgres -c "psql postgres -d $2 -tAc \"SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;\"") + su - postgres -c "psql postgres -d $2 -tAc \"UPDATE guacamole_connection_parameter SET parameter_value='$host' where connection_id='$cn_id' and parameter_name='hostname';\"" + su - postgres -c "psql postgres -d $2 -tAc \"UPDATE guacamole_connection_parameter SET parameter_value='${port_arr[i]}' where connection_id='$cn_id' and parameter_name='port';\"" + fi + +done \ No newline at end of file diff --git a/debian-devuan-netboot.sh b/debian-devuan-netboot.sh new file mode 100755 index 0000000..d64de5e --- /dev/null +++ b/debian-devuan-netboot.sh @@ -0,0 +1,78 @@ +#!/bin/sh +# Nico Schottelius, 2019-12-09 +# the ugly code is llnu + +#this can only run in the ungleich-tools directory because of the cat magiccommand........ 
+ +if [ $# -ne 2 ]; then + echo $0 suite out-directory + echo out-directory: into which directory to place resulting files + echo suite is for instance ascii, beowulf, etc + exit 1 +fi + +suite=$1; shift +outdir=$1; shift + +date=$(date +%F) + +basename=${suite}-${date} +dir=${outdir}/${basename} +kernel=${outdir}/kernel-${basename} +initramfs=${outdir}/initramfs-${basename} +keyurl=https://code.ungleich.ch/ungleich-public/__ungleich_staff_ssh_access/raw/master/files + +debootstrap "${suite}" "${dir}" + +# need non-free for firmware-bnx2 +echo "deb http://pkgmaster.devuan.org/merged ${suite} main contrib non-free" > ${dir}/etc/apt/sources.list + +chroot ${dir} apt update +chroot ${dir} apt install -y openssh-server rdnssd linux-image-amd64 firmware-bnx2 + + +cp ${dir}/boot/vmlinuz-* ${kernel} + +echo '* * * * * root ip -o -6 addr show | grep -E -v " lo |one" > /etc/issue' > ${dir}/etc/cron.d/ipv6addr + +mkdir -p ${dir}/root/.ssh + +for key in balazs dominique jinguk nico; do + curl -s ${keyurl}/${key}.pub >> ${dir}/root/.ssh/authorized_keys +done + +################################################################################ +# networking + +# always lo +cat > ${dir}/etc/network/interfaces << EOF +auto lo +iface lo inet loopback + +source-directory /etc/network/interfaces.d +EOF + +# find the boot interfaces at boot +cat > ${dir}/etc/rc.local < /etc/network/interfaces.d/bootinterface << eof +auto \$dev +iface \$dev inet6 auto +eof + +ifup "\${dev}" + +exit 0 +EOF + +chmod a+rx ${dir}/etc/rc.local" + +# ensure there is /init in the initramfs -> otherwise there is a kernel panic +# reason: initramfs is designed to be PRE regular os, so /init usually hands over to /sbin/init +# in our case, they are just the same +ln -s /sbin/init ${dir}/init + +# Finally building the initramfs +( cd ${dir} ; find . | cpio -H newc -o | gzip -9 > ${initramfs} ) diff --git a/detect-dns64-prefix.py b/detect-dns64-prefix.py new file mode 100644 index 0000000..1179ca4 --- /dev/null +++ b/detect-dns64-prefix.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# Nico Schottelius, 2020-01-07 +# Detect the DNS64 prefix +# Based on https://tools.ietf.org/html/draft-ietf-behave-nat64-discovery-heuristic-05 +# +# How it works: +# - ipv4only.arpa only has A records. 
+# - a DNS64 server will add AAAA records +# - we take this response (if any) and derive the IPv6 prefix from it +# + +import dns.resolver +import ipaddress + + +if __name__ == '__main__': + dns64_prefix = None + answers = dns.resolver.query('ipv4only.arpa', 'AAAA') + + for rdata in answers: + address = str(rdata) + network = ipaddress.IPv6Network("{}/96".format(address), + strict=False) + # print("{}: {}".format(rdata, network)) + print("{}".format(network)) diff --git a/devuan-netboot.sh b/devuan-netboot.sh deleted file mode 100644 index 6e7f39c..0000000 --- a/devuan-netboot.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -date=$(date +%F) -suite=ascii - -dir=${suit}-${date} - -debootstrap ${suite} diff --git a/etcd_import_opennebula_vm.py b/etcd_import_opennebula_vm.py new file mode 100644 index 0000000..d2c94c9 --- /dev/null +++ b/etcd_import_opennebula_vm.py @@ -0,0 +1,28 @@ +import json +import pprint +#import etcd3 + +with open("nico-vm-one.json", "r") as fd: + vmcontent = fd.read() + +#vm = json.loads(vmcontent.decode('utf-8')) +vm = json.loads(vmcontent) +pprint.pprint(vm['TEMPLATE']['DISK']) + +# storing info + +for_etcd={} +for_etcd['data_version'] = "1" +for_etcd['vm_id'] = vm['ID'] +for_etcd['owner'] = vm['UNAME'] + +for_etcd['disks'] = [] +for disk in vm['TEMPLATE']['DISK']: + disk_etcd = {} + disk_etcd['image_name'] = disk['IMAGE'] + disk_etcd['image_id'] = disk['IMAGE_ID'] + disk_etcd['datastore_name'] = disk['DATASTORE'] + disk_etcd['datastore_id'] = disk['DATASTORE_ID'] + for_etcd['disks'].append(disk_etcd) + +pprint.pprint(for_etcd) diff --git a/ldap-get-emails b/ldap-get-emails new file mode 100755 index 0000000..733811a --- /dev/null +++ b/ldap-get-emails @@ -0,0 +1,31 @@ +#!/bin/sh +# +# List mail addresses found under base DN $1 (defaults to dc=ungleich,dc=ch) + +set -e + +# Hardcoded parameters. +LDAP_SERVER="ldaps://ldap1.ungleich.ch" +LDAP_BIND_DN="cn=manager,dc=ungleich,dc=ch" + +if [ "$1" != "" ]; then + LDAP_SEARCH_BASE="$1" +else + LDAP_SEARCH_BASE="dc=ungleich,dc=ch" +fi + +# Read secrets from environment. +if [ "$LDAP_BIND_PASSWD" = "" ]; then + echo "You have to define LDAP_BIND_PASSWD before launching this script." >&2 + exit 1 +fi + +# Extract mail addresses from LDAP directory. +ldap_search_result="$( + ldapsearch -x -H "$LDAP_SERVER" \ + -D "$LDAP_BIND_DN" \ + -w "$LDAP_BIND_PASSWD" \ + -b "$LDAP_SEARCH_BASE" mail + )" + +echo "$ldap_search_result" | grep 'mail:' | cut -d ' ' -f 2 - diff --git a/magiccommand b/magiccommand new file mode 100755 index 0000000..e724d8e --- /dev/null +++ b/magiccommand @@ -0,0 +1,2 @@ +* * * * * root ip -o -6 addr show | grep -E -v "lo |one" | awk '{print $1" " $2": "$4}' >> /dev/tty1 + diff --git a/one-get-instances b/one-get-instances new file mode 100755 index 0000000..653fed6 --- /dev/null +++ b/one-get-instances @@ -0,0 +1,18 @@ +#!/bin/sh +# +# This script extract VM IDs and filter them if a pattern is provided as first +# argument. + +set -e + +# Extract instances from ONE. +instances=$(onevm list --csv | tail -n +2) + +# Filter them is a pattern has been provided. +if [ "$1" != "" ]; then + filtered_instances="$(echo "$instances" | grep -E "$1")" + instances="$filtered_instances" +fi + +# Outputs instance IDs. +echo "$instances" | cut -d ',' -f 1 - diff --git a/one-inspect-instance-network b/one-inspect-instance-network new file mode 100755 index 0000000..70e5795 --- /dev/null +++ b/one-inspect-instance-network @@ -0,0 +1,18 @@ +#!/bin/sh +# +# This script is expected to run on the ONE server (i.e. 
+# opennebula.ungleich.ch). + +set -e + +# Fetch instance list from STDIN. +instances=$(cat -) + +# For every instance, extract relevant information: +for id in $instances; do + nics_raw="$(onevm show --xml $id | xml_grep 'NIC')" + networks="$(echo $nics_raw | xml_grep --text_only 'NETWORK' | tr '\n' ',' | sed 's/,$//')" + ip="$(echo $nics_raw | xml_grep --text_only 'IP' | tr '\n' ',' | sed 's/,$//')" + ip6="$(echo $nics_raw | xml_grep --text_only 'IP6_GLOBAL' | tr '\n' ',' | sed 's/,$//')" + echo "$id,$networks,$ip,$ip6" +done diff --git a/opennebula-images/FreeBSD-build-opennebula-image-generic.sh b/opennebula-images/FreeBSD-build-opennebula-image-generic.sh new file mode 100644 index 0000000..d251f56 --- /dev/null +++ b/opennebula-images/FreeBSD-build-opennebula-image-generic.sh @@ -0,0 +1,243 @@ +#!/bin/sh +# +# Copyright 2020 -- Evilham +# This is BSD licensed as it's based on BSD-licensed code +# +# We could have used e.g. something like: +# - https://git.sr.ht/~sircmpwn/builds.sr.ht/tree/master/images/freebsd/genimg +# +# But we actually do want to compile the kernel, so that the IPv6-only images +# are different and don't support INET. + +# Explode if something goes wrong +set -e + +# What are we building? +# These are the only configuration options. +# They default to current environment. +# RELEASE: should be 'CURRENT' for current or 'X.Y' Defaults to 'CURRENT'. +# ARCH: probably amd64 for DCL +# VMFORMATS: defaults to qcow2, can also be raw. See man mkimg. +# OPENNEBULA_CONTEXT_VERSION: For DCL's OpenNebula that'd be 5.10.0 (default) +# OPENNEBULA_CONTEXT_REVISION: Defaults to 1. +RELEASE=${RELEASE:-CURRENT} +if [ "${RELEASE}" == "CURRENT" ]; then + SRCBRANCH="master" +else + SRCBRANCH="releng/${RELEASE}" +fi +ARCH=${ARCH:-amd64} +VMFORMATS=${VMFORMATS:-qcow2} +OPENNEBULA_CONTEXT_VERSION=${OPENNEBULA_CONTEXT_VERSION:-5.10.0} +OPENNEBULA_CONTEXT_REVISION=${OPENNEBULA_CONTEXT_REVISION:-1} + +# Didn't see a need to make these configurable. +CHROOTDIR="/scratch" +SRCDIR="${CHROOTDIR}/usr/src" +OUR_DIR="$(realpath $(dirname "${0}"))" +OUR_SRCCONF="${SRCDIR}/release/src.conf" +OUR_RELEASE_CONF="${SRCDIR}/release/release.conf" +# Shorthand for the package file name. +OPENNEBULA_CONTEXT="one-context-${OPENNEBULA_CONTEXT_VERSION}_${OPENNEBULA_CONTEXT_REVISION}.txz" + +setup_sources() { + # Let's use git, we might need to install it + if ! which git 2>&1 > /dev/null; then + pkg install -y git + fi + + if [ ! -d "$(dirname ${SRCDIR})" ]; then + mkdir -p "$(dirname ${SRCDIR})" + fi + + # Checkout needed branch + if [ ! 
-d "${SRCDIR}" ]; then + git clone "https://github.com/freebsd/freebsd" \ + --branch "${SRCBRANCH}" "${SRCDIR}" + else + GIT_CMD="git -C ${SRCDIR}" + ${GIT_CMD} clean -df + ${GIT_CMD} reset --hard + ${GIT_CMD} fetch + ${GIT_CMD} checkout "${SRCBRANCH}" + ${GIT_CMD} pull + fi + + # Add settings for IPv6-only kernel + cat > "${SRCDIR}/sys/${ARCH}/conf/GENERIC-IPV6ONLY" << EOF +include GENERIC +ident GENERIC-IPV6ONLY +makeoptions MKMODULESENV+="WITHOUT_INET_SUPPORT=" +nooptions INET +nodevice gre +EOF + # Fix vmimage.subr to install custom package and fix other things + cat >> "${SRCDIR}/release/tools/vmimage.subr" << EOF +vm_extra_install_ports() { + # Make sure we install the opennbula context package + cp "/${OPENNEBULA_CONTEXT}" "\${DESTDIR}/tmp/${OPENNEBULA_CONTEXT}" + chroot \${DESTDIR} \${EMULATOR} env ASSUME_ALWAYS_YES=yes \\ + /usr/sbin/pkg add '/tmp/${OPENNEBULA_CONTEXT}' + + # Now make sure the system has better defaults + cat >> "\${DESTDIR}/etc/rc.conf" << eof +# Update to latest patch on first boot +firstboot_freebsd_update_enable="YES" +# Enable OpenNebula's service. +one_context_enable="YES" +# Enable SSH for customers +sshd_enable="YES" +# Clear tmp on boot +clear_tmp_enable="YES" +# Disable sendmail by default +sendmail_enable="NONE" +# Disable crash dumps +dumpdev="NO" +eof + # Enable root access with SSH key. + # It is user's responsibility to further secure their system. + sed -i '' -E \ + 's/(^#[ ]*|^)PermitRootLogin .*/PermitRootLogin without-password/' \ + "\${DESTDIR}/etc/ssh/sshd_config" +} +EOF + # Skip building iso images + rm "${SRCDIR}/release/${ARCH}/mkisoimages.sh" + # This is a hack to not build the memstick + cat > "${SRCDIR}/release/${ARCH}/make-memstick.sh" < \${CHROOTDIR}/etc/src-env.conf +} + +## Set the directory within which the release will be built. +CHROOTDIR="${CHROOTDIR}" + +## Set to override the default target architecture and kernel +TARGET="${ARCH}" +TARGET_ARCH="${ARCH}" +KERNEL="${KERNEL_CONFIG}" + +## Set to specify a custom make.conf and/or src.conf +SRC_CONF="${OUR_SRCCONF}" + +# Since these are VMs, users should add other components if they want to. +NODOC=YES +NOPORTS=YES +NOSRC=YES + +# We manage sources manually +SRC_UPDATE_SKIP=YES + +## Set to pass additional flags to make(1) for the build chroot setup, such +## as TARGET/TARGET_ARCH. +# This was necessary for "cross-compiling" +CHROOT_MAKEENV="MK_LLVM_TARGET_X86=yes" + +WITH_VMIMAGES=YES + +# VM image size, see man 1 truncate +VMSIZE="10G" + +# List of disk image formats, see man mkgimg. +VMFORMATS="${VMFORMATS}" + +# These variables have to be exported because they are needed in subprocesses. +export NOSWAP=YES +# Custom ports +# - firstboot-freebsd-update helps us not have to create an image for each +# patch level. We still will have to do it for each minor version update. +# - bash is apparently needed for one-context +export VM_EXTRA_PACKAGES="firstboot-freebsd-update bash" +EOF +} + +_do_run_release() { + . 
"${SRCDIR}/release/release.sh" +} +run_release() { + _do_run_release -c "${OUR_RELEASE_CONF}" +} + + +build_image() { + # Generate configuration + echo "${2}" > "${OUR_SRCCONF}" + KERNEL_CONFIG="${1}" + gen_releaseconf > "${OUR_RELEASE_CONF}" + # Be paranoid about files and stuff + sync + # Continue with the release script + run_release + # Be paranoid about files and stuff + sync + + mv "${CHROOTDIR}/R/vmimages" "${OUR_DIR}/FreeBSD-${RELEASE}-${1}" + + # Be paranoid about files and stuff + sync +} + +our_main() { + case "$1" in + --dualstack) + BUILD_DUALSTACK=yes + ;; + --ipv6only) + BUILD_IPV6ONLY=yes + ;; + *) + cat << EOF +Run with --dualstack or --ipv6only depending on the image you want. +EOF + exit 1 + ;; + esac + setup_sources + setup_our_env + # Fetch OpenNebula's context package + fetch "https://github.com/OpenNebula/addon-context-linux/releases/download/v${OPENNEBULA_CONTEXT_VERSION}/${OPENNEBULA_CONTEXT}" \ + -o "${CHROOTDIR}/${OPENNEBULA_CONTEXT}" + # Do run + if [ -n "${BUILD_DUALSTACK}" ]; then + build_image "GENERIC" + fi + if [ -n "${BUILD_IPV6ONLY}" ]; then + build_image "GENERIC-IPV6ONLY" "$(cat << EOF +WITHOUT_INET=yes +WITHOUT_INET_SUPPORT=yes +EOF +)" + fi + + cat << EOF + +*************** DONE *************** +You will find the images under "${OUR_DIR}". +************************************ +EOF +} + +our_main "${@}" diff --git a/opennebula-images/FreeBSD-build-opennebula-image.sh b/opennebula-images/FreeBSD-build-opennebula-image.sh new file mode 100755 index 0000000..c72a2b0 --- /dev/null +++ b/opennebula-images/FreeBSD-build-opennebula-image.sh @@ -0,0 +1,31 @@ +#!/bin/sh +# +# Copyright 2020 -- Evilham +# This is BSD licensed as it's based on BSD-licensed code +# +# +# This builds all needed FreeBSD images for ungleich's Data Center Light +# When there are new releases, they should be updated here and the script +# should run. +# 11.4 is scheduled end of June 2020 +# 12.2 is scheduled end of October 2020 +# + +SUPPORTED_RELEASES="11.3 12.1" + +# This should run in a DCL VM with an OK amount of cores (4/8 minimum), +# 4G RAM, and storage of roughly 20G + 5G * #resulting_images. +# +# This is because there is the base system, a 'pristine chroot', and during the +# build there can be 2 copies of the resulting system written to the system. +# Since there are 4 combinations of images: +# {STABLE,RELEASE} x {dualstack, IPv6ONLY} +# +# That means we'll need to assign about 40G storage to be on the safe side. + +for release in ${SUPPORTED_RELEASES}; do + for build in dualstack ipv6only; do + env RELEASE=${release} sh FreeBSD-build-opennebula-image-generic.sh --${build} \ + | tee "FreeBSD-${release}-${build}.log" + done +done diff --git a/opennebula-images/alpine-build-opennebula-image.sh b/opennebula-images/alpine-build-opennebula-image.sh new file mode 100755 index 0000000..0a074b4 --- /dev/null +++ b/opennebula-images/alpine-build-opennebula-image.sh @@ -0,0 +1,179 @@ +#!/bin/sh + +# This script generates Alpine images for OpenNebula. +# +# Test image locally (without network) with: +# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2 + +set -e +set -x + +# XXX: Handle command-line arguments? 
+RELEASE=v3.11 +ARCH=x86_64 +IMAGE_PATH=alpine-$RELEASE-$(date -I).img.qcow2 +IMAGE_SIZE=10G +NBD_DEVICE=/dev/nbd0 +APK_MIRROR=http://dl-2.alpinelinux.org/alpine/ # Mind the trailing / + +ONE_CONTEXT_APK_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context-5.10.0-r1.apk" +ONE_CONTEXT_APK_PATH=/root/one-context.apk + +cleanup() { + # The order here is important. + umount /mnt/dev/pts 2>/dev/null || true + umount /mnt/dev/shm 2>/dev/null || true + umount /mnt/dev 2>/dev/null || true + umount /mnt/proc 2>/dev/null || true + umount /mnt/run 2>/dev/null || true + umount /mnt/sys 2>/dev/null || true + umount /mnt/boot 2>/dev/null || true + umount /mnt 2>/dev/null || true + qemu-nbd --disconnect "$NBD_DEVICE" || true +} + +run_root() { + chroot /mnt /usr/bin/env \ + PATH=/sbin:/usr/sbin:/bin:/usr/bin \ + sh -c "$*" +} + +if [ "$(whoami)" != 'root' ]; then + echo "This script must be run as root." >&2 + exit 1 +fi + +if [ "$(lsb_release --short --id)" != "Alpine" ]; then + echo "WARNING: this script has been designed to run on an Alpine system." >&2 + echo "WARNING: Not running Alpine. Giving you 5 seconds to abort." >&2 + sleep 5 +fi + +# Create base QCOW2 image. +qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE" +modprobe nbd max_part=16 +qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH" + +# Wait for qemu-nbd to settle. +sleep 1 + +# Don't forget to cleanup, even if the script crash. +trap cleanup EXIT + +# Create partition table, format partitions. +sfdisk --no-reread "$NBD_DEVICE" < /mnt/etc/hosts << EOF +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 + +EOF + +# Configure package sources and update package index. +run_root setup-timezone -z UTC +if [ "$RELEASE" = "edge" ] +then + cat >/mnt/etc/apk/repositories </mnt/etc/apk/repositories <>/mnt/etc/fstab </mnt/boot/extlinux.conf < "/mnt$ONE_CONTEXT_APK_PATH" +run_root apk add --allow-untrusted "$ONE_CONTEXT_APK_PATH" +run_root rm "$ONE_CONTEXT_APK_PATH" + +# Remove resolvconf: handled by uncloud-init. +run_root rm /etc/resolv.conf + +# Make sure everything is written to disk before exiting. +sync diff --git a/opennebula-images/centos-build-opennebula-image.sh b/opennebula-images/centos-build-opennebula-image.sh new file mode 100755 index 0000000..6a8fe31 --- /dev/null +++ b/opennebula-images/centos-build-opennebula-image.sh @@ -0,0 +1,170 @@ +#!/bin/sh + +# This script generates CentOS images for OpenNebula. + +# Depends on the following packages (as of CentOS 8): +# qemu-img util-linux coreutils dnf curl e2fsprogs + +# Run locally (without network) with: +# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2 + +set -e +set -x + +# XXX: Handle command-line arguments? +RELEASE=8 +ARCH=x86_64 +IMAGE_PATH=centos-$RELEASE-$(date --iso-8601).img +IMAGE_SIZE=10G +LOOPBACK_DEVICE=/dev/loop0 + +# TODO: find the package definition and built ourself, publish in some RPM repository. +ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context-5.10.0-1.el8.noarch.rpm" +ONE_CONTEXT_RPM_PATH=/root/one-context.rpm + +cleanup() { + # The order here is important. 
+ umount /mnt/dev/pts 2>/dev/null || true + umount /mnt/dev/shm 2>/dev/null || true + umount /mnt/dev 2>/dev/null || true + umount /mnt/proc 2>/dev/null || true + umount /mnt/run 2>/dev/null || true + umount /mnt/sys 2>/dev/null || true + umount /mnt/boot 2>/dev/null || true + umount /mnt 2>/dev/null || true + losetup -d "$LOOPBACK_DEVICE" +} + +run_root() { + chroot /mnt /usr/bin/env \ + PATH=/sbin:/usr/sbin:/bin:/usr/bin \ + sh -c "$*" +} + +if [ "$(whoami)" != 'root' ]; then + echo "This script must be run as root." >&2 + exit 1 +fi + +if [ ! -f '/etc/centos-release' ]; then + echo "WARNING: this script has been designed to run on a CentOS system." >&2 + echo "WARNING: Not running CentOS. Giving you 5 seconds to abort." >&2 + sleep 5 +fi + +# Create base RAW image (no LOOPBACK support in RHEL/CentOS). +qemu-img create -f raw "$IMAGE_PATH" "$IMAGE_SIZE" +losetup "$LOOPBACK_DEVICE" "$IMAGE_PATH" + +# Don't forget to cleanup, even if the script crash. +trap cleanup EXIT + +# Create partition table, format partitions. +{ +sfdisk --no-reread "$LOOPBACK_DEVICE" < /mnt/etc/hosts << EOF +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 + +EOF + +# See https://github.com/OpenNebula/addon-context-linux/issues/121 for details. +# network-scripts.x86_64 : Legacy scripts for manipulating of network devices +run_root dnf -y install network-scripts + +# Install (magic?) one-context RPM and hope things works as expected. +curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH" +run_root dnf -y install "$ONE_CONTEXT_RPM_PATH" +run_root rm "$ONE_CONTEXT_RPM_PATH" + +# Install resize2fs, which is required to resize the root file-system. +run_root dnf -y install e2fsprogs + +# Initalize base services. +run_root systemd-machine-id-setup +run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime + +# Install and configure NTP client. +run_root dnf install -y chrony +run_root systemctl enable chronyd.service + +# Install kernel and bootloader. +# Note: linux-firmware is not required our environment and takes almost 200M +# uncompressed but is a direct dependency of kernel-core... +run_root dnf -y install kernel grub2 + +# Add support for virtio block devices at boot time. +cat > /mnt/etc/dracut.conf.d/virtio-blk.conf <>/mnt/etc/fstab </dev/null || true + umount /mnt/dev/shm 2>/dev/null || true + umount /mnt/dev 2>/dev/null || true + umount /mnt/proc 2>/dev/null || true + umount /mnt/run 2>/dev/null || true + umount /mnt/sys 2>/dev/null || true + umount /mnt/boot 2>/dev/null || true + umount /mnt 2>/dev/null || true + qemu-nbd --disconnect "$NBD_DEVICE" || true +} + +run_root() { + chroot /mnt /usr/bin/env \ + PATH=/sbin:/usr/sbin:/bin:/usr/bin \ + sh -c "$*" +} + +if [ "$(whoami)" != 'root' ]; then + echo "This script must be run as root." >&2 + exit 1 +fi + +if [ $(lsb_release --short --id) != "Ubuntu" ]; then + echo "WARNING: this script has been designed to run on an Ubuntu system." >&2 + echo "WARNING: Not running Ubuntu. Giving you 5 seconds to abort." >&2 + sleep 5 +fi + +# Create base QCOW2 image. +qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE" +modprobe nbd max_part=16 +qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH" + +# Wait for qemu-nbd to settle. +sleep 1 + +# Don't forget to cleanup, even if the script crash. +trap cleanup EXIT + +# Create partition table, format partitions. 
+sfdisk --no-reread "$NBD_DEVICE" < /mnt/etc/hosts << EOF +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 + +EOF + +# Configure package sources and update package index. +cat >/mnt/etc/apt/sources.list < "/mnt$ONE_CONTEXT_DEB_PATH" +run_root apt-get -y install "$ONE_CONTEXT_DEB_PATH" +run_root rm "$ONE_CONTEXT_DEB_PATH" + +# Manually install legacy network scripts used by one-context. +run_root apt-get -y install ifupdown + +# Initalize base services. +run_root systemd-machine-id-setup + +run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime +run_root systemctl enable systemd-timesyncd.service + +# Install kernel and bootloader. Do not autoconfigure grub. +run_root 'echo "grub-pc grub-pc/install_devices_empty boolean true" | debconf-set-selections' +run_root DEBIAN_FRONTEND=noninteractive apt-get -y install locales linux-image-amd64 grub-pc + +# Configure grub. +run_root grub-install --target=i386-pc "${NBD_DEVICE}" +run_root grub-mkconfig -o /boot/grub/grub.cfg + +# Install en configure SSH daemon. +run_root apt-get -y install openssh-server + +# Install haveged due to lack of entropy in ONE environment. +run_root apt-get -y install haveged +run_root systemctl enable haveged.service + +# Generate fstab file. +boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1") +root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2") +cat >>/mnt/etc/fstab <> /mnt/etc/resolv.conf +cp /etc/resolv.conf /mnt/etc/resolv.conf + +# Initialize /etc/hosts. +cat > /mnt/etc/hosts << EOF +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 + +EOF # See https://github.com/OpenNebula/addon-context-linux/issues/121 for details. # network-scripts.x86_64 : Legacy scripts for manipulating of network devices @@ -119,15 +123,32 @@ curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH" run_root dnf -y install "$ONE_CONTEXT_RPM_PATH" run_root rm "$ONE_CONTEXT_RPM_PATH" +# Install resize2fs, which is required to resize the root file-system. +run_root dnf -y install e2fsprogs + # Initalize base services. run_root systemd-machine-id-setup -run_root systemctl enable systemd-networkd.service run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime run_root systemctl enable systemd-timesyncd.service +# Install haveged due to lack of entropy in ONE environment. +run_root dnf -y install haveged +run_root systemctl enable haveged.service + # Install kernel and bootloader. +# Note: linux-firmware is not required our environment and takes almost 200M +# uncompressed but is a direct dependency of kernel-core... run_root dnf -y install kernel grub2 + +# Add support for virtio block devices at boot time. +cat > /mnt/etc/dracut.conf.d/virtio-blk.conf </dev/null || true + umount /mnt/dev/shm 2>/dev/null || true + umount /mnt/dev 2>/dev/null || true + umount /mnt/proc 2>/dev/null || true + umount /mnt/run 2>/dev/null || true + umount /mnt/sys 2>/dev/null || true + umount /mnt/boot 2>/dev/null || true + umount /mnt 2>/dev/null || true + qemu-nbd --disconnect "$NBD_DEVICE" || true +} + +run_root() { + chroot /mnt /usr/bin/env \ + PATH=/sbin:/usr/sbin:/bin:/usr/bin \ + sh -c "$*" +} + +if [ "$(whoami)" != 'root' ]; then + echo "This script must be run as root." >&2 + exit 1 +fi + +if [ $(lsb_release --short --id) != "Ubuntu" ]; then + echo "WARNING: this script has been designed to run on an Ubuntu system." 
>&2 + echo "WARNING: Not running Ubuntu. Giving you 5 seconds to abort." >&2 + sleep 5 +fi + +# Create base QCOW2 image. +qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE" +modprobe nbd max_part=16 +qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH" + +# Wait for qemu-nbd to settle. +sleep 1 + +# Don't forget to cleanup, even if the script crash. +trap cleanup EXIT + +# Create partition table, format partitions. +sfdisk --no-reread "$NBD_DEVICE" < /mnt/etc/hosts << EOF +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 + +EOF + +# Configure package sources and update package index. +cat >/mnt/etc/apt/sources.list < "/mnt$ONE_CONTEXT_DEB_PATH" +run_root apt-get -y install "$ONE_CONTEXT_DEB_PATH" +run_root rm "$ONE_CONTEXT_DEB_PATH" + +# Manually install legacy network scripts used by one-context. +run_root apt-get -y install ifupdown + +# Initalize base services. +run_root systemd-machine-id-setup + +run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime +run_root systemctl enable systemd-timesyncd.service + +# Install kernel and bootloader. Do not autoconfigure grub. +run_root echo "grub-pc grub-pc/install_devices_empty boolean true" | debconf-set-selections +run_root DEBIAN_FRONTEND=noninteractive apt-get -y install locales linux-base linux-image-generic grub-pc + +# Configure grub. +run_root grub-install --target=i386-pc "${NBD_DEVICE}" +run_root grub-mkconfig -o /boot/grub/grub.cfg + +# Install en configure SSH daemon. +run_root apt-get -y install openssh-server + +# Generate fstab file. +boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1") +root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2") +cat >>/mnt/etc/fstab <> ~/vm_vnc_list + done +done \ No newline at end of file diff --git a/vm_list_dual_uid b/vm_list_dual_uid new file mode 100644 index 0000000..bf21c0b --- /dev/null +++ b/vm_list_dual_uid @@ -0,0 +1,20 @@ +#!/bin/bash -e +#option $1 is ldap password +#option $2 is ou + + +uid_list=( $(ldapsearch -x -H ldaps://ldap1.ungleich.ch:636 -D cn=manager,dc=ungleich,dc=ch -w $1 -b "ou=$2,dc=ungleich,dc=ch" | grep uid: | awk '{print $2}') ) + +for ((i=0; i<${#uid_list[@]}; i++)) do + uid_temp=$(echo ${uid_list[i]} | sed "s/b'//g" | sed "s/'//g") + list_email[$i]=$(ldapsearch -x -H ldaps://ldap1.ungleich.ch:636 -D cn=manager,dc=ungleich,dc=ch -w $1 -b "uid=${uid_list[$i]},ou=$2,dc=ungleich,dc=ch" | grep mail: | awk '{print $2}' ) + list_vmid=() + list_vmid=( $(onevm list | grep ${list_email[$i]} | grep runn | awk '{print $1}' ) ) + for ((j=0; j<${#list_vmid[@]}; j++)) do + temp=$(onevm show ${list_vmid[$j]} | grep PORT) + temp1="${temp#*\"}" + port="${temp1%%\"*}" + host=$(onevm show ${list_vmid[$j]} | grep HOST | grep ungleich | awk '{print $3}') + echo $uid_temp ${list_vmid[$j]} $port $host >> ~/vm_vnc_list + done +done diff --git a/vm_map.sh b/vm_map.sh new file mode 100755 index 0000000..15c80dc --- /dev/null +++ b/vm_map.sh @@ -0,0 +1,5 @@ +vm_list=( $(virsh list | awk '{print $2}') ) + +for ((i=0; i<${#vm_list[@]}; i++)) do + ceph osd map hdd ${vm_list[i]} +done diff --git a/vnc_console_connection/.gitkeep b/vnc_console_connection/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/vnc_console_connection/config.py b/vnc_console_connection/config.py new file mode 100755 index 0000000..7b7acc7 --- /dev/null +++ b/vnc_console_connection/config.py @@ -0,0 +1,5 @@ +import configparser + +config = configparser.ConfigParser(allow_no_value=True) 
+config.read('/opt/ungleich-tools/vnc_console_connection/config-and-secrets.conf') + diff --git a/vnc_console_connection/db_export.py b/vnc_console_connection/db_export.py new file mode 100755 index 0000000..d283eb4 --- /dev/null +++ b/vnc_console_connection/db_export.py @@ -0,0 +1,55 @@ +import psycopg2 as pg2 +from config import config + +db_name = config['db']['db_name'] +db_user = config['db']['db_user'] +db_password = config['db']['db_password'] +db_port = config['db']['db_port'] + + +def setconn(u_id, vm_num, vm_port,vm_host): + conn = pg2.connect("host = localhost dbname={} user={} password={} port={}".format(db_name,db_user,db_password,db_port)) + conn.autocommit = True + cur = conn.cursor() + cur.execute("SELECT entity_id FROM guacamole_entity WHERE name = '{}'".format(u_id)) + row = cur.fetchone() + if row == None: + cur.execute("INSERT INTO guacamole_entity (name, type) VALUES ('{}','USER')".format(u_id)) + cur.execute("SELECT entity_id FROM guacamole_entity WHERE name = '{}'".format(u_id)) + row = cur.fetchone() + en_id = row[0] + cur.execute("INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('{}', '\x74657374', now())".format(en_id)) + print("create user : " , u_id) + else: + en_id = row[0] + cur.execute("SELECT password_hash FROM guacamole_user WHERE entity_id = '{}'".format(en_id)) + row = cur.fetchone() + if row == None: + cur.execute("INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('{}', '\x74657374', now())".format(en_id)) + print("user exsit") + cn = "{}{}".format(u_id,vm_num) + cur.execute("SELECT connection_id FROM guacamole_connection WHERE connection_name = '{}'".format(cn)) + row = cur.fetchone() + if row == None: + #create connection + cur.execute("INSERT INTO guacamole_connection (connection_name, protocol) VALUES ('{}', 'vnc')".format(cn)) + cur.execute("SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '{}' AND parent_id IS NULL".format(cn)) + temp_cn_id = cur.fetchone() + cn_id = temp_cn_id[0] + cur.execute("INSERT INTO guacamole_connection_parameter VALUES ('{}','hostname','{}')".format(cn_id, vm_host)) + cur.execute("INSERT INTO guacamole_connection_parameter VALUES ('{}','port','{}')".format(cn_id,vm_port)) + #connection permission + cur.execute("INSERT INTO guacamole_connection_permission(entity_id, connection_id, permission) VALUES ('{}', '{}', 'READ')".format(en_id,cn_id)) + #clipboard-encoding + cur.execute("INSERT INTO guacamole_connection_parameter VALUES ('{}','clipboard-encoding','UTF-8')".format(cn_id)) + print("create connection") + else: + cur.execute("SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '{}' AND parent_id IS NULL".format(cn)) + temp_cn_id = cur.fetchone() + cn_id = temp_cn_id[0] + cur.execute("UPDATE guacamole_connection_parameter SET parameter_value='{}' where connection_id='{}' and parameter_name='hostname'".format(vm_host,cn_id)) + cur.execute("UPDATE guacamole_connection_parameter SET parameter_value='{}' where connection_id='{}' and parameter_name='port'".format(vm_port,cn_id)) + #cur.execute("UPDATE guacamole_connection_parameter SET parameter_value='UTF-8' where connection_id='{}' and parameter_name='clipboard-encoding'".format(cn_id)) + print("no connection") + conn.close() + return None \ No newline at end of file diff --git a/vnc_console_connection/get_info.py b/vnc_console_connection/get_info.py new file mode 100755 index 0000000..e98ae72 --- /dev/null +++ b/vnc_console_connection/get_info.py @@ -0,0 +1,88 @@ +import 
json + +from enum import IntEnum +from xmlrpc.client import ServerProxy as RPCClient +from xmltodict import parse +from config import config +from ldap_list import vm_list +from db_export import setconn + +# Constants +ALL_VM_STATES = -1 +START_ID = -1 # First id whatever it is +END_ID = -1 # Last id whatever it is +session_string = config['oca']['client_secrets'] +opnserver = config['oca']['opn_server'] + +class VMState(IntEnum): + INIT = 0 + PENDING = 1 + HOLD = 2 + ACTIVE = 3 + STOPPED = 4 + SUSPENDED = 5 + DONE = 6 + FAILED = 7 + POWEROFF = 8 + UNDEPLOYED = 9 + CLONING = 10 + CLONING_FAILURE = 11 + + +class VmFilterFlag(IntEnum): + UIDUserResources = 0 # UID User’s Resources + UserAndItsGroupsResources = -1 # Resources belonging to the user and any of his groups + AllResources = -2 # All resources + UserResources = -3 # Resources belonging to the user + UserPrimaryGroupResources = -4 # Resources belonging to the user’s primary group + + +class VM: + def __init__(self, vm: dict): + self.id = vm.get('ID', None) + self.owner = { + 'id': vm.get('UID', None), + 'name': vm.get('UNAME', None), + 'gname': vm.get('GNAME', None) + } + self.name = vm.get('NAME', None) + self.status = vm.get('STATE', None) + if self.status: + self.status = VMState(int(self.status)).name.lower() + + template = vm['TEMPLATE'] + + self.graphics = template.get('GRAPHICS', {}) + self.memory = template.get('MEMORY', None) + self.vcpu = template.get('VCPU', None) + self.host = { + 'name': ((vm.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HOSTNAME', None), + 'id': ((vm.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HID', None), + } + + +def main(): + with RPCClient(opnserver) as rpc_client: + success, response, *_ = rpc_client.one.vmpool.infoextended( + session_string , VmFilterFlag.AllResources.value, START_ID, END_ID, VMState.ACTIVE.value + ) + if success: + vms = json.loads(json.dumps(parse(response)))['VM_POOL']['VM'] + for entry in vm_list.entries: + temp_uname = entry.uid + for i, vm in enumerate(vms): + vm_user = vm['UNAME'] + vm_id = vm['ID'] + vm_port = vm['TEMPLATE']['GRAPHICS'].get('PORT') + vm_host = vm['HISTORY_RECORDS']['HISTORY']['HOSTNAME'] + if vm['UNAME'] == temp_uname: + #print(entry.uid, vm_id, vm_port, vm_host) + setconn(entry.uid, vm_id, vm_port, vm_host) + + else: + print(response) + + +if __name__ == "__main__": + main() + diff --git a/vnc_console_connection/ldap_list.py b/vnc_console_connection/ldap_list.py new file mode 100755 index 0000000..a9e322f --- /dev/null +++ b/vnc_console_connection/ldap_list.py @@ -0,0 +1,30 @@ +import ldap3 +import sys +from config import config +from ldap3 import Server, Connection, ObjectDef, Reader, ALL, SUBTREE, ALL_ATTRIBUTES +from ldap3.core import exceptions + + +LDAP_SERVER = config['ldap']['server'] +LDAP_PASSWORD = config['ldap']['admin_password'] +LDAP_USER = config['ldap']['admin_dn'] +LDAP_PORT = int(config['ldap']['ldap_port']) + +# Create the Server object with the given address. +server = Server(LDAP_SERVER, LDAP_PORT, get_info=ALL) +#Create a connection object, and bind with the given DN and password. +try: + conn = Connection(server, LDAP_USER, LDAP_PASSWORD, auto_bind=True) + print('LDAP Bind Successful.') + # Perform a search for a pre-defined criteria. + # Mention the search filter / filter type and attributes. 
+ conn.search('ou=customer,dc=ungleich,dc=ch', '(&(!({}={})))'.format('mail','*@ungleich.ch') , attributes=['uid','mail']) + #conn.search('ou=customer,dc=ungleich,dc=ch', '(objectClass=*)' , attributes=['uid','mail']) + # Print the resulting entries. + #for entry in conn.entries: + #print(entry.uid, entry.mail) + vm_list = conn +except exceptions.LDAPException as err: + sys.exit(f'LDAP Error: {err}') + + diff --git a/vpn-statistics.sh b/vpn-statistics.sh index a1e7960..c721cf9 100755 --- a/vpn-statistics.sh +++ b/vpn-statistics.sh @@ -9,3 +9,10 @@ done # countries with counter ( for ip in $(wg | grep endpoint | sed -e 's/endpoint: //' -e 's/\(.*\):[0-9]*/\1/' -e 's/\[//' -e 's/\]//'); do curl -s ipinfo.io/$ip | grep -e country ; done ) | sort | uniq -c | sort -g + +# Get number of configured VPNs +configured_vpns=$(wg show | grep ^peer | wc -l) +active_vpns=$(wg show | grep endpoint | wc -l) + +echo "Configured VPNs: ${configured_vpns}" +echo "Active VPNs: ${active_vpns}"
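A minimal usage sketch for ceph-osd-create-start-alpine (device name and weight below are placeholders); per its usage message the script takes a disk, a device class and an optional CRUSH weight, otherwise the weight is derived via lsblk as the disk size in TiB:

    # prepare and start a bluestore OSD on /dev/sdc with device class "ssd"
    ./ceph-osd-create-start-alpine /dev/sdc ssd
    # same, but passing an explicit CRUSH weight instead of the lsblk-derived size
    ./ceph-osd-create-start-alpine /dev/sdc hdd 3.64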
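A sketch of how the guacamole session scripts fit together, assuming placeholder host and database names: vm_list_dual_uid appends one line per running VM to ~/vm_vnc_list (uid, VM id, VNC port, host), which create-guacamole-session-ldap-DB then loads into the guacamole database:

    # ~/vm_vnc_list format, one VM per line: <ldap-uid> <vm-id> <vnc-port> <vm-host>
    # alice 1234 5905 one-host1.example.com
    bash create-guacamole-session-ldap-DB ~/vm_vnc_list db.example.com guacamole guacamole_db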
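A usage sketch for debian-devuan-netboot.sh, run from a checkout of ungleich-tools as its header comment requires; the suite and output directory are examples:

    ./debian-devuan-netboot.sh beowulf /tmp/netboot
    # produces /tmp/netboot/kernel-beowulf-<date> and /tmp/netboot/initramfs-beowulf-<date>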
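detect-dns64-prefix.py depends on dnspython; a quick way to run it (the sample output assumes a resolver that advertises the well-known NAT64 prefix):

    pip3 install dnspython
    python3 detect-dns64-prefix.py
    # prints the derived /96 prefix, e.g. 64:ff9b::/96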
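ldap-get-emails expects the bind password in the environment and accepts an optional search base (default dc=ungleich,dc=ch); the password below is a placeholder:

    LDAP_BIND_PASSWD='...' ./ldap-get-emails 'ou=customer,dc=ungleich,dc=ch'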
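one-get-instances and one-inspect-instance-network are designed to be chained on the OpenNebula frontend; the filter pattern below is a placeholder:

    ./one-get-instances 'some-pattern' | ./one-inspect-instance-network
    # output per VM: <id>,<networks>,<ipv4 addresses>,<ipv6 addresses>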