Compare commits

1 commit: c70437e568

41 changed files with 2 additions and 2400 deletions

.gitignore (vendored): 6 deleted lines
@@ -1,6 +0,0 @@
-opennebula-vm-etcd/config-and-secrets.conf
-
-*.pyc
-
-.idea
-.vscode
README.md: 29 changed lines (27 deletions, 2 additions)
@@ -1,27 +1,2 @@
-Hello hacker!
-
-The tools used in this repository are used by the team of ungleich
-(www.ungleich.ch) to maintain servers and data centers.
-
-A special data center that we maintain and that most tools are written
-for is the Data Center Light (www.datacenterlight.ch).
-
-As we are truly dedicated and committed to open source, we are trying
-to release *everything* open source that we can (even if not
-everything is yet directly usable by others).
-
-Feel free to clone, use, distribute the code according to GPLv3+
-licensing.
-
-Best,
-
-team ungleich
-
-p.s.: Some stuff that you (will) find in this repo:
-
-Tools to manage ...
-
-- ceph cluster(s)
-- opennebula cluster(s)
-- host disk/raid configurations
-- monit
+This repository has been moved to
+[code.ungleich.ch](https://code.ungleich.ch/ungleich-public/ungleich-tools).
@@ -1,38 +0,0 @@
#!/bin/sh
# Nico Schottelius, 12019-11-25

set -e

echo "incomplete script: partition table, size"
exit 1

version=3.10.3
tmpdir=$(mktemp -d)

file=alpine-standard-${version}-x86_64.iso
url=http://dl-cdn.alpinelinux.org/alpine/v3.10/releases/x86_64/$file

uefifile=${file%.iso}.efi

# FIXME: get this from the ISO and round up a bit - size in MiB
uefisize=200

wget -c "$url"

# create the output file
dd if=/dev/zero of=${uefifile} bs=1M count=${uefisize}

mkdir "$tmpdir/iso"
mkdir "$tmpdir/efi"

mkfs.vfat -F32 ${uefifile}

mount -o loop "${file}" "$tmpdir/iso"
mount -o loop "${uefifile}" "$tmpdir/efi"

cd "$tmpdir/iso"
tar c . | ( cd "$tmpdir/efi"; tar xv )

umount "$tmpdir/iso" "$tmpdir/efi"

# FIXME: create partition!!
|
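Once the two FIXMEs are addressed, the resulting FAT32 image can be smoke-tested under UEFI firmware with qemu. A minimal sketch, assuming an OVMF firmware build is installed (the firmware path varies by distribution):

    qemu-system-x86_64 -m 1G \
        -bios /usr/share/OVMF/OVMF.fd \
        -drive file=alpine-standard-3.10.3-x86_64.efi,format=raw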
@ -1,90 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
MAJOR_VERSION=3.11
|
||||
MINOR_VERSION=2
|
||||
IMAGE=alpine-minirootfs-$MAJOR_VERSION.$MINOR_VERSION-x86_64.tar.gz
|
||||
SSH_KEYS=$(cat ~/.ssh/id_rsa.pub)
|
||||
RESOLVCONF=/etc/resolv.conf
|
||||
|
||||
working_directory=$(pwd -P)
|
||||
rootfs_tmpdir=$(mktemp -d)
|
||||
rootfs_url="http://dl-cdn.alpinelinux.org/alpine/v$MAJOR_VERSION/releases/x86_64/$IMAGE"
|
||||
|
||||
run_root () {
|
||||
chroot $rootfs_tmpdir /usr/bin/env \
|
||||
PATH=/bin:/sbin \
|
||||
/bin/sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Download, extract initial rootfs.
|
||||
curl "$rootfs_url" -o "$working_directory/$IMAGE"
|
||||
tar xf $IMAGE -C $rootfs_tmpdir
|
||||
|
||||
# Add SSH keys
|
||||
run_root mkdir -p root/.ssh
|
||||
echo $SSH_KEYS > $rootfs_tmpdir/root/.ssh/authorized_keys
|
||||
run_root chmod 0600 root/.ssh/authorized_keys
|
||||
run_root chmod 0700 root/.ssh
|
||||
|
||||
# Import local resolv.conf.
|
||||
cat "$RESOLVCONF" > $rootfs_tmpdir/etc/resolv.conf
|
||||
|
||||
# Make sure init is found by the kernel.
|
||||
run_root ln -s /sbin/init /init
|
||||
|
||||
# Servers have static addresses, disable the standard
|
||||
# alpine setting of using tempaddr = 2
|
||||
cat > "$rootfs_tmpdir/etc/sysctl.d/99-ipv6.conf" <<EOF
|
||||
net.ipv6.conf.default.use_tempaddr = 0
|
||||
net.ipv6.conf.all.use_tempaddr = 0
|
||||
|
||||
net.ipv6.conf.all.accept_ra = 1
|
||||
EOF
|
||||
|
||||
cat > "$rootfs_tmpdir/etc/network/interfaces" <<EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet6 manual
|
||||
pre-up ip link set eth0 up
|
||||
post-up ip addr show dev eth0 | grep inet6 >> /etc/issue
|
||||
post-up echo post post up >> /etc/issue
|
||||
EOF
|
||||
|
||||
cat > "$rootfs_tmpdir/etc/hostname" <<EOF
|
||||
alpine-unconfigured
|
||||
EOF
|
||||
|
||||
echo ipv6 >> "$rootfs_tmpdir/etc/modules"
|
||||
|
||||
# Layer atop base rootfs.
|
||||
run_root apk update
|
||||
run_root apk upgrade
|
||||
run_root apk add openssh linux-vanilla openrc udev
|
||||
run_root rc-update add udev
|
||||
run_root rc-update add udev-trigger
|
||||
run_root rc-update add sshd
|
||||
run_root rc-update add networking
|
||||
run_root rc-update add hostname
|
||||
|
||||
# FIXME: add / install rdnssd / ndisc6 / start it on boot
|
||||
# ndisc6 is only @testing
|
||||
|
||||
# Generate initramfs image
|
||||
(cd $rootfs_tmpdir; find . | cpio -H newc -o | gzip -9 > "$working_directory/alpine-initramfs.gz")
|
||||
cp "$rootfs_tmpdir/boot/vmlinuz-vanilla" "$working_directory/alpine-kernel"
|
||||
|
||||
# Cleanup.
|
||||
#rm -r "$rootfs_tmpdir"
|
||||
|
||||
# Upload to netboot server. - needs to be done outside sudo
|
||||
echo "Use alpine-initramfs.gz alpine-kernel from $working_directory"!
|
|
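A quick way to boot-test the generated kernel/initramfs pair before uploading it to the netboot server; this qemu invocation is a sketch and not part of the original workflow:

    qemu-system-x86_64 -m 1G -nographic \
        -kernel alpine-kernel \
        -initrd alpine-initramfs.gz \
        -append "console=ttyS0"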
@@ -1,20 +0,0 @@
#!/bin/sh
# Nico Schottelius, 2019-09-27
# Objective: create an initramfs + kernel
# that is netbootable

pkg="alpine-base"
chroot=/chroot

apk -X https://nl.alpinelinux.org/alpine/edge/main -U --allow-untrusted \
    --root $chroot --initdb add $pkg

cd $chroot

# For initramfs
ln -s sbin/init init

# enabling base services
for svc in devfs dmesg mdev; do
    chroot $chroot rc-update add $svc sysinit
done
|
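The script stops short of its stated objective (a netbootable initramfs + kernel). A minimal sketch of the missing packaging step, mirroring the cpio/gzip step used by the Alpine rootfs script earlier in this diff; the output paths are illustrative, and a kernel package would have to be added to the chroot first:

    # pack the chroot as an initramfs; copy the kernel if one was installed
    ( cd /chroot; find . | cpio -H newc -o | gzip -9 > /srv/netboot/alpine-initramfs.gz )
    cp /chroot/boot/vmlinuz-* /srv/netboot/alpine-kernel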
@ -1,107 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 17:19, 2018-02-09
|
||||
# Nico Schottelius
|
||||
|
||||
# Based on ceph-disk -v prepare --bluestore /dev/sdc --osd-id ${ID} --osd-uuid $(uuidgen) --crush-device-class "ssd"
|
||||
|
||||
# Create:
|
||||
# - block -> link to partuuid
|
||||
# - block_uuid -> uuid of the block
|
||||
# - ceph_fsid -> get from ceph-conf
|
||||
# crush_device_class -> ssd, hdd
|
||||
# fsid -> uuidgen!
|
||||
# magic -> string "ceph osd volume v026"
|
||||
# type -> bluestore
|
||||
|
||||
fsid=$(ceph-conf --cluster=ceph --name=osd. --lookup fsid)
|
||||
fs_uuid=$(uuidgen)
|
||||
magic="ceph osd volume v026"
|
||||
|
||||
set -x
|
||||
set -e
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "$0 disk class [osdweight]"
|
||||
echo "class = hdd or ssd"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export DEV=$1;shift
|
||||
export CLASS=$1; shift
|
||||
|
||||
|
||||
uuid_metadata=$(uuidgen)
|
||||
uuid_block=$(uuidgen)
|
||||
|
||||
osd_id=$(ceph osd create)
|
||||
|
||||
dev_metadata="/dev/disk/by-partuuid/$uuid_metadata"
|
||||
dev_block="/dev/disk/by-partuuid/$uuid_block"
|
||||
|
||||
/usr/bin/sgdisk --new=0:0:+100M --change-name="0:ceph data" \
|
||||
--partition-guid="0:$uuid_metadata" \
|
||||
--typecode=0:4fbd7e29-9d25-41b8-afd0-062c0ceff05d \
|
||||
--mbrtogpt -- $DEV
|
||||
/sbin/udevadm settle --timeout=600
|
||||
|
||||
# Using gdisk --largest-new does not change the name or set guid;
|
||||
# So use 2 steps instead
|
||||
/usr/bin/sgdisk --largest-new=0 --mbrtogpt -- $DEV
|
||||
/sbin/udevadm settle --timeout=600
|
||||
|
||||
|
||||
lastpart=$(gdisk -l $DEV | tail -n1 | awk '{ print $1 }')
|
||||
/usr/bin/sgdisk --change-name="${lastpart}:ceph block" \
|
||||
--partition-guid="${lastpart}:$uuid_block" \
|
||||
--typecode="${lastpart}:cafecafe-9b03-4f30-b4c6-b4b80ceff106" \
|
||||
--mbrtogpt -- $DEV
|
||||
/sbin/udevadm settle --timeout=600
|
||||
|
||||
#echo $1
|
||||
#echo $(blkid | grep $1"2")
|
||||
|
||||
#cblock=$(blkid | grep $1"2" | cut -d'"' -f4)
|
||||
#echo $cblock
|
||||
|
||||
/sbin/mkfs -t xfs -f -i size=2048 -- "$dev_metadata"
|
||||
|
||||
mountpath=/var/lib/ceph/osd/ceph-${osd_id}
|
||||
|
||||
mkdir -p "$mountpath"
|
||||
mount "$dev_metadata" "$mountpath"
|
||||
|
||||
ln -s $dev_block "$mountpath/block"
|
||||
|
||||
echo "$uuid_block" > "$mountpath/block_uuid"
|
||||
echo "$fsid" > "$mountpath/ceph_fsid"
|
||||
echo "$magic" > "$mountpath/magic"
|
||||
echo "$CLASS" > "$mountpath/crush_device_class"
|
||||
echo $(echo $dev_block | cut -c23-) > "$mountpath/fsid"
|
||||
|
||||
|
||||
# Important, otherwise --mkfs later will try to create filestore
|
||||
echo bluestore > "$mountpath/type"
|
||||
|
||||
ceph auth get-or-create "osd.${osd_id}" osd \
|
||||
'allow *' mon 'allow profile osd' > $mountpath/keyring
|
||||
|
||||
echo ${osd_id} > "$mountpath/whoami"
|
||||
touch "$mountpath/openrc"
|
||||
|
||||
ceph-osd --cluster ceph -i "${osd_id}" --mkfs
|
||||
chown -R ceph:ceph "$mountpath"
|
||||
|
||||
if [ $# -eq 1 ]; then
|
||||
WEIGHT=$1; shift
|
||||
else
|
||||
devname=$(readlink -f $dev_block)
|
||||
nodev=$(echo $devname | sed 's,/dev/,,')
|
||||
WEIGHT=$(lsblk -l -b | awk "/^$nodev/ { print \$4/(1024^4) }")
|
||||
fi
|
||||
|
||||
ceph osd crush add osd.${osd_id} ${WEIGHT} host=$(hostname)
|
||||
|
||||
echo "$metadata_dev /var/lib/ceph/osd/ceph-${osd_id} xfs noatime 0 0" >> /etc/fstab
|
||||
|
||||
# Starting with monit, if available
|
||||
ceph-osd -i ${osd_id}
|
|
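A hypothetical invocation (the script's file name is not shown in this diff): prepare /dev/sdc as a bluestore OSD in the ssd device class, deriving the CRUSH weight from the device size, or passing it explicitly in TiB:

    ./ceph-osd-bluestore-create.sh /dev/sdc ssd
    ./ceph-osd-bluestore-create.sh /dev/sdd hdd 3.6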
@ -1,41 +0,0 @@
|
|||
#!/bin/bash
|
||||
#option $1 is vm_list file name
|
||||
#option $2 is DB location
|
||||
#option $3 is DB user
|
||||
#option $4 is DB name
|
||||
|
||||
#host='localhost'
|
||||
|
||||
user_arr=( $(cat $1 | awk '{print $1}' ))
|
||||
vmid_arr=( $(cat $1 | awk '{print $2}' ))
|
||||
port_arr=( $(cat $1 | awk '{print $3}' ))
|
||||
place_arr=( $(cat $1 | awk '{print $4}' ))
|
||||
|
||||
for ((i=0; i<${#user_arr[@]}; i++)) do
|
||||
#create user
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_entity (name, type) VALUES ('${user_arr[i]}','USER');"
|
||||
en_id=$(psql -h $2 -U $3 -d $4 -tAc "SELECT entity_id FROM guacamole_entity WHERE name = '${user_arr[i]}';")
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('$en_id', '\x74657374', now());"
|
||||
|
||||
#create connection
|
||||
cn=${user_arr[i]}${vmid_arr[i]}
|
||||
echo $cn
|
||||
if [ 0 -eq $(psql -h $2 -U $3 -d $4 -tAc "SELECT connection_id FROM guacamole_connection WHERE connection_name = '$cn';" | wc -l) ]; then
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection (connection_name, protocol) VALUES ('$cn', 'vnc');"
|
||||
cn_id=$(psql -h $2 -U $3 -d $4 -tAc "SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;")
|
||||
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','hostname','${place_arr[i]}');"
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','port','${port_arr[i]}');"
|
||||
|
||||
#connection permission
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_permission(entity_id, connection_id, permission) VALUES ('$en_id', '$cn_id', 'READ');"
|
||||
#clipboard-encoding
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','clipboard-encoding','UTF-8');"
|
||||
|
||||
else
|
||||
cn_id=$(psql -h $2 -U $3 -d $4 -tAc "SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;")
|
||||
psql -h $2 -U $3 -d $4 -tAc "UPDATE guacamole_connection_parameter SET parameter_value='${place_arr[i]}' where connection_id='$cn_id' and parameter_name='hostname';"
|
||||
psql -h $2 -U $3 -d $4 -tAc "UPDATE guacamole_connection_parameter SET parameter_value='${port_arr[i]}' where connection_id='$cn_id' and parameter_name='port';"
|
||||
fi
|
||||
|
||||
done
|
|
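The vm_list file is read column-wise by the awk calls above: user, VM id, VNC port and VNC host, one VM per line. A hypothetical invocation against a remote PostgreSQL server (file and host names are illustrative):

    cat > vm_list.txt <<EOF
    alice 1234 5901 vnc-host1.example.com
    bob 1235 5902 vnc-host2.example.com
    EOF
    ./guacamole-vnc-sync.sh vm_list.txt db.example.com guacamole guacamole_db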
@ -1,38 +0,0 @@
|
|||
#!/bin/bash
|
||||
#option $1 is vm_list file name
|
||||
#option $2 is DB name
|
||||
#this script should be run on guacamole server
|
||||
|
||||
|
||||
host='localhost'
|
||||
user_arr=( $(cat $1 | awk '{print $1}' ))
|
||||
vmid_arr=( $(cat $1 | awk '{print $2}' ))
|
||||
port_arr=( $(cat $1 | awk '{print $3}' ))
|
||||
place_arr=( $(cat $1 | awk '{print $4}' ))
|
||||
|
||||
for ((i=0; i<${#user_arr[@]}; i++)) do
|
||||
#create user
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_entity (name, type) VALUES ('${user_arr[i]}','USER');\""
|
||||
en_id=$(su - postgres -c "psql postgres -d $2 -tAc \"SELECT entity_id FROM guacamole_entity WHERE name = '${user_arr[i]}';\"")
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('$en_id', '\x74657374', now());\""
|
||||
|
||||
#create connection
|
||||
cn=${user_arr[i]}${vmid_arr[i]}
|
||||
|
||||
if [ 0 -eq $(su - postgres -c "psql postgres -d $2 -tAc \"SELECT connection_id FROM guacamole_connection WHERE connection_name = '$cn';\"" | wc -l) ]; then
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection (connection_name, protocol) VALUES ('$cn', 'vnc');\""
|
||||
cn_id=$(su - postgres -c "psql postgres -d $2 -tAc \"SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;\"")
|
||||
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','hostname','$host');\""
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','port','${port_arr[i]}');\""
|
||||
|
||||
#connection permission
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection_permission(entity_id, connection_id, permission) VALUES ('$en_id', '$cn_id', 'READ');\""
|
||||
|
||||
else
|
||||
cn_id=$(su - postgres -c "psql postgres -d $2 -tAc \"SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;\"")
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"UPDATE guacamole_connection_parameter SET parameter_value='$host' where connection_id='$cn_id' and parameter_name='hostname';\""
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"UPDATE guacamole_connection_parameter SET parameter_value='${port_arr[i]}' where connection_id='$cn_id' and parameter_name='port';\""
|
||||
fi
|
||||
|
||||
done
|
|
@ -1,78 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2019-12-09
|
||||
# the ugly code is llnu
|
||||
|
||||
#this can only run in the ungleich-tools directory because of the cat magic command
|
||||
|
||||
if [ $# -ne 2 ]; then
|
||||
echo $0 suite out-directory
|
||||
echo out-directory: into which directory to place resulting files
|
||||
echo suite is for instance ascii, beowulf, etc
|
||||
exit 1
|
||||
fi
|
||||
|
||||
suite=$1; shift
|
||||
outdir=$1; shift
|
||||
|
||||
date=$(date +%F)
|
||||
|
||||
basename=${suite}-${date}
|
||||
dir=${outdir}/${basename}
|
||||
kernel=${outdir}/kernel-${basename}
|
||||
initramfs=${outdir}/initramfs-${basename}
|
||||
keyurl=https://code.ungleich.ch/ungleich-public/__ungleich_staff_ssh_access/raw/master/files
|
||||
|
||||
debootstrap "${suite}" "${dir}"
|
||||
|
||||
# need non-free for firmware-bnx2
|
||||
echo "deb http://pkgmaster.devuan.org/merged ${suite} main contrib non-free" > ${dir}/etc/apt/sources.list
|
||||
|
||||
chroot ${dir} apt update
|
||||
chroot ${dir} apt install -y openssh-server rdnssd linux-image-amd64 firmware-bnx2
|
||||
|
||||
|
||||
cp ${dir}/boot/vmlinuz-* ${kernel}
|
||||
|
||||
echo '* * * * * root ip -o -6 addr show | grep -E -v " lo |one" > /etc/issue' > ${dir}/etc/cron.d/ipv6addr
|
||||
|
||||
mkdir -p ${dir}/root/.ssh
|
||||
|
||||
for key in balazs dominique jinguk nico; do
|
||||
curl -s ${keyurl}/${key}.pub >> ${dir}/root/.ssh/authorized_keys
|
||||
done
|
||||
|
||||
################################################################################
|
||||
# networking
|
||||
|
||||
# always lo
|
||||
cat > ${dir}/etc/network/interfaces << EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
source-directory /etc/network/interfaces.d
|
||||
EOF
|
||||
|
||||
# find the boot interfaces at boot
|
||||
cat > ${dir}/etc/rc.local <<EOF
|
||||
mac=\$(cat /proc/cmdline | tr ' ' '\n' | awk -F= '/bootdev/ { print \$2 }')
|
||||
dev=\$(ip -o link | awk -F: "/\$mac/ { print \\\$2 }" | sed 's/ *//g')
|
||||
|
||||
cat > /etc/network/interfaces.d/bootinterface << eof
|
||||
auto \$dev
|
||||
iface \$dev inet6 auto
|
||||
eof
|
||||
|
||||
ifup "\${dev}"
|
||||
|
||||
exit 0
|
||||
EOF
|
||||
|
||||
chmod a+rx ${dir}/etc/rc.local
|
||||
|
||||
# ensure there is /init in the initramfs -> otherwise there is a kernel panic
|
||||
# reason: initramfs is designed to be PRE regular os, so /init usually hands over to /sbin/init
|
||||
# in our case, they are just the same
|
||||
ln -s /sbin/init ${dir}/init
|
||||
|
||||
# Finally building the initramfs
|
||||
( cd ${dir} ; find . | cpio -H newc -o | gzip -9 > ${initramfs} )
|
|
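The generated rc.local derives the boot interface from a bootdev=<MAC> kernel argument, so the netboot loader has to pass one. A sketch of a local boot test with qemu (file names are illustrative; the MAC is qemu's default NIC address):

    qemu-system-x86_64 -m 1G -nographic \
        -kernel kernel-beowulf-2019-12-09 \
        -initrd initramfs-beowulf-2019-12-09 \
        -append "console=ttyS0 bootdev=52:54:00:12:34:56" \
        -nic user,mac=52:54:00:12:34:56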
@@ -1,25 +0,0 @@
#!/usr/bin/env python3
# Nico Schottelius, 2020-01-07
# Detect the DNS64 prefix
# Based on https://tools.ietf.org/html/draft-ietf-behave-nat64-discovery-heuristic-05
#
# How it works:
# - ipv4only.arpa only has A records.
# - a DNS64 server will add AAAA records
# - we take this response (if any) and derive the IPv6 prefix from it
#

import dns.resolver
import ipaddress


if __name__ == '__main__':
    dns64_prefix = None
    answers = dns.resolver.query('ipv4only.arpa', 'AAAA')

    for rdata in answers:
        address = str(rdata)
        network = ipaddress.IPv6Network("{}/96".format(address),
                                        strict=False)
        # print("{}: {}".format(rdata, network))
        print("{}".format(network))
|
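A quick way to cross-check the result from a client behind a DNS64 resolver (a sketch; the script's file name is hypothetical): any IPv4-only name should resolve to an address inside the printed prefix:

    ./dns64-prefix.py                # e.g. 64:ff9b::/96
    dig +short AAAA ipv4only.arpa    # e.g. 64:ff9b::c000:aa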
@@ -1,28 +0,0 @@
import json
import pprint
#import etcd3

with open("nico-vm-one.json", "r") as fd:
    vmcontent = fd.read()

#vm = json.loads(vmcontent.decode('utf-8'))
vm = json.loads(vmcontent)
pprint.pprint(vm['TEMPLATE']['DISK'])

# storing info

for_etcd = {}
for_etcd['data_version'] = "1"
for_etcd['vm_id'] = vm['ID']
for_etcd['owner'] = vm['UNAME']

for_etcd['disks'] = []
for disk in vm['TEMPLATE']['DISK']:
    disk_etcd = {}
    disk_etcd['image_name'] = disk['IMAGE']
    disk_etcd['image_id'] = disk['IMAGE_ID']
    disk_etcd['datastore_name'] = disk['DATASTORE']
    disk_etcd['datastore_id'] = disk['DATASTORE_ID']
    for_etcd['disks'].append(disk_etcd)

pprint.pprint(for_etcd)
|
@ -1,44 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2019-11-05, Nico Schottelius
|
||||
|
||||
set -e
|
||||
|
||||
version=4.0.6
|
||||
|
||||
cat <<EOF
|
||||
Required for building:
|
||||
|
||||
Alpine:
|
||||
|
||||
apk add build-base libnl3-dev iptables-dev argp-standalone
|
||||
|
||||
Compiling on alpine requires:
|
||||
|
||||
LDFLAGS=-largp
|
||||
LDFLAGS=-largp ./configure
|
||||
LDFLAGS=-largp make
|
||||
make install
|
||||
|
||||
Build requirements devuan:
|
||||
|
||||
- dkms
|
||||
- kernel headers for current kernel
|
||||
|
||||
apt install libnl-genl-3-dev libxtables-dev
|
||||
|
||||
EOF
|
||||
|
||||
wget -c https://github.com/NICMx/Jool/releases/download/v${version}/jool-${version}.tar.gz
|
||||
tar xfz jool-${version}.tar.gz
|
||||
|
||||
# 1. kernel module
|
||||
dkms install jool-${version}
|
||||
|
||||
# 2. user space
|
||||
cd jool-${version}
|
||||
./configure
|
||||
make
|
||||
make install
|
||||
|
||||
|
||||
# add openrc script for alpine
|
|
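The closing TODO asks for an OpenRC script for Alpine; a minimal sketch of what /etc/init.d/jool could look like, assuming Jool 4.x CLI syntax and the well-known prefix (the instance name and pool6 are assumptions, adjust as needed):

    #!/sbin/openrc-run
    description="Jool NAT64 translator"

    depend() {
        need net
    }

    start() {
        ebegin "Starting Jool NAT64"
        modprobe jool
        jool instance add "default" --netfilter --pool6 64:ff9b::/96
        eend $?
    }

    stop() {
        ebegin "Stopping Jool NAT64"
        jool instance remove "default"
        eend $?
    }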
@ -1,31 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# List mail addresses found under base DN $1 (defaults to dc=ungleich,dc=ch)
|
||||
|
||||
set -e
|
||||
|
||||
# Hardcoded parameters.
|
||||
LDAP_SERVER="ldaps://ldap1.ungleich.ch"
|
||||
LDAP_BIND_DN="cn=manager,dc=ungleich,dc=ch"
|
||||
|
||||
if [ "$1" != "" ]; then
|
||||
LDAP_SEARCH_BASE="$1"
|
||||
else
|
||||
LDAP_SEARCH_BASE="dc=ungleich,dc=ch"
|
||||
fi
|
||||
|
||||
# Read secrets from environment.
|
||||
if [ "$LDAP_BIND_PASSWD" = "" ]; then
|
||||
echo "You have to define LDAP_BIND_PASSWD before launching this script." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract mail addresses from LDAP directory.
|
||||
ldap_search_result="$(
|
||||
ldapsearch -x -H "$LDAP_SERVER" \
|
||||
-D "$LDAP_BIND_DN" \
|
||||
-w "$LDAP_BIND_PASSWD" \
|
||||
-b "$LDAP_SEARCH_BASE" mail
|
||||
)"
|
||||
|
||||
echo "$ldap_search_result" | grep 'mail:' | cut -d ' ' -f 2 -
|
|
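A hypothetical invocation (the script's file name is not part of this diff); the bind password comes from the environment and the search base is optional:

    LDAP_BIND_PASSWD='...' ./ldap-list-mail.sh 'ou=users,dc=ungleich,dc=ch'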
@@ -1,2 +0,0 @@
* * * * * root ip -o -6 addr show | grep -E -v "lo |one" | awk '{print $1" " $2": "$4}' >> /dev/tty1

|
@ -1,45 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2019-12-02
|
||||
# Setup standard mikrotik settings
|
||||
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "$0 <target> [target]"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
target=$1; shift
|
||||
|
||||
|
||||
conf() {
|
||||
echo $@
|
||||
ssh admin@${target} "$@"
|
||||
}
|
||||
copy() {
|
||||
scp "$1" admin@${target}:
|
||||
}
|
||||
|
||||
# store ssh key in the admin user!
|
||||
copy ~/.ssh/id_rsa.pub
|
||||
conf "/user ssh-keys import user=admin public-key-file=id_rsa.pub"
|
||||
conf "/file remove id_rsa.pub"
|
||||
|
||||
# remove unnecessary stuff
|
||||
for unusedpkg in calea gps lora mpls openflow tr069-client ups \
|
||||
advanced-tools hotspot ntp; do
|
||||
conf "/system package uninstall $unusedpkg"
|
||||
done
|
||||
|
||||
# ensure important stuff is enabled
|
||||
for wantpkg in wireless; do
|
||||
conf "/system package enable $wantpkg"
|
||||
done
|
||||
|
||||
# TODOs:
|
||||
# setup capsman
|
||||
# setup IPv6
|
||||
# setup password
|
||||
# disable dhcp server
|
||||
|
||||
# New script for setting up the main capsman:
|
||||
# certificate generation!
|
|
@ -1,37 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2019-12-02
|
||||
# Update mikrotik routers to the latest package
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "$0 <version> <arch> router [router...]"
|
||||
cat <<EOF
|
||||
Arch:
|
||||
- rb4011: arm
|
||||
- hapac: mipsbe
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
version=$1; shift
|
||||
arch=$1; shift
|
||||
|
||||
file=all_packages-${arch}-${version}.zip
|
||||
url=https://download.mikrotik.com/routeros/${version}/${file}
|
||||
tmp=$(mktemp -d)
|
||||
|
||||
cd "$tmp"
|
||||
wget "${url}"
|
||||
unzip "${file}"
|
||||
|
||||
pkg_list="dhcp ipv6 lcd lte multicast ppp routing security system user-manager wireless"
|
||||
|
||||
while [ $# -ge 1 ]; do
|
||||
target=$1; shift
|
||||
echo "Updating ${target}"
|
||||
for pkg in $pkg_list; do
|
||||
scp ${pkg}-${version}-${arch}.npk "admin@${target}:"
|
||||
done
|
||||
ssh admin@${target} "/system reboot"
|
||||
done
|
||||
|
||||
rm -rf "${tmp}"!
|
|
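A hypothetical invocation (the script's file name is illustrative): push the 6.47.9 arm packages to two RB4011 routers and reboot them:

    sh mikrotik-update.sh 6.47.9 arm router1.example.com router2.example.com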
@@ -1,18 +0,0 @@
#!/bin/sh
#
# This script extracts VM IDs and filters them if a pattern is provided as
# the first argument.

set -e

# Extract instances from ONE.
instances=$(onevm list --csv | tail -n +2)

# Filter them if a pattern has been provided.
if [ "$1" != "" ]; then
    filtered_instances="$(echo "$instances" | grep -E "$1")"
    instances="$filtered_instances"
fi

# Outputs instance IDs.
echo "$instances" | cut -d ',' -f 1 -
|
@@ -1,18 +0,0 @@
#!/bin/sh
#
# This script is expected to run on the ONE server (i.e.
# opennebula.ungleich.ch).

set -e

# Fetch instance list from STDIN.
instances=$(cat -)

# For every instance, extract relevant information:
for id in $instances; do
    nics_raw="$(onevm show --xml $id | xml_grep 'NIC')"
    networks="$(echo $nics_raw | xml_grep --text_only 'NETWORK' | tr '\n' ',' | sed 's/,$//')"
    ip="$(echo $nics_raw | xml_grep --text_only 'IP' | tr '\n' ',' | sed 's/,$//')"
    ip6="$(echo $nics_raw | xml_grep --text_only 'IP6_GLOBAL' | tr '\n' ',' | sed 's/,$//')"
    echo "$id,$networks,$ip,$ip6"
done
|
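The two scripts are meant to be chained: the first prints matching VM IDs, the second reads IDs on stdin and must run on the ONE server. A hypothetical pipeline (the file names are not shown in this diff); each output line is id,networks,ip,ip6:

    ./one-vm-ids.sh 'monitoring' | ./one-vm-nics.sh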
@ -1,243 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Copyright 2020 -- Evilham <contact@evilham.com>
|
||||
# This is BSD licensed as it's based on BSD-licensed code
|
||||
#
|
||||
# We could have used e.g. something like:
|
||||
# - https://git.sr.ht/~sircmpwn/builds.sr.ht/tree/master/images/freebsd/genimg
|
||||
#
|
||||
# But we actually do want to compile the kernel, so that the IPv6-only images
|
||||
# are different and don't support INET.
|
||||
|
||||
# Explode if something goes wrong
|
||||
set -e
|
||||
|
||||
# What are we building?
|
||||
# These are the only configuration options.
|
||||
# They default to current environment.
|
||||
# RELEASE: should be 'CURRENT' for current, or 'X.Y'. Defaults to 'CURRENT'.
|
||||
# ARCH: probably amd64 for DCL
|
||||
# VMFORMATS: defaults to qcow2, can also be raw. See man mkimg.
|
||||
# OPENNEBULA_CONTEXT_VERSION: For DCL's OpenNebula that'd be 5.10.0 (default)
|
||||
# OPENNEBULA_CONTEXT_REVISION: Defaults to 1.
|
||||
RELEASE=${RELEASE:-CURRENT}
|
||||
if [ "${RELEASE}" == "CURRENT" ]; then
|
||||
SRCBRANCH="master"
|
||||
else
|
||||
SRCBRANCH="releng/${RELEASE}"
|
||||
fi
|
||||
ARCH=${ARCH:-amd64}
|
||||
VMFORMATS=${VMFORMATS:-qcow2}
|
||||
OPENNEBULA_CONTEXT_VERSION=${OPENNEBULA_CONTEXT_VERSION:-5.10.0}
|
||||
OPENNEBULA_CONTEXT_REVISION=${OPENNEBULA_CONTEXT_REVISION:-1}
|
||||
|
||||
# Didn't see a need to make these configurable.
|
||||
CHROOTDIR="/scratch"
|
||||
SRCDIR="${CHROOTDIR}/usr/src"
|
||||
OUR_DIR="$(realpath $(dirname "${0}"))"
|
||||
OUR_SRCCONF="${SRCDIR}/release/src.conf"
|
||||
OUR_RELEASE_CONF="${SRCDIR}/release/release.conf"
|
||||
# Shorthand for the package file name.
|
||||
OPENNEBULA_CONTEXT="one-context-${OPENNEBULA_CONTEXT_VERSION}_${OPENNEBULA_CONTEXT_REVISION}.txz"
|
||||
|
||||
setup_sources() {
|
||||
# Let's use git, we might need to install it
|
||||
if ! which git 2>&1 > /dev/null; then
|
||||
pkg install -y git
|
||||
fi
|
||||
|
||||
if [ ! -d "$(dirname ${SRCDIR})" ]; then
|
||||
mkdir -p "$(dirname ${SRCDIR})"
|
||||
fi
|
||||
|
||||
# Checkout needed branch
|
||||
if [ ! -d "${SRCDIR}" ]; then
|
||||
git clone "https://github.com/freebsd/freebsd" \
|
||||
--branch "${SRCBRANCH}" "${SRCDIR}"
|
||||
else
|
||||
GIT_CMD="git -C ${SRCDIR}"
|
||||
${GIT_CMD} clean -df
|
||||
${GIT_CMD} reset --hard
|
||||
${GIT_CMD} fetch
|
||||
${GIT_CMD} checkout "${SRCBRANCH}"
|
||||
${GIT_CMD} pull
|
||||
fi
|
||||
|
||||
# Add settings for IPv6-only kernel
|
||||
cat > "${SRCDIR}/sys/${ARCH}/conf/GENERIC-IPV6ONLY" << EOF
|
||||
include GENERIC
|
||||
ident GENERIC-IPV6ONLY
|
||||
makeoptions MKMODULESENV+="WITHOUT_INET_SUPPORT="
|
||||
nooptions INET
|
||||
nodevice gre
|
||||
EOF
|
||||
# Fix vmimage.subr to install custom package and fix other things
|
||||
cat >> "${SRCDIR}/release/tools/vmimage.subr" << EOF
|
||||
vm_extra_install_ports() {
|
||||
# Make sure we install the opennebula context package
|
||||
cp "/${OPENNEBULA_CONTEXT}" "\${DESTDIR}/tmp/${OPENNEBULA_CONTEXT}"
|
||||
chroot \${DESTDIR} \${EMULATOR} env ASSUME_ALWAYS_YES=yes \\
|
||||
/usr/sbin/pkg add '/tmp/${OPENNEBULA_CONTEXT}'
|
||||
|
||||
# Now make sure the system has better defaults
|
||||
cat >> "\${DESTDIR}/etc/rc.conf" << eof
|
||||
# Update to latest patch on first boot
|
||||
firstboot_freebsd_update_enable="YES"
|
||||
# Enable OpenNebula's service.
|
||||
one_context_enable="YES"
|
||||
# Enable SSH for customers
|
||||
sshd_enable="YES"
|
||||
# Clear tmp on boot
|
||||
clear_tmp_enable="YES"
|
||||
# Disable sendmail by default
|
||||
sendmail_enable="NONE"
|
||||
# Disable crash dumps
|
||||
dumpdev="NO"
|
||||
eof
|
||||
# Enable root access with SSH key.
|
||||
# It is user's responsibility to further secure their system.
|
||||
sed -i '' -E \
|
||||
's/(^#[ ]*|^)PermitRootLogin .*/PermitRootLogin without-password/' \
|
||||
"\${DESTDIR}/etc/ssh/sshd_config"
|
||||
}
|
||||
EOF
|
||||
# Skip building iso images
|
||||
rm "${SRCDIR}/release/${ARCH}/mkisoimages.sh"
|
||||
# This is a hack to not build the memstick
|
||||
cat > "${SRCDIR}/release/${ARCH}/make-memstick.sh" <<EOF
|
||||
# Create an empty file, else checksums fail
|
||||
touch "\${2}" || true
|
||||
EOF
|
||||
}
|
||||
|
||||
setup_our_env() {
|
||||
# Required by META_MODE to build faster next time
|
||||
# This saves a lot of time when e.g. compiling GENERIC and GENERIC-IPV6ONLY
|
||||
if ! kldstat | grep -q filemon; then
|
||||
kldload filemon
|
||||
fi
|
||||
}
|
||||
|
||||
gen_releaseconf() {
|
||||
cat << EOF
|
||||
#!/bin/sh
|
||||
#
|
||||
# Based off FreeBSD's release/release.conf.sample
|
||||
#
|
||||
|
||||
# This redefines the prototype defined in release.sh.
|
||||
# At this stage, the build chroot exists.
|
||||
buildenv_setup() {
|
||||
# Ensure META_MODE is on
|
||||
echo "WITH_META_MODE=yes" > \${CHROOTDIR}/etc/src-env.conf
|
||||
}
|
||||
|
||||
## Set the directory within which the release will be built.
|
||||
CHROOTDIR="${CHROOTDIR}"
|
||||
|
||||
## Set to override the default target architecture and kernel
|
||||
TARGET="${ARCH}"
|
||||
TARGET_ARCH="${ARCH}"
|
||||
KERNEL="${KERNEL_CONFIG}"
|
||||
|
||||
## Set to specify a custom make.conf and/or src.conf
|
||||
SRC_CONF="${OUR_SRCCONF}"
|
||||
|
||||
# Since these are VMs, users should add other components if they want to.
|
||||
NODOC=YES
|
||||
NOPORTS=YES
|
||||
NOSRC=YES
|
||||
|
||||
# We manage sources manually
|
||||
SRC_UPDATE_SKIP=YES
|
||||
|
||||
## Set to pass additional flags to make(1) for the build chroot setup, such
|
||||
## as TARGET/TARGET_ARCH.
|
||||
# This was necessary for "cross-compiling"
|
||||
CHROOT_MAKEENV="MK_LLVM_TARGET_X86=yes"
|
||||
|
||||
WITH_VMIMAGES=YES
|
||||
|
||||
# VM image size, see man 1 truncate
|
||||
VMSIZE="10G"
|
||||
|
||||
# List of disk image formats, see man mkimg.
|
||||
VMFORMATS="${VMFORMATS}"
|
||||
|
||||
# These variables have to be exported because they are needed in subprocesses.
|
||||
export NOSWAP=YES
|
||||
# Custom ports
|
||||
# - firstboot-freebsd-update helps us not have to create an image for each
|
||||
# patch level. We still will have to do it for each minor version update.
|
||||
# - bash is apparently needed for one-context
|
||||
export VM_EXTRA_PACKAGES="firstboot-freebsd-update bash"
|
||||
EOF
|
||||
}
|
||||
|
||||
_do_run_release() {
|
||||
. "${SRCDIR}/release/release.sh"
|
||||
}
|
||||
run_release() {
|
||||
_do_run_release -c "${OUR_RELEASE_CONF}"
|
||||
}
|
||||
|
||||
|
||||
build_image() {
|
||||
# Generate configuration
|
||||
echo "${2}" > "${OUR_SRCCONF}"
|
||||
KERNEL_CONFIG="${1}"
|
||||
gen_releaseconf > "${OUR_RELEASE_CONF}"
|
||||
# Be paranoid about files and stuff
|
||||
sync
|
||||
# Continue with the release script
|
||||
run_release
|
||||
# Be paranoid about files and stuff
|
||||
sync
|
||||
|
||||
mv "${CHROOTDIR}/R/vmimages" "${OUR_DIR}/FreeBSD-${RELEASE}-${1}"
|
||||
|
||||
# Be paranoid about files and stuff
|
||||
sync
|
||||
}
|
||||
|
||||
our_main() {
|
||||
case "$1" in
|
||||
--dualstack)
|
||||
BUILD_DUALSTACK=yes
|
||||
;;
|
||||
--ipv6only)
|
||||
BUILD_IPV6ONLY=yes
|
||||
;;
|
||||
*)
|
||||
cat << EOF
|
||||
Run with --dualstack or --ipv6only depending on the image you want.
|
||||
EOF
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
setup_sources
|
||||
setup_our_env
|
||||
# Fetch OpenNebula's context package
|
||||
fetch "https://github.com/OpenNebula/addon-context-linux/releases/download/v${OPENNEBULA_CONTEXT_VERSION}/${OPENNEBULA_CONTEXT}" \
|
||||
-o "${CHROOTDIR}/${OPENNEBULA_CONTEXT}"
|
||||
# Do run
|
||||
if [ -n "${BUILD_DUALSTACK}" ]; then
|
||||
build_image "GENERIC"
|
||||
fi
|
||||
if [ -n "${BUILD_IPV6ONLY}" ]; then
|
||||
build_image "GENERIC-IPV6ONLY" "$(cat << EOF
|
||||
WITHOUT_INET=yes
|
||||
WITHOUT_INET_SUPPORT=yes
|
||||
EOF
|
||||
)"
|
||||
fi
|
||||
|
||||
cat << EOF
|
||||
|
||||
*************** DONE ***************
|
||||
You will find the images under "${OUR_DIR}".
|
||||
************************************
|
||||
EOF
|
||||
}
|
||||
|
||||
our_main "${@}"
|
|
@ -1,31 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Copyright 2020 -- Evilham <contact@evilham.com>
|
||||
# This is BSD licensed as it's based on BSD-licensed code
|
||||
#
|
||||
#
|
||||
# This builds all needed FreeBSD images for ungleich's Data Center Light
|
||||
# When there are new releases, they should be updated here and the script
|
||||
# should run.
|
||||
# 11.4 is scheduled end of June 2020
|
||||
# 12.2 is scheduled end of October 2020
|
||||
#
|
||||
|
||||
SUPPORTED_RELEASES="11.3 12.1"
|
||||
|
||||
# This should run in a DCL VM with an OK amount of cores (4/8 minimum),
|
||||
# 4G RAM, and storage of roughly 20G + 5G * #resulting_images.
|
||||
#
|
||||
# This is because there is the base system, a 'pristine chroot', and during the
|
||||
# build there can be 2 copies of the resulting system written to the system.
|
||||
# Since there are 4 combinations of images:
|
||||
# {STABLE,RELEASE} x {dualstack, IPv6ONLY}
|
||||
#
|
||||
# That means we'll need to assign about 40G storage to be on the safe side.
|
||||
|
||||
for release in ${SUPPORTED_RELEASES}; do
|
||||
for build in dualstack ipv6only; do
|
||||
env RELEASE=${release} sh FreeBSD-build-opennebula-image-generic.sh --${build} \
|
||||
| tee "FreeBSD-${release}-${build}.log"
|
||||
done
|
||||
done
|
|
@ -1,179 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates Alpine images for OpenNebula.
|
||||
#
|
||||
# Test image locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=v3.11
|
||||
ARCH=x86_64
|
||||
IMAGE_PATH=alpine-$RELEASE-$(date -I).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd0
|
||||
APK_MIRROR=http://dl-2.alpinelinux.org/alpine/ # Mind the trailing /
|
||||
|
||||
ONE_CONTEXT_APK_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context-5.10.0-r1.apk"
|
||||
ONE_CONTEXT_APK_PATH=/root/one-context.apk
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$(lsb_release --short --id)" != "Alpine" ]; then
|
||||
echo "WARNING: this script has been designed to run on an Alpine system." >&2
|
||||
echo "WARNING: Not running Alpine. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Wait for qemu-nbd to settle.
|
||||
sleep 1
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
|
||||
# TODO: Remove bash
|
||||
apk add -U -X $APK_MIRROR$RELEASE/main/ \
|
||||
--allow-untrusted \
|
||||
--arch="$ARCH" \
|
||||
--root=/mnt \
|
||||
--initdb \
|
||||
alpine-base alpine-conf openssh sudo tzdata gnupg haveged bash eudev
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize networking.
|
||||
run_root setup-interfaces -i << EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
EOF
|
||||
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# Configure package sources and update package index.
|
||||
run_root setup-timezone -z UTC
|
||||
if [ "$RELEASE" = "edge" ]
|
||||
then
|
||||
cat >/mnt/etc/apk/repositories <<EOF
|
||||
$APK_MIRROR$RELEASE/main
|
||||
$APK_MIRROR$RELEASE/community
|
||||
$APK_MIRROR$RELEASE/testing
|
||||
EOF
|
||||
else
|
||||
cat >/mnt/etc/apk/repositories <<EOF
|
||||
$APK_MIRROR$RELEASE/main
|
||||
$APK_MIRROR$RELEASE/community
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Update package index.
|
||||
run_root apk update
|
||||
|
||||
# Initialize base services.
|
||||
for i in devfs dmesg hwdrivers mdev; do
|
||||
run_root rc-update add $i sysinit
|
||||
done
|
||||
|
||||
for i in bootmisc hostname hwclock modules sysctl syslog acpid networking urandom haveged; do
|
||||
run_root rc-update add $i boot
|
||||
done
|
||||
|
||||
for i in ntpd sshd crond; do
|
||||
run_root rc-update add $i default
|
||||
done
|
||||
|
||||
for i in mount-ro killprocs savecache; do
|
||||
run_root rc-update add $i shutdown
|
||||
done
|
||||
|
||||
# Set hostname.
|
||||
run_root setup-hostname -n alpine
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Install kernel and bootloader.
|
||||
run_root apk add linux-virt syslinux
|
||||
|
||||
dd if=/usr/share/syslinux/mbr.bin of="$NBD_DEVICE" bs=1 count=440
|
||||
extlinux -i /mnt/boot
|
||||
|
||||
cat >/mnt/boot/extlinux.conf <<EOF
|
||||
DEFAULT linux
|
||||
LABEL linux
|
||||
LINUX vmlinuz-virt
|
||||
INITRD initramfs-virt
|
||||
APPEND root=UUID=$root_uuid rw modules=sd-mod,usb-storage,ext4 quiet rootfstype=ext4
|
||||
EOF
|
||||
|
||||
# Install one-context APK and hope things work as expected.
|
||||
curl -L "$ONE_CONTEXT_APK_URL" > "/mnt$ONE_CONTEXT_APK_PATH"
|
||||
run_root apk add --allow-untrusted "$ONE_CONTEXT_APK_PATH"
|
||||
run_root rm "$ONE_CONTEXT_APK_PATH"
|
||||
|
||||
# Remove resolvconf: handled by uncloud-init.
|
||||
run_root rm /etc/resolv.conf
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,170 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates CentOS images for OpenNebula.
|
||||
|
||||
# Depends on the following packages (as of CentOS 8):
|
||||
# qemu-img util-linux coreutils dnf curl e2fsprogs
|
||||
|
||||
# Run locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=8
|
||||
ARCH=x86_64
|
||||
IMAGE_PATH=centos-$RELEASE-$(date --iso-8601).img
|
||||
IMAGE_SIZE=10G
|
||||
LOOPBACK_DEVICE=/dev/loop0
|
||||
|
||||
# TODO: find the package definition, build it ourselves and publish it in some RPM repository.
|
||||
ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context-5.10.0-1.el8.noarch.rpm"
|
||||
ONE_CONTEXT_RPM_PATH=/root/one-context.rpm
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
losetup -d "$LOOPBACK_DEVICE"
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f '/etc/centos-release' ]; then
|
||||
echo "WARNING: this script has been designed to run on a CentOS system." >&2
|
||||
echo "WARNING: Not running CentOS. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base RAW image (no NBD support in RHEL/CentOS, so use a loop device).
|
||||
qemu-img create -f raw "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
losetup "$LOOPBACK_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
{
|
||||
sfdisk --no-reread "$LOOPBACK_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
} || true
|
||||
|
||||
partprobe "$LOOPBACK_DEVICE"
|
||||
|
||||
mkfs.ext4 "${LOOPBACK_DEVICE}p1"
|
||||
mkfs.ext4 "${LOOPBACK_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
mount "${LOOPBACK_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${LOOPBACK_DEVICE}p1" /mnt/boot
|
||||
|
||||
dnf -y \
|
||||
--releasever=$RELEASE \
|
||||
--installroot=/mnt \
|
||||
--disablerepo='*' \
|
||||
--enablerepo=BaseOS \
|
||||
--enablerepo=AppStream \
|
||||
--enablerepo=extras \
|
||||
--setopt=install_weak_deps=False install \
|
||||
bash basesystem systemd systemd-udev dnf centos-release
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
# Note: as of writing, one-context does not support NetworkManager or
|
||||
# systemd-networkd.
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# See https://github.com/OpenNebula/addon-context-linux/issues/121 for details.
|
||||
# network-scripts.x86_64 : Legacy scripts for manipulating of network devices
|
||||
run_root dnf -y install network-scripts
|
||||
|
||||
# Install (magic?) one-context RPM and hope things work as expected.
|
||||
curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH"
|
||||
run_root dnf -y install "$ONE_CONTEXT_RPM_PATH"
|
||||
run_root rm "$ONE_CONTEXT_RPM_PATH"
|
||||
|
||||
# Install resize2fs, which is required to resize the root file-system.
|
||||
run_root dnf -y install e2fsprogs
|
||||
|
||||
# Initialize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
|
||||
# Install and configure NTP client.
|
||||
run_root dnf install -y chrony
|
||||
run_root systemctl enable chronyd.service
|
||||
|
||||
# Install kernel and bootloader.
|
||||
# Note: linux-firmware is not required in our environment and takes almost 200M
|
||||
# uncompressed but is a direct dependency of kernel-core...
|
||||
run_root dnf -y install kernel grub2
|
||||
|
||||
# Add support for virtio block devices at boot time.
|
||||
cat > /mnt/etc/dracut.conf.d/virtio-blk.conf <<EOF
|
||||
add_drivers="virtio-blk"
|
||||
EOF
|
||||
kernel_version=$(ls /mnt/boot | grep "vmlinuz.*.$ARCH" | cut -d- -f2-)
|
||||
run_root dracut --force --kver $kernel_version
|
||||
|
||||
# Configure grub2.
|
||||
run_root grub2-install --target=i386-pc "${LOOPBACK_DEVICE}"
|
||||
run_root grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
|
||||
# Install and configure SSH daemon.
|
||||
run_root dnf -y install openssh-server
|
||||
run_root systemctl enable sshd
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${LOOPBACK_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${LOOPBACK_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
# Note: build logs could be removed as well.
|
||||
run_root dnf clean all
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,164 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates Debian images for OpenNebula.
|
||||
#
|
||||
# Test image locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=buster # 10.X
|
||||
ARCH=amd64
|
||||
IMAGE_PATH=debian-$RELEASE-$(date --iso-8601).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd0
|
||||
|
||||
# TODO: find the package definition, build it ourselves and publish it in some package repository.
|
||||
ONE_CONTEXT_DEB_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context_5.10.0-1.deb"
|
||||
ONE_CONTEXT_DEB_PATH=/root/one-context.deb
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ $(lsb_release --short --id) != "Ubuntu" ]; then
|
||||
echo "WARNING: this script has been designed to run on an Ubuntu system." >&2
|
||||
echo "WARNING: Not running Ubuntu. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Wait for qemu-nbd to settle.
|
||||
sleep 1
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
debootstrap \
|
||||
--arch=$ARCH $RELEASE \
|
||||
/mnt http://ftp.ch.debian.org/debian
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# Configure package sources and update package index.
|
||||
cat >/mnt/etc/apt/sources.list <<EOF
|
||||
# Stable
|
||||
deb http://ftp.ch.debian.org/debian $RELEASE main contrib non-free
|
||||
deb-src http://ftp.ch.debian.org/debian $RELEASE main contrib non-free
|
||||
|
||||
# Security updates
|
||||
deb http://ftp.ch.debian.org/debian $RELEASE-updates main contrib non-free
|
||||
deb-src http://ftp.ch.debian.org/debian $RELEASE-updates main contrib non-free
|
||||
|
||||
# Backports
|
||||
#deb http://ftp.ch.debian.org/debian $RELEASE-backports main
|
||||
#deb-src http://ftp.ch.debian.org/debian $RELEASE-backports main
|
||||
EOF
|
||||
run_root apt-get update
|
||||
|
||||
# Install (magic?) one-context DEB and hope things work as expected.
|
||||
curl -L "$ONE_CONTEXT_DEB_URL" > "/mnt$ONE_CONTEXT_DEB_PATH"
|
||||
run_root apt-get -y install "$ONE_CONTEXT_DEB_PATH"
|
||||
run_root rm "$ONE_CONTEXT_DEB_PATH"
|
||||
|
||||
# Manually install legacy network scripts used by one-context.
|
||||
run_root apt-get -y install ifupdown
|
||||
|
||||
# Initialize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
run_root systemctl enable systemd-timesyncd.service
|
||||
|
||||
# Install kernel and bootloader. Do not autoconfigure grub.
|
||||
run_root 'echo "grub-pc grub-pc/install_devices_empty boolean true" | debconf-set-selections'
|
||||
run_root DEBIAN_FRONTEND=noninteractive apt-get -y install locales linux-image-amd64 grub-pc
|
||||
|
||||
# Configure grub.
|
||||
run_root grub-install --target=i386-pc "${NBD_DEVICE}"
|
||||
run_root grub-mkconfig -o /boot/grub/grub.cfg
|
||||
|
||||
# Install and configure SSH daemon.
|
||||
run_root apt-get -y install openssh-server
|
||||
|
||||
# Install haveged due to lack of entropy in ONE environment.
|
||||
run_root apt-get -y install haveged
|
||||
run_root systemctl enable haveged.service
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
run_root apt-get clean
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,177 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates Fedora images for OpenNebula, being heavily inspired
|
||||
# from srht's Fedora build image definition.
|
||||
|
||||
# We could have used the Fedora Server Edition or even the @Core package group
|
||||
# (detailed below) but the result image would be quite large/bloated with
|
||||
# unnecessary dependencies. This scheme allows maximum flexibility, and is
|
||||
# definitely opinionated.
|
||||
|
||||
# Depends on the following packages (as of Fedora 31):
|
||||
# qemu-img util-linux coreutils dnf curl e2fsprogs
|
||||
|
||||
# Run locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=32
|
||||
ARCH=x86_64
|
||||
IMAGE_PATH=fedora-$RELEASE-$(date --iso-8601).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd1
|
||||
|
||||
# TODO: find the package definition, build it ourselves and publish it in some RPM repository.
|
||||
ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context-5.10.0-1.el8.noarch.rpm"
|
||||
ONE_CONTEXT_RPM_PATH=/root/one-context.rpm
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f '/etc/fedora-release' ]; then
|
||||
echo "WARNING: this script has been designed to run on a Fedora system." >&2
|
||||
echo "WARNING: Not running Fedora. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
# Note: we could use the @Core package group but it pulls quite a lot of
|
||||
# 'unwanted' dependencies. Run `dnf group info Core` for details.
|
||||
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
dnf -y \
|
||||
--releasever=$RELEASE \
|
||||
--installroot=/mnt \
|
||||
--disablerepo='*' \
|
||||
--enablerepo=fedora \
|
||||
--enablerepo=updates install \
|
||||
--setopt=install_weak_deps=False \
|
||||
basesystem systemd systemd-udev passwd dnf fedora-release
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
# Note: as of writing, one-context does not support NetworkManager or
|
||||
# systemd-networkd.
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# See https://github.com/OpenNebula/addon-context-linux/issues/121 for details.
|
||||
# network-scripts.x86_64 : Legacy scripts for manipulating of network devices
|
||||
run_root dnf -y install network-scripts
|
||||
|
||||
# Install (magic?) one-context RPM and hope things work as expected.
|
||||
curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH"
|
||||
run_root dnf -y install "$ONE_CONTEXT_RPM_PATH"
|
||||
run_root rm "$ONE_CONTEXT_RPM_PATH"
|
||||
|
||||
# Install resize2fs, which is required to resize the root file-system.
|
||||
run_root dnf -y install e2fsprogs
|
||||
|
||||
# Initialize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
run_root systemctl enable systemd-timesyncd.service
|
||||
|
||||
# Install haveged due to lack of entropy in ONE environment.
|
||||
run_root dnf -y install haveged
|
||||
run_root systemctl enable haveged.service
|
||||
|
||||
# Install kernel and bootloader.
|
||||
# Note: linux-firmware is not required in our environment and takes almost 200M
|
||||
# uncompressed but is a direct dependency of kernel-core...
|
||||
run_root dnf -y install kernel grub2
|
||||
|
||||
# Add support for virtio block devices at boot time.
|
||||
cat > /mnt/etc/dracut.conf.d/virtio-blk.conf <<EOF
|
||||
add_drivers="virtio-blk"
|
||||
EOF
|
||||
kernel_version=$(ls /mnt/boot | grep "vmlinuz.*.$ARCH" | cut -d- -f2-)
|
||||
run_root dracut --force --kver $kernel_version
|
||||
|
||||
# Configure grub2.
|
||||
run_root grub2-install --target=i386-pc "${NBD_DEVICE}"
|
||||
run_root grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
|
||||
# Install en configure SSH daemon.
|
||||
run_root dnf -y install openssh-server
|
||||
run_root systemctl enable sshd
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
# Note: build logs could be removed as well.
|
||||
run_root dnf clean all
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,153 +0,0 @@
#!/bin/sh

# This script generates Ubuntu images for OpenNebula.
#
# Test the image locally (without network) with:
#   qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2

set -e
set -x

# XXX: Handle command-line arguments?
RELEASE=eoan # 19.10
ARCH=amd64
IMAGE_PATH=ubuntu-$RELEASE-$(date --iso-8601).img.qcow2
IMAGE_SIZE=10G
NBD_DEVICE=/dev/nbd0

# TODO: find the package definition and build it ourselves, publish it in some package repository.
ONE_CONTEXT_DEB_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context_5.10.0-1.deb"
ONE_CONTEXT_DEB_PATH=/root/one-context.deb

cleanup() {
    # The order here is important.
    umount /mnt/dev/pts 2>/dev/null || true
    umount /mnt/dev/shm 2>/dev/null || true
    umount /mnt/dev 2>/dev/null || true
    umount /mnt/proc 2>/dev/null || true
    umount /mnt/run 2>/dev/null || true
    umount /mnt/sys 2>/dev/null || true
    umount /mnt/boot 2>/dev/null || true
    umount /mnt 2>/dev/null || true
    qemu-nbd --disconnect "$NBD_DEVICE" || true
}

run_root() {
    chroot /mnt /usr/bin/env \
        PATH=/sbin:/usr/sbin:/bin:/usr/bin \
        sh -c "$*"
}

if [ "$(whoami)" != 'root' ]; then
    echo "This script must be run as root." >&2
    exit 1
fi

if [ "$(lsb_release --short --id)" != "Ubuntu" ]; then
    echo "WARNING: this script has been designed to run on an Ubuntu system." >&2
    echo "WARNING: Not running Ubuntu. Giving you 5 seconds to abort." >&2
    sleep 5
fi

# Create base QCOW2 image.
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
modprobe nbd max_part=16
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"

# Wait for qemu-nbd to settle.
sleep 1

# Don't forget to clean up, even if the script crashes.
trap cleanup EXIT

# Create partition table, format partitions.
sfdisk --no-reread "$NBD_DEVICE" <<EOF
1M,500M,L,*
,,L
EOF

mkfs.ext4 "${NBD_DEVICE}p1"
mkfs.ext4 "${NBD_DEVICE}p2"

# Mount partitions, install base OS.

mount "${NBD_DEVICE}p2" /mnt
mkdir /mnt/boot
mount "${NBD_DEVICE}p1" /mnt/boot

debootstrap \
    --arch=$ARCH $RELEASE \
    /mnt http://archive.ubuntu.com/ubuntu/

mount --bind /dev /mnt/dev
mount --bind /dev/pts /mnt/dev/pts
mount --bind /dev/shm /mnt/dev/shm
mount --bind /proc /mnt/proc
mount --bind /run /mnt/run
mount --bind /sys /mnt/sys

# Guest networking is to be handled by the one-context package.
# See https://github.com/OpenNebula/addon-context-linux for details.

# Required to resolve package mirror in chroot.
cp /etc/resolv.conf /mnt/etc/resolv.conf

# Initialize /etc/hosts.
cat > /mnt/etc/hosts << EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
EOF

# Configure package sources and update the package index.
cat >/mnt/etc/apt/sources.list <<EOF
deb http://archive.ubuntu.com/ubuntu/ $RELEASE main restricted universe multiverse
deb http://archive.ubuntu.com/ubuntu/ $RELEASE-security main restricted universe multiverse
deb http://archive.ubuntu.com/ubuntu/ $RELEASE-updates main restricted universe multiverse
deb http://archive.ubuntu.com/ubuntu/ $RELEASE-backports main restricted universe multiverse
EOF
run_root apt-get update

# Install (magic?) one-context DEB and hope things work as expected.
curl -L "$ONE_CONTEXT_DEB_URL" > "/mnt$ONE_CONTEXT_DEB_PATH"
run_root apt-get -y install "$ONE_CONTEXT_DEB_PATH"
run_root rm "$ONE_CONTEXT_DEB_PATH"

# Manually install legacy network scripts used by one-context.
run_root apt-get -y install ifupdown

# Initialize base services.
run_root systemd-machine-id-setup

run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
run_root systemctl enable systemd-timesyncd.service

# Install kernel and bootloader. Do not autoconfigure grub.
# Run the whole pipeline inside the chroot, not only the echo.
run_root 'echo "grub-pc grub-pc/install_devices_empty boolean true" | debconf-set-selections'
run_root DEBIAN_FRONTEND=noninteractive apt-get -y install locales linux-base linux-image-generic grub-pc

# Configure grub.
run_root grub-install --target=i386-pc "${NBD_DEVICE}"
run_root grub-mkconfig -o /boot/grub/grub.cfg

# Install and configure the SSH daemon.
run_root apt-get -y install openssh-server

# Generate fstab file.
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
cat >>/mnt/etc/fstab <<EOF
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
EOF

# Reset systemd's environment (in the image, not on the build host).
run_root rm -f /etc/machine-id
run_root touch /etc/machine-id
rm -f /mnt/var/lib/systemd/random-seed

# Remove temporary files and reclaim freed disk space.
run_root apt-get clean

# Make sure everything is written to disk before exiting.
sync
@ -1,12 +0,0 @@
import configparser

from etcd_wrapper import EtcdWrapper

config = configparser.ConfigParser(allow_no_value=True)
config.read('config-and-secrets.conf')

etcd_client = EtcdWrapper(
    host=config['etcd']['url'], port=config['etcd']['port'],
    ca_cert=config['etcd']['ca_cert'], cert_key=config['etcd']['cert_key'],
    cert_cert=config['etcd']['cert_cert']
)
@ -1,73 +0,0 @@
import etcd3
import json
import logging

from functools import wraps


class EtcdEntry:
    def __init__(self, meta_or_key, value, value_in_json=True):
        if hasattr(meta_or_key, 'key'):
            # if meta has attr 'key' then get it
            self.key = meta_or_key.key.decode('utf-8')
        else:
            # otherwise meta is the 'key'
            self.key = meta_or_key
        self.value = value.decode('utf-8')

        if value_in_json:
            self.value = json.loads(self.value)


def readable_errors(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except etcd3.exceptions.ConnectionFailedError:
            raise etcd3.exceptions.ConnectionFailedError('Cannot connect to etcd: is etcd running as configured?')
        except etcd3.exceptions.ConnectionTimeoutError as err:
            raise etcd3.exceptions.ConnectionTimeoutError('etcd connection timeout.') from err
        except Exception:
            logging.exception('Some etcd error occurred. See syslog for details.')

    return wrapper


class EtcdWrapper:
    @readable_errors
    def __init__(self, *args, **kwargs):
        self.client = etcd3.client(*args, **kwargs)

    @readable_errors
    def get(self, *args, value_in_json=True, **kwargs):
        _value, _key = self.client.get(*args, **kwargs)
        if _key is None or _value is None:
            return None
        return EtcdEntry(_key, _value, value_in_json=value_in_json)

    @readable_errors
    def put(self, *args, value_in_json=True, **kwargs):
        _key, _value = args
        if value_in_json:
            _value = json.dumps(_value)

        if not isinstance(_key, str):
            _key = _key.decode('utf-8')

        return self.client.put(_key, _value, **kwargs)

    @readable_errors
    def get_prefix(self, *args, value_in_json=True, **kwargs):
        # client.get_prefix() yields (value, metadata) tuples; EtcdEntry wants
        # (metadata, value), hence the reversal.
        event_iterator = self.client.get_prefix(*args, **kwargs)
        for e in event_iterator:
            yield EtcdEntry(*e[::-1], value_in_json=value_in_json)

    @readable_errors
    def watch_prefix(self, key, value_in_json=True):
        event_iterator, cancel = self.client.watch_prefix(key)
        for e in event_iterator:
            if hasattr(e, '_event'):
                e = getattr(e, '_event')
            if e.type == e.PUT:
                yield EtcdEntry(e.kv.key, e.kv.value, value_in_json=value_in_json)
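A minimal usage sketch for the wrapper above; the endpoint is an assumption (a plain local etcd on 127.0.0.1:2379 without the TLS options that config.py passes in production):

# Illustrative only: host/port are placeholders, the real values come from
# config-and-secrets.conf via config.py.
from etcd_wrapper import EtcdWrapper

etcd = EtcdWrapper(host='127.0.0.1', port=2379)

# Values are JSON-encoded by default (value_in_json=True).
etcd.put('/example/vm/1', {'name': 'test-vm', 'state': 'active'})

entry = etcd.get('/example/vm/1')
if entry is not None:
    print(entry.key, entry.value['name'])

# Iterate over everything stored below a prefix.
for entry in etcd.get_prefix('/example/vm/'):
    print(entry.key, entry.value)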
@ -1,98 +0,0 @@
import json

from enum import IntEnum
from xmlrpc.client import ServerProxy as RPCClient

from xmltodict import parse

from config import config, etcd_client


# Constants
ALL_VM_STATES = -1
START_ID = -1  # First id whatever it is
END_ID = -1  # Last id whatever it is


def put_under_list(obj):
    if not isinstance(obj, list):
        return [obj]
    return obj


class VMState(IntEnum):
    INIT = 0
    PENDING = 1
    HOLD = 2
    ACTIVE = 3
    STOPPED = 4
    SUSPENDED = 5
    DONE = 6
    FAILED = 7
    POWEROFF = 8
    UNDEPLOYED = 9
    CLONING = 10
    CLONING_FAILURE = 11


class VmFilterFlag(IntEnum):
    UIDUserResources = 0  # UID User's Resources
    UserAndItsGroupsResources = -1  # Resources belonging to the user and any of his groups
    AllResources = -2  # All resources
    UserResources = -3  # Resources belonging to the user
    UserPrimaryGroupResources = -4  # Resources belonging to the user's primary group


class VM:
    def __init__(self, vm: dict):
        self.id = vm.get('ID', None)
        self.owner = {
            'id': vm.get('UID', None),
            'name': vm.get('UNAME', None),
            'gname': vm.get('GNAME', None)
        }
        self.name = vm.get('NAME', None)
        self.status = vm.get('STATE', None)
        if self.status:
            self.status = VMState(int(self.status)).name.lower()

        template = vm['TEMPLATE']

        self.disk = put_under_list(template.get('DISK', []))
        self.graphics = template.get('GRAPHICS', {})
        self.memory = template.get('MEMORY', None)
        self.nic = put_under_list(template.get('NIC', []))
        self.vcpu = template.get('VCPU', None)
        self.host = {
            'name': ((vm.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HOSTNAME', None),
            'id': ((vm.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HID', None),
        }
        self.snapshots = put_under_list(vm.get('SNAPSHOTS', []))

    def get_data(self):
        return {
            attr: getattr(self, attr)
            for attr in dir(self)
            if not attr.startswith('__') and not callable(getattr(self, attr))
        }


def main():
    with RPCClient('https://opennebula.ungleich.ch:2634/RPC2') as rpc_client:
        success, response, *_ = rpc_client.one.vmpool.infoextended(
            config['oca']['client_secrets'], VmFilterFlag.AllResources.value, START_ID, END_ID, ALL_VM_STATES
        )
        if success:
            vms = json.loads(json.dumps(parse(response)))['VM_POOL']['VM']
            for vm in vms:
                vm_id = vm['ID']
                etcd_client.put(f'/opennebula/vm/{vm_id}', vm)

                parsed_vm = VM(vm)
                etcd_client.put(f'/opennebula/parsed_vm/{parsed_vm.id}', parsed_vm.get_data())
        else:
            print(response)


if __name__ == "__main__":
    main()
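The script above syncs the whole VM pool; for spot-checking a single VM the same XML-RPC endpoint also exposes one.vm.info. A small sketch along the same lines (endpoint and session string as above; the VM id 10761 is just the example id used elsewhere in this repo):

# Sketch: fetch a single VM over the same XML-RPC API and parse it as above.
import json

from xmlrpc.client import ServerProxy as RPCClient
from xmltodict import parse

from config import config

VM_ID = 10761  # example id only

with RPCClient('https://opennebula.ungleich.ch:2634/RPC2') as rpc_client:
    success, response, *_ = rpc_client.one.vm.info(
        config['oca']['client_secrets'], VM_ID
    )

if success:
    vm = json.loads(json.dumps(parse(response)))['VM']
    print(vm['NAME'], vm['TEMPLATE'].get('MEMORY'))
else:
    print(response)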
@ -1,56 +0,0 @@
from pprint import pprint

from config import etcd_client


def get_vm_by_ip(vms, ip, status='active'):
    vms_by_status = {
        vm_id: vm
        for vm_id, vm in vms.items()
        if vm['status'] == status
    }
    for vm_id, vm in vms_by_status.items():
        vm_ips = []
        for nic in vm.get('nic', []):
            global_ipv6 = nic.get('IP6_GLOBAL', None)
            local_ipv6 = nic.get('IP6_LINK', None)
            ipv4 = nic.get('IP', None)
            vm_ips += [global_ipv6, local_ipv6, ipv4]

        if ip in vm_ips:
            return {vm_id: vm}
    return None


def main():
    vm_prefix = '/opennebula/parsed_vm/'

    vms = {
        int(vm.key.split('/')[-1]): vm.value
        for vm in etcd_client.get_prefix(vm_prefix)
    }

    VM_ID = 10761  # One of Nico's VMs

    # Get all data related to a VM
    pprint(vms.get(VM_ID))

    # Get the host of a VM
    print(vms.get(VM_ID).get('host').get('name'))

    # Get the VNC port of a VM
    print(vms.get(VM_ID).get('graphics').get('PORT'))

    # Get all disks attached to a VM
    pprint(vms.get(VM_ID).get('disk'))

    # Who is the owner of a VM?
    print(vms.get(VM_ID).get('owner').get('name'))

    # Get the VM that has 2a0a:e5c0:0:5:0:78ff:fe11:d75f
    search_ungleich_ch = get_vm_by_ip(vms, '2a0a:e5c0:0:5:0:78ff:fe11:d75f')
    pprint(search_ungleich_ch)


if __name__ == '__main__':
    main()
14
pg_repair

@ -1,14 +0,0 @@
#!/bin/bash -e

pglist_arr=( $(ceph health detail | grep pg | grep active | awk '{print $2}') )

echo ${pglist_arr[*]}

for ((i=0; i<${#pglist_arr[@]}; i++)); do
    if [ 1 -eq $(ceph pg repair ${pglist_arr[$i]} | grep repair | grep instructing | wc -l) ]; then
        echo repair script error
        break
    fi
    echo ${pglist_arr[$i]} repair script done
    sleep 10
done
@ -1,45 +0,0 @@
#!/bin/sh
# Nico Schottelius, 2019-09-20, Seoul, Coffebean, 23:56
# Copying: GPLv3
echo "If you are running alpine, these packages are needed:"
echo "apk add alpine-sdk xz-dev"
set -x
set -e

if [ ! -e ipxe ]; then
    git clone git://git.ipxe.org/ipxe.git
else
    (cd ipxe; git pull)
fi

cd ipxe/src

sed -i -e 's/^#undef.*NET_PROTO_IPV6/#define NET_PROTO_IPV6/' \
    -e 's/^#undef.*DOWNLOAD_PROTO_HTTPS/#define DOWNLOAD_PROTO_HTTPS/' \
    -e 's,^//#define POWEROFF_CMD,#define POWEROFF_CMD,' \
    -e 's,^//#define PING_CMD,#define PING_CMD,' \
    -e 's,^//#define NTP_CMD,#define NTP_CMD,' config/general.h


mkdir -p output

make bin/ipxe.iso
cp bin/ipxe.iso output/

make bin/undionly.kpxe
cp bin/undionly.kpxe output/

make bin/ipxe.usb
cp bin/ipxe.usb output/

make bin-x86_64-efi/ipxe.efi
cp bin-x86_64-efi/ipxe.efi output/

cat <<EOF
Outputs in output/:

- PXE chain-loadable: undionly.kpxe (put on a tftp server)
- USB loadable: ipxe.usb (dd to a usb stick)
- EFI loadable: ipxe.efi (put on a vfat partition)

EOF
52
vm-create

@ -1,52 +0,0 @@
#!/bin/random
# This is a sample script / prototype to create a VM:
# 1. user registers a payment method (credit card) -> stored at stripe
# 2. user adds ssh key(s)
# 3. user creates a VM
#
# Flow to register a payment method:
#
# - Connect to account.ungleich.ch with (username, password) for getting (name, realm, seed)
# - Connect to pay.ungleich.ch with (name, realm, token) { JSON }
#   JSON similar to:
#
#   { type: "credit-card", cc number, name, verify, ... }
#
#
# Flow to add an ssh key:
# - Connect to account.ungleich.ch with (username, password) for getting (name, realm, seed)
# - Connect to infra.ungleich.ch/api using (name, realm, token) POST { json }
#   { key: ... }
#   Standard REST, registering it internally to a user
#
# Flow to create a VM (a rough client-side sketch follows after this file):
#
# - Connect to account.ungleich.ch with (username, password) for getting (name, realm, seed)
# - Connect to infra.ungleich.ch/api using (name, realm, token) POST { json }
# - infra.ungleich.ch then connects to otp.ungleich.ch verifying the (name, realm, token)
# - infra.ungleich.ch checks that the user has >= 1 ssh keys registered, otherwise gives an error message
# - infra.ungleich.ch then connects to pay.ungleich.ch verifying that the user can "afford" the VM / books it
#   infra passes (user, product, productvariant)
#   --> infra needs to be able to derive a product from the parameters to pass to pay.ungleich.ch
#   --> if the user cannot afford it, return an error to the user
# - pay.ungleich.ch stores the order / subscription (depending on the type)
# - Variant a)
#   - infra.ungleich.ch returns { OK + ticket number }
#   - client can poll / get the status of the VM on infra.ungleich.ch
#   - Meanwhile infra.ungleich.ch creates the VM / configures the VM / ensures the ssh key(s) are added
# - Variant b)
#   - infra.ungleich.ch then creates the VM / configures the VM / ensures the ssh key(s) are added
#


if [ $# -ne 8 ]; then
    echo "$0: username password template ssdsizegb ramgb cpunum hddsizegb onlyipv6"

    exit 1
fi

curl https://...
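The flow above only exists as comments; a rough client-side sketch of the "create a VM" steps could look like the following. Everything in it is an assumption derived from those comments: the /api/... paths, the field names and the token handling are invented placeholders, not a documented API.

# Hypothetical sketch of the "create a VM" flow described above.
# All paths and field names are placeholders.
import requests

USERNAME = 'example-user'
PASSWORD = 'example-password'

# Assumed step: obtain (name, realm, token) from the account service.
auth = requests.post(
    'https://account.ungleich.ch/api/token',   # placeholder path
    json={'username': USERNAME, 'password': PASSWORD},
).json()

# Assumed step: ask the infrastructure API to create the VM.
vm_request = {
    'name': auth['name'],        # placeholder field names
    'realm': auth['realm'],
    'token': auth['token'],
    'template': 'debian-10',
    'ssd_gb': 10,
    'ram_gb': 2,
    'cpu': 2,
    'only_ipv6': False,
}
response = requests.post('https://infra.ungleich.ch/api/vm', json=vm_request)

# Variant a) from the comments: the API returns a ticket that can be polled.
print(response.status_code, response.json())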
19
vm_list

@ -1,19 +0,0 @@
#!/bin/bash -e
# option $1 is the LDAP password
# option $2 is the OU


uid_list=( $(ldapsearch -x -H ldaps://ldap1.ungleich.ch:636 -D cn=manager,dc=ungleich,dc=ch -w $1 -b "ou=$2,dc=ungleich,dc=ch" | grep uid: | awk '{print $2}') )

for ((i=0; i<${#uid_list[@]}; i++)); do
    list_email[$i]=$(ldapsearch -x -H ldaps://ldap1.ungleich.ch:636 -D cn=manager,dc=ungleich,dc=ch -w $1 -b "uid=${uid_list[$i]},ou=$2,dc=ungleich,dc=ch" | grep mail: | awk '{print $2}')
    list_vmid=()
    list_vmid=( $(onevm list | grep ${list_email[$i]} | grep runn | awk '{print $1}') )
    for ((j=0; j<${#list_vmid[@]}; j++)); do
        temp=$(onevm show ${list_vmid[$j]} | grep PORT)
        temp1="${temp#*\"}"
        port="${temp1%%\"*}"
        host=$(onevm show ${list_vmid[$j]} | grep HOST | grep ungleich | awk '{print $3}')
        echo ${uid_list[$i]} ${list_vmid[$j]} $port $host >> ~/vm_vnc_list
    done
done
@ -1,20 +0,0 @@
#!/bin/bash -e
# option $1 is the LDAP password
# option $2 is the OU


uid_list=( $(ldapsearch -x -H ldaps://ldap1.ungleich.ch:636 -D cn=manager,dc=ungleich,dc=ch -w $1 -b "ou=$2,dc=ungleich,dc=ch" | grep uid: | awk '{print $2}') )

for ((i=0; i<${#uid_list[@]}; i++)); do
    # strip the b'...' wrapper that some exports leave around the uid
    uid_temp=$(echo ${uid_list[i]} | sed "s/b'//g" | sed "s/'//g")
    list_email[$i]=$(ldapsearch -x -H ldaps://ldap1.ungleich.ch:636 -D cn=manager,dc=ungleich,dc=ch -w $1 -b "uid=${uid_list[$i]},ou=$2,dc=ungleich,dc=ch" | grep mail: | awk '{print $2}')
    list_vmid=()
    list_vmid=( $(onevm list | grep ${list_email[$i]} | grep runn | awk '{print $1}') )
    for ((j=0; j<${#list_vmid[@]}; j++)); do
        temp=$(onevm show ${list_vmid[$j]} | grep PORT)
        temp1="${temp#*\"}"
        port="${temp1%%\"*}"
        host=$(onevm show ${list_vmid[$j]} | grep HOST | grep ungleich | awk '{print $3}')
        echo $uid_temp ${list_vmid[$j]} $port $host >> ~/vm_vnc_list
    done
done
@ -1,5 +0,0 @@
vm_list=( $(virsh list | awk '{print $2}') )

for ((i=0; i<${#vm_list[@]}; i++)); do
    ceph osd map hdd ${vm_list[i]}
done
@ -1,5 +0,0 @@
import configparser

config = configparser.ConfigParser(allow_no_value=True)
config.read('/opt/ungleich-tools/vnc_console_connection/config-and-secrets.conf')
@ -1,55 +0,0 @@
import psycopg2 as pg2
from config import config

db_name = config['db']['db_name']
db_user = config['db']['db_user']
db_password = config['db']['db_password']
db_port = config['db']['db_port']


def setconn(u_id, vm_num, vm_port, vm_host):
    conn = pg2.connect("host=localhost dbname={} user={} password={} port={}".format(db_name, db_user, db_password, db_port))
    conn.autocommit = True
    cur = conn.cursor()
    cur.execute("SELECT entity_id FROM guacamole_entity WHERE name = '{}'".format(u_id))
    row = cur.fetchone()
    if row is None:
        cur.execute("INSERT INTO guacamole_entity (name, type) VALUES ('{}','USER')".format(u_id))
        cur.execute("SELECT entity_id FROM guacamole_entity WHERE name = '{}'".format(u_id))
        row = cur.fetchone()
        en_id = row[0]
        # '\\x74657374' is the bytea hex literal for "test", used as a dummy password hash.
        cur.execute("INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('{}', '\\x74657374', now())".format(en_id))
        print("create user:", u_id)
    else:
        en_id = row[0]
        cur.execute("SELECT password_hash FROM guacamole_user WHERE entity_id = '{}'".format(en_id))
        row = cur.fetchone()
        if row is None:
            cur.execute("INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('{}', '\\x74657374', now())".format(en_id))
        print("user exists")
    cn = "{}{}".format(u_id, vm_num)
    cur.execute("SELECT connection_id FROM guacamole_connection WHERE connection_name = '{}'".format(cn))
    row = cur.fetchone()
    if row is None:
        # create connection
        cur.execute("INSERT INTO guacamole_connection (connection_name, protocol) VALUES ('{}', 'vnc')".format(cn))
        cur.execute("SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '{}' AND parent_id IS NULL".format(cn))
        temp_cn_id = cur.fetchone()
        cn_id = temp_cn_id[0]
        cur.execute("INSERT INTO guacamole_connection_parameter VALUES ('{}','hostname','{}')".format(cn_id, vm_host))
        cur.execute("INSERT INTO guacamole_connection_parameter VALUES ('{}','port','{}')".format(cn_id, vm_port))
        # connection permission
        cur.execute("INSERT INTO guacamole_connection_permission(entity_id, connection_id, permission) VALUES ('{}', '{}', 'READ')".format(en_id, cn_id))
        # clipboard-encoding
        cur.execute("INSERT INTO guacamole_connection_parameter VALUES ('{}','clipboard-encoding','UTF-8')".format(cn_id))
        print("create connection")
    else:
        cur.execute("SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '{}' AND parent_id IS NULL".format(cn))
        temp_cn_id = cur.fetchone()
        cn_id = temp_cn_id[0]
        cur.execute("UPDATE guacamole_connection_parameter SET parameter_value='{}' where connection_id='{}' and parameter_name='hostname'".format(vm_host, cn_id))
        cur.execute("UPDATE guacamole_connection_parameter SET parameter_value='{}' where connection_id='{}' and parameter_name='port'".format(vm_port, cn_id))
        #cur.execute("UPDATE guacamole_connection_parameter SET parameter_value='UTF-8' where connection_id='{}' and parameter_name='clipboard-encoding'".format(cn_id))
        print("update existing connection")
    conn.close()
    return None
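The queries above interpolate values with str.format; as a style note, the same entity lookup/insert can be written with psycopg2 query parameters so quoting is handled by the driver. A hedged sketch (same tables and config keys as above; the RETURNING clause is standard PostgreSQL), not a drop-in replacement for the whole function:

# Sketch: the guacamole_entity lookup/insert from setconn(), using query parameters.
import psycopg2 as pg2

from config import config


def ensure_entity(u_id):
    conn = pg2.connect(
        host='localhost',
        dbname=config['db']['db_name'],
        user=config['db']['db_user'],
        password=config['db']['db_password'],
        port=config['db']['db_port'],
    )
    conn.autocommit = True
    cur = conn.cursor()

    cur.execute("SELECT entity_id FROM guacamole_entity WHERE name = %s", (u_id,))
    row = cur.fetchone()
    if row is None:
        cur.execute(
            "INSERT INTO guacamole_entity (name, type) VALUES (%s, 'USER') RETURNING entity_id",
            (u_id,),
        )
        row = cur.fetchone()

    conn.close()
    return row[0]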
@ -1,88 +0,0 @@
import json

from enum import IntEnum
from xmlrpc.client import ServerProxy as RPCClient
from xmltodict import parse
from config import config
from ldap_list import vm_list
from db_export import setconn

# Constants
ALL_VM_STATES = -1
START_ID = -1  # First id whatever it is
END_ID = -1  # Last id whatever it is
session_string = config['oca']['client_secrets']
opnserver = config['oca']['opn_server']


class VMState(IntEnum):
    INIT = 0
    PENDING = 1
    HOLD = 2
    ACTIVE = 3
    STOPPED = 4
    SUSPENDED = 5
    DONE = 6
    FAILED = 7
    POWEROFF = 8
    UNDEPLOYED = 9
    CLONING = 10
    CLONING_FAILURE = 11


class VmFilterFlag(IntEnum):
    UIDUserResources = 0  # UID User's Resources
    UserAndItsGroupsResources = -1  # Resources belonging to the user and any of his groups
    AllResources = -2  # All resources
    UserResources = -3  # Resources belonging to the user
    UserPrimaryGroupResources = -4  # Resources belonging to the user's primary group


class VM:
    def __init__(self, vm: dict):
        self.id = vm.get('ID', None)
        self.owner = {
            'id': vm.get('UID', None),
            'name': vm.get('UNAME', None),
            'gname': vm.get('GNAME', None)
        }
        self.name = vm.get('NAME', None)
        self.status = vm.get('STATE', None)
        if self.status:
            self.status = VMState(int(self.status)).name.lower()

        template = vm['TEMPLATE']

        self.graphics = template.get('GRAPHICS', {})
        self.memory = template.get('MEMORY', None)
        self.vcpu = template.get('VCPU', None)
        self.host = {
            'name': ((vm.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HOSTNAME', None),
            'id': ((vm.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HID', None),
        }


def main():
    with RPCClient(opnserver) as rpc_client:
        success, response, *_ = rpc_client.one.vmpool.infoextended(
            session_string, VmFilterFlag.AllResources.value, START_ID, END_ID, VMState.ACTIVE.value
        )
        if success:
            vms = json.loads(json.dumps(parse(response)))['VM_POOL']['VM']
            for entry in vm_list.entries:
                temp_uname = entry.uid
                for vm in vms:
                    vm_user = vm['UNAME']
                    vm_id = vm['ID']
                    vm_port = vm['TEMPLATE']['GRAPHICS'].get('PORT')
                    vm_host = vm['HISTORY_RECORDS']['HISTORY']['HOSTNAME']
                    if vm_user == temp_uname:
                        #print(entry.uid, vm_id, vm_port, vm_host)
                        setconn(entry.uid, vm_id, vm_port, vm_host)

        else:
            print(response)


if __name__ == "__main__":
    main()
@ -1,30 +0,0 @@
import ldap3
import sys
from config import config
from ldap3 import Server, Connection, ObjectDef, Reader, ALL, SUBTREE, ALL_ATTRIBUTES
from ldap3.core import exceptions


LDAP_SERVER = config['ldap']['server']
LDAP_PASSWORD = config['ldap']['admin_password']
LDAP_USER = config['ldap']['admin_dn']
LDAP_PORT = int(config['ldap']['ldap_port'])

# Create the Server object with the given address.
server = Server(LDAP_SERVER, LDAP_PORT, get_info=ALL)
# Create a connection object, and bind with the given DN and password.
try:
    conn = Connection(server, LDAP_USER, LDAP_PASSWORD, auto_bind=True)
    print('LDAP Bind Successful.')
    # Perform a search with pre-defined criteria.
    # Mention the search filter / filter type and attributes.
    conn.search('ou=customer,dc=ungleich,dc=ch', '(&(!({}={})))'.format('mail', '*@ungleich.ch'), attributes=['uid', 'mail'])
    #conn.search('ou=customer,dc=ungleich,dc=ch', '(objectClass=*)', attributes=['uid', 'mail'])
    # Print the resulting entries.
    #for entry in conn.entries:
    #    print(entry.uid, entry.mail)
    vm_list = conn
except exceptions.LDAPException as err:
    sys.exit(f'LDAP Error: {err}')
@ -1,18 +0,0 @@
#!/bin/sh
# 2019-09-09, Nico Schottelius
# Show countries / regions of VPN clients connected via wireguard

# countries + region
for ip in $(wg | grep endpoint | sed -e 's/endpoint: //' -e 's/\(.*\):[0-9]*/\1/' -e 's/\[//' -e 's/\]//'); do
    curl -s ipinfo.io/$ip | grep -e country -e region;
done

# countries with counter
( for ip in $(wg | grep endpoint | sed -e 's/endpoint: //' -e 's/\(.*\):[0-9]*/\1/' -e 's/\[//' -e 's/\]//'); do curl -s ipinfo.io/$ip | grep -e country ; done ) | sort | uniq -c | sort -g

# Get number of configured VPNs
configured_vpns=$(wg show | grep ^peer | wc -l)
active_vpns=$(wg show | grep endpoint | wc -l)

echo "Configured VPNs: ${configured_vpns}"
echo "Active VPNs: ${active_vpns}"