Compare commits: master...fnuxfedora (1 commit)
Commit SHA1: 366c9c65b2
153 changed files with 171 additions and 9141 deletions
.gitignore (vendored): 12 lines removed
@ -1,12 +0,0 @@
opennebula-vm-etcd/config-and-secrets.conf

*.pyc

.idea
.vscode

ipxe/

openwrt-*-*.bin
alpine-minirootfs-*.tar.gz
opennebula-images/*.qcow2

@ -1,24 +0,0 @@
#!/usr/bin/env python3

maintenance=35
networking=10
cpu=3
ram=4
hdd=(2/100)
ssd=(3.5/10)

package = {}
# maint, net, cpu, ram, hdd, ssd
package['starter'] = [ 1, 1, 4, 6, 200, 20 ]
package['community'] = [ 1, 1, 8, 12, 1000, 100 ]
package['pro'] = [ 1, 1, 16, 24, 5000, 500 ]

for k,v in package.items():
    price=v[0] * maintenance
    price+=v[1] * networking
    price+=v[2] * cpu
    price+=v[3] * ram
    price+=v[4] * hdd
    price+=v[5] * ssd

    print(f"Price for {k} with {v[2]} CPUs, {v[3]} GB RAM, {v[4]} GB HDD, {v[5]} GB SSD = {price}")

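For orientation, the formula in this removed pricing script is price = maint*35 + net*10 + cpu*3 + ram*4 + hdd*(2/100) + ssd*(3.5/10); worked out for the three packages defined above (the script prints the totals as Python floats), the results are:

    starter:   1*35 + 1*10 +  4*3 +  6*4 +  200*0.02 +  20*0.35 = 92
    community: 1*35 + 1*10 +  8*3 + 12*4 + 1000*0.02 + 100*0.35 = 172
    pro:       1*35 + 1*10 + 16*3 + 24*4 + 5000*0.02 + 500*0.35 = 464
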
@ -1,203 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -ne 3 ]; then
|
||||
echo "$0 disk ssh-keyfile [efi|bios|efinvram]"
|
||||
echo " disk: which disk to install to"
|
||||
echo " ssh-keyfile: ssh keys to add into the image"
|
||||
echo " use efi or bios partitioning"
|
||||
echo "pre install in OS: apk add hdparm sudo wget efibootmgr git sfdisk"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
DISK=$1; shift
|
||||
SSH_KEYS=$1; shift
|
||||
BOOT_VIA=$1; shift
|
||||
|
||||
MAJOR_VERSION=3.21
|
||||
MINOR_VERSION=0
|
||||
|
||||
IMAGE=alpine-minirootfs-$MAJOR_VERSION.$MINOR_VERSION-x86_64.tar.gz
|
||||
|
||||
RESOLVCONF=/etc/resolv.conf
|
||||
|
||||
working_directory=$(pwd -P)
|
||||
rootfs_tmpdir=$(mktemp -d)
|
||||
|
||||
rootfs_url="http://dl-cdn.alpinelinux.org/alpine/v$MAJOR_VERSION/releases/x86_64/$IMAGE"
|
||||
|
||||
case $DISK in
|
||||
/dev/sd*)
|
||||
partition1=${DISK}1
|
||||
partition2=${DISK}2
|
||||
;;
|
||||
/dev/mmcblk*|/dev/nvme*)
|
||||
partition1=${DISK}p1
|
||||
partition2=${DISK}p2
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported disk - edit this script" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
run_root () {
|
||||
sudo chroot $rootfs_tmpdir /usr/bin/env \
|
||||
PATH=/sbin:/bin:/usr/sbin:/usr/bin \
|
||||
/bin/sh -c "$*"
|
||||
}
|
||||
|
||||
wget -c "$rootfs_url" -O "$IMAGE"
|
||||
|
||||
# Clean the first 2M - getting rid of old things
|
||||
# in the gap and also the partition table
|
||||
dd if=/dev/zero of=${DISK} bs=1M count=2
|
||||
|
||||
case "$BOOT_VIA" in
|
||||
bios)
|
||||
sudo sfdisk "$DISK" <<EOF
|
||||
label: dos
|
||||
,,L
|
||||
EOF
|
||||
sleep 10
|
||||
sudo hdparm -z $DISK
|
||||
# For creation, if an existing filesystem is on the partitions
|
||||
sudo mkfs.ext4 -F ${partition1}
|
||||
sudo mount -t ext4 ${partition1} $rootfs_tmpdir
|
||||
;;
|
||||
efi*)
|
||||
sudo sfdisk "$DISK" <<EOF
|
||||
label: gpt
|
||||
,500MiB,U
|
||||
,,L
|
||||
EOF
|
||||
sudo hdparm -z $DISK
|
||||
sudo mkfs.vfat ${partition1}
|
||||
sudo mkfs.ext4 -F ${partition2}
|
||||
sudo mount -t ext4 ${partition2} "$rootfs_tmpdir"
|
||||
sudo mkdir "${rootfs_tmpdir}/boot"
|
||||
sudo mount -t vfat ${partition1} "${rootfs_tmpdir}/boot"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown disk format, $BOOT_VIA" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
# keep right permissions, use sudo
|
||||
sudo tar xf $IMAGE -C $rootfs_tmpdir
|
||||
|
||||
# These are required by grub-install
|
||||
# And also for generating grub config that contains rootfstype
|
||||
for dir in dev proc sys; do
|
||||
sudo mount --bind /${dir} ${rootfs_tmpdir}/${dir}
|
||||
done
|
||||
|
||||
# Add SSH keys
|
||||
run_root mkdir -p root/.ssh
|
||||
sudo cp $SSH_KEYS $rootfs_tmpdir/root/.ssh/authorized_keys
|
||||
run_root chown root:root /root/.ssh/authorized_keys
|
||||
run_root chmod 0600 /root/.ssh/authorized_keys
|
||||
run_root chmod 0700 /root/.ssh
|
||||
|
||||
# Import local resolv.conf.
|
||||
sudo cp "$RESOLVCONF" $rootfs_tmpdir/etc/resolv.conf
|
||||
|
||||
# Generate fstab which is later included in the initramfs
|
||||
|
||||
# Add filesystem to fstab, because busybox mount does not work
|
||||
# without -t ext4 for mounting and returns "No such file or directory"
|
||||
# nb2:~# blkid| grep ^${DISK}1 | awk '{ print $2 }'
|
||||
# UUID="fecf4182-f6dd-4d2c-9af7-8f36444ee25c"
|
||||
eval $(blkid | grep ^${DISK}1 | awk '{ print $2 }')
|
||||
UUID_1=$UUID
|
||||
|
||||
|
||||
run_root apk update
|
||||
run_root apk add linux-lts openrc udev openssh e2fsprogs
|
||||
|
||||
# For ansible
|
||||
run_root apk add python3
|
||||
|
||||
run_root rc-update add udev
|
||||
run_root rc-update add udev-trigger
|
||||
run_root rc-update add sshd
|
||||
run_root rc-update add networking
|
||||
run_root rc-update add hostname
|
||||
run_root rc-update add sysctl
|
||||
run_root rc-update add modules
|
||||
run_root sed -i 's/root:!::0:::::/root:*::0:::::/' /etc/shadow
|
||||
|
||||
sudo tee "$rootfs_tmpdir/etc/network/interfaces" <<EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet6 manual
|
||||
up /sbin/ip link set \$IFACE up
|
||||
|
||||
EOF
|
||||
|
||||
sudo tee "$rootfs_tmpdir/etc/hostname" <<EOF
|
||||
alpine-unconfigured
|
||||
EOF
|
||||
|
||||
# Setup bootloader
|
||||
|
||||
run_root apk add grub-bios grub-efi
|
||||
echo 'GRUB_CMDLINE_LINUX_DEFAULT="quiet rootfstype=ext4"' >> ${rootfs_tmpdir}/etc/default/grub
|
||||
run_root grub-mkconfig -o /boot/grub/grub.cfg
|
||||
|
||||
case "$BOOT_VIA" in
|
||||
bios)
|
||||
run_root grub-install --target=i386-pc ${DISK}
|
||||
echo "UUID=$UUID_1 / ext4 defaults 0 1" >> ${rootfs_tmpdir}/etc/fstab
|
||||
;;
|
||||
efi*)
|
||||
eval $(blkid | grep ^${DISK}2 | awk '{ print $2 }')
|
||||
UUID_2=$UUID
|
||||
|
||||
echo "UUID=$UUID_2 / ext4 defaults 0 1" >> ${rootfs_tmpdir}/etc/fstab
|
||||
echo "UUID=$UUID_1 /boot vfat defaults 0 2" >> ${rootfs_tmpdir}/etc/fstab
|
||||
|
||||
|
||||
# Actually do add us to the bios
|
||||
if [ $BOOT_VIA = "efinvram" ]; then
|
||||
dir=/sys/firmware/efi/efivars/
|
||||
sudo mount --bind ${dir} ${rootfs_tmpdir}${dir}
|
||||
run_root apk add efibootmgr
|
||||
run_root grub-install --efi-directory=/boot
|
||||
sudo umount ${rootfs_tmpdir}${dir}
|
||||
else
|
||||
run_root grub-install --efi-directory=/boot --no-nvram
|
||||
|
||||
# FIX for some machines (?)
|
||||
run_root mkdir /boot/EFI/boot
|
||||
run_root cp /boot/EFI/alpine/grubx64.efi /boot/EFI/boot/bootx64.efi
|
||||
run_root cp /boot/grub/grub.cfg /boot/EFI/boot/
|
||||
fi
|
||||
|
||||
sudo umount ${rootfs_tmpdir}/boot
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
# Debug
|
||||
run_root cat /etc/fstab
|
||||
|
||||
# Cleanup
|
||||
run_root rm -f /etc/resolv.conf
|
||||
for dir in dev proc sys; do
|
||||
sudo umount ${rootfs_tmpdir}/${dir}
|
||||
done
|
||||
sudo umount $rootfs_tmpdir
|
||||
|
||||
sync
|
||||
rmdir ${rootfs_tmpdir}
|
||||
|
||||
echo "${DISK} has been setup with Alpine Linux"
|
||||
|
||||
exit 0
|
|
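For context, the removed installer above (invoked further down in this diff as /opt/ungleich-tools/alpine-install-on-disk.sh) would be run roughly like this; the target disk and key file below are placeholders, not values from the repository:

    # prerequisites named in the usage text
    apk add hdparm sudo wget efibootmgr git sfdisk
    # install Alpine 3.21.0 onto a hypothetical second disk, EFI layout
    ./alpine-install-on-disk.sh /dev/sdb ~/.ssh/id_rsa.pub efi
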
@ -1,106 +1,90 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "$0 ssh-keyfile"
|
||||
echo " ssh-keyfile: ssh keys to add into the image"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
SSH_KEYS=$1; shift
|
||||
|
||||
MAJOR_VERSION=3.13
|
||||
MINOR_VERSION=5
|
||||
MAJOR_VERSION=3.10
|
||||
MINOR_VERSION=3
|
||||
IMAGE=alpine-minirootfs-$MAJOR_VERSION.$MINOR_VERSION-x86_64.tar.gz
|
||||
|
||||
SSH_KEYS=$(cat ~/.ssh/id_rsa.pub)
|
||||
RESOLVCONF=/etc/resolv.conf
|
||||
|
||||
working_directory=$(pwd -P)
|
||||
#rootfs_tmpdir=$(mktemp -d)
|
||||
rootfs_tmpdir=alpine_${MAJOR_VERSION}-${MINOR_VERSION}-rootfs
|
||||
|
||||
initramfs="$working_directory/initramfs-alpine-${MAJOR_VERSION}.${MINOR_VERSION}"
|
||||
kernel="$working_directory/kernel-alpine-${MAJOR_VERSION}.${MINOR_VERSION}"
|
||||
|
||||
mkdir -p ${rootfs_tmpdir}
|
||||
|
||||
rootfs_tmpdir=$(mktemp -d)
|
||||
rootfs_url="http://dl-cdn.alpinelinux.org/alpine/v$MAJOR_VERSION/releases/x86_64/$IMAGE"
|
||||
|
||||
run_root () {
|
||||
sudo chroot $rootfs_tmpdir /usr/bin/env \
|
||||
chroot $rootfs_tmpdir /usr/bin/env \
|
||||
PATH=/bin:/sbin \
|
||||
/bin/sh -c "$*"
|
||||
}
|
||||
|
||||
wget -c "$rootfs_url" -O "$IMAGE"
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# keep right permissions, use sudo
|
||||
sudo tar xf $IMAGE -C $rootfs_tmpdir
|
||||
# Download, extract inital rootfs.
|
||||
curl "$rootfs_url" -o "$working_directory/$IMAGE"
|
||||
tar xf $IMAGE -C $rootfs_tmpdir
|
||||
|
||||
# Add SSH keys
|
||||
run_root mkdir -p root/.ssh
|
||||
sudo cp $SSH_KEYS $rootfs_tmpdir/root/.ssh/authorized_keys
|
||||
run_root chown root:root /root/.ssh/authorized_keys
|
||||
run_root chmod 0600 /root/.ssh/authorized_keys
|
||||
run_root chmod 0700 /root/.ssh
|
||||
echo $SSH_KEYS > $rootfs_tmpdir/root/.ssh/authorized_keys
|
||||
run_root chmod 0600 root/.ssh/authorized_keys
|
||||
run_root chmod 0700 root/.ssh
|
||||
|
||||
# Import local resolv.conf.
|
||||
sudo cp "$RESOLVCONF" $rootfs_tmpdir/etc/resolv.conf
|
||||
cat "$RESOLVCONF" > $rootfs_tmpdir/etc/resolv.conf
|
||||
|
||||
# Make sure init is found by the kernel.
|
||||
run_root ln -sf /sbin/init /init
|
||||
run_root ln -s /sbin/init /init
|
||||
|
||||
run_root apk update
|
||||
run_root apk add linux-lts openrc udev openssh rdnssd
|
||||
# rdnssd
|
||||
run_root rc-update add udev
|
||||
run_root rc-update add udev-trigger
|
||||
run_root rc-update add sshd
|
||||
run_root rc-update add rdnssd
|
||||
run_root rc-update add networking
|
||||
run_root rc-update add hostname
|
||||
run_root sed -i 's/root:!::0:::::/root:*::0:::::/' /etc/shadow
|
||||
|
||||
# Fix not yet updated initscript for rdnssd
|
||||
sudo tee "$rootfs_tmpdir/etc/init.d/rdnssd" <<EOF
|
||||
#!/sbin/openrc-run
|
||||
|
||||
supervisor=supervise-daemon
|
||||
command=/usr/sbin/rdnssd
|
||||
command_args="-H /etc/rdnssd/resolvconf"
|
||||
command_args_foreground="-f"
|
||||
|
||||
start_pre() {
|
||||
checkpath -d -m 0755 -o nobody:nobody -q /run/rdnssd
|
||||
}
|
||||
# Servers have static addresses, disable the standard
|
||||
# alpine setting of using tempaddr = 2
|
||||
cat > "$rootfs_tmpdir/etc/sysctl.d/99-ipv6.conf" <<EOF
|
||||
net.ipv6.conf.default.use_tempaddr = 0
|
||||
net.ipv6.conf.all.use_tempaddr = 0
|
||||
|
||||
net.ipv6.conf.all.accept_ra = 1
|
||||
EOF
|
||||
|
||||
sudo tee "$rootfs_tmpdir/etc/network/interfaces" <<EOF
|
||||
cat > "$rootfs_tmpdir/etc/network/interfaces" <<EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet6 manual
|
||||
up /sbin/ip link set \$IFACE up
|
||||
|
||||
pre-up ip link set eth0 up
|
||||
post-up ip addr show dev eth0 | grep inet6 >> /etc/issue
|
||||
post-up echo post post up >> /etc/issue
|
||||
EOF
|
||||
|
||||
sudo tee "$rootfs_tmpdir/etc/hostname" <<EOF
|
||||
cat > "$rootfs_tmpdir/etc/hostname" <<EOF
|
||||
alpine-unconfigured
|
||||
EOF
|
||||
|
||||
# Cleanup resolv.conf
|
||||
run_root rm -f /etc/resolv.conf
|
||||
echo ipv6 >> "$rootfs_tmpdir/etc/modules"
|
||||
|
||||
# Layer atop base rootfs.
|
||||
run_root apk update
|
||||
run_root apk upgrade
|
||||
run_root apk add openssh linux-vanilla openrc udev
|
||||
run_root rc-update add udev
|
||||
run_root rc-update add udev-trigger
|
||||
run_root rc-update add sshd
|
||||
run_root rc-update add networking
|
||||
run_root rc-update add hostname
|
||||
|
||||
# FIXME: add / install rdnssd / ndisc6 / start it on boot
|
||||
# ndisc6 is only @testing
|
||||
|
||||
# Generate initramfs image
|
||||
(cd $rootfs_tmpdir; sudo find . | sudo cpio -H newc -o | gzip -9 > ${initramfs})
|
||||
cp "$rootfs_tmpdir/boot/vmlinuz-lts" "${kernel}"
|
||||
(cd $rootfs_tmpdir; find . | cpio -H newc -o | gzip -9 > "$working_directory/alpine-initramfs.gz")
|
||||
cp "$rootfs_tmpdir/boot/vmlinuz-vanilla" "$working_directory/alpine-kernel"
|
||||
|
||||
echo rm -rf "$rootfs_tmpdir"
|
||||
# Cleanup.
|
||||
#rm -r "$rootfs_tmpdir"
|
||||
|
||||
echo "Use ${initramfs} and ${kernel} from $working_directory"!
|
||||
|
||||
exit 0
|
||||
# Upload to netboot server. - needs to be done outside sudo
|
||||
echo "Use alpine-initramfs.gz alpine-kernel from $working_directory"!
|
||||
|
|
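A quick way to smoke-test the kernel/initramfs pair this script produces (this command is not part of the repository, and the artifact names depend on which variant of the script is run) is a direct qemu boot:

    qemu-system-x86_64 -enable-kvm -m 1G \
        -kernel kernel-alpine-3.13.5 \
        -initrd initramfs-alpine-3.13.5 \
        -append console=ttyS0 -nographic
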
build-alpine-chroot.sh: mode changed from executable file to normal file (0 line changes)
@ -3,21 +3,25 @@
|
|||
name=$(hostname)
|
||||
|
||||
CEPH_PATH=/var/lib/ceph
|
||||
MGR_PATH=$CEPH_PATH/mgr/ceph-${name}
|
||||
MGR_PATH=$CEPH_PATH/mgr/ceph-$name
|
||||
|
||||
if [ -e "$MGR_PATH" ]; then
|
||||
echo "$MGR_PATH exists - aborting"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p "$MGR_PATH"
|
||||
mkdir "$MGR_PATH"
|
||||
chown ceph:ceph "$MGR_PATH"
|
||||
touch "$MGR_PATH/sysvinit"
|
||||
|
||||
ceph auth get-or-create mgr.${name} \
|
||||
ceph auth get-or-create mgr.$name \
|
||||
mon 'allow profile mgr' \
|
||||
osd 'allow *' \
|
||||
mds 'allow *' > "$MGR_PATH/keyring"
|
||||
|
||||
# Starting with monit - same on every os
|
||||
/opt/ungleich-tools/monit-ceph-create-start mgr.${name}
|
||||
# Starting with monit, if available
|
||||
if [ -e /etc/monit ]; then
|
||||
/opt/ungleich-tools/monit-ceph-create-start mgr.${name}
|
||||
else
|
||||
/etc/init.d/ceph start mgr.${name}
|
||||
fi
|
ceph-mon-create-start: new executable file, 21 lines
@ -0,0 +1,21 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "$0 initial-key-file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
fname=$1
|
||||
|
||||
ceph-mon --mkfs -i $(hostname) --keyring "$fname" --setuser ceph --setgroup ceph
|
||||
touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
|
||||
|
||||
# Fix broken permissions
|
||||
chown ceph:ceph /var/run/ceph/
|
||||
|
||||
# Starting with monit, if available
|
||||
if [ -e /etc/monit ]; then
|
||||
/opt/ungleich-tools/monit-ceph-create-start mon.$(hostname)
|
||||
else
|
||||
/etc/init.d/ceph start mon.$(hostname)
|
||||
fi
|
|
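A minimal usage sketch for this new script, assuming the initial monitor keyring has already been exported to a file (the removed bootstrap script further down in this diff uses ceph auth get mon. -o <file> for exactly that); the path below is a placeholder:

    ceph auth get mon. -o /tmp/mon.keyring
    ./ceph-mon-create-start /tmp/mon.keyring
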
@ -1,12 +1,11 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "$0 <monitor>"
|
||||
echo "f.i. $0 serverX"
|
||||
if [ $# -ne 0 ]; then
|
||||
echo "$0 (no arguments"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mon=mon.$1
|
||||
mon=mon.$(hostname)
|
||||
|
||||
# Starting with monit, if available
|
||||
if [ -e /etc/monit ]; then
|
ceph-osd-activate-all: new executable file, 30 lines
@ -0,0 +1,30 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2018-02-20
|
||||
# Copyright ungleich glarus ag
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
tmpdir=$(mktemp -d)
|
||||
|
||||
for dev in $(fdisk -l | awk '$6 ~/Ceph/ { print $1 }'); do
|
||||
mount "$dev" "$tmpdir"
|
||||
id=$(cat "${tmpdir}/whoami")
|
||||
|
||||
# Chown the dev device to be accessible for ceph
|
||||
chown ceph:ceph "${tmpdir}/block"
|
||||
|
||||
umount "$dev"
|
||||
|
||||
dir="/var/lib/ceph/osd/ceph-$id"
|
||||
mkdir -p "$dir"
|
||||
mount "$dev" "$dir"
|
||||
|
||||
if [ -e /etc/monit ]; then
|
||||
/opt/ungleich-tools/monit-ceph-create-start "osd.$id"
|
||||
else
|
||||
/etc/init.d/ceph start "osd.$id"
|
||||
fi
|
||||
done
|
||||
|
||||
rmdir "$tmpdir"
|
|
@ -38,7 +38,7 @@ osd_id=$(ceph osd create)
|
|||
dev_metadata="/dev/disk/by-partuuid/$uuid_metadata"
|
||||
dev_block="/dev/disk/by-partuuid/$uuid_block"
|
||||
|
||||
/usr/bin/sgdisk --new=0:0:+100M --change-name="0:ceph data" \
|
||||
/sbin/sgdisk --new=0:0:+100M --change-name="0:ceph data" \
|
||||
--partition-guid="0:$uuid_metadata" \
|
||||
--typecode=0:4fbd7e29-9d25-41b8-afd0-062c0ceff05d \
|
||||
--mbrtogpt -- $DEV
|
||||
|
@ -46,23 +46,17 @@ dev_block="/dev/disk/by-partuuid/$uuid_block"
|
|||
|
||||
# Using gdisk --largest-new does not change the name or set guid;
|
||||
# So use 2 steps instead
|
||||
/usr/bin/sgdisk --largest-new=0 --mbrtogpt -- $DEV
|
||||
/sbin/sgdisk --largest-new=0 --mbrtogpt -- $DEV
|
||||
/sbin/udevadm settle --timeout=600
|
||||
|
||||
|
||||
lastpart=$(gdisk -l $DEV | tail -n1 | awk '{ print $1 }')
|
||||
/usr/bin/sgdisk --change-name="${lastpart}:ceph block" \
|
||||
/sbin/sgdisk --change-name="${lastpart}:ceph block" \
|
||||
--partition-guid="${lastpart}:$uuid_block" \
|
||||
--typecode="${lastpart}:cafecafe-9b03-4f30-b4c6-b4b80ceff106" \
|
||||
--mbrtogpt -- $DEV
|
||||
/sbin/udevadm settle --timeout=600
|
||||
|
||||
#echo $1
|
||||
#echo $(blkid | grep $1"2")
|
||||
|
||||
#cblock=$(blkid | grep $1"2" | cut -d'"' -f4)
|
||||
#echo $cblock
|
||||
|
||||
/sbin/mkfs -t xfs -f -i size=2048 -- "$dev_metadata"
|
||||
|
||||
mountpath=/var/lib/ceph/osd/ceph-${osd_id}
|
||||
|
@ -76,8 +70,6 @@ echo "$uuid_block" > "$mountpath/block_uuid"
|
|||
echo "$fsid" > "$mountpath/ceph_fsid"
|
||||
echo "$magic" > "$mountpath/magic"
|
||||
echo "$CLASS" > "$mountpath/crush_device_class"
|
||||
echo $(echo $dev_block | cut -c23-) > "$mountpath/fsid"
|
||||
|
||||
|
||||
# Important, otherwise --mkfs later will try to create filestore
|
||||
echo bluestore > "$mountpath/type"
|
||||
|
@ -86,7 +78,7 @@ ceph auth get-or-create "osd.${osd_id}" osd \
|
|||
'allow *' mon 'allow profile osd' > $mountpath/keyring
|
||||
|
||||
echo ${osd_id} > "$mountpath/whoami"
|
||||
touch "$mountpath/openrc"
|
||||
touch "$mountpath/sysvinit"
|
||||
|
||||
ceph-osd --cluster ceph -i "${osd_id}" --mkfs
|
||||
chown -R ceph:ceph "$mountpath"
|
||||
|
@ -104,4 +96,8 @@ ceph osd crush add osd.${osd_id} ${WEIGHT} host=$(hostname)
|
|||
echo "$metadata_dev /var/lib/ceph/osd/ceph-${osd_id} xfs noatime 0 0" >> /etc/fstab
|
||||
|
||||
# Starting with monit, if available
|
||||
ceph-osd -i ${osd_id}
|
||||
if [ -e /etc/monit ]; then
|
||||
/opt/ungleich-tools/monit-ceph-create-start osd.${osd_id}
|
||||
else
|
||||
/etc/init.d/ceph start osd.${osd_id}
|
||||
fi
|
|
@ -25,7 +25,6 @@ fi
|
|||
|
||||
ceph osd crush remove $osd_name
|
||||
ceph osd rm $osd_name
|
||||
ceph auth del $osd_name
|
||||
|
||||
echo "Mount path before umounting: "
|
||||
mount | grep "$mountpath"
|
|
@ -1,32 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# Inspired from https://rook.io/docs/rook/v1.7/ceph-teardown.html
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
echo $0 disk
|
||||
echo f.i. $0 /dev/sdx
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DISK="$1"; shift
|
||||
|
||||
# Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has to be clean)
|
||||
|
||||
# You will have to run this step for all disks.
|
||||
sgdisk --zap-all $DISK
|
||||
|
||||
# Clean hdds with dd
|
||||
dd if=/dev/zero of="$DISK" bs=1M count=100
|
||||
|
||||
# Clean disks such as ssd with blkdiscard instead of dd
|
||||
blkdiscard $DISK
|
||||
|
||||
# These steps only have to be run once on each node
|
||||
# If rook sets up osds using ceph-volume, teardown leaves some devices mapped that lock the disks.
|
||||
ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove %
|
||||
|
||||
# ceph-volume setup can leave ceph-<UUID> directories in /dev and /dev/mapper (unnecessary clutter)
|
||||
rm -rf /dev/ceph-*
|
||||
rm -rf /dev/mapper/ceph--*
|
||||
|
||||
# Inform the OS of partition table changes
|
||||
partprobe $DISK
|
|
@ -1,21 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
set -x
|
||||
|
||||
monkey=$(mktemp)
|
||||
monmap=$(mktemp)
|
||||
|
||||
ceph auth get mon. -o $monkey
|
||||
ceph mon getmap -o $monmap
|
||||
|
||||
mkdir /var/lib/ceph/mon/ceph-$(hostname)
|
||||
ceph-mon -i $(hostname) --mkfs --monmap $monmap --keyring $monkey
|
||||
chown -R ceph:ceph /var/lib/ceph/mon/ceph-$(hostname)
|
||||
|
||||
# Fix broken permissions on Debian
|
||||
chown ceph:ceph /var/run/ceph/
|
||||
|
||||
# Starting with monit
|
||||
/opt/ungleich-tools/monit-ceph-create-start mon.$(hostname)
|
||||
|
||||
rm -f ${monkey} ${monmap}
|
|
@ -1,51 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2018-02-20
|
||||
# Copyright ungleich glarus ag
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
tmpdir=$(mktemp -d)
|
||||
|
||||
# XFS based partition scheme
|
||||
for dev in $(fdisk -l | awk '$6 ~/Ceph/ { print $1 }'); do
|
||||
if mount | grep ^$dev ; then
|
||||
echo Skipping $dev, already mounted
|
||||
continue
|
||||
fi
|
||||
mount "$dev" "$tmpdir"
|
||||
id=$(cat "${tmpdir}/whoami")
|
||||
|
||||
# Chown the dev device to be accessible for ceph
|
||||
chown ceph:ceph "${tmpdir}/block"
|
||||
|
||||
umount "$dev"
|
||||
|
||||
dir="/var/lib/ceph/osd/ceph-$id"
|
||||
mkdir -p "$dir"
|
||||
mount "$dev" "$dir"
|
||||
|
||||
if [ -e /etc/monit ]; then
|
||||
/opt/ungleich-tools/monit-ceph-create-start "osd.$id"
|
||||
else
|
||||
/etc/init.d/ceph start "osd.$id"
|
||||
fi
|
||||
done
|
||||
|
||||
# LVM based / manual does not work atm:
|
||||
# + grep ceph.block_device
|
||||
# + sed -e s/.*ceph.osd_id=// -e s/,.*//
|
||||
# + ceph-volume lvm activate --no-systemd 112
|
||||
# --> UnboundLocalError: local variable 'tags' referenced before assignment
|
||||
|
||||
# Activate all volumes in the OS
|
||||
vgchange -ay
|
||||
# Using this with a fake /bin/systemctl
|
||||
ceph-volume lvm activate --all
|
||||
|
||||
for osdid in $(lvs -o lv_tags | grep ceph.block_device| sed -e 's/.*ceph.osd_id=//' -e 's/,.*//'); do
|
||||
#ceph-volume lvm activate --no-systemd $osdid
|
||||
/opt/ungleich-tools/monit-ceph-create-start "osd.$osdid"
|
||||
done
|
||||
|
||||
rmdir "$tmpdir"
|
|
@ -1,39 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 17:19, 2018-02-09
|
||||
# Nico Schottelius
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "$0 disk class [nostart]"
|
||||
echo "class = hdd or ssd"
|
||||
echo "If specifying anything after the class, monit will not be created"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export DEV=$1;shift
|
||||
export CLASS=$1; shift
|
||||
|
||||
set -e
|
||||
|
||||
# Ensure ceph-volume has all pre-requisites
|
||||
if [ ! -f /var/lib/ceph/bootstrap-osd/ceph.keyring ]; then
|
||||
mkdir -p /var/lib/ceph/bootstrap-osd
|
||||
ceph auth get client.bootstrap-osd > /var/lib/ceph/bootstrap-osd/ceph.keyring
|
||||
fi
|
||||
if [ ! -f /etc/ceph/ceph.client.bootstrap-osd.keyring ]; then
|
||||
ceph auth get client.bootstrap-osd > /etc/ceph/ceph.client.bootstrap-osd.keyring
|
||||
fi
|
||||
|
||||
# We are redirecting to a tempfile so that the output is visible for debugging,
|
||||
# but we can still easily filter for the osd id
|
||||
tmp=$(mktemp)
|
||||
|
||||
ceph-volume lvm prepare --data $DEV --crush-device-class $CLASS 2>&1 | tee ${tmp}
|
||||
osd_id=$(grep /var/lib/ceph/osd/ceph- ${tmp} | sed -e 's/.*ceph-//' -e 's,/.*,,' | head -n1)
|
||||
rm -f ${tmp}
|
||||
|
||||
if [ $# -eq 1 ]; then
|
||||
echo "Not executing: /opt/ungleich-tools/monit-ceph-create-start osd.${osd_id}"
|
||||
else
|
||||
# Start it
|
||||
/opt/ungleich-tools/monit-ceph-create-start osd.${osd_id}
|
||||
fi
|
|
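Per the usage text at the top of this removed script, a run would look like the following; both the script name used here and /dev/sdc are placeholders, since the file name is not shown in this diff:

    ./ceph-osd-create-start /dev/sdc ssd            # prepare the OSD and start it via monit
    ./ceph-osd-create-start /dev/sdc ssd nostart    # prepare only; skip the monit start
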
@ -1,19 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2022-12-07, 10:46
|
||||
# Nico Schottelius
|
||||
|
||||
set -x
|
||||
set -e
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "$0 osd.id"
|
||||
echo "i.e. $0 17"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
osd_id=$1; shift
|
||||
osd_name=osd.${osd_id}
|
||||
|
||||
ceph osd crush remove $osd_name
|
||||
ceph osd rm $osd_name
|
||||
ceph auth del $osd_name
|
|
@ -1,21 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2022-02-01
|
||||
# Update 2024-08-05: include mgr and osds
|
||||
|
||||
/etc/init.d/lvm2 start
|
||||
|
||||
|
||||
/opt/ungleich-tools/ceph/ceph-osd-activate-all
|
||||
|
||||
ceph-volume lvm activate --all
|
||||
|
||||
|
||||
for osd in \
|
||||
/etc/monit/conf.d/mon.* \
|
||||
/etc/monit/conf.d/mgr.* \
|
||||
/etc/monit/conf.d/osd.* ; do
|
||||
|
||||
cmd=$(cat $osd | grep "start pro" | sed -e 's/.* = "//' -e 's/".*//')
|
||||
echo $cmd
|
||||
( $cmd & )
|
||||
done
|
|
@ -1,16 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
rm -f /etc/apt/sources.list.d/ceph.list
|
||||
|
||||
cat > /etc/apt/sources.list <<EOF
|
||||
|
||||
deb http://pkgmaster.devuan.org/merged beowulf main contrib non-free
|
||||
deb http://pkgmaster.devuan.org/merged beowulf-updates main contrib non-free
|
||||
deb http://pkgmaster.devuan.org/merged beowulf-security main contrib non-free
|
||||
|
||||
EOF
|
||||
|
||||
echo deb http://ftp.debian.org/debian buster-backports main > /etc/apt/sources.list.d/backports.list
|
||||
apt update
|
||||
apt dist-upgrade -y
|
||||
apt install -t buster-backports -y ceph
|
|
@ -1,41 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Locate which block device corresponds to the OSD
|
||||
# Nico Schottelius, 2023-06-10
|
||||
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
echo $0 osdnum
|
||||
echo f.i. $0 99
|
||||
exit 1
|
||||
fi
|
||||
|
||||
osd_id=$1; shift
|
||||
|
||||
osd_path=/var/lib/ceph/osd/ceph-${osd_id}
|
||||
|
||||
if ! mount | grep -q " on ${osd_path} "; then
|
||||
echo "Nothing mounted on ${osd_path}, are you on the right host?"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
blockdev=$(readlink -f ${osd_path}/block)
|
||||
|
||||
# Is directly referring to sdX? print and exit
|
||||
if echo $blockdev | grep -q ^/dev/sd; then
|
||||
echo $blockdev
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# try the non-recursive variant, resulting in finding pv/vg
|
||||
blockdev=$(readlink ${osd_path}/block)
|
||||
lvm_vg=$(echo $blockdev | awk -F/ '{ print $3 }')
|
||||
|
||||
pv_name=$(pvdisplay | grep -B1 $lvm_vg | awk '/PV Name/ { print $3 }')
|
||||
|
||||
if [ "$pv_name" ]; then
|
||||
echo $pv_name
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Cannot determine block device for osd.${osdid}" >&2
|
||||
exit 1
|
|
@ -1,19 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo $0: server [server...]
|
||||
exit 1
|
||||
fi
|
||||
|
||||
while [ $# -ge 1 ]; do
|
||||
server=$1; shift
|
||||
|
||||
ssh root@$server "
|
||||
cd /sys/block/
|
||||
for dev in sd*; do
|
||||
size=\$(fdisk -l | grep ^Disk | grep \$dev | awk '/bytes/ { print \$3 \" \" \$4 }')
|
||||
printf \"${server} \${dev} \${size} rotational: \"
|
||||
cat \$dev/queue/rotational
|
||||
done
|
||||
"
|
||||
done
|
|
@ -1,22 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -ne 4 ]; then
|
||||
echo "$0 host [args for alpine-install-on-disk.sh]"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
host=$1; shift
|
||||
|
||||
disk=$1; shift
|
||||
key=$1; shift
|
||||
mode=$1; shift
|
||||
|
||||
cat <<EOF | ssh -t root@$host
|
||||
echo nameserver 2a0a:e5c0:10:a::a > /etc/resolv.conf
|
||||
apk update
|
||||
apk add git sfdisk sudo wget
|
||||
cd /opt
|
||||
if [ ! -d /opt/ungleich-tools ]; then git clone https://code.ungleich.ch/ungleich-public/ungleich-tools.git; fi
|
||||
/opt/ungleich-tools/alpine-install-on-disk.sh $disk $key $mode
|
||||
|
||||
EOF
|
|
@ -1,41 +0,0 @@
|
|||
#!/bin/bash
|
||||
#option $1 is vm_list file name
|
||||
#option $2 id DB location
|
||||
#option $3 is DB user
|
||||
#option $4 is DB name
|
||||
|
||||
#host='localhost'
|
||||
|
||||
user_arr=( $(cat $1 | awk '{print $1}' ))
|
||||
vmid_arr=( $(cat $1 | awk '{print $2}' ))
|
||||
port_arr=( $(cat $1 | awk '{print $3}' ))
|
||||
place_arr=( $(cat $1 | awk '{print $4}' ))
|
||||
|
||||
for ((i=0; i<${#user_arr[@]}; i++)) do
|
||||
#create user
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_entity (name, type) VALUES ('${user_arr[i]}','USER');"
|
||||
en_id=$(psql -h $2 -U $3 -d $4 -tAc "SELECT entity_id FROM guacamole_entity WHERE name = '${user_arr[i]}';")
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('$en_id', '\x74657374', now());"
|
||||
|
||||
#create connection
|
||||
cn=${user_arr[i]}${vmid_arr[i]}
|
||||
echo $cn
|
||||
if [ 0 -eq $(psql -h $2 -U $3 -d $4 -tAc "SELECT connection_id FROM guacamole_connection WHERE connection_name = '$cn';" | wc -l) ]; then
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection (connection_name, protocol) VALUES ('$cn', 'vnc');"
|
||||
cn_id=$(psql -h $2 -U $3 -d $4 -tAc "SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;")
|
||||
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','hostname','${place_arr[i]}');"
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','port','${port_arr[i]}');"
|
||||
|
||||
#connection permission
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_permission(entity_id, connection_id, permission) VALUES ('$en_id', '$cn_id', 'READ');"
|
||||
#clipboard-encoding
|
||||
psql -h $2 -U $3 -d $4 -tAc "INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','clipboard-encoding','UTF-8');"
|
||||
|
||||
else
|
||||
cn_id=$(psql -h $2 -U $3 -d $4 -tAc "SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;")
|
||||
psql -h $2 -U $3 -d $4 -tAc "UPDATE guacamole_connection_parameter SET parameter_value='${place_arr[i]}' where connection_id='$cn_id' and parameter_name='hostname';"
|
||||
psql -h $2 -U $3 -d $4 -tAc "UPDATE guacamole_connection_parameter SET parameter_value='${port_arr[i]}' where connection_id='$cn_id' and parameter_name='port';"
|
||||
fi
|
||||
|
||||
done
|
|
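For reference, each line of the vm_list file parsed above must carry four whitespace-separated fields matching the awk columns: user name, VM id, VNC port and target host. A purely illustrative line would be:

    alice 4321 5901 vm-host-1.example.com
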
@ -1,38 +0,0 @@
|
|||
#!/bin/bash
|
||||
#option $1 is vm_list file name
|
||||
#option $2 is DB name
|
||||
#this script should be run on guacamole server
|
||||
|
||||
|
||||
host='localhost'
|
||||
user_arr=( $(cat $1 | awk '{print $1}' ))
|
||||
vmid_arr=( $(cat $1 | awk '{print $2}' ))
|
||||
port_arr=( $(cat $1 | awk '{print $3}' ))
|
||||
place_arr=( $(cat $1 | awk '{print $4}' ))
|
||||
|
||||
for ((i=0; i<${#user_arr[@]}; i++)) do
|
||||
#create user
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_entity (name, type) VALUES ('${user_arr[i]}','USER');\""
|
||||
en_id=$(su - postgres -c "psql postgres -d $2 -tAc \"SELECT entity_id FROM guacamole_entity WHERE name = '${user_arr[i]}';\"")
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_user(entity_id, password_hash, password_date) VALUES ('$en_id', '\x74657374', now());\""
|
||||
|
||||
#create connection
|
||||
cn=${user_arr[i]}${vmid_arr[i]}
|
||||
|
||||
if [ 0 -eq $(su - postgres -c "psql postgres -d $2 -tAc \"SELECT connection_id FROM guacamole_connection WHERE connection_name = '$cn';\"" | wc -l) ]; then
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection (connection_name, protocol) VALUES ('$cn', 'vnc');\""
|
||||
cn_id=$(su - postgres -c "psql postgres -d $2 -tAc \"SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;\"")
|
||||
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','hostname','$host');\""
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection_parameter VALUES ('$cn_id','port','${port_arr[i]}');\""
|
||||
|
||||
#connection permission
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"INSERT INTO guacamole_connection_permission(entity_id, connection_id, permission) VALUES ('$en_id', '$cn_id', 'READ');\""
|
||||
|
||||
else
|
||||
cn_id=$(su - postgres -c "psql postgres -d $2 -tAc \"SELECT MAX(connection_id) FROM guacamole_connection WHERE connection_name = '$cn' AND parent_id IS NULL;\"")
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"UPDATE guacamole_connection_parameter SET parameter_value='$host' where connection_id='$cn_id' and parameter_name='hostname';\""
|
||||
su - postgres -c "psql postgres -d $2 -tAc \"UPDATE guacamole_connection_parameter SET parameter_value='${port_arr[i]}' where connection_id='$cn_id' and parameter_name='port';\""
|
||||
fi
|
||||
|
||||
done
|
|
@ -1,121 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2019-12-09
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
if [ $# -ne 3 ]; then
|
||||
echo $0 suite keyfile disk
|
||||
echo suite: beowulf or similar
|
||||
echo keyfile: file containing the ssh keys
|
||||
echo disk: the block device
|
||||
exit 1
|
||||
fi
|
||||
|
||||
suite=$1; shift
|
||||
keyfile=$1; shift
|
||||
disk=$1; shift
|
||||
|
||||
case $disk in
|
||||
/dev/sd*)
|
||||
partition=${disk}1
|
||||
;;
|
||||
/dev/mmcblk*|/dev/nvme*|/dev/loop*)
|
||||
partition=${disk}p1
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported disk - edit this script" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
chroot_dir=$(mktemp -d)
|
||||
date=$(date +%F)
|
||||
|
||||
################################################################################
|
||||
# Disk preparation
|
||||
|
||||
# Clean the first 2M - getting rid of old things
|
||||
# in the gap and also the partition table
|
||||
dd if=/dev/zero of=${disk} bs=1M count=2
|
||||
|
||||
# Partition disk with 1 Linux partition
|
||||
sudo sfdisk "$disk" <<EOF
|
||||
label: dos
|
||||
,,L
|
||||
EOF
|
||||
|
||||
partprobe "${disk}"
|
||||
sleep 3
|
||||
|
||||
# For creation, if an existing filesystem is on the partitions
|
||||
mkfs.ext4 -F ${partition}
|
||||
|
||||
mount ${partition} ${chroot_dir}
|
||||
|
||||
# Devuan: debootstrap beowulf /tmp/tmp.teGuJxytz0 http://packages.devuan.org/devuan
|
||||
debootstrap "${suite}" "${chroot_dir}"
|
||||
|
||||
# need non-free for firmware-bnx2
|
||||
echo "deb http://pkgmaster.devuan.org/merged ${suite} main contrib non-free" > ${chroot_dir}/etc/apt/sources.list
|
||||
|
||||
chroot ${chroot_dir} apt update
|
||||
chroot ${chroot_dir} apt install -y openssh-server rdnssd linux-image-amd64 firmware-bnx2 ifenslave vlan grub-pc
|
||||
|
||||
echo "unconfigured-host" > ${chroot_dir}/etc/hostname
|
||||
|
||||
echo '* * * * * root ip -o -6 addr show | grep -E -v " lo |one" > /etc/issue' > ${chroot_dir}/etc/cron.d/ipv6addr
|
||||
|
||||
mkdir -p ${chroot_dir}/root/.ssh
|
||||
|
||||
cat ${keyfile} > ${chroot_dir}/root/.ssh/authorized_keys
|
||||
|
||||
# Fix possible permission issue from above
|
||||
chmod -R og-rwx ${chroot_dir}/root/
|
||||
|
||||
################################################################################
|
||||
# networking
|
||||
|
||||
# echo bonding
|
||||
|
||||
cat > ${chroot_dir}/etc/network/interfaces << EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
# I would like to have a generic block like this below
|
||||
# But as long as interface
|
||||
auto bond0
|
||||
iface bond0 inet manual
|
||||
bond-miimon 500
|
||||
bond-mode 4
|
||||
post-up /sbin/ip link set \$IFACE mtu 9000
|
||||
bond-slaves none
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet manual
|
||||
bond-master bond0
|
||||
post-up /sbin/ip link set \$IFACE mtu 9000
|
||||
|
||||
auto eth1
|
||||
iface eth1 inet manual
|
||||
bond-master bond0
|
||||
post-up /sbin/ip link set \$IFACE mtu 9000
|
||||
|
||||
EOF
|
||||
|
||||
for dir in dev sys proc; do
|
||||
mount --bind /${dir} ${chroot_dir}/${dir}
|
||||
done
|
||||
|
||||
chroot ${chroot_dir} grub-install ${disk}
|
||||
# Ensure boot loader has a configuration
|
||||
chroot ${chroot_dir} grub-mkconfig -o /boot/grub/grub.cfg
|
||||
|
||||
for dir in dev sys proc; do
|
||||
umount ${chroot_dir}/${dir}
|
||||
done
|
||||
|
||||
umount ${chroot_dir}
|
||||
sync
|
||||
|
||||
rmdir ${chroot_dir}
|
|
@ -1,120 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2019-12-09
|
||||
# the ugly code is llnu
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
if [ $# -ne 2 ]; then
|
||||
echo $0 suite out-directory
|
||||
echo out-directory: into which directory to place resulting files
|
||||
echo suite is for instance ascii, beowulf, etc
|
||||
exit 1
|
||||
fi
|
||||
|
||||
suite=$1; shift
|
||||
outdir=$1; shift
|
||||
|
||||
date=$(date +%F)
|
||||
mkdir -p ${outdir}
|
||||
|
||||
basename=${suite}-${date}
|
||||
abs_outdir=$(cd ${outdir} && pwd -P)
|
||||
|
||||
chroot_dir=${abs_outdir}/${basename}
|
||||
kernel=${abs_outdir}/kernel-${basename}
|
||||
initramfs=${abs_outdir}/initramfs-${basename}
|
||||
|
||||
#keyurl=https://code.ungleich.ch/ungleich-public/__ungleich_staff_ssh_access/raw/master/files
|
||||
keyurl=https://key.wf
|
||||
|
||||
debootstrap "${suite}" "${chroot_dir}"
|
||||
|
||||
# need non-free for firmware-bnx2
|
||||
echo "deb http://pkgmaster.devuan.org/merged ${suite} main contrib non-free" > ${chroot_dir}/etc/apt/sources.list
|
||||
|
||||
chroot ${chroot_dir} apt update
|
||||
chroot ${chroot_dir} apt install -y openssh-server rdnssd linux-image-amd64 firmware-bnx2 ifenslave vlan
|
||||
|
||||
echo "unconfigured-host" > ${chroot_dir}/etc/hostname
|
||||
|
||||
cp ${chroot_dir}/boot/vmlinuz-* ${kernel}
|
||||
|
||||
echo '* * * * * root ip -o -6 addr show | grep -E -v " lo |one" > /etc/issue' > ${chroot_dir}/etc/cron.d/ipv6addr
|
||||
|
||||
mkdir -p ${chroot_dir}/root/.ssh
|
||||
|
||||
for key in sami dominique jinguk nico; do
|
||||
curl -s ${keyurl}/${key} >> ${chroot_dir}/root/.ssh/authorized_keys
|
||||
done
|
||||
|
||||
# Fix possible permission issue from above
|
||||
chown -R root:root ${chroot_dir}/root/
|
||||
|
||||
################################################################################
|
||||
# networking
|
||||
|
||||
# echo bonding
|
||||
|
||||
cat > ${chroot_dir}/etc/network/interfaces << EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
# I would like to have a generic block like this below
|
||||
# But as long as interface
|
||||
auto bond0
|
||||
iface bond0 inet manual
|
||||
bond-miimon 500
|
||||
bond-mode 4
|
||||
post-up /sbin/ip link set \$IFACE mtu 9000
|
||||
bond-slaves none
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet manual
|
||||
bond-master bond0
|
||||
post-up /sbin/ip link set \$IFACE mtu 9000
|
||||
|
||||
auto eth1
|
||||
iface eth1 inet manual
|
||||
bond-master bond0
|
||||
post-up /sbin/ip link set \$IFACE mtu 9000
|
||||
|
||||
# server network
|
||||
auto bond0.11
|
||||
iface bond0.11 inet6 auto
|
||||
post-up /sbin/ip link set \$IFACE mtu 9000
|
||||
vlan-raw-device bond0
|
||||
|
||||
EOF
|
||||
|
||||
# # find the boot interfaces at boot: HP servers still have ifnames=1
|
||||
# cat > ${chroot_dir}/etc/rc.local <<EOF
|
||||
# mac=\$(cat /proc/cmdline | tr ' ' '\n' | awk -F= '/bootdev/ { print \$2 }')
|
||||
# dev=\$(ip -o link | awk -F: "/\$mac/ { print \\\$2 }" | sed 's/ *//g')
|
||||
|
||||
# cat >> /etc/network/interfaces << eof
|
||||
# auto \$dev
|
||||
# iface \$dev inet6 auto
|
||||
# eof
|
||||
|
||||
# ifup "\${dev}"
|
||||
|
||||
# exit 0
|
||||
# EOF
|
||||
|
||||
# chmod a+rx "${chroot_dir}/etc/rc.local"
|
||||
|
||||
# ensure there is /init in the initramfs -> otherwise there is a kernel panic
|
||||
# reason: initramfs is designed to be PRE regular os, so /init usually hands over to /sbin/init
|
||||
# in our case, they are just the same
|
||||
ln -fs /sbin/init ${chroot_dir}/init
|
||||
|
||||
# Finally building the initramfs
|
||||
( cd ${chroot_dir} ; find . | cpio -H newc -o | gzip -9 > ${initramfs} )
|
||||
|
||||
# Fix paranoid permissions
|
||||
chmod a+rx ${abs_outdir}
|
||||
chmod a+r ${kernel} ${initramfs}
|
||||
|
||||
|
||||
exit 0
|
|
@ -1,9 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# reverting for a running system that still needs access to old style
|
||||
# rules
|
||||
|
||||
update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
|
||||
update-alternatives --set arptables /usr/sbin/arptables-legacy
|
||||
update-alternatives --set ebtables /usr/sbin/ebtables-legacy
|
|
@ -1,25 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# Nico Schottelius, 2020-01-07
|
||||
# Detect the DNS64 prefix
|
||||
# Based on https://tools.ietf.org/html/draft-ietf-behave-nat64-discovery-heuristic-05
|
||||
#
|
||||
# How it works:
|
||||
# - ipv4only.arpa only has A records.
|
||||
# - a DNS64 server will add AAAA records
|
||||
# - we take this response (if any) and derive the IPv6 prefix from it
|
||||
#
|
||||
|
||||
import dns.resolver
|
||||
import ipaddress
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
dns64_prefix = None
|
||||
answers = dns.resolver.query('ipv4only.arpa', 'AAAA')
|
||||
|
||||
for rdata in answers:
|
||||
address = str(rdata)
|
||||
network = ipaddress.IPv6Network("{}/96".format(address),
|
||||
strict=False)
|
||||
# print("{}: {}".format(rdata, network))
|
||||
print("{}".format(network))
|
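As a usage note: on a resolver that provides DNS64 with the well-known prefix from RFC 6052, this removed helper prints the derived /96 network, for example (the script file name here is a placeholder):

    $ python3 dns64-prefix-detect.py
    64:ff9b::/96
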
devuan-netboot.sh: new file, 8 lines
@ -0,0 +1,8 @@
#!/bin/sh

date=$(date +%F)
suite=ascii

dir=${suite}-${date}

debootstrap ${suite}

@ -1,18 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2024-08-14
|
||||
#
|
||||
# Retrieve an IPv6 interface address and generate a generic
|
||||
# DNS zone file
|
||||
|
||||
if [ $# -ne 3 ]; then
|
||||
echo "$0 address interface hostname"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
address=$1; shift
|
||||
interface=$1; shift
|
||||
hostname=$1; shift
|
||||
|
||||
interface_addr=$(ssh "root@${address}" "ip -o a sh dev ${interface}" | awk '/inet6/ { print $4 }' | grep -v ^fe80| sed 's,/.*,,')
|
||||
|
||||
echo "${hostname} AAAA ${interface_addr}"
|
|
@ -1,28 +0,0 @@
|
|||
import json
|
||||
import pprint
|
||||
#import etcd3
|
||||
|
||||
with open("nico-vm-one.json", "r") as fd:
|
||||
vmcontent = fd.read()
|
||||
|
||||
#vm = json.loads(vmcontent.decode('utf-8'))
|
||||
vm = json.loads(vmcontent)
|
||||
pprint.pprint(vm['TEMPLATE']['DISK'])
|
||||
|
||||
# storing info
|
||||
|
||||
for_etcd={}
|
||||
for_etcd['data_version'] = "1"
|
||||
for_etcd['vm_id'] = vm['ID']
|
||||
for_etcd['owner'] = vm['UNAME']
|
||||
|
||||
for_etcd['disks'] = []
|
||||
for disk in vm['TEMPLATE']['DISK']:
|
||||
disk_etcd = {}
|
||||
disk_etcd['image_name'] = disk['IMAGE']
|
||||
disk_etcd['image_id'] = disk['IMAGE_ID']
|
||||
disk_etcd['datastore_name'] = disk['DATASTORE']
|
||||
disk_etcd['datastore_id'] = disk['DATASTORE_ID']
|
||||
for_etcd['disks'].append(disk_etcd)
|
||||
|
||||
pprint.pprint(for_etcd)
|
|
@ -9,7 +9,7 @@
|
|||
# definitely opinionated.
|
||||
|
||||
# Depends on the following packages (as of Fedora 31):
|
||||
# qemu-img util-linux coreutils dnf curl e2fsprogs
|
||||
# qemu-img util-linux coreutils dnf curl
|
||||
|
||||
# Run locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
@ -18,14 +18,14 @@ set -e
|
|||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=40
|
||||
RELEASE=31
|
||||
ARCH=x86_64
|
||||
IMAGE_PATH=fedora-$RELEASE-$(date +%F).img.qcow2
|
||||
IMAGE_PATH=fedora-$RELEASE-$(date --iso-8601).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd0
|
||||
NBD_DEVICE=/dev/nbd1
|
||||
|
||||
# TODO: find the package definition and built ourself, publish in some RPM repository.
|
||||
ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/one-apps/releases/download/v6.8.1/one-context-6.8.1-1.el9.noarch.rpm"
|
||||
ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context-5.10.0-1.el8.noarch.rpm"
|
||||
ONE_CONTEXT_RPM_PATH=/root/one-context.rpm
|
||||
|
||||
cleanup() {
|
||||
|
@ -68,7 +68,7 @@ trap cleanup EXIT
|
|||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
1M,100M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
|
@ -83,6 +83,8 @@ mount "${NBD_DEVICE}p2" /mnt
|
|||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
# XXX: dnf has a lot a weird (libX11?) dependencies, use microdnf instead?
|
||||
|
||||
dnf -y \
|
||||
--releasever=$RELEASE \
|
||||
--installroot=/mnt \
|
||||
|
@ -90,7 +92,7 @@ dnf -y \
|
|||
--enablerepo=fedora \
|
||||
--enablerepo=updates install \
|
||||
--setopt=install_weak_deps=False \
|
||||
basesystem systemd systemd-udev passwd dnf fedora-release glibc-langpack-en
|
||||
basesystem systemd systemd-udev passwd dnf fedora-release
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
|
@ -100,51 +102,32 @@ mount --bind /run /mnt/run
|
|||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/one-apps for details.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
# Note: as of writing, one-context does not support NetworkManager or
|
||||
# systemd-networkd.
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
# Required to resolve package mirror in chroot.
|
||||
# TODO: use non-$BIGCORP DNS service.
|
||||
echo 'nameserver 1.1.1.1' >> /mnt/etc/resolv.conf
|
||||
|
||||
EOF
|
||||
|
||||
# Install and enable NetworkManager.
|
||||
run_root dnf -y install NetworkManager
|
||||
run_root systemctl enable NetworkManager
|
||||
# See https://github.com/OpenNebula/addon-context-linux/issues/121 for details.
|
||||
# network-scripts.x86_64 : Legacy scripts for manipulating of network devices
|
||||
run_root dnf -y install network-scripts
|
||||
|
||||
# Install (magic?) one-context RPM and hope things works as expected.
|
||||
curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH"
|
||||
run_root dnf -y install "$ONE_CONTEXT_RPM_PATH"
|
||||
run_root rm "$ONE_CONTEXT_RPM_PATH"
|
||||
|
||||
# Install resize2fs, which is required to resize the root file-system.
|
||||
run_root dnf -y install e2fsprogs
|
||||
|
||||
# Initialize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
run_root systemctl enable systemd-networkd.service
|
||||
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
run_root systemctl enable systemd-timesyncd.service
|
||||
|
||||
# Install haveged due to lack of entropy in ONE environment.
|
||||
run_root dnf -y install haveged
|
||||
run_root systemctl enable haveged.service
|
||||
|
||||
# Install kernel and bootloader.
|
||||
# Note: linux-firmware is not required in our environment and takes almost 200M
|
||||
# uncompressed but is a direct dependency of kernel-core...
|
||||
run_root dnf -y install kernel grub2
|
||||
|
||||
# Add support for virtio block devices at boot time.
|
||||
cat > /mnt/etc/dracut.conf.d/virtio-blk.conf <<EOF
|
||||
add_drivers="virtio-blk"
|
||||
EOF
|
||||
kernel_version=$(ls /mnt/boot | grep "vmlinuz.*.$ARCH" | cut -d- -f2-)
|
||||
run_root dracut --force --kver $kernel_version
|
||||
|
||||
# Configure grub2.
|
||||
echo "GRUB_DISABLE_OS_PROBER=true" >> /mnt/etc/default/grub
|
||||
run_root grub2-install --target=i386-pc "${NBD_DEVICE}"
|
||||
run_root grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
|
||||
|
@ -160,15 +143,5 @@ UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
|||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
echo "fedora" > /mnt/etc/hostname
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
# Note: build logs could be removed as well.
|
||||
run_root dnf clean all
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,16 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import random
|
||||
import ipaddress
|
||||
import datetime
|
||||
|
||||
net = ipaddress.IPv6Network("2a0a:e5c0:11:2::/64")
|
||||
offset = random.randint(0, 2**64)
|
||||
coupon = net[offset]
|
||||
|
||||
today = datetime.datetime.now()
|
||||
today_in_2y = today + datetime.timedelta(days=365*2)
|
||||
|
||||
print(f"Coupon ID : {coupon}")
|
||||
print(f"Today : {today}")
|
||||
print(f"Today in 2 years: {today_in_2y}")
|
|
@ -1,56 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2021-07-09
|
||||
# Objective: install any Linux automatically to a disk
|
||||
# Made by ungleich
|
||||
# Made for bare metal
|
||||
# Requirements:
|
||||
# The OS image needs to be in tar format and needs to contain grub
|
||||
|
||||
set -e
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "$0 os-image.tar [usb|disk] [target-usb-disk]"
|
||||
echo "os-image.tar contains the OS"
|
||||
echo "usb mode: create a bootable usb stick including this script to auto install to disk"
|
||||
echo "disk mode: actually install os-image.tar to the first disk"
|
||||
echo ""
|
||||
echo "In usb mode, specify the usb disk to install to"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
IMAGE=$1; shift
|
||||
mode=$1; shift
|
||||
|
||||
# isohybrid
|
||||
|
||||
setup_usb() {
|
||||
rootfs_tmpdir=$(mktemp -d)
|
||||
|
||||
sudo sfdisk "$DISK" <<EOF
|
||||
label: dos
|
||||
,,L
|
||||
EOF
|
||||
|
||||
sudo mkfs.ext4 -F ${DISK}1
|
||||
sudo mount ${DISK}1 $rootfs_tmpdir
|
||||
sudo tar xf $IMAGE -C $rootfs_tmpdir
|
||||
|
||||
for dir in dev proc sys; do
|
||||
sudo mount --bind /${dir} ${rootfs_tmpdir}/${dir}
|
||||
done
|
||||
|
||||
...
|
||||
|
||||
}
|
||||
|
||||
setup_disk()
|
||||
{
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
case "$mode" in
|
||||
usb)
|
||||
if [ $# -ne 1 ]; then echo "Specify usb disk to install to"; exit 1; fi
|
||||
DISK=$1; shift
|
|
@ -1,10 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2023-06-09
|
||||
# Enter the bird router pod (runs birdc by default)
|
||||
|
||||
if [ -z "$@" ]; then
|
||||
set -- -c bird -- birdc
|
||||
fi
|
||||
set -x
|
||||
|
||||
kubectl exec -ti $(kubectl get pods -l app.kubernetes.io/component=bird -o jsonpath='{.items[*].metadata.name}') "$@"
|
|
@ -1,21 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius
|
||||
# 2021-07-25
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "$0 cdist-workdir list-of-ipv6-addresses-of-vms"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
workdir=$1; shift
|
||||
|
||||
names=""
|
||||
|
||||
for vm in "$@"; do
|
||||
# get name and remove trailing dot
|
||||
name=$(dig +short -x $vm | sed 's/\.$//')
|
||||
names="$names $name"
|
||||
done
|
||||
|
||||
cd "${workdir}"
|
||||
cdist config -vv -j6 -p30 ${names}
|
|
@ -1,7 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Connect to the pod running ceph-tools
|
||||
|
||||
POD=$(kubectl -n rook-ceph get pods \
|
||||
-l app=rook-ceph-tools --output=jsonpath={.items..metadata.name})
|
||||
|
||||
kubectl -n rook-ceph exec -ti $POD -- bash
|
|
@ -1,64 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# Connect to the router pod
|
||||
# On Alpine: nb3:~# apk add py3-kubernetes
|
||||
|
||||
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
from kubernetes import client, config
|
||||
|
||||
# ~/k8s contains the config files
|
||||
K8SCONFIGDIR=os.path.join(os.environ['HOME'], "k8s")
|
||||
|
||||
routermap = {
|
||||
"p5-r1": "server137",
|
||||
"p5-r2": "server138",
|
||||
"p6-r1": "server139",
|
||||
"p6-r2": "server140",
|
||||
"p10-r1": "server122",
|
||||
"p10-r2": "server123",
|
||||
"p15-r1": "server120",
|
||||
"p15-r2": "server121",
|
||||
}
|
||||
|
||||
|
||||
if not len(sys.argv) == 2:
|
||||
print(f"{sys.argv[0]} <pX-r1|r2>")
|
||||
sys.exit(1)
|
||||
|
||||
router=sys.argv[1]
|
||||
|
||||
|
||||
if not router in routermap:
|
||||
print(f"Router {router} not known")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
k8sconfig = os.path.join(K8SCONFIGDIR, f"{routermap[router]}.conf")
|
||||
|
||||
print(f"Using KUBECONFIG={k8sconfig} for accessing {router} ...")
|
||||
|
||||
if not os.path.exists(k8sconfig):
|
||||
print(f"You need to have {k8sconfig} for accessing {router}")
|
||||
sys.exit(1)
|
||||
|
||||
config.load_kube_config(config_file=k8sconfig)
|
||||
v1 = client.CoreV1Api()
|
||||
|
||||
pods = v1.list_pod_for_all_namespaces(watch=False,
|
||||
label_selector="app.kubernetes.io/component=bird")
|
||||
|
||||
num_pods = len(pods.items)
|
||||
print("Number of pods: " + str(num_pods))
|
||||
if not num_pods == 1:
|
||||
print(f"There should be exactly 1 matching pod - there are {num_pods} pods")
|
||||
sys.exit(1)
|
||||
|
||||
pod=pods.items[0].metadata.name
|
||||
print(f"Pod: {pod}")
|
||||
|
||||
os.environ["KUBECONFIG"] = k8sconfig
|
||||
|
||||
cmd = f"kubectl exec -ti {pod} -c bird -- sh"
|
||||
p = subprocess.run(cmd, shell=True)
|
|
@ -1,8 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Execute commands on all hosts of the currently selected kubernetes cluster
|
||||
# Do export KUBECONFIG=~/your-admin.conf before using this script
|
||||
# Can be used to pass into cdist
|
||||
|
||||
domain=$1; shift
|
||||
|
||||
echo $(kubectl get node -o name | sed -e 's,node/,,' -e "s,\$,.$domain,")
|
|
@ -1,13 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Execute commands on all hosts of the currently selected kubernetes cluster
|
||||
# Do export KUBECONFIG=~/your-admin.conf before using this script
|
||||
|
||||
#set -x
|
||||
|
||||
domain=$1; shift
|
||||
|
||||
tmp=$(mktemp)
|
||||
kubectl get node -o name | sed -e 's,node/,,' -e "s,\$,.$domain,"> "$tmp"
|
||||
#cat "$tmp"
|
||||
pssh -h "$tmp" -l root -i "$@"
|
||||
rm -f "$tmp"
|
|
@ -1,10 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2023-06-09
|
||||
# Enter the ceph toolbox
|
||||
|
||||
if [ -z "$@" ]; then
|
||||
# set $1 to bash
|
||||
set -- bash
|
||||
fi
|
||||
|
||||
kubectl exec -n rook-ceph -ti $(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[*].metadata.name}') -- "$@"
|
|
@ -1,31 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# List mail addresses found under base DN $1 (defaults to dc=ungleich,dc=ch)
|
||||
|
||||
set -e
|
||||
|
||||
# Hardcoded parameters.
|
||||
LDAP_SERVER="ldaps://ldap1.ungleich.ch"
|
||||
LDAP_BIND_DN="cn=manager,dc=ungleich,dc=ch"
|
||||
|
||||
if [ "$1" != "" ]; then
|
||||
LDAP_SEARCH_BASE="$1"
|
||||
else
|
||||
LDAP_SEARCH_BASE="dc=ungleich,dc=ch"
|
||||
fi
|
||||
|
||||
# Read secrets from environment.
|
||||
if [ "$LDAP_BIND_PASSWD" = "" ]; then
|
||||
echo "You have to define LDAP_BIND_PASSWD before launching this script." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract mail addresses from LDAP directory.
|
||||
ldap_search_result="$(
|
||||
ldapsearch -x -H "$LDAP_SERVER" \
|
||||
-D "$LDAP_BIND_DN" \
|
||||
-w "$LDAP_BIND_PASSWD" \
|
||||
-b "$LDAP_SEARCH_BASE" mail
|
||||
)"
|
||||
|
||||
echo "$ldap_search_result" | grep 'mail:' | cut -d ' ' -f 2 -
|
|
@ -1,2 +0,0 @@
|
|||
This directory contains old scripts that are not used anymore but might still
|
||||
be useful.
|
|
@ -1,243 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Copyright 2020 -- Evilham <contact@evilham.com>
|
||||
# This is BSD licensed as it's based on BSD-licensed code
|
||||
#
|
||||
# We could have used e.g. something like:
|
||||
# - https://git.sr.ht/~sircmpwn/builds.sr.ht/tree/master/images/freebsd/genimg
|
||||
#
|
||||
# But we actually do want to compile the kernel, so that the IPv6-only images
|
||||
# are different and don't support INET.
|
||||
|
||||
# Explode if something goes wrong
|
||||
set -e
|
||||
|
||||
# What are we building?
|
||||
# These are the only configuration options.
|
||||
# They default to current environment.
|
||||
# RELEASE: should be 'CURRENT' for current or 'X.Y' Defaults to 'CURRENT'.
|
||||
# ARCH: probably amd64 for DCL
|
||||
# VMFORMATS: defaults to qcow2, can also be raw. See man mkimg.
|
||||
# OPENNEBULA_CONTEXT_VERSION: For DCL's OpenNebula that'd be 5.10.0 (default)
|
||||
# OPENNEBULA_CONTEXT_REVISION: Defaults to 1.
|
||||
RELEASE=${RELEASE:-CURRENT}
|
||||
if [ "${RELEASE}" == "CURRENT" ]; then
|
||||
SRCBRANCH="master"
|
||||
else
|
||||
SRCBRANCH="releng/${RELEASE}"
|
||||
fi
|
||||
ARCH=${ARCH:-amd64}
|
||||
VMFORMATS=${VMFORMATS:-qcow2}
|
||||
OPENNEBULA_CONTEXT_VERSION=${OPENNEBULA_CONTEXT_VERSION:-5.10.0}
|
||||
OPENNEBULA_CONTEXT_REVISION=${OPENNEBULA_CONTEXT_REVISION:-1}
|
||||
|
||||
# Didn't see a need to make these configurable.
|
||||
CHROOTDIR="/scratch"
|
||||
SRCDIR="${CHROOTDIR}/usr/src"
|
||||
OUR_DIR="$(realpath $(dirname "${0}"))"
|
||||
OUR_SRCCONF="${SRCDIR}/release/src.conf"
|
||||
OUR_RELEASE_CONF="${SRCDIR}/release/release.conf"
|
||||
# Shorthand for the package file name.
|
||||
OPENNEBULA_CONTEXT="one-context-${OPENNEBULA_CONTEXT_VERSION}_${OPENNEBULA_CONTEXT_REVISION}.txz"
|
||||
|
||||
setup_sources() {
|
||||
# Let's use git, we might need to install it
|
||||
if ! which git 2>&1 > /dev/null; then
|
||||
pkg install -y git
|
||||
fi
|
||||
|
||||
if [ ! -d "$(dirname ${SRCDIR})" ]; then
|
||||
mkdir -p "$(dirname ${SRCDIR})"
|
||||
fi
|
||||
|
||||
# Checkout needed branch
|
||||
if [ ! -d "${SRCDIR}" ]; then
|
||||
git clone "https://github.com/freebsd/freebsd" \
|
||||
--branch "${SRCBRANCH}" "${SRCDIR}"
|
||||
else
|
||||
GIT_CMD="git -C ${SRCDIR}"
|
||||
${GIT_CMD} clean -df
|
||||
${GIT_CMD} reset --hard
|
||||
${GIT_CMD} fetch
|
||||
${GIT_CMD} checkout "${SRCBRANCH}"
|
||||
${GIT_CMD} pull
|
||||
fi
|
||||
|
||||
# Add settings for IPv6-only kernel
|
||||
cat > "${SRCDIR}/sys/${ARCH}/conf/GENERIC-IPV6ONLY" << EOF
|
||||
include GENERIC
|
||||
ident GENERIC-IPV6ONLY
|
||||
makeoptions MKMODULESENV+="WITHOUT_INET_SUPPORT="
|
||||
nooptions INET
|
||||
nodevice gre
|
||||
EOF
|
||||
# Fix vmimage.subr to install custom package and fix other things
|
||||
cat >> "${SRCDIR}/release/tools/vmimage.subr" << EOF
|
||||
vm_extra_install_ports() {
|
||||
# Make sure we install the OpenNebula context package
|
||||
cp "/${OPENNEBULA_CONTEXT}" "\${DESTDIR}/tmp/${OPENNEBULA_CONTEXT}"
|
||||
chroot \${DESTDIR} \${EMULATOR} env ASSUME_ALWAYS_YES=yes \\
|
||||
/usr/sbin/pkg add '/tmp/${OPENNEBULA_CONTEXT}'
|
||||
|
||||
# Now make sure the system has better defaults
|
||||
cat >> "\${DESTDIR}/etc/rc.conf" << eof
|
||||
# Update to latest patch on first boot
|
||||
firstboot_freebsd_update_enable="YES"
|
||||
# Enable OpenNebula's service.
|
||||
one_context_enable="YES"
|
||||
# Enable SSH for customers
|
||||
sshd_enable="YES"
|
||||
# Clear tmp on boot
|
||||
clear_tmp_enable="YES"
|
||||
# Disable sendmail by default
|
||||
sendmail_enable="NONE"
|
||||
# Disable crash dumps
|
||||
dumpdev="NO"
|
||||
eof
|
||||
# Enable root access with SSH key.
|
||||
# It is user's responsibility to further secure their system.
|
||||
sed -i '' -E \
|
||||
's/(^#[ ]*|^)PermitRootLogin .*/PermitRootLogin without-password/' \
|
||||
"\${DESTDIR}/etc/ssh/sshd_config"
|
||||
}
|
||||
EOF
|
||||
# Skip building iso images
|
||||
rm "${SRCDIR}/release/${ARCH}/mkisoimages.sh"
|
||||
# This is a hack to not build the memstick
|
||||
cat > "${SRCDIR}/release/${ARCH}/make-memstick.sh" <<EOF
|
||||
# Create an empty file, else checksums fail
|
||||
touch "\${2}" || true
|
||||
EOF
|
||||
}
|
||||
|
||||
setup_our_env() {
|
||||
# Required by META_MODE to build faster next time
|
||||
# This saves a lot of time when e.g. compiling GENERIC and GENERIC-IPV6ONLY
|
||||
if ! kldstat | grep -q filemon; then
|
||||
kldload filemon
|
||||
fi
|
||||
}
|
||||
|
||||
gen_releaseconf() {
|
||||
cat << EOF
|
||||
#!/bin/sh
|
||||
#
|
||||
# Based off FreeBSD's release/release.conf.sample
|
||||
#
|
||||
|
||||
# This redefines the prototype defined in release.sh.
|
||||
# At this stage, the build chroot exists.
|
||||
buildenv_setup() {
|
||||
# Ensure META_MODE is on
|
||||
echo "WITH_META_MODE=yes" > \${CHROOTDIR}/etc/src-env.conf
|
||||
}
|
||||
|
||||
## Set the directory within which the release will be built.
|
||||
CHROOTDIR="${CHROOTDIR}"
|
||||
|
||||
## Set to override the default target architecture and kernel
|
||||
TARGET="${ARCH}"
|
||||
TARGET_ARCH="${ARCH}"
|
||||
KERNEL="${KERNEL_CONFIG}"
|
||||
|
||||
## Set to specify a custom make.conf and/or src.conf
|
||||
SRC_CONF="${OUR_SRCCONF}"
|
||||
|
||||
# Since these are VMs, users should add other components if they want to.
|
||||
NODOC=YES
|
||||
NOPORTS=YES
|
||||
NOSRC=YES
|
||||
|
||||
# We manage sources manually
|
||||
SRC_UPDATE_SKIP=YES
|
||||
|
||||
## Set to pass additional flags to make(1) for the build chroot setup, such
|
||||
## as TARGET/TARGET_ARCH.
|
||||
# This was necessary for "cross-compiling"
|
||||
CHROOT_MAKEENV="MK_LLVM_TARGET_X86=yes"
|
||||
|
||||
WITH_VMIMAGES=YES
|
||||
|
||||
# VM image size, see man 1 truncate
|
||||
VMSIZE="10G"
|
||||
|
||||
# List of disk image formats, see man mkimg.
|
||||
VMFORMATS="${VMFORMATS}"
|
||||
|
||||
# These variables have to be exported because they are needed in subprocesses.
|
||||
export NOSWAP=YES
|
||||
# Custom ports
|
||||
# - firstboot-freebsd-update helps us not have to create an image for each
|
||||
# patch level. We still will have to do it for each minor version update.
|
||||
# - bash is apparently needed for one-context
|
||||
export VM_EXTRA_PACKAGES="firstboot-freebsd-update bash"
|
||||
EOF
|
||||
}
|
||||
|
||||
_do_run_release() {
|
||||
. "${SRCDIR}/release/release.sh"
|
||||
}
|
||||
run_release() {
|
||||
_do_run_release -c "${OUR_RELEASE_CONF}"
|
||||
}
|
||||
|
||||
|
||||
build_image() {
|
||||
# Generate configuration
|
||||
echo "${2}" > "${OUR_SRCCONF}"
|
||||
KERNEL_CONFIG="${1}"
|
||||
gen_releaseconf > "${OUR_RELEASE_CONF}"
|
||||
# Be paranoid about files and stuff
|
||||
sync
|
||||
# Continue with the release script
|
||||
run_release
|
||||
# Be paranoid about files and stuff
|
||||
sync
|
||||
|
||||
mv "${CHROOTDIR}/R/vmimages" "${OUR_DIR}/FreeBSD-${RELEASE}-${1}"
|
||||
|
||||
# Be paranoid about files and stuff
|
||||
sync
|
||||
}
|
||||
|
||||
our_main() {
|
||||
case "$1" in
|
||||
--dualstack)
|
||||
BUILD_DUALSTACK=yes
|
||||
;;
|
||||
--ipv6only)
|
||||
BUILD_IPV6ONLY=yes
|
||||
;;
|
||||
*)
|
||||
cat << EOF
|
||||
Run with --dualstack or --ipv6only depending on the image you want.
|
||||
EOF
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
setup_sources
|
||||
setup_our_env
|
||||
# Fetch OpenNebula's context package
|
||||
fetch "https://github.com/OpenNebula/addon-context-linux/releases/download/v${OPENNEBULA_CONTEXT_VERSION}/${OPENNEBULA_CONTEXT}" \
|
||||
-o "${CHROOTDIR}/${OPENNEBULA_CONTEXT}"
|
||||
# Do run
|
||||
if [ -n "${BUILD_DUALSTACK}" ]; then
|
||||
build_image "GENERIC"
|
||||
fi
|
||||
if [ -n "${BUILD_IPV6ONLY}" ]; then
|
||||
build_image "GENERIC-IPV6ONLY" "$(cat << EOF
|
||||
WITHOUT_INET=yes
|
||||
WITHOUT_INET_SUPPORT=yes
|
||||
EOF
|
||||
)"
|
||||
fi
|
||||
|
||||
cat << EOF
|
||||
|
||||
*************** DONE ***************
|
||||
You will find the images under "${OUR_DIR}".
|
||||
************************************
|
||||
EOF
|
||||
}
|
||||
|
||||
our_main "${@}"
@@ -1,32 +0,0 @@
#!/bin/sh
#
# Copyright 2020 -- Evilham <contact@evilham.com>
# This is BSD licensed as it's based on BSD-licensed code
#
#
# This builds all needed FreeBSD images for ungleich's Data Center Light.
# When there are new releases, they should be updated here and the script
# should be re-run.
# 11.4 is scheduled end of June 2020
# 12.2 is scheduled end of October 2020
#

SUPPORTED_RELEASES="11.3 12.1"

# This should run in a DCL VM with a reasonable number of cores (4/8 minimum),
# 4G RAM, and storage of roughly 20G + 5G * #resulting_images.
#
# This is because there is the base system, a 'pristine chroot', and during the
# build there can be 2 copies of the resulting system written to disk.
# Since there are 4 combinations of images:
# {STABLE,RELEASE} x {dualstack, IPv6ONLY}
#
# That means we'll need to assign about 40G of storage to be on the safe side.

date=$(date -I)
for release in ${SUPPORTED_RELEASES}; do
    for build in dualstack ipv6only; do
        env RELEASE=${release} sh freebsd-build-opennebula-image-generic.sh --${build} \
            | tee "freebsd-${release}-${build}-${date}.log"
    done
done
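A quick sanity check of the storage estimate in the comment above (20G base plus 5G per resulting image, four image combinations); plain arithmetic, nothing DCL-specific:

# Rough scratch-storage estimate from the comment above.
base_gb = 20
per_image_gb = 5
images = len(["11.3", "12.1"]) * len(["dualstack", "ipv6only"])  # 4 combinations
print(f"Plan for at least {base_gb + per_image_gb * images}G of scratch storage")  # -> 40G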
@@ -1,2 +0,0 @@
* * * * * root ip -o -6 addr show | grep -E -v "lo |one" | awk '{print $1" " $2": "$4}' >> /dev/tty1
@@ -1,36 +0,0 @@
#!/bin/sh

# This script finds the locally active OSDs and displays their information with the help of the MegaCLI tools.
# It assumes that it runs on a host which has at least 1 OSD matching the hdd or ssd device class in the ceph osd df tree output.
#
# An example for usage:
# for NUM in 14 2 3 4 6 ; do printf "server$NUM\n" >> /tmp/osd_infos; ssh root@server"$NUM".place6.ungleich.ch "/opt/ungleich-tools/map-osd-to-disktype" >> /tmp/osd_infos ; printf "\n \n" >> /tmp/osd_infos; done
#
# llnu's most hacky/advanced script at the time of writing (2020-05-27)

# Future functionality (arguments, and filtering):
#OSDTYPE=ssd

# Tempfile
/opt/ungleich-tools/megaclisas-status > /tmp/megaclisas-status.out

# Get the OSD numbers of a particular device class, find their mountpoints on the host, and put them into a tempfile
#for osd in $(ceph osd tree | grep $OSDTYPE | grep -v down | cut -b 1-3); do findmnt -t xfs -n -o TARGET,SOURCE | grep "ceph-$osd " | cut -c 24- >> /tmp/list_osd_mountpoint.out; done
for osd in $(ceph osd tree | grep 'ssd\|hdd' | grep -v down | cut -b 1-3); do lsblk -p -o NAME,MOUNTPOINT | grep -w "/var/lib/ceph/osd/ceph-$osd" | cut -c 3- >> /tmp/list_osd_mountpoint.out ; done

# Get the MegaCLI mappings for the mountpoints
for MOUNT in $(cat /tmp/list_osd_mountpoint.out | awk '{print $1}' | sed 's/[0-9]*//g') ; do cat /tmp/megaclisas-status.out | grep $MOUNT | awk '{print $1}' >> /tmp/megacli-mappings.out; done

# Get the hardware types for the MegaCLI mappings
for megacli_mappings in $(cat /tmp/megacli-mappings.out); do awk '/Disk info/,0' /tmp/megaclisas-status.out | grep -w "$megacli_mappings"p0 | cut -d '|' -f 2-6,8 >> /tmp/disk_types.out; done

# Formatting, to get the local $OSDTYPE OSDs
for osd_num in $(cat /tmp/list_osd_mountpoint.out | awk '{print $2}' | cut -c 24- ); do printf "%-7s%s\n" "osd-$osd_num" >> /tmp/local_osds.out; done

# Combine and display the outputs
paste /tmp/local_osds.out /tmp/disk_types.out -d '|'

# Cleanup *.out files in the temp dir
rm /tmp/*.out
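The first step above (mapping mounted OSDs to their block devices) can also be done without temp files; a minimal sketch of just that step, assuming it runs on a host with mounted OSDs. Joining against the megaclisas-status output would follow as in the shell pipeline:

# Sketch: map locally mounted Ceph OSD ids to block devices via lsblk output.
import re
import subprocess

def local_osd_devices():
    out = subprocess.run(["lsblk", "-p", "-o", "NAME,MOUNTPOINT"],
                         capture_output=True, text=True, check=True).stdout
    mapping = {}
    for line in out.splitlines():
        m = re.search(r"(/dev/\S+)\s+/var/lib/ceph/osd/ceph-(\d+)", line)
        if m:
            mapping[f"osd-{m.group(2)}"] = m.group(1)
    return mapping

print(local_osd_devices())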
@@ -1,11 +0,0 @@
while read A B C D E
do
    pw=$E
    name=$A" "$B
    ad=$C
    id="@"$D
    data='{"password":"'${pw}'", "displayname": "'${name}'", "threepids": [ { "medium": "email", "address": "'${ad}'" }], "admin": false, "deactivated": false, "avatar_url": null }'
    h='Authorization: Bearer <AccessToken>'
    curl -v -X PUT -H "$h" -d "$data" http://localhost:8008/_synapse/admin/v2/users/$id:<matrix domain>
    sleep 2
done < info.txt
@@ -1 +0,0 @@
FirstName LastName Email UserID PW
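The same provisioning loop can be expressed in Python; a minimal sketch assuming the requests package, with <matrix domain> and <AccessToken> kept as placeholders and info.txt in the column format documented above:

# Sketch of the same Synapse admin v2 user upsert, assuming the requests package.
import requests

matrix_domain = "<matrix domain>"
access_token = "<AccessToken>"

with open("info.txt") as fh:
    for line in fh:
        first, last, email, user, password = line.split()
        if first == "FirstName":   # skip the column header documented above, if present
            continue
        body = {
            "password": password,
            "displayname": f"{first} {last}",
            "threepids": [{"medium": "email", "address": email}],
            "admin": False,
            "deactivated": False,
            "avatar_url": None,
        }
        r = requests.put(
            f"http://localhost:8008/_synapse/admin/v2/users/@{user}:{matrix_domain}",
            headers={"Authorization": f"Bearer {access_token}"},
            json=body,
        )
        r.raise_for_status()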
@@ -1,35 +0,0 @@
matrixserver="<matrix domain>"

generate_post_data()
{
cat <<EOF
{
"user_id":"@$D:$matrixserver"
}
EOF
}

roomID=( 'room_ID1' 'zroom_ID2' 'room_ID3' )
errcode="M_LIMIT_EXCEEDED"
for rm_id in ${roomID[*]}
do
    while read A B C D E
    do
        echo $rm_id
        res2=""
        res=$(curl -XPOST -d "$(generate_post_data)" "http://localhost:8008/_matrix/client/r0/rooms/%21$rm_id%3A<matrix domain>/invite?access_token=<ACCESS_TOKEN>")
        echo $res
        #avoid error { "errcode": "M_LIMIT_EXCEEDED", "error": "Too Many Requests", "retry_after_ms": 2895 }
        if [[ $res =~ $errcode ]];then
            sleep 5
            res2=$(curl -XPOST -d "$(generate_post_data)" "http://localhost:8008/_matrix/client/r0/rooms/%21$rm_id%3A<matrix domain>/invite?access_token=<ACCESS_TOKEN>")
        fi

        if [[ $res2 =~ $errcode ]];then
            echo "===error==="
            echo $res2
            exit 1
        fi
        sleep 1
    done < info.txt
done
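The loop above handles M_LIMIT_EXCEEDED with a fixed 5 second sleep and a single retry; a minimal sketch of the same invite call that instead honours the retry_after_ms field quoted in the comment, assuming the requests package and the same placeholder access token and domain:

# Sketch: invite with rate-limit handling; the 429 body carries "errcode" and
# "retry_after_ms" as shown in the comment of the shell script above.
import time
import requests

def invite(room_id, user_id, access_token, matrix_domain, attempts=5):
    url = (f"http://localhost:8008/_matrix/client/r0/rooms/"
           f"!{room_id}:{matrix_domain}/invite")
    for _ in range(attempts):
        r = requests.post(url, params={"access_token": access_token},
                          json={"user_id": user_id})
        body = r.json()
        if body.get("errcode") != "M_LIMIT_EXCEEDED":
            return body
        # back off for as long as the server asks, then retry
        time.sleep(body.get("retry_after_ms", 5000) / 1000)
    raise RuntimeError(f"still rate limited after {attempts} attempts")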
|
|
@ -1,280 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# ungleich glarus ag, 2025-01-23
|
||||
|
||||
import base58
|
||||
import base64
|
||||
import json
|
||||
import argparse
|
||||
import requests
|
||||
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives import hashes, padding
|
||||
from cryptography.hazmat.primitives.asymmetric import ec
|
||||
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
|
||||
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
|
||||
from cryptography.hazmat.primitives.ciphers.algorithms import AES
|
||||
from secrets import token_bytes
|
||||
from cryptography.hazmat.primitives.hmac import HMAC
|
||||
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey,X25519PublicKey
|
||||
|
||||
from cryptography.hazmat.primitives.serialization import load_pem_private_key
|
||||
|
||||
from olm import Account,InboundGroupSession
|
||||
|
||||
|
||||
class UngleichMatrixClient:
|
||||
def __init__(self, args):
|
||||
self.server = args.server_url
|
||||
self.room_id = args.room_id
|
||||
self.username = args.login_username
|
||||
self.password = args.login_password
|
||||
self.security_key_unparsed = args.security_key
|
||||
|
||||
self.access_token = False
|
||||
self.room_keys = False
|
||||
self.room_messages = []
|
||||
|
||||
self.matrix_url = {}
|
||||
self.matrix_url['login'] = f"{args.server_url}/_matrix/client/v3/login"
|
||||
self.matrix_url['room_keys'] = f"{args.server_url}/_matrix/client/v3/room_keys/keys?version=1"
|
||||
self.matrix_url['room_messages'] = f"{args.server_url}/_matrix/client/v3/rooms/{self.room_id}/messages"
|
||||
|
||||
|
||||
def login_to_server(self):
|
||||
login_data = {
|
||||
'identifier': {
|
||||
"type": "m.id.user",
|
||||
"user": f"{self.username}"
|
||||
},
|
||||
'type': "m.login.password",
|
||||
'device_id': "ungleich-matrix-client",
|
||||
'initial_device_display_name' : "ungleich-matrix-client",
|
||||
'password': f"{self.password}"
|
||||
}
|
||||
r = requests.post(self.matrix_url['login'], json=login_data)
|
||||
|
||||
if not r.status_code == 200:
|
||||
raise Exception("Login Failed")
|
||||
return r
|
||||
|
||||
def _ensure_logged_in(self):
|
||||
if not self.access_token:
|
||||
self.login_response = self.login_to_server()
|
||||
self.access_token = self.login_response.json()['access_token']
|
||||
|
||||
def get_room_keys(self):
|
||||
"""
|
||||
We assume version == 1 is correct because that's what's seen in reality
|
||||
In theory we need to query the current version on the server first.
|
||||
"""
|
||||
self._ensure_logged_in()
|
||||
|
||||
params = {
|
||||
'version': "1",
|
||||
'access_token': self.access_token
|
||||
}
|
||||
|
||||
if not self.room_keys:
|
||||
print("Getting room keys ... this can take a while ...")
|
||||
r = requests.get(self.matrix_url['room_keys'],
|
||||
params=params)
|
||||
self.room_keys = r.json()
|
||||
|
||||
def get_room_messages(self):
|
||||
"""
|
||||
Get messages from a room; this requires pagination.
|
||||
Continue until there is no 'end' property in the reply anymore.
|
||||
|
||||
"""
|
||||
self._ensure_logged_in()
|
||||
|
||||
params = {
|
||||
'access_token': self.access_token
|
||||
}
|
||||
|
||||
more_messages = True
|
||||
next_batch = ""
|
||||
while more_messages:
|
||||
if next_batch:
|
||||
params['from'] = next_batch
|
||||
|
||||
r = requests.get(self.matrix_url['room_messages'],
|
||||
params=params)
|
||||
|
||||
for message in r.json()['chunk']:
|
||||
self.room_messages.append(message)
|
||||
|
||||
if 'end' in r.json():
|
||||
next_batch = r.json()['end']
|
||||
else:
|
||||
more_messages = False
|
||||
|
||||
def parse_security_key(self):
|
||||
security_key = self.security_key_unparsed.replace(" ", "")
|
||||
security_key_binary = base58.b58decode(security_key)
|
||||
|
||||
self.security_key = security_key_binary
|
||||
|
||||
# without useless bytes and without parity
|
||||
self.real_security_key = security_key_binary[2:-1]
|
||||
|
||||
|
||||
def check_security_key_parity(self):
|
||||
parity_byte = self.security_key[-1]
|
||||
calculated_parity=0
|
||||
for key_byte in self.security_key[:-1]:
|
||||
calculated_parity ^= key_byte
|
||||
|
||||
print(f"Parity byte = {parity_byte} calculated parity = {calculated_parity}")
|
||||
|
||||
if parity_byte != calculated_parity:
|
||||
raise Exception("Security key is broken")
|
||||
|
||||
def setup_security_key_pair(self):
|
||||
self.security_private_key = X25519PrivateKey.from_private_bytes(self.real_security_key)
|
||||
print(f"Private key = {self.security_private_key}")
|
||||
self.security_public_key = self.security_private_key.public_key()
|
||||
print(f"Public key = {self.security_public_key}")
|
||||
|
||||
|
||||
def decrypt_session_key(self, encrypted_session_key, ephemeral_key, session_mac):
|
||||
|
||||
# Construct the public ephemeral key
|
||||
# append '==' to restore base64 padding, see https://stackoverflow.com/questions/2941995/python-ignore-incorrect-padding-error-when-base64-decoding
|
||||
ephemeral_key_bytes = base64.b64decode(ephemeral_key + '==')
|
||||
ephemeral_public_key = X25519PublicKey.from_public_bytes(ephemeral_key_bytes)
|
||||
|
||||
# This is effectively ECDH provided by cryptography library
|
||||
shared_key = self.security_private_key.exchange(ephemeral_public_key)
|
||||
|
||||
# when we have shared secret, use HDKF to get the AES part
|
||||
# "Using the shared secret,
|
||||
# generate 80 bytes
|
||||
# by performing an HKDF
|
||||
# using SHA-256 as the hash,
|
||||
# with a salt of 32 bytes of 0,
|
||||
# and with the empty string as the info.
|
||||
|
||||
# The first 32 bytes are used as the AES key,
|
||||
# the next 32 bytes are used as the MAC key,
|
||||
# and the last 16 bytes are used as the AES initialization vector."
|
||||
# Using a key derivation function
|
||||
derived_key = HKDF(
|
||||
algorithm=hashes.SHA256(),
|
||||
length=80,
|
||||
salt=bytes(32),
|
||||
info=b'',
|
||||
).derive(shared_key)
|
||||
|
||||
print(f"Derived key = %s, len=%s" % (derived_key, len(derived_key) ))
|
||||
|
||||
aes_key = derived_key[:32]
|
||||
mac_key = derived_key[32:64]
|
||||
aes_iv = derived_key[64:]
|
||||
|
||||
print("AES key = {0} / len = {1}".format(aes_key, len(aes_key)))
|
||||
print("Mac key = {0} / len = {1}".format(mac_key, len(mac_key)))
|
||||
print("AES IV = {0} / len = {1}".format(aes_iv, len(aes_iv)))
|
||||
|
||||
# Pass an empty string through HMAC-SHA-256 using the MAC key generated above. The first 8 bytes of the resulting MAC are base64-encoded, and become the mac property of the session_data.
|
||||
|
||||
# hashed message authentication code = HMAC
|
||||
# This basically allows us to check if we derived the correct key
|
||||
mac = HMAC(mac_key, hashes.SHA256())
|
||||
mac.update(b'')
|
||||
|
||||
# only use first 8 bytes
|
||||
signature = mac.finalize()[:8]
|
||||
print(f"Calculated signature over empty string = {signature}")
|
||||
|
||||
session_signature = base64.b64decode(session_mac + '==')
|
||||
print(f"Session signature = {session_signature}")
|
||||
|
||||
if signature == session_signature:
|
||||
print("Signature seems to be correct")
|
||||
else:
|
||||
print("Signature likely incorrect")
|
||||
raise Exception("Session key signature broken")
|
||||
|
||||
cipher = Cipher(algorithms.AES(aes_key), modes.CBC(aes_iv))
|
||||
decryptor = cipher.decryptor()
|
||||
|
||||
# append '==' to restore base64 padding, see https://stackoverflow.com/questions/2941995/python-ignore-incorrect-padding-error-when-base64-decoding
|
||||
encrypted_session_key_bytes = base64.b64decode(encrypted_session_key + '==')
|
||||
session_key_bytes = decryptor.update(encrypted_session_key_bytes) + decryptor.finalize()
|
||||
|
||||
# Remove PKCS7 padding - block size 128 was guessed / tested to be correct
|
||||
# Needs to be verified - it should in theory be 256
|
||||
unpadder = padding.PKCS7(256).unpadder()
|
||||
data = unpadder.update(session_key_bytes)
|
||||
data += unpadder.finalize()
|
||||
|
||||
session_key_json_string = data.decode("utf8")
|
||||
|
||||
print(f"Unencrypted session key JSON: {session_key_json_string}")
|
||||
session_key_json = json.loads(session_key_json_string)
|
||||
session_key_base64 = session_key_json['session_key']
|
||||
|
||||
print("session key = {session_key_base64}, {length}".format(session_key_base64=session_key_base64, length=len(session_key_base64)))
|
||||
|
||||
return session_key_base64
|
||||
|
||||
def decrypt_message(self, ciphertext, session_id):
|
||||
room_key = self.room_keys['rooms'][self.room_id]['sessions']
|
||||
print(f"Messages key data: {room_key}")
|
||||
|
||||
encrypted_session_key = room_key[session_id]['session_data']['ciphertext']
|
||||
ephemeral_key = room_key[session_id]['session_data']['ephemeral']
|
||||
session_mac = room_key[session_id]['session_data']['mac']
|
||||
|
||||
session_key_base64 = self.decrypt_session_key(encrypted_session_key,
|
||||
ephemeral_key,
|
||||
session_mac)
|
||||
|
||||
inbound_group = InboundGroupSession.import_session(session_key_base64)
|
||||
plaintext = inbound_group.decrypt(ciphertext)
|
||||
|
||||
print(f"Encrypted message {ciphertext} = {plaintext}")
|
||||
|
||||
def decrypt_room_messages(self):
|
||||
"""
|
||||
Decrypt messages that are of type 'm.room.encrypted'
|
||||
|
||||
{'type': 'm.room.encrypted', 'room_id': '!fDjvLemgiriPvvWEeG:ungleich.ch', 'sender': '@nico:ungleich.ch', 'content': {'algorithm': 'm.megolm.v1.aes-sha2', 'ciphertext': 'AwgBEqABNL8ztRQA67gXxkpbeiSp3zkJTkPXUwjQh0VnnFh6+Tff/dWjfF2rYu9q7MhG7BQgtaAoBoFNot8bPan23Y8Niip714ntI7t89F1t79TkUOcn5H0STydqGOOoZqnDf/l63ggWfD8EbudFSxoO7sJLL9iGO2+9HYWTMdTFAhcHg5c/k3aG+fQrXkbv+5afZXH3CxKnWxe4ukkoGMaDAo7jm3l2killUJ/J6NynCiJ/XinFWIdbRXSIUx3cwnFS/KWvdVmhu2iXYFtIvV65UE/JFhDjZ+rCH7lZ9DBD5jKjsVPQJqtFule0CQ', 'device_id': 'SSAUACUQKJ', 'sender_key': 'pEDLuq1RlDI2bxO6/lx9OQZt0NYma+gs6jg3QVYl4Vk', 'session_id': 'nkx3WnUpLL7hblZ9LNBkx0RPrKp3weX2o/aAgp7hx0c'}, 'origin_server_ts': 1738264304685, 'unsigned': {'membership': 'join', 'age': 126031}, 'event_id': '$k9dYdD6b5eG_AZaZtO6imeHU8HGBpiZt3dqM8C3T8-8', 'user_id': '@nico:ungleich.ch', 'age': 126031}
|
||||
"""
|
||||
|
||||
for message in self.room_messages:
|
||||
if message['type'] == 'm.room.encrypted':
|
||||
sender = message['sender']
|
||||
ciphertext = message['content']['ciphertext']
|
||||
session_id = message['content']['session_id']
|
||||
|
||||
plaintext = self.decrypt_message(ciphertext, session_id)
|
||||
|
||||
|
||||
def get_messages(self):
|
||||
self.parse_security_key()
|
||||
self.check_security_key_parity()
|
||||
self.setup_security_key_pair()
|
||||
|
||||
self.get_room_messages()
|
||||
for message in self.room_messages:
|
||||
print(message)
|
||||
self.get_room_keys()
|
||||
self.decrypt_room_messages()
|
||||
|
||||
|
||||
# Decrypt each message:
|
||||
# Retrieve the session key
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--server-url", required=True, help="Matrix Server URL, i.e. https://your-server ")
|
||||
parser.add_argument("--room-id", required=True, help="ID of the room to get messages from, i.e. !...:your-matrix-domain ")
|
||||
parser.add_argument("--login-username", required=True, help="Username for logging into the server, i.e. @you:your-matrix-domain ")
|
||||
parser.add_argument("--login-password", required=True, help="Password for logging into the server, i.e. your-very-safe-password!! ")
|
||||
parser.add_argument("--security-key", required=True, help="Your security backup key, i.e. ABCf defg aaaa - ensure to quote as one argument! ")
|
||||
|
||||
args = parser.parse_args()
|
||||
client = UngleichMatrixClient(args)
|
||||
client.get_messages()
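The recovery-key handling described in the comments above (base58 decode, XOR parity byte, HKDF-SHA256 into AES key / MAC key / IV) can be exercised on its own; a minimal sketch using the same base58 and cryptography packages as the script, with shared_key standing in for the result of the X25519 exchange:

# Standalone sketch of the key handling documented in the comments above.
import base58
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

def parse_recovery_key(recovery_key: str) -> bytes:
    raw = base58.b58decode(recovery_key.replace(" ", ""))
    parity = 0
    for byte in raw[:-1]:
        parity ^= byte
    if parity != raw[-1]:
        raise ValueError("recovery key parity check failed")
    return raw[2:-1]          # strip the 2-byte prefix and the trailing parity byte

def split_derived_key(shared_key: bytes):
    # 80 bytes via HKDF-SHA256, 32-byte zero salt, empty info, as quoted above
    derived = HKDF(algorithm=hashes.SHA256(), length=80,
                   salt=bytes(32), info=b"").derive(shared_key)
    return derived[:32], derived[32:64], derived[64:]   # aes_key, mac_key, aes_iv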
|
|
@ -1,875 +0,0 @@
|
|||
#!/usr/bin/python3
|
||||
# $Id: megaclisas-status,v 1.68 2016/10/21 14:38:56 root Exp root $
|
||||
#
|
||||
# Written by Adam Cecile <gandalf@NOSPAM.le-vert.net>
|
||||
# Modified by Vincent S. Cojot <vincent@NOSPAM.cojot.name>
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import pdb
|
||||
if sys.platform == 'win32':
|
||||
import ctypes
|
||||
|
||||
def_megaclipath = "/opt/MegaRAID/MegaCli/MegaCli64"
|
||||
|
||||
# Non-Nagios Mode defaults
|
||||
nagiosmode = False
|
||||
nagiosoutput=''
|
||||
nagiosgoodarray = 0
|
||||
nagiosbadarray = 0
|
||||
nagiosgooddisk = 0
|
||||
nagiosbaddisk = 0
|
||||
|
||||
# Sane defaults
|
||||
printarray = True
|
||||
printcontroller = True
|
||||
debugmode = False
|
||||
notempmode = False
|
||||
totaldrivenumber = 0
|
||||
|
||||
# Hardcode a max of 16 HBA and 128 LDs for now. LDTable must be initialized to accept populating list of LD's into each ctlr's list.
|
||||
MaxNumHBA = 16
|
||||
MaxNumLD = 128
|
||||
LDTable = [ [] * MaxNumHBA for i in range(MaxNumLD) ]
|
||||
NestedLDTable = [[False for i in range(MaxNumHBA)] for j in range(MaxNumLD)]
|
||||
|
||||
# Outputs is a 'dict' of all MegaCLI outputs so we can re-use them during loops..
|
||||
Outputs = {}
|
||||
|
||||
# Startup
|
||||
def print_usage():
|
||||
print('Usage: megaraid-status [--nagios|--debug|--notemp]')
|
||||
|
||||
# We need root access to query
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
root_or_admin = os.geteuid() == 0
|
||||
except AttributeError:
|
||||
root_or_admin = ctypes.windll.shell32.IsUserAnAdmin() !=0
|
||||
if not root_or_admin:
|
||||
print('# This script requires Administrator privileges')
|
||||
sys.exit(5)
|
||||
|
||||
# Check command line arguments to enable nagios or not
|
||||
if len(sys.argv) > 2:
|
||||
print_usage()
|
||||
sys.exit(1)
|
||||
|
||||
if len(sys.argv) > 1:
|
||||
if sys.argv[1] == '--nagios':
|
||||
nagiosmode = True
|
||||
elif sys.argv[1] == '--debug':
|
||||
debugmode = True
|
||||
elif sys.argv[1] == '--notemp':
|
||||
notempmode = True
|
||||
else:
|
||||
print_usage()
|
||||
sys.exit(1)
|
||||
# Functions
|
||||
def dbgprint(msg):
|
||||
if (debugmode):
|
||||
sys.stderr.write ( str('# DEBUG : '+msg+'\n'))
|
||||
|
||||
def is_exe(fpath):
|
||||
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
|
||||
|
||||
def which(program):
|
||||
import os
|
||||
fpath, fname = os.path.split(program)
|
||||
if fpath:
|
||||
if is_exe(program):
|
||||
return program
|
||||
else:
|
||||
# Add some defaults
|
||||
os.environ["PATH"] += os.pathsep + '/opt/MegaRAID/MegaCli'
|
||||
os.environ["PATH"] += os.pathsep + '/ms/dist/hwmgmt/bin'
|
||||
os.environ["PATH"] += os.pathsep + os.path.dirname(os.path.realpath(sys.argv[0]))
|
||||
for path in os.environ["PATH"].split(os.pathsep):
|
||||
dbgprint ('Looking in PATH '+str(path))
|
||||
path = path.strip('"')
|
||||
exe_file = os.path.join(path, program)
|
||||
if is_exe(exe_file):
|
||||
dbgprint ('Found "'+program+'" at '+exe_file)
|
||||
return exe_file
|
||||
return None
|
||||
|
||||
# Find MegaCli
|
||||
for megabin in "MegaCli64","MegaCli","megacli", "MegaCli.exe":
|
||||
dbgprint ('Looking for '+str(megabin)+' in PATH next..')
|
||||
megaclipath = which(megabin)
|
||||
if (megaclipath != None):
|
||||
dbgprint ('Will use MegaCLI from here: '+str(megaclipath))
|
||||
break
|
||||
|
||||
# Check binary exists (and +x), if not print an error message
|
||||
if (megaclipath != None):
|
||||
if os.path.exists(megaclipath) and os.access(megaclipath, os.X_OK):
|
||||
pass
|
||||
else:
|
||||
if nagiosmode:
|
||||
print('UNKNOWN - Cannot find '+megaclipath)
|
||||
else:
|
||||
print('Cannot find ' + megaclipath + ' in your PATH. Please install it.')
|
||||
sys.exit(3)
|
||||
else:
|
||||
print('Cannot find "MegaCli64","MegaCli" or "megacli" or "MegaCli.exe" in your PATH. Please install it.')
|
||||
sys.exit(3)
|
||||
|
||||
|
||||
#### pdb.set_trace()
|
||||
|
||||
def returnWdthFromArrayCol(glarray,idx):
|
||||
maxwdth = 0
|
||||
for glrow in glarray:
|
||||
if ( len(glrow[idx]) > maxwdth):
|
||||
maxwdth = len(glrow[idx])
|
||||
return maxwdth
|
||||
|
||||
# Get command output
|
||||
def getOutput(cmd):
|
||||
lines = []
|
||||
if cmd in Outputs:
|
||||
dbgprint ("Got Cached value: "+str(cmd))
|
||||
lines = Outputs[cmd]
|
||||
else:
|
||||
dbgprint ("Not a Cached value: "+str(cmd))
|
||||
output = os.popen(cmd)
|
||||
for line in output:
|
||||
if not re.match(r'^$',line.strip()):
|
||||
lines.append(line.strip())
|
||||
Outputs[cmd] = lines
|
||||
return lines
|
||||
|
||||
def returnControllerNumber(output):
|
||||
for line in output:
|
||||
if re.match(r'^Controller Count.*$',line.strip()):
|
||||
return int(line.split(':')[1].strip().strip('.'))
|
||||
|
||||
def returnTotalDriveNumber(output):
|
||||
for line in output:
|
||||
if re.match(r'Number of Physical Drives on Adapter.*$',line.strip()):
|
||||
return int(line.split(':')[1].strip())
|
||||
|
||||
def returnRebuildProgress(output):
|
||||
percent = 0
|
||||
tmpstr = ''
|
||||
for line in output:
|
||||
if re.match(r'^Rebuild Progress on Device at Enclosure.*, Slot .* Completed ',line.strip()):
|
||||
tmpstr = line.split('Completed')[1].strip()
|
||||
percent = int(tmpstr.split('%')[0].strip())
|
||||
return percent
|
||||
|
||||
def returnConfDriveNumber(output):
|
||||
# Count the configured drives
|
||||
confdrives = 0
|
||||
for line in output:
|
||||
if re.match(r'.*Number of PDs:.*$',line.strip()):
|
||||
confdrives += int(line.split(':')[2].strip())
|
||||
return int(confdrives)
|
||||
|
||||
def returnUnConfDriveNumber(output):
|
||||
# Count the configured drives
|
||||
confdrives = 0
|
||||
for line in output:
|
||||
if re.match(r'^Firmware state: Unconfigured.*$',line.strip()):
|
||||
confdrives += 1
|
||||
return int(confdrives)
|
||||
|
||||
def returnControllerModel(output):
|
||||
for line in output:
|
||||
if re.match(r'^Product Name.*$',line.strip()):
|
||||
return line.split(':')[1].strip()
|
||||
|
||||
def returnMemorySize(output):
|
||||
for line in output:
|
||||
if re.match(r'^Memory Size.*$',line.strip()):
|
||||
return line.split(':')[1].strip()
|
||||
|
||||
def returnFirmwareVersion(output):
|
||||
for line in output:
|
||||
if re.match(r'^FW Package Build.*$',line.strip()):
|
||||
return line.split(':')[1].strip()
|
||||
|
||||
def returnROCTemp(output):
|
||||
ROCtemp = ''
|
||||
tmpstr = ''
|
||||
if (notempmode):
|
||||
return str('N/A')
|
||||
else:
|
||||
for line in output:
|
||||
if re.match(r'^ROC temperature :.*$',line.strip()):
|
||||
tmpstr = line.split(':')[1].strip()
|
||||
ROCtemp = re.sub(' +.*$', '', tmpstr)
|
||||
if ( ROCtemp != '' ):
|
||||
return str(str(ROCtemp)+'C')
|
||||
else:
|
||||
return str('N/A')
|
||||
|
||||
def returnBBUPresence(output):
|
||||
BBU = ''
|
||||
tmpstr = ''
|
||||
for line in output:
|
||||
if re.match(r'^BBU +:.*$',line.strip()):
|
||||
tmpstr = line.split(':')[1].strip()
|
||||
BBU = re.sub(' +.*$', '', tmpstr)
|
||||
break
|
||||
if ( BBU != '' ):
|
||||
return str(BBU)
|
||||
else:
|
||||
return str('N/A')
|
||||
|
||||
def returnBBUStatus(output):
|
||||
BBUStatus = ''
|
||||
tmpstr = ''
|
||||
for line in output:
|
||||
if re.match(r'^ *Battery Replacement required +:.*$',line.strip()):
|
||||
tmpstr = line.split(':')[1].strip()
|
||||
BBUStatus = re.sub(' +.*$', '', tmpstr)
|
||||
break
|
||||
if ( BBUStatus == 'Yes' ):
|
||||
return str('REPL')
|
||||
else:
|
||||
return str('Good')
|
||||
|
||||
def returnArrayNumber(output):
|
||||
i = 0
|
||||
for line in output:
|
||||
if re.match(r'^(CacheCade )?Virtual Drive:.*$',line.strip()):
|
||||
i += 1
|
||||
return i
|
||||
|
||||
def returnHBAPCIInfo(output):
|
||||
busprefix = '0000'
|
||||
busid = ''
|
||||
devid = ''
|
||||
functionid = ''
|
||||
pcipath = ''
|
||||
for line in output:
|
||||
if re.match(r'^Bus Number.*:.*$',line.strip()):
|
||||
busid = str(line.strip().split(':')[1].strip()).zfill(2)
|
||||
if re.match(r'^Device Number.*:.*$',line.strip()):
|
||||
devid = str(line.strip().split(':')[1].strip()).zfill(2)
|
||||
if re.match(r'^Function Number.*:.*$',line.strip()):
|
||||
functionid = str(line.strip().split(':')[1].strip()).zfill(1)
|
||||
if busid:
|
||||
pcipath = str(busprefix + ':' + busid + ':' + devid + '.' + functionid)
|
||||
dbgprint("Array PCI path : "+pcipath)
|
||||
return str(pcipath)
|
||||
else:
|
||||
return None
|
||||
|
||||
def returnHBAInfo(table,output,controllerid):
|
||||
controllermodel = 'Unknown'
|
||||
controllerram = 'Unknown'
|
||||
controllerrev = 'Unknown'
|
||||
controllertemp = ''
|
||||
controllermodel = returnControllerModel(output)
|
||||
controllerram = returnMemorySize(output)
|
||||
controllerrev = returnFirmwareVersion(output)
|
||||
controllertemp = returnROCTemp(output)
|
||||
controllerbbu = returnBBUPresence(output)
|
||||
if controllerbbu == 'Present':
|
||||
cmd = '%s -AdpBbuCmd -GetBbuStatus -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
controllerbbu = returnBBUStatus(output)
|
||||
|
||||
if controllermodel != 'Unknown':
|
||||
table.append([ 'c'+str(controllerid), controllermodel, controllerram, str(controllertemp), str(controllerbbu), str('FW: '+controllerrev) ])
|
||||
|
||||
def returnArrayInfo(output,controllerid,arrayid,arrayindex):
|
||||
id = 'c'+str(controllerid)+'u'+str(arrayid)
|
||||
operationlinennumber = False
|
||||
linenumber = 0
|
||||
targetid = ''
|
||||
raidtype = ''
|
||||
raidlvl = ''
|
||||
size = ''
|
||||
state = 'N/A'
|
||||
strpsz = ''
|
||||
dskcache = 'N/A'
|
||||
properties = ''
|
||||
spandepth = 0
|
||||
diskperspan = 0
|
||||
cachecade_info = 'None'
|
||||
|
||||
for line in output:
|
||||
if re.match(r'^(CacheCade )?Virtual Drive:.*(Target Id: [0-9]+).*$',line.strip()):
|
||||
# Extract the SCSI Target ID
|
||||
targetid = line.strip().split(':')[2].split(')')[0].strip()
|
||||
elif re.match(r'^RAID Level.*?:.*$',line.strip()):
|
||||
# Extract the primary raid type, decide on X0 RAID level later when we hit Span Depth
|
||||
raidlvl = int(line.strip().split(':')[1].split(',')[0].split('-')[1].strip())
|
||||
elif re.match(r'^Size.*?:.*$',line.strip()):
|
||||
# Size reported in MB
|
||||
if re.match(r'^.*MB$',line.strip().split(':')[1]):
|
||||
size = line.strip().split(':')[1].strip('MB').strip()
|
||||
if ( float(size) > 1000):
|
||||
size = str(int(round((float(size) / 1000))))+'G'
|
||||
else:
|
||||
size = str(int(round(float(size))))+'M'
|
||||
# Size reported in TB
|
||||
elif re.match(r'^.*TB$',line.strip().split(':')[1]):
|
||||
size = line.strip().split(':')[1].strip('TB').strip()
|
||||
size = str(int(round((float(size) * 1000))))+'G'
|
||||
# Size reported in GB (default)
|
||||
else:
|
||||
size = line.strip().split(':')[1].strip('GB').strip()
|
||||
size = str(int(round((float(size)))))+'G'
|
||||
elif re.match(r'^Span Depth.*?:.*$',line.strip()):
|
||||
# If Span Depth is greater than 1 chances are we have a RAID 10, 50 or 60
|
||||
spandepth = line.strip().split(':')[1].strip()
|
||||
elif re.match(r'^State.*?:.*$',line.strip()):
|
||||
state = line.strip().split(':')[1].strip()
|
||||
elif re.match(r'^Strip Size.*?:.*$',line.strip()):
|
||||
strpsz = line.strip().split(':')[1].strip()
|
||||
elif re.match(r'^Number Of Drives per span.*:.*$',line.strip()):
|
||||
diskperspan = int(line.strip().split(':')[1].strip())
|
||||
elif re.match(r'^Current Cache Policy.*?:.*$',line.strip()):
|
||||
props = line.strip().split(':')[1].strip()
|
||||
if re.search('ReadAdaptive', props):
|
||||
properties += 'ADRA'
|
||||
if re.search('ReadAhead', props):
|
||||
properties += 'RA'
|
||||
if re.match('ReadAheadNone', props):
|
||||
properties += 'NORA'
|
||||
if re.search('WriteBack', props):
|
||||
properties += ',WB'
|
||||
if re.match('WriteThrough', props):
|
||||
properties += ',WT'
|
||||
elif re.match(r'^Disk Cache Policy.*?:.*$',line.strip()):
|
||||
props = line.strip().split(':')[1].strip()
|
||||
if re.search('Disabled', props):
|
||||
dskcache = 'Disabled'
|
||||
if re.search('Disk.s Default', props):
|
||||
dskcache = 'Default'
|
||||
if re.search('Enabled', props):
|
||||
dskcache = 'Enabled'
|
||||
elif re.match(r'^Ongoing Progresses.*?:.*$',line.strip()):
|
||||
operationlinennumber = linenumber
|
||||
elif re.match(r'Cache Cade Type\s*:.*$', line):
|
||||
cachecade_info = "Type : " + line.strip().split(':')[1].strip()
|
||||
elif re.match(r'^Target Id of the Associated LDs\s*:.*$', line):
|
||||
associated=[]
|
||||
for array in line.split(':')[1].strip().split(','):
|
||||
if array.isdigit():
|
||||
associated.append('c%du%d' % (controllerid, int(array)))
|
||||
if len(associated) >= 1:
|
||||
cachecade_info = "Associated : %s" %(', '.join(associated))
|
||||
linenumber += 1
|
||||
|
||||
# If there was an ongoing operation, find the relevant line in the previous output
|
||||
if operationlinennumber:
|
||||
inprogress = output[operationlinennumber + 1]
|
||||
else:
|
||||
inprogress = 'None'
|
||||
|
||||
# Compute the RAID level
|
||||
NestedLDTable[int(controllerid)][int(arrayindex)] = False
|
||||
if raidlvl == '':
|
||||
raidtype = str('N/A')
|
||||
else:
|
||||
if (int(spandepth) >= 2):
|
||||
raidtype = str('RAID-' + str(raidlvl) + '0')
|
||||
NestedLDTable[controllerid][int(arrayindex)] = True
|
||||
else:
|
||||
if(raidlvl == 1):
|
||||
if(diskperspan > 2):
|
||||
raidtype = str('RAID-10')
|
||||
NestedLDTable[controllerid][int(arrayindex)] = True
|
||||
else:
|
||||
raidtype = str('RAID-' + str(raidlvl))
|
||||
else:
|
||||
raidtype = str('RAID-' + str(raidlvl))
|
||||
|
||||
dbgprint('RAID Level: ' + str(raidlvl)
|
||||
+ ' Span Depth: ' + str(spandepth)
|
||||
+ ' Disk Per Span: ' + str(diskperspan)
|
||||
+ ' Raid Type: ' + str(raidtype))
|
||||
return [id,raidtype,size,strpsz,properties,dskcache,state,targetid,cachecade_info,inprogress]
|
||||
|
||||
def returnDiskInfo(output,controllerid):
|
||||
arrayid = False
|
||||
arrayindex = -1
|
||||
sarrayid = 'Unknown'
|
||||
diskid = False
|
||||
oldenclid = False
|
||||
enclid = False
|
||||
spanid = False
|
||||
slotid = False
|
||||
lsidid = 'Unknown'
|
||||
table = []
|
||||
fstate = 'Offline'
|
||||
substate = 'Unknown'
|
||||
model = 'Unknown'
|
||||
speed = 'Unknown'
|
||||
dsize = 'Unknown'
|
||||
temp = 'Unk0C'
|
||||
percent = 0
|
||||
for line in output:
|
||||
if re.match(r'^Span: [0-9]+ - Number of PDs:',line.strip()):
|
||||
spanid = line.split(':')[1].strip()
|
||||
spanid = re.sub(' - Number of PDs.*', '', spanid)
|
||||
elif re.match(r'Enclosure Device ID: .*$',line.strip()):
|
||||
# We match here early in the analysis so reset the vars if this is a new disk we're reading..
|
||||
oldenclid = enclid
|
||||
enclid = line.split(':')[1].strip()
|
||||
if oldenclid != False:
|
||||
fstate = 'Offline'
|
||||
model = 'Unknown'
|
||||
speed = 'Unknown'
|
||||
temp = 'Unk0C'
|
||||
slotid = False
|
||||
lsidid = 'Unknown'
|
||||
elif re.match(r'^Coerced Size: ',line.strip()):
|
||||
dsize = line.split(':')[1].strip()
|
||||
dsize = re.sub(' \[.*\.*$', '', dsize)
|
||||
dsize = re.sub('[0-9][0-9] GB', ' Gb', dsize)
|
||||
elif re.match(r'^(CacheCade )?Virtual (Disk|Drive): [0-9]+.*$',line.strip()):
|
||||
arrayindex += 1
|
||||
arrayid = line.split('(')[0].split(':')[1].strip()
|
||||
elif re.match(r'PD: [0-9]+ Information.*$',line.strip()):
|
||||
diskid = line.split()[1].strip()
|
||||
elif re.match(r'^Device Id: .*$',line.strip()):
|
||||
lsidid = line.split(':')[1].strip()
|
||||
elif re.match(r'Slot Number: .*$',line.strip()):
|
||||
slotid = line.split(':')[1].strip()
|
||||
elif re.match(r'Firmware state: .*$',line.strip()):
|
||||
fstate = line.split(':')[1].strip()
|
||||
subfstate = re.sub('\(.*', '', fstate)
|
||||
dbgprint('Firmware State: '+str(fstate)+' '+str(subfstate))
|
||||
elif re.match(r'Inquiry Data: .*$',line.strip()):
|
||||
model = line.split(':')[1].strip()
|
||||
model = re.sub(' +', ' ', model)
|
||||
# Sub code
|
||||
manuf = re.sub(' .*', '', model)
|
||||
dtype = re.sub(manuf+' ', '', model)
|
||||
dtype = re.sub(' .*', '', dtype)
|
||||
hwserial = re.sub('.*'+dtype+' *', '', model)
|
||||
elif re.match(r'^Media Type: .*$',line.strip()):
|
||||
mtype = line.split(':')[1].strip()
|
||||
if mtype == 'Hard Disk Device':
|
||||
mtype = 'HDD'
|
||||
else:
|
||||
if mtype == 'Solid State Device':
|
||||
mtype = 'SSD'
|
||||
else:
|
||||
mtype = 'N/A'
|
||||
elif re.match(r'Device Speed: .*$',line.strip()):
|
||||
speed = line.split(':')[1].strip()
|
||||
elif re.match(r'Drive Temperature :.*$',line.strip()):
|
||||
if (notempmode):
|
||||
temp = 'N/A'
|
||||
else:
|
||||
# Drive temp is amongst the last few lines matched, decide here if we add information to the table..
|
||||
temp = line.split(':')[1].strip()
|
||||
temp = re.sub(' \(.*\)', '', temp)
|
||||
if model != 'Unknown':
|
||||
dbgprint('Disk Info: '+str(arrayid)+' '+str(diskid)+' '+str(oldenclid))
|
||||
if subfstate == 'Rebuild':
|
||||
cmd = '%s pdrbld -showprog -physdrv\[%s:%s\] -a%d -NoLog' % (megaclipath, enclid, slotid, controllerid)
|
||||
output = getOutput(cmd)
|
||||
percent = returnRebuildProgress(output)
|
||||
fstate = str('Rebuilding (%d%%)' % (percent))
|
||||
|
||||
if (( NestedLDTable[controllerid][int(arrayindex)] == True) and (spanid != False)):
|
||||
sarrayid = str(arrayid)+"s"+spanid
|
||||
else:
|
||||
sarrayid = str(arrayid)
|
||||
table.append([sarrayid, str(diskid), mtype, model, dsize, fstate , speed, temp, enclid, slotid, lsidid])
|
||||
return table
|
||||
|
||||
|
||||
def returnUnconfDiskInfo(output,controllerid):
|
||||
arrayid = False
|
||||
diskid = False
|
||||
olddiskid = False
|
||||
enclid = False
|
||||
slotid = False
|
||||
lsidid = 'Unknown'
|
||||
table = []
|
||||
fstate = 'Offline'
|
||||
substate = 'Unknown'
|
||||
model = 'Unknown'
|
||||
speed = 'Unknown'
|
||||
mtype = 'Unknown'
|
||||
dsize = 'Unknown'
|
||||
temp = 'Unk0C'
|
||||
for line in output:
|
||||
if re.match(r'Enclosure Device ID: .*$',line.strip()):
|
||||
# We match here early in the analysis so reset the vars if this is a new disk we're reading..
|
||||
oldenclid = enclid
|
||||
enclid = line.split(':')[1].strip()
|
||||
if oldenclid != False:
|
||||
arrayid = False
|
||||
fstate = 'Offline'
|
||||
model = 'Unknown'
|
||||
speed = 'Unknown'
|
||||
temp = 'Unk0C'
|
||||
slotid = False
|
||||
lsidid = 'Unknown'
|
||||
|
||||
elif re.match(r'^Coerced Size: ',line.strip()):
|
||||
dsize = line.split(':')[1].strip()
|
||||
dsize = re.sub(' \[.*\.*$', '', dsize)
|
||||
dsize = re.sub('[0-9][0-9] GB', ' Gb', dsize)
|
||||
elif re.match(r'^Drive.s position: DiskGroup: [0-9]+,.*$',line.strip()):
|
||||
arrayid = line.split(',')[1].split(':')[1].strip()
|
||||
elif re.match(r'^Device Id: [0-9]+.*$',line.strip()):
|
||||
diskid = line.split(':')[1].strip()
|
||||
elif re.match(r'^Device Id: .*$',line.strip()):
|
||||
lsidid = line.split(':')[1].strip()
|
||||
elif re.match(r'Slot Number: .*$',line.strip()):
|
||||
slotid = line.split(':')[1].strip()
|
||||
elif re.match(r'Firmware state: .*$',line.strip()):
|
||||
fstate = line.split(':')[1].strip()
|
||||
subfstate = re.sub('\(.*', '', fstate)
|
||||
dbgprint('Firmware State: '+str(fstate)+' '+str(subfstate))
|
||||
elif re.match(r'Inquiry Data: .*$',line.strip()):
|
||||
model = line.split(':')[1].strip()
|
||||
model = re.sub(' +', ' ', model)
|
||||
manuf = re.sub(' .*', '', model)
|
||||
dtype = re.sub(manuf+' ', '', model)
|
||||
dtype = re.sub(' .*', '', dtype)
|
||||
hwserial = re.sub('.*'+dtype+' *', '', model)
|
||||
elif re.match(r'^Media Type: .*$',line.strip()):
|
||||
mtype = line.split(':')[1].strip()
|
||||
if mtype == 'Hard Disk Device':
|
||||
mtype = 'HDD'
|
||||
else:
|
||||
if mtype == 'Solid State Device':
|
||||
mtype = 'SSD'
|
||||
else:
|
||||
mtype = 'N/A'
|
||||
elif re.match(r'Device Speed: .*$',line.strip()):
|
||||
speed = line.split(':')[1].strip()
|
||||
elif re.match(r'Drive Temperature :.*$',line.strip()):
|
||||
temp = line.split(':')[1].strip()
|
||||
temp = re.sub('\(.*\)', '', temp)
|
||||
# Drive temp is amongst the last few lines matched, decide here if we add information to the table..
|
||||
if arrayid == False:
|
||||
if subfstate == 'Unconfigured':
|
||||
dbgprint('Unconfigured Disk: Arrayid: '+str(arrayid)+' DiskId: '+str(diskid)+' '+str(olddiskid)+' '+str(fstate))
|
||||
elif subfstate == 'Online, Spun Up':
|
||||
dbgprint('Online Disk: Arrayid: '+str(arrayid)+' DiskId: '+str(diskid)+' '+str(olddiskid)+' '+str(fstate))
|
||||
table.append([ mtype, model, dsize, fstate, speed, temp, enclid, slotid, lsidid])
|
||||
return table
|
||||
|
||||
cmd = '%s -adpCount -NoLog' % (megaclipath)
|
||||
output = getOutput(cmd)
|
||||
controllernumber = returnControllerNumber(output)
|
||||
|
||||
bad = False
|
||||
|
||||
# List available controller
|
||||
if printcontroller:
|
||||
if controllernumber:
|
||||
if not nagiosmode:
|
||||
print('-- Controller information --')
|
||||
|
||||
i = 0
|
||||
controllerid = 0
|
||||
mlen = 0
|
||||
hbainfo = []
|
||||
while controllerid < controllernumber:
|
||||
cmd = '%s -AdpAllInfo -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
returnHBAInfo(hbainfo, output,controllerid)
|
||||
controllerid += 1
|
||||
mlen = returnWdthFromArrayCol(hbainfo,1)
|
||||
|
||||
controllerid = 0
|
||||
for hba in hbainfo:
|
||||
hbafmt = str('%-5s | %-'+str(mlen)+'s | %-6s | %-4s | %-6s | %-12s ')
|
||||
# Header
|
||||
if ( i == 0 ):
|
||||
if not nagiosmode:
|
||||
print(hbafmt % ("-- ID","H/W Model","RAM","Temp","BBU", "Firmware"))
|
||||
if not nagiosmode:
|
||||
print(hbafmt % (
|
||||
hba[0],
|
||||
hba[1],
|
||||
hba[2],
|
||||
hba[3],
|
||||
hba[4],
|
||||
hba[5]))
|
||||
i += 1
|
||||
if not nagiosmode:
|
||||
print('')
|
||||
else:
|
||||
print("No MegaRAID or PERC adapter detected on your system!")
|
||||
exit(1)
|
||||
|
||||
if printarray:
|
||||
if not nagiosmode:
|
||||
print('-- Array information --')
|
||||
|
||||
controllerid = 0
|
||||
pcipath = ''
|
||||
diskpath = ''
|
||||
i = 0 ; j = 0
|
||||
mlen = 0 ; rlen = 0 ; clen = 0
|
||||
while controllerid < controllernumber:
|
||||
arrayindex = 0
|
||||
|
||||
cmd = '%s -LDInfo -lall -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arraynumber = returnArrayNumber(output)
|
||||
# We need to explore each HBA to look for gaps in LD's
|
||||
ldid = 0 ; ldcount = 0
|
||||
while ldcount < arraynumber:
|
||||
cmd = '%s -LDInfo -l%d -a%d -NoLog' % (megaclipath, ldid, controllerid)
|
||||
output = getOutput(cmd)
|
||||
for line in output:
|
||||
if re.match(r'^Adapter.*Virtual Drive .* Does not Exist',line.strip()):
|
||||
ldid += 1
|
||||
elif re.match(r'^(CacheCade )?Virtual Drive:',line.strip()):
|
||||
LDTable[controllerid].append ( ldid )
|
||||
#NestedLDTable[controllerid][int(arrayindex)] = False
|
||||
ldcount += 1
|
||||
ldid += 1
|
||||
|
||||
while arrayindex < arraynumber:
|
||||
ldid = LDTable[controllerid][arrayindex]
|
||||
cmd = '%s -LDInfo -l%d -a%d -NoLog' % (megaclipath, ldid, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arrayinfo = returnArrayInfo(output, controllerid, ldid, arrayindex)
|
||||
if ( len(arrayinfo[1]) > rlen):
|
||||
rlen = len(arrayinfo[1])
|
||||
if ( len(arrayinfo[4]) > mlen):
|
||||
mlen = len(arrayinfo[4])
|
||||
if ( len(arrayinfo[8]) > clen):
|
||||
clen = len(arrayinfo[8])
|
||||
arrayindex += 1
|
||||
controllerid += 1
|
||||
|
||||
controllerid = 0
|
||||
while controllerid < controllernumber:
|
||||
arrayindex = 0
|
||||
|
||||
cmd = '%s -AdpGetPciInfo -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
pcipath = returnHBAPCIInfo(output)
|
||||
|
||||
cmd = '%s -LDInfo -lall -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arraynumber = returnArrayNumber(output)
|
||||
while arrayindex < arraynumber:
|
||||
ldid = LDTable[controllerid][arrayindex]
|
||||
cmd = '%s -LDInfo -l%d -a%d -NoLog' % (megaclipath, ldid, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arrayinfo = returnArrayInfo(output,controllerid, ldid, arrayindex)
|
||||
|
||||
if pcipath:
|
||||
diskprefix = str('/dev/disk/by-path/pci-' + pcipath + '-scsi-0:')
|
||||
for j in range (8):
|
||||
diskpath = diskprefix + str(j) + ':' + str(arrayinfo[7]) + ':0'
|
||||
if os.path.exists(diskpath):
|
||||
arrayinfo[7] = os.path.realpath(diskpath)
|
||||
else:
|
||||
arrayinfo[7] = 'N/A'
|
||||
|
||||
# Pad the string length, just to make sure it's aligned with the headers...
|
||||
if (rlen < len("Type")):
|
||||
rlen = len("Type")
|
||||
if (mlen < len("Flags")):
|
||||
mlen = len("Flags")
|
||||
if (clen < len("CacheCade")):
|
||||
clen = len("CacheCade")
|
||||
|
||||
ldfmt = str('%-5s | %-'+str(rlen)+'s | %7s | %7s | %'+str(mlen)+'s | %8s | %8s | %8s | %-'+str(clen)+'s |%-12s ')
|
||||
# Header
|
||||
if ( i == 0 ):
|
||||
if not nagiosmode:
|
||||
print(ldfmt % ("-- ID", "Type", "Size", "Strpsz", "Flags", "DskCache", "Status", "OS Path", "CacheCade", "InProgress" ))
|
||||
if not nagiosmode:
|
||||
print(ldfmt % (
|
||||
arrayinfo[0],
|
||||
arrayinfo[1],
|
||||
arrayinfo[2],
|
||||
arrayinfo[3],
|
||||
arrayinfo[4],
|
||||
arrayinfo[5],
|
||||
arrayinfo[6],
|
||||
arrayinfo[7],
|
||||
arrayinfo[8],
|
||||
arrayinfo[9]))
|
||||
dbgprint("Array state : "+arrayinfo[6])
|
||||
if arrayinfo[6] not in [ 'Optimal', 'N/A' ]:
|
||||
bad = True
|
||||
nagiosbadarray=nagiosbadarray+1
|
||||
else:
|
||||
nagiosgoodarray=nagiosgoodarray+1
|
||||
arrayindex += 1
|
||||
i += 1
|
||||
controllerid += 1
|
||||
if not nagiosmode:
|
||||
print('')
|
||||
|
||||
controllerid = 0
|
||||
while controllerid < controllernumber:
|
||||
cmd = '%s -PDGetNum -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
totaldrivenumber += returnTotalDriveNumber(output)
|
||||
controllerid += 1
|
||||
|
||||
if totaldrivenumber:
|
||||
if not nagiosmode:
|
||||
print('-- Disk information --')
|
||||
|
||||
i = 0
|
||||
dlen = 0 ; mlen = 0 ; flen = 0
|
||||
controllerid = 0
|
||||
while controllerid < controllernumber:
|
||||
arrayid = 0
|
||||
cmd = '%s -LDInfo -lall -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arraynumber = returnArrayNumber(output)
|
||||
#### BUG: -LdPdInfo shows all PD on the adapter, not just for said LD..
|
||||
#### while arrayid <= arraynumber:
|
||||
cmd = '%s -LdPdInfo -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arraydisk = returnDiskInfo(output,controllerid)
|
||||
for array in arraydisk:
|
||||
dbgprint('Disk c'+str(controllerid)+'u'+array[0]+'p'+array[1] + ' status : ' + array[5])
|
||||
if array[5] not in [ 'Online', 'Online, Spun Up' ]:
|
||||
bad = True
|
||||
nagiosbaddisk=nagiosbaddisk+1
|
||||
else:
|
||||
nagiosgooddisk=nagiosgooddisk+1
|
||||
|
||||
if ( returnWdthFromArrayCol(arraydisk,0) > dlen):
|
||||
dlen = returnWdthFromArrayCol(arraydisk,0)
|
||||
if ( returnWdthFromArrayCol(arraydisk,3) > mlen):
|
||||
mlen = returnWdthFromArrayCol(arraydisk,3)
|
||||
if ( returnWdthFromArrayCol(arraydisk,5) > flen):
|
||||
flen = returnWdthFromArrayCol(arraydisk,5)
|
||||
controllerid += 1
|
||||
|
||||
controllerid = 0
|
||||
while controllerid < controllernumber:
|
||||
arrayid = 0
|
||||
|
||||
cmd = '%s -LDInfo -lall -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arraynumber = returnArrayNumber(output)
|
||||
#### BUG: -LdPdInfo shows all PD on the adapter, not just for said LD..
|
||||
#### while arrayid <= arraynumber:
|
||||
|
||||
cmd = '%s -LdPdInfo -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arraydisk = returnDiskInfo(output,controllerid)
|
||||
|
||||
# Adjust print format with width computed above
|
||||
drvfmt = "%-"+str(dlen+5)+"s | %-4s | %-"+str(mlen)+"s | %-8s | %-"+str(flen)+"s | %-8s | %-4s | %-8s | %-8s"
|
||||
for array in arraydisk:
|
||||
# Header
|
||||
if ( i == 0 ):
|
||||
if not nagiosmode:
|
||||
print(drvfmt % (
|
||||
"-- ID", "Type", "Drive Model", "Size", "Status", "Speed", "Temp", "Slot ID", "LSI Device ID"))
|
||||
# Drive information
|
||||
if not nagiosmode:
|
||||
print(drvfmt % (
|
||||
str('c'+str(controllerid)+'u'+array[0]+'p'+array[1]), # c0p0
|
||||
array[2], # HDD/SDD
|
||||
array[3], # Model Information (Variable len)
|
||||
array[4], # Size
|
||||
array[5], # Status (Variable len)
|
||||
array[6], # Speed
|
||||
array[7], # Temp
|
||||
str('['+array[8]+':'+array[9]+']'), # Slot ID
|
||||
array[10])) # LSI ID
|
||||
i = i + 1
|
||||
controllerid += 1
|
||||
if not nagiosmode:
|
||||
print('')
|
||||
|
||||
controllerid = 0
|
||||
totalconfdrivenumber = 0
|
||||
totalunconfdrivenumber = 0
|
||||
totaldrivenumber = 0
|
||||
while controllerid < controllernumber:
|
||||
cmd = '%s -LdPdInfo -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
totalconfdrivenumber += returnConfDriveNumber(output)
|
||||
|
||||
cmd = '%s -PDGetNum -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
totaldrivenumber += returnTotalDriveNumber(output)
|
||||
|
||||
cmd = '%s -PDList -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
totalunconfdrivenumber += returnUnConfDriveNumber(output)
|
||||
|
||||
controllerid += 1
|
||||
|
||||
dbgprint('Total Drives in system : ' + str(totaldrivenumber))
|
||||
dbgprint('Total Configured Drives : ' + str(totalconfdrivenumber))
|
||||
dbgprint('Total Unconfigured Drives : ' + str(totalunconfdrivenumber))
|
||||
|
||||
if totalunconfdrivenumber:
|
||||
if not nagiosmode:
|
||||
print('-- Unconfigured Disk information --')
|
||||
|
||||
controllerid = 0
|
||||
while controllerid < controllernumber:
|
||||
arrayid = 0
|
||||
|
||||
cmd = '%s -LDInfo -lall -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arraynumber = returnArrayNumber(output)
|
||||
#### BUG: -LdPdInfo shows all PD on the adapter, not just for given LD..
|
||||
#### while arrayid <= arraynumber:
|
||||
|
||||
cmd = '%s -PDList -a%d -NoLog' % (megaclipath, controllerid)
|
||||
output = getOutput(cmd)
|
||||
arraydisk = returnUnconfDiskInfo(output,controllerid)
|
||||
for array in arraydisk:
|
||||
dbgprint('Disk c'+str(controllerid)+'uXpY status : ' + array[3])
|
||||
if array[3] not in [ 'Online', 'Unconfigured(good), Spun Up', 'Unconfigured(good), Spun down', 'JBOD','Hotspare, Spun Up','Hotspare, Spun down' ]:
|
||||
bad = True
|
||||
nagiosbaddisk=nagiosbaddisk+1
|
||||
else:
|
||||
nagiosgooddisk=nagiosgooddisk+1
|
||||
|
||||
mlen = returnWdthFromArrayCol(arraydisk,1)
|
||||
flen = returnWdthFromArrayCol(arraydisk,3)
|
||||
|
||||
# Adjust print format with widths computed above
|
||||
drvfmt = "%-7s | %-4s | %-"+str(mlen)+"s | %-8s | %-"+str(flen+2)+"s | %-8s | %-4s | %-8s | %-8s"
|
||||
i = 0
|
||||
for array in arraydisk:
|
||||
# Header
|
||||
if ( i == 0 ):
|
||||
if not nagiosmode:
|
||||
print(drvfmt % (
|
||||
"-- ID", "Type", "Drive Model", "Size", "Status", "Speed", "Temp", "Slot ID", "LSI Device ID"))
|
||||
# Drive information
|
||||
if not nagiosmode:
|
||||
print(drvfmt % (
|
||||
str('c'+str(controllerid)+'uXpY'), # cXpY
|
||||
array[0], # HDD/SDD
|
||||
array[1], # Model Information (Variable len)
|
||||
array[2], # Size
|
||||
array[3], # Status (Variable len)
|
||||
array[4], # Speed
|
||||
array[5], # Temp
|
||||
str('['+array[6]+':'+array[7]+']'), # Slot ID
|
||||
array[8])) # LSI ID
|
||||
i = i + 1
|
||||
controllerid += 1
|
||||
if not nagiosmode:
|
||||
print('')
|
||||
|
||||
if nagiosmode:
|
||||
if bad:
|
||||
print('RAID ERROR - Arrays: OK:'+str(nagiosgoodarray)+' Bad:'+str(nagiosbadarray)+' - Disks: OK:'+str(nagiosgooddisk)+' Bad:'+str(nagiosbaddisk))
|
||||
sys.exit(2)
|
||||
else:
|
||||
print('RAID OK - Arrays: OK:'+str(nagiosgoodarray)+' Bad:'+str(nagiosbadarray)+' - Disks: OK:'+str(nagiosgooddisk)+' Bad:'+str(nagiosbaddisk))
|
||||
else:
|
||||
if bad:
|
||||
print('\nThere is at least one disk/array in a NOT OPTIMAL state.')
|
||||
sys.exit(1)
|
7
mikrotik/mikrotik-setup.sh → mikrotik-setup.sh
Executable file → Normal file
@@ -16,12 +7,7 @@ conf() {
    ssh admin@${target} "$@"
}
copy() {
    if echo ${target} | grep -q :; then
        ltarget="[$target]"
    else
        ltarget="$target"
    fi
    scp "$1" admin@${ltarget}:
    scp "$1" admin@${target}:
}

# store ssh key in the admin user!
@@ -1,21 +1,13 @@
#!/bin/sh
#!/bin
# Nico Schottelius, 2019-12-02
# Update mikrotik routers to the latest package

set -e

if [ $# -lt 2 ]; then
    echo "$0 <version> <arch> router [router...]"
    cat <<EOF
Version:
- the package version as found on https://mikrotik.com/download
Arch:
- rb4011: arm
- crs326: arm
- hapac: mipsbe

router:
- The hostname(s) or IP(v6) addresses of the routers you want to update
EOF
    exit 1
fi
@@ -31,22 +23,13 @@ cd "$tmp"
wget "${url}"
unzip "${file}"

pkg_list="dhcp ipv6 lcd lte multicast ppp routing security system user-manager wireless"

pkg_list="dhcp ipv6 lcd lte multicast ppp routing security system user-manager wireless"!

while [ $# -ge 1 ]; do
    target=$1; shift

    # Escape literal IPv6 addresses
    if echo $target | grep ':'; then
        target_scp="[$target]"
    else
        target_scp="$target"
    fi

    echo "Updating ${target}"
    for pkg in $pkg_list; do
        scp ${pkg}-${version}-${arch}.npk "admin@${target_scp}:"
        scp ${pkg}-${version}-${arch}.npk "admin@${target}:"
    done
    ssh admin@${target} "/system reboot"
done
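The hunks above add and remove the bracket handling for scp targets (target_scp, ltarget); the rule itself is small enough to state as a sketch:

# Sketch of the bracket rule used via target_scp/ltarget in the scripts above:
# scp needs IPv6 literals wrapped in [], hostnames and IPv4 addresses stay as-is.
def scp_target(host: str) -> str:
    return f"[{host}]" if ":" in host else host

assert scp_target("router1.example.org") == "router1.example.org"
assert scp_target("2a0a:e5c0:2::1") == "[2a0a:e5c0:2::1]"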
@@ -1,59 +0,0 @@
#!/bin/sh
# Nico Schottelius, 2020-08-03
# Setup a standard crs326


if [ $# -ne 4 ]; then
    echo "$0 <current-ip> <new-ipv6-network> <hostname-to-be-setup> <password>"
    echo "Example:"
    echo "$0 fe80::764d:28ff:fe09:9355%eth1 2a0a:e5c0:2::/64 mikrotik-crs326-8 \$(pass ...)"
    exit 1
fi

ip=$1; shift
newip=$1; shift
hostname=$1; shift
password=$1; shift

target=$ip
bridge=bridge

conf() {
    echo $@
    ssh admin@${target} "$@"
}

commastring() {
    echo $@ | sed 's/ /,/g'
}

conf "/system identity set name=$hostname"
conf "/interface bridge add name=$bridge"

################################################################################
# MTU

for i in $(seq 1 24); do
    conf "/interface ethernet set ether$i mtu=9200 l2mtu=9204"
    conf "/interface bridge port add bridge=$bridge interface=ether$i hw=yes"
done

for i in $(seq 1 2); do
    conf "/interface ethernet set sfp-sfpplus$i mtu=9200 l2mtu=9204"
    conf "/interface bridge port add bridge=$bridge interface=sfp-sfpplus$i hw=yes"
done

################################################################################
# IPv6 address, password

conf "/ipv6 address add eui-64=yes advertise=no address=$newip interface=$bridge"
conf "/ipv6 address print"
conf "/password old-password=\"\" new-password=$password confirm-new-password=$password"

# Show neigh
conf "/interface bridge host print where !local"

echo "do not forget to set a password"
|
@ -1,103 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2020-08-03
|
||||
# Setup a standard crs326
|
||||
|
||||
|
||||
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "$0 <ip> <hostname-to-be-setup>"
|
||||
echo "Example:"
|
||||
echo "$0 fe80::764d:28ff:fe09:9355%eth1 mikrotik-crs326-8"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ip=$1; shift
|
||||
hostname=$1; shift
|
||||
password=$1; shift
|
||||
|
||||
target=$ip
|
||||
bridge=bridgevlans
|
||||
|
||||
internal=10
|
||||
coworking=15
|
||||
server=11
|
||||
other="8 16 18 33 34"
|
||||
|
||||
tagged="ether23 ether24 sfp-sfpplus1"
|
||||
|
||||
net_internal=2a0a:e5c0:2::/64
|
||||
|
||||
conf() {
|
||||
echo $@
|
||||
ssh admin@${target} "$@"
|
||||
}
|
||||
|
||||
commastring() {
|
||||
echo $@ | sed 's/ /,/g'
|
||||
}
|
||||
|
||||
#set -x
|
||||
|
||||
|
||||
# do this out of band -- see mikrotik-setup.sh
|
||||
#conf "/password new-password=$password confirm-new-password=$password old-password=\"\""
|
||||
|
||||
conf "/system identity set name=$hostname"
|
||||
conf "/interface bridge add name=$bridge"
|
||||
|
||||
################################################################################
|
||||
# MTU
|
||||
|
||||
for i in $(seq 1 24); do
|
||||
conf "/interface ethernet set ether$i mtu=9200 l2mtu=9204"
|
||||
done
|
||||
|
||||
|
||||
for i in $(seq 1 2); do
|
||||
conf "/interface ethernet set sfp-sfpplus$i mtu=9200 l2mtu=9204"
|
||||
done
|
||||
|
||||
|
||||
################################################################################
|
||||
# VLANs
|
||||
|
||||
# Internal ports 1-16
|
||||
ifaces=""
|
||||
for i in $(seq 1 16); do
|
||||
conf "/interface bridge port add bridge=$bridge interface=ether$i hw=yes pvid=$internal"
|
||||
ifaces="ether$i ${ifaces}"
|
||||
done
|
||||
|
||||
# also tag the bridge for the vlan interface we need later
|
||||
conf "/interface bridge vlan add bridge=$bridge tagged=$(commastring $tagged),$bridge untagged=$(commastring $ifaces) vlan-ids=$internal"
|
||||
|
||||
# Coworking 17-18
|
||||
ifaces=""
|
||||
for i in $(seq 17 18); do
|
||||
conf "/interface bridge port add bridge=$bridge interface=ether$i hw=yes pvid=$coworking"
|
||||
ifaces="ether$i ${ifaces}"
|
||||
done
|
||||
conf "/interface bridge vlan add bridge=$bridge tagged=$(commastring $tagged) untagged=$(commastring $ifaces) vlan-ids=$coworking"
|
||||
|
||||
# Server 19-20
|
||||
ifaces=""
|
||||
for i in $(seq 19 20); do
|
||||
conf "/interface bridge port add bridge=$bridge interface=ether$i hw=yes pvid=$server"
|
||||
ifaces="ether$i ${ifaces}"
|
||||
done
|
||||
conf "/interface bridge vlan add bridge=$bridge tagged=$(commastring $tagged) untagged=$(commastring $ifaces) vlan-ids=$server"
|
||||
|
||||
# Not modified 21-22
|
||||
|
||||
# Tagged 23-24, sfp-sfpplus1
|
||||
for iface in $tagged; do
|
||||
conf "/interface bridge port add bridge=$bridge interface=$iface hw=yes"
|
||||
done
|
||||
|
||||
conf "/interface bridge vlan add bridge=$bridge tagged=$(commastring $tagged) vlan-ids=$(commastring $other)"
|
||||
conf "/interface vlan add interface=$bridge vlan-id=$internal mtu=9200 name=internal"
|
||||
conf "/ipv6 address add eui-64=yes advertise=no address=$net_internal interface=internal"
|
||||
conf "/interface bridge set $bridge vlan-filtering=yes"
|
||||
|
||||
# Show neigh
|
||||
conf "/interface bridge host print where !local"
|
|
@ -1,103 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2020-08-03
|
||||
# Setup a standard crs326
|
||||
|
||||
|
||||
|
||||
if [ $# -ne 3 ]; then
|
||||
echo "$0 <ip> <hostname-to-be-setup>"
|
||||
echo "Example:"
|
||||
echo "$0 fe80::764d:28ff:fe09:9355%eth1 mikrotik-crs326-8 $(pass place6-linthal/mikrotik)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ip=$1; shift
|
||||
hostname=$1; shift
|
||||
password=$1; shift
|
||||
|
||||
target=$ip
|
||||
bridge=bridgevlans
|
||||
|
||||
internal=10
|
||||
coworking=15
|
||||
server=11
|
||||
other="8 16 18 33 34"
|
||||
|
||||
tagged="ether23 ether24 sfp-sfpplus1"
|
||||
|
||||
net_internal=2a0a:e5c0:2::/64
|
||||
|
||||
conf() {
|
||||
echo $@
|
||||
ssh admin@${target} "$@"
|
||||
}
|
||||
|
||||
commastring() {
|
||||
echo $@ | sed 's/ /,/g'
|
||||
}
|
||||
|
||||
set -x
|
||||
|
||||
|
||||
# do this out of band -- see mikrotik-setup.sh
|
||||
#conf "/password new-password=$password confirm-new-password=$password old-password=\"\""
|
||||
|
||||
conf "/system identity set name=$hostname"
|
||||
conf "/interface bridge add name=$bridge"
|
||||
|
||||
################################################################################
|
||||
# MTU
|
||||
|
||||
for i in $(seq 1 24); do
|
||||
conf "/interface ethernet set ether$i mtu=9200 l2mtu=9204"
|
||||
done
|
||||
|
||||
|
||||
for i in $(seq 1 2); do
|
||||
conf "/interface ethernet set sfp-sfpplus$i mtu=9200 l2mtu=9204"
|
||||
done
|
||||
|
||||
|
||||
################################################################################
|
||||
# VLANs
|
||||
|
||||
# Internal ports 1-16
|
||||
ifaces=""
|
||||
for i in $(seq 1 16); do
|
||||
conf "/interface bridge port add bridge=$bridge interface=ether$i hw=yes pvid=$internal"
|
||||
ifaces="ether$i ${ifaces}"
|
||||
done
|
||||
|
||||
# also tag the bridge for the vlan interface we need later
|
||||
conf "/interface bridge vlan add bridge=$bridge tagged=$(commastring $tagged),$bridge untagged=$(commastring $ifaces) vlan-ids=$internal"
|
||||
|
||||
# Coworking 17-18
|
||||
ifaces=""
|
||||
for i in $(seq 17 18); do
|
||||
conf "/interface bridge port add bridge=$bridge interface=ether$i hw=yes pvid=$coworking"
|
||||
ifaces="ether$i ${ifaces}"
|
||||
done
|
||||
conf "/interface bridge vlan add bridge=$bridge tagged=$(commastring $tagged) untagged=$(commastring $ifaces) vlan-ids=$coworking"
|
||||
|
||||
# Server 19-20
|
||||
ifaces=""
|
||||
for i in $(seq 19 20); do
|
||||
conf "/interface bridge port add bridge=$bridge interface=ether$i hw=yes pvid=$server"
|
||||
ifaces="ether$i ${ifaces}"
|
||||
done
|
||||
conf "/interface bridge vlan add bridge=$bridge tagged=$(commastring $tagged) untagged=$(commastring $ifaces) vlan-ids=$server"
|
||||
|
||||
# Not modified 21-22
|
||||
|
||||
# Tagged 23-24, sfp-sfpplus1
|
||||
for iface in ; do
|
||||
conf "/interface bridge port add bridge=$bridge interface=$iface hw=yes"
|
||||
done
|
||||
|
||||
conf "/interface bridge vlan add bridge=$bridge tagged=$(commastring $tagged) lan-ids=$(commastring $other)"
|
||||
conf "/interface vlan add interface=$bridge vlan-id=$internal name=MGMT"
|
||||
conf "/ipv6 address add eui-64=yes advertise=no address=$net_internal interface=MGMT"
|
||||
conf "/interface bridge set $bridge vlan-filtering=yes"
|
||||
|
||||
# Show neigh
|
||||
conf "/interface bridge host print where !local"
|
|
@ -13,32 +13,34 @@ to_monitor=$1
|
|||
set -e
|
||||
|
||||
depends="cephrundir"
|
||||
osd=""
|
||||
conf="/etc/monit/conf.d/$to_monitor"
|
||||
|
||||
daemon=$(echo $to_monitor | awk -F . '{ print $1 }')
|
||||
id=$(echo $to_monitor | awk -F . '{ print $2 }')
|
||||
if echo $to_monitor | grep ^osd; then
|
||||
depends="${depends}, ${to_monitor}-whoami"
|
||||
osd="yes"
|
||||
osdid=$(echo $to_monitor | cut -d. -f2)
|
||||
fi
|
||||
|
||||
|
||||
case "$daemon" in
|
||||
osd)
|
||||
depends="${depends}, ${to_monitor}-whoami"
|
||||
cat > "$conf" <<EOF
|
||||
check file ${to_monitor}-whoami with path /var/lib/ceph/osd/ceph-${id}/whoami
|
||||
if content != "${id}" then alert
|
||||
EOF
|
||||
|
||||
;;
|
||||
esac
|
||||
|
||||
cat >> "$conf" <<EOF
|
||||
cat > "$conf" <<EOF
|
||||
# Generated by $0
|
||||
check process ${to_monitor} with pidfile /var/run/ceph/${to_monitor}.pid
|
||||
start program = "/usr/bin/ceph-${daemon} -i ${id} --pid-file /var/run/ceph/${daemon}.${id}.pid -c /etc/ceph/ceph.conf --cluster ceph --setuser ceph --setgroup ceph" with timeout 3600 seconds
|
||||
stop program = "/usr/bin/pkill -f '/usr/bin/ceph-${daemon} -i ${id}'"
|
||||
start program = "/etc/init.d/ceph start ${to_monitor}" with timeout 60 seconds
|
||||
stop program = "/etc/init.d/ceph stop ${to_monitor}"
|
||||
|
||||
group ceph
|
||||
depends on $depends
|
||||
EOF
|
||||
|
||||
if [ "$osd" ]; then
|
||||
cat >> "$conf" <<EOF
|
||||
check file ${to_monitor}-whoami with path /var/lib/ceph/osd/ceph-${osdid}/whoami
|
||||
if content != "${osdid}" then alert
|
||||
EOF
|
||||
|
||||
fi
|
||||
|
||||
/etc/init.d/monit restart
|
||||
# monit reload
|
||||
sleep 1
|
||||
|
|
|
@ -1,8 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
for c in "$@"; do
|
||||
echo $c
|
||||
start=$(grep "start program" $c | sed -e 's/.*start program = "//' -e 's/".*//')
|
||||
echo $start
|
||||
( $start & )
|
||||
done
|
|
@ -1,25 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2020-12-07, Nico Schottelius
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "$0 hostname [hostname...]"
|
||||
echo " hostname: which mystrom to connect to"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
while [ $# -ge 1 ]; do
|
||||
hostname=$1; shift
|
||||
|
||||
http --json GET "http://${hostname}/report"
|
||||
curl -s --location \
|
||||
--request GET \
|
||||
"http://${hostname}/relay?state=0"
|
||||
http --json GET "http://${hostname}/report"
|
||||
echo "Waiting..."
|
||||
sleep 10
|
||||
curl -s --location \
|
||||
--request GET \
|
||||
"http://${hostname}/relay?state=1"
|
||||
http --json GET "http://${hostname}/report"
|
||||
|
||||
done
|
|
@ -1,17 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2020-12-07, Nico Schottelius
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "$0 hostname [hostname...]"
|
||||
echo " hostname: which mystrom to connect to"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
while [ $# -ge 1 ]; do
|
||||
hostname=$1; shift
|
||||
|
||||
curl -s --location \
|
||||
--request GET \
|
||||
"http://${hostname}/relay?state=0"
|
||||
|
||||
done
|
|
@ -1,16 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2020-12-07, Nico Schottelius
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "$0 hostname [hostname...]"
|
||||
echo " hostname: which mystrom to connect to"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
while [ $# -ge 1 ]; do
|
||||
hostname=$1; shift
|
||||
|
||||
curl -s --location \
|
||||
--request GET \
|
||||
"http://${hostname}/relay?state=1"
|
||||
done
|
|
@ -1,15 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2020-12-07, Nico Schottelius
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "$0 hostname [hostname...]"
|
||||
echo " hostname: which mystrom to connect to"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
while [ $# -ge 1 ]; do
|
||||
hostname=$1; shift
|
||||
|
||||
http --json GET "http://${hostname}/report"
|
||||
|
||||
done
|
|
@ -1,15 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2020-12-07, Nico Schottelius
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "$0 hostname [hostname...]"
|
||||
echo " hostname: which mystrom to connect to"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
while [ $# -ge 1 ]; do
|
||||
hostname=$1; shift
|
||||
|
||||
http --json "http://${hostname}/temp"
|
||||
|
||||
done
|
|
@ -1,18 +0,0 @@
|
|||
#!/bin/sh
|
||||
# 2020-12-07, Nico Schottelius
|
||||
|
||||
set -x
|
||||
exec >>/tmp/foo
|
||||
exec 2>&1
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
echo "$0 hostname [hostname...]"
|
||||
echo " hostname: which mystrom to connect to"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
while [ $# -ge 1 ]; do
|
||||
hostname=$1; shift
|
||||
|
||||
http --json GET "http://${hostname}/toggle"
|
||||
done
|
|
@ -1,18 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# This script extract VM IDs and filter them if a pattern is provided as first
|
||||
# argument.
|
||||
|
||||
set -e
|
||||
|
||||
# Extract instances from ONE.
|
||||
instances=$(onevm list --csv | tail -n +2)
|
||||
|
||||
# Filter them is a pattern has been provided.
|
||||
if [ "$1" != "" ]; then
|
||||
filtered_instances="$(echo "$instances" | grep -E "$1")"
|
||||
instances="$filtered_instances"
|
||||
fi
|
||||
|
||||
# Outputs instance IDs.
|
||||
echo "$instances" | cut -d ',' -f 1 -
|
|
@ -1,18 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# This script is expected to run on the ONE server (i.e.
|
||||
# opennebula.ungleich.ch).
|
||||
|
||||
set -e
|
||||
|
||||
# Fetch instance list from STDIN.
|
||||
instances=$(cat -)
|
||||
|
||||
# For every instance, extract relevant information:
|
||||
for id in $instances; do
|
||||
nics_raw="$(onevm show --xml $id | xml_grep 'NIC')"
|
||||
networks="$(echo $nics_raw | xml_grep --text_only 'NETWORK' | tr '\n' ',' | sed 's/,$//')"
|
||||
ip="$(echo $nics_raw | xml_grep --text_only 'IP' | tr '\n' ',' | sed 's/,$//')"
|
||||
ip6="$(echo $nics_raw | xml_grep --text_only 'IP6_GLOBAL' | tr '\n' ',' | sed 's/,$//')"
|
||||
echo "$id,$networks,$ip,$ip6"
|
||||
done
|
|
@ -1,25 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
vpndir=/home/nico/vcs/ungleich-dot-cdist/type/__ungleich_wireguard/files
|
||||
|
||||
for i in $(ls -1 viirb*public_key); do
|
||||
viirb=${i%.public_key};
|
||||
num=${viirb#viirb};
|
||||
hex=$(printf "%0.2x" $num);
|
||||
pubkey=$(cat $i);
|
||||
network="2a0a:e5c1:3${hex}::/48";
|
||||
peerfilename=vpn-2a0ae5c1300.ungleich.ch.peer${hex}
|
||||
|
||||
peerfile=${vpndir}/${peerfilename}
|
||||
if [ ! -f "${peerfile}" ]; then
|
||||
echo "VIIRB $num / peer $hex missing, recreating"
|
||||
cat <<EOF > "${peerfile}"
|
||||
# viirb${num}, $(date +%F)
|
||||
[Peer]
|
||||
PublicKey = $pubkey
|
||||
AllowedIPs = ${network}
|
||||
|
||||
EOF
|
||||
fi
|
||||
|
||||
done
|
|
@ -1,173 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates almalinux images for OpenNebula.
|
||||
|
||||
# Depends on the following packages (as of Almalinux 8.3):
|
||||
# qemu-img util-linux coreutils dnf curl e2fsprogs
|
||||
|
||||
# Run locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=9.3
|
||||
ARCH=x86_64
|
||||
IMAGE_PATH=almalinux-$RELEASE-$(date --iso-8601).img
|
||||
IMAGE_SIZE=10G
|
||||
LOOPBACK_DEVICE=/dev/loop0
|
||||
|
||||
# since v9.3, there is an issue is the compilation of RHEL 9 and it's derivatives to use the x86-64-v2 instruction set.
|
||||
# refer to Task#12351, change cpu type
|
||||
# TODO: find the package definition and built ourself, publish in some RPM repository.
|
||||
ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v6.6.1/one-context-6.6.1-1.el8.noarch.rpm"
|
||||
ONE_CONTEXT_RPM_PATH=/root/one-context.rpm
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
losetup -d "$LOOPBACK_DEVICE"
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f '/etc/almalinux-release' ]; then
|
||||
echo "WARNING: this script has been designed to run on a AlmaLinux system." >&2
|
||||
echo "WARNING: Not running AlmaLinux. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base RAW image (no LOOPBACK support in RHEL/AlmaLinux).
|
||||
qemu-img create -f raw "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
losetup "$LOOPBACK_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
{
|
||||
sfdisk --no-reread "$LOOPBACK_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
} || true
|
||||
|
||||
partprobe "$LOOPBACK_DEVICE"
|
||||
|
||||
mkfs.ext4 "${LOOPBACK_DEVICE}p1"
|
||||
mkfs.ext4 "${LOOPBACK_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
mount "${LOOPBACK_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${LOOPBACK_DEVICE}p1" /mnt/boot
|
||||
|
||||
dnf -y \
|
||||
--releasever=$RELEASE \
|
||||
--installroot=/mnt \
|
||||
--disablerepo='*' \
|
||||
--enablerepo=baseos \
|
||||
--enablerepo=appstream \
|
||||
--enablerepo=extras \
|
||||
--setopt=install_weak_deps=False install \
|
||||
bash basesystem systemd systemd-udev dnf almalinux-release
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
# Note: as of writing, one-context does not support NetworkManager or
|
||||
# systemd-networkd.
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# See https://github.com/OpenNebula/addon-context-linux/issues/121 for details.
|
||||
# network-scripts.x86_64 : Legacy scripts for manipulating of network devices
|
||||
#run_root dnf -y install network-scripts
|
||||
run_root dnf -y install NetworkManager
|
||||
|
||||
# Install (magic?) one-context RPM and hope things works as expected.
|
||||
curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH"
|
||||
run_root dnf -y install "$ONE_CONTEXT_RPM_PATH"
|
||||
run_root rm "$ONE_CONTEXT_RPM_PATH"
|
||||
|
||||
# Install resize2fs, which is required to resize the root file-system.
|
||||
run_root dnf -y install e2fsprogs
|
||||
|
||||
# Initalize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
|
||||
# Install and configure NTP client.
|
||||
run_root dnf install -y chrony
|
||||
run_root systemctl enable chronyd.service
|
||||
|
||||
# Install kernel and bootloader.
|
||||
# Note: linux-firmware is not required our environment and takes almost 200M
|
||||
# uncompressed but is a direct dependency of kernel-core...
|
||||
run_root dnf -y install kernel grub2
|
||||
|
||||
# Add support for virtio block devices at boot time.
|
||||
cat > /mnt/etc/dracut.conf.d/virtio-blk.conf <<EOF
|
||||
add_drivers="virtio-blk"
|
||||
EOF
|
||||
kernel_version=$(ls /mnt/boot | grep "vmlinuz.*.$ARCH" | cut -d- -f2-)
|
||||
run_root dracut --force --kver $kernel_version
|
||||
|
||||
# Configure grub2.
|
||||
run_root grub2-install --target=i386-pc "${LOOPBACK_DEVICE}"
|
||||
run_root grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
|
||||
# Install en configure SSH daemon.
|
||||
run_root dnf -y install openssh-server
|
||||
run_root systemctl enable sshd
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${LOOPBACK_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${LOOPBACK_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
# Note: build logs could be removed as well.
|
||||
run_root dnf clean all
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,184 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates Alpine images for OpenNebula.
|
||||
#
|
||||
# Test image locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
RELEASE=v3.20
|
||||
ARCH=x86_64
|
||||
IMAGE_PATH=alpine-$RELEASE-$(date -I).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd0
|
||||
APK_MIRROR=http://dl-2.alpinelinux.org/alpine/ # Mind the trailing /
|
||||
|
||||
ONE_CONTEXT_APK_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v6.6.1/one-context-6.6.1-r1.apk"
|
||||
ONE_CONTEXT_APK_PATH=/root/one-context.apk
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$(lsb_release --short --id)" != "Alpine" ]; then
|
||||
echo "WARNING: this script has been designed to run on an Alpine system." >&2
|
||||
echo "WARNING: Not running Alpine. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Wait for qemu-nbd to settle.
|
||||
sleep 1
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
|
||||
# TODO: Remove bash
|
||||
apk add -U -X $APK_MIRROR$RELEASE/main/ \
|
||||
--allow-untrusted \
|
||||
--arch="$ARCH" \
|
||||
--root=/mnt \
|
||||
--initdb \
|
||||
alpine-base alpine-conf openssh tzdata gnupg haveged bash eudev
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize networking.
|
||||
run_root setup-interfaces -i << EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
EOF
|
||||
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# Configure package sources and update package index.
|
||||
run_root setup-timezone -z UTC
|
||||
if [ "$RELEASE" = "edge" ]
|
||||
then
|
||||
cat >/mnt/etc/apk/repositories <<EOF
|
||||
$APK_MIRROR$RELEASE/main
|
||||
$APK_MIRROR$RELEASE/community
|
||||
$APK_MIRROR$RELEASE/testing
|
||||
EOF
|
||||
else
|
||||
cat >/mnt/etc/apk/repositories <<EOF
|
||||
$APK_MIRROR$RELEASE/main
|
||||
$APK_MIRROR$RELEASE/community
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Update package index.
|
||||
run_root apk update
|
||||
|
||||
# Initialize base services.
|
||||
for i in devfs dmesg hwdrivers mdev; do
|
||||
run_root rc-update add $i sysinit
|
||||
done
|
||||
|
||||
for i in bootmisc hostname hwclock modules sysctl syslog acpid networking haveged; do
|
||||
run_root rc-update add $i boot
|
||||
done
|
||||
|
||||
for i in ntpd sshd crond; do
|
||||
run_root rc-update add $i default
|
||||
done
|
||||
|
||||
for i in mount-ro killprocs savecache; do
|
||||
run_root rc-update add $i shutdown
|
||||
done
|
||||
|
||||
# Set hostname.
|
||||
run_root setup-hostname -n alpine
|
||||
|
||||
#debug
|
||||
blkid
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
#debug
|
||||
echo $boot_uuid
|
||||
echo $root_uuid
|
||||
cat /mnt/etc/fstab
|
||||
run_root cat /etc/fstab
|
||||
|
||||
# Install kernel and bootloader.
|
||||
run_root apk add linux-virt grub grub-bios
|
||||
echo 'GRUB_CMDLINE_LINUX_DEFAULT="quiet rootfstype=ext4"' >> /mnt/etc/default/grub
|
||||
cat /mnt/etc/default/grub
|
||||
run_root grub-install --target=i386-pc $NBD_DEVICE
|
||||
run_root grub-mkconfig -o /boot/grub/grub.cfg
|
||||
|
||||
#debug
|
||||
run_root cat /etc/default/grub
|
||||
run_root cat /etc/fstab
|
||||
|
||||
# Install one-context APK and hope things works as expected.
|
||||
curl -L "$ONE_CONTEXT_APK_URL" > "/mnt$ONE_CONTEXT_APK_PATH"
|
||||
run_root apk add --allow-untrusted "$ONE_CONTEXT_APK_PATH"
|
||||
run_root rm "$ONE_CONTEXT_APK_PATH"
|
||||
|
||||
# Remove resolvconf: handled by uncloud-init.
|
||||
run_root rm /etc/resolv.conf
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,169 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates Debian images for OpenNebula.
|
||||
#
|
||||
# Test image locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
IMAGE_PATH=arch-$(date --iso-8601).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd0
|
||||
|
||||
ONE_CONTEXT_VERSION=6.8.1
|
||||
ONE_CONTEXT_SOURCE_ARCHIVE="https://github.com/OpenNebula/one-apps/archive/refs/tags/v${ONE_CONTEXT_VERSION:?}.tar.gz"
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$(lsb_release --short --id)" != "Arch" ]; then
|
||||
echo "WARNING: this script has been designed to run on Arch Linux." >&2
|
||||
echo "WARNING: Not running Arch. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Wait for qemu-nbd to settle.
|
||||
sleep 1
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
# Install base system.
|
||||
pacstrap /mnt base base-devel openssh
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# Configure package sources and update package index.
|
||||
cat > /mnt/etc/pacman.d/mirrorlist << EOF
|
||||
##
|
||||
## Arch Linux repository mirrorlist
|
||||
## Generated on 2024-03-07
|
||||
##
|
||||
|
||||
## Switzerland
|
||||
Server = http://pkg.adfinis.com/archlinux/\$repo/os/\$arch
|
||||
Server = https://pkg.adfinis.com/archlinux/\$repo/os/\$arch
|
||||
Server = http://mirror.init7.net/archlinux/\$repo/os/\$arch
|
||||
Server = https://mirror.init7.net/archlinux/\$repo/os/\$arch
|
||||
Server = http://mirror.metanet.ch/archlinux/\$repo/os/\$arch
|
||||
Server = https://mirror.metanet.ch/archlinux/\$repo/os/\$arch
|
||||
Server = http://mirror.puzzle.ch/archlinux/\$repo/os/\$arch
|
||||
Server = https://mirror.puzzle.ch/archlinux/\$repo/os/\$arch
|
||||
Server = https://mirror.ungleich.ch/mirror/packages/archlinux/\$repo/os/\$arch
|
||||
EOF
|
||||
run_root pacman -Syu --noconfirm
|
||||
|
||||
# Initalize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=${boot_uuid:?} /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=${root_uuid:?} / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/one-apps for details.
|
||||
run_root pacman -Sy curl tar rsync --noconfirm
|
||||
run_root curl -L "$ONE_CONTEXT_SOURCE_ARCHIVE" -o one-context.tar.gz
|
||||
run_root tar xf one-context.tar.gz
|
||||
run_root rsync -ravh "one-apps-${ONE_CONTEXT_VERSION:?}/context-linux/src/"* /
|
||||
run_root rm -r "one-apps-${ONE_CONTEXT_VERSION:?}"
|
||||
run_root cp -r /usr/lib/systemd/system/one-context.service##arch.one /usr/lib/systemd/system/one-context.service
|
||||
run_root systemctl enable one-context.service
|
||||
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
run_root systemctl enable systemd-timesyncd.service
|
||||
# Install kernel and generate initramfs.
|
||||
run_root ln -s /usr/lib/modules /lib/modules
|
||||
run_root pacman -Sy mkinitcpio linux linux-firmware --noconfirm
|
||||
sed -i '/MODULES=/c\MODULES=(virtio virtio_blk virtio_pci virtio_net)' /mnt/etc/mkinitcpio.conf
|
||||
run_root mkinitcpio -p linux
|
||||
|
||||
# Install and configure bootloader.
|
||||
run_root pacman -Sy grub --noconfirm
|
||||
run_root grub-install --target=i386-pc "${NBD_DEVICE}"
|
||||
run_root grub-mkconfig -o /boot/grub/grub.cfg
|
||||
|
||||
# Install and configure a SSH daemon.
|
||||
run_root pacman -Sy openssh netctl --noconfirm
|
||||
run_root systemctl enable sshd
|
||||
|
||||
# Install haveged due to lack of entropy in ONE environment.
|
||||
run_root pacman -Sy haveged --noconfirm
|
||||
run_root systemctl enable haveged.service
|
||||
|
||||
# Make sure core services are enabled.
|
||||
run_root systemctl enable systemd-networkd
|
||||
run_root systemctl enable systemd-resolved
|
||||
run_root systemctl enable systemd-timesyncd
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
echo "arch" > /mnt/etc/hostname
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,170 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates CentOS images for OpenNebula.
|
||||
|
||||
# Depends on the following packages (as of CentOS 8):
|
||||
# qemu-img util-linux coreutils dnf curl e2fsprogs
|
||||
|
||||
# Run locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=8
|
||||
ARCH=x86_64
|
||||
IMAGE_PATH=centos-$RELEASE-$(date --iso-8601).img
|
||||
IMAGE_SIZE=10G
|
||||
LOOPBACK_DEVICE=/dev/loop0
|
||||
|
||||
# TODO: find the package definition and built ourself, publish in some RPM repository.
|
||||
ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context-5.10.0-1.el8.noarch.rpm"
|
||||
ONE_CONTEXT_RPM_PATH=/root/one-context.rpm
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
losetup -d "$LOOPBACK_DEVICE"
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f '/etc/centos-release' ]; then
|
||||
echo "WARNING: this script has been designed to run on a CentOS system." >&2
|
||||
echo "WARNING: Not running CentOS. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base RAW image (no LOOPBACK support in RHEL/CentOS).
|
||||
qemu-img create -f raw "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
losetup "$LOOPBACK_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
{
|
||||
sfdisk --no-reread "$LOOPBACK_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
} || true
|
||||
|
||||
partprobe "$LOOPBACK_DEVICE"
|
||||
|
||||
mkfs.ext4 "${LOOPBACK_DEVICE}p1"
|
||||
mkfs.ext4 "${LOOPBACK_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
mount "${LOOPBACK_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${LOOPBACK_DEVICE}p1" /mnt/boot
|
||||
|
||||
dnf -y \
|
||||
--releasever=$RELEASE \
|
||||
--installroot=/mnt \
|
||||
--disablerepo='*' \
|
||||
--enablerepo=BaseOS \
|
||||
--enablerepo=AppStream \
|
||||
--enablerepo=extras \
|
||||
--setopt=install_weak_deps=False install \
|
||||
bash basesystem systemd systemd-udev dnf centos-release
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
# Note: as of writing, one-context does not support NetworkManager or
|
||||
# systemd-networkd.
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# See https://github.com/OpenNebula/addon-context-linux/issues/121 for details.
|
||||
# network-scripts.x86_64 : Legacy scripts for manipulating of network devices
|
||||
run_root dnf -y install network-scripts
|
||||
|
||||
# Install (magic?) one-context RPM and hope things works as expected.
|
||||
curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH"
|
||||
run_root dnf -y install "$ONE_CONTEXT_RPM_PATH"
|
||||
run_root rm "$ONE_CONTEXT_RPM_PATH"
|
||||
|
||||
# Install resize2fs, which is required to resize the root file-system.
|
||||
run_root dnf -y install e2fsprogs
|
||||
|
||||
# Initalize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
|
||||
# Install and configure NTP client.
|
||||
run_root dnf install -y chrony
|
||||
run_root systemctl enable chronyd.service
|
||||
|
||||
# Install kernel and bootloader.
|
||||
# Note: linux-firmware is not required our environment and takes almost 200M
|
||||
# uncompressed but is a direct dependency of kernel-core...
|
||||
run_root dnf -y install kernel grub2
|
||||
|
||||
# Add support for virtio block devices at boot time.
|
||||
cat > /mnt/etc/dracut.conf.d/virtio-blk.conf <<EOF
|
||||
add_drivers="virtio-blk"
|
||||
EOF
|
||||
kernel_version=$(ls /mnt/boot | grep "vmlinuz.*.$ARCH" | cut -d- -f2-)
|
||||
run_root dracut --force --kver $kernel_version
|
||||
|
||||
# Configure grub2.
|
||||
run_root grub2-install --target=i386-pc "${LOOPBACK_DEVICE}"
|
||||
run_root grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
|
||||
# Install en configure SSH daemon.
|
||||
run_root dnf -y install openssh-server
|
||||
run_root systemctl enable sshd
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${LOOPBACK_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${LOOPBACK_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
# Note: build logs could be removed as well.
|
||||
run_root dnf clean all
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,186 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates CentOS images for OpenNebula. Expected to run on CentOS 7.
|
||||
|
||||
# Depends on the following packages:
|
||||
# qemu-img util-linux coreutils dnf curl e2fsprogs cryptsetup parted
|
||||
|
||||
# Run locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=raw
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
RELEASE=7
|
||||
ARCH=x86_64
|
||||
IMAGE_PATH=centos-luks-$RELEASE-$(date --iso-8601).img
|
||||
IMAGE_SIZE=10G
|
||||
LOOPBACK_DEVICE=/dev/loop0
|
||||
LUKS_DEVICE_NAME=cryptroot
|
||||
LUKS_DEVICE="/dev/mapper/$LUKS_DEVICE_NAME"
|
||||
DISABLED_ONE_SCRIPTS="loc-20-set-username-password loc-22-ssh_public_key"
|
||||
|
||||
ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.10.0/one-context-5.10.0-1.el$RELEASE.noarch.rpm"
|
||||
ONE_CONTEXT_RPM_PATH=/root/one-context.rpm
|
||||
|
||||
# Get LUKS passphrase.
|
||||
if [ -z "$1" ]; then
|
||||
echo "Usage: centos7-build-luks-opennebula-image.sh LUKS_PASSPHRASE"
|
||||
exit 1
|
||||
fi
|
||||
LUKS_PASSPHRASE="$1"
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
losetup -d "$LOOPBACK_DEVICE"
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f '/etc/centos-release' ]; then
|
||||
echo "WARNING: this script has been designed to run on a CentOS system." >&2
|
||||
echo "WARNING: Not running CentOS. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Install requirements
|
||||
yum install -y qemu cryptsetup dnf
|
||||
|
||||
# Create base RAW image (no LOOPBACK support in RHEL/CentOS).
|
||||
qemu-img create -f raw "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
losetup "$LOOPBACK_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
parted --script "$LOOPBACK_DEVICE" \
|
||||
mklabel msdos \
|
||||
mkpart primary ext4 1M 500M \
|
||||
mkpart primary ext4 500M 100%
|
||||
|
||||
partprobe "$LOOPBACK_DEVICE"
|
||||
|
||||
mkfs.ext4 "${LOOPBACK_DEVICE}p1"
|
||||
echo -n "$LUKS_PASSPHRASE" | cryptsetup luksFormat -v -d - "${LOOPBACK_DEVICE}p2"
|
||||
echo -n "$LUKS_PASSPHRASE" | cryptsetup open -v -d - "${LOOPBACK_DEVICE}p2" "$LUKS_DEVICE_NAME"
|
||||
mkfs.ext4 "$LUKS_DEVICE"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
mount "${LUKS_DEVICE}" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${LOOPBACK_DEVICE}p1" /mnt/boot
|
||||
|
||||
# Add --setopt=reposdir=rpm-repositories if you do not run on CentOS 7.
|
||||
dnf -y \
|
||||
--releasever=$RELEASE \
|
||||
--installroot=/mnt \
|
||||
--disablerepo='*' \
|
||||
--enablerepo=base \
|
||||
--enablerepo=extras \
|
||||
--setopt=install_weak_deps=False install \
|
||||
bash basesystem systemd dnf centos-release cryptsetup dnf passwd
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
# Note: as of writing, one-context does not support NetworkManager or
|
||||
# systemd-networkd.
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
EOF
|
||||
|
||||
# Setup root password
|
||||
run_root passwd
|
||||
|
||||
# Install one-context RPM and hope things works as expected.
|
||||
curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH"
|
||||
run_root dnf -y install "$ONE_CONTEXT_RPM_PATH"
|
||||
run_root rm "$ONE_CONTEXT_RPM_PATH"
|
||||
for script in $DISABLED_ONE_SCRIPTS; do
|
||||
run_root rm "/etc/one-context.d/$script"
|
||||
done
|
||||
|
||||
# Install resize2fs, which is required to resize the root file-system.
|
||||
run_root dnf -y install e2fsprogs
|
||||
|
||||
# Initalize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
|
||||
# Install and configure NTP client.
|
||||
run_root dnf install -y chrony
|
||||
run_root systemctl enable chronyd.service
|
||||
|
||||
# Install kernel and bootloader.
|
||||
# Note: linux-firmware is not required our environment and takes almost 200M
|
||||
# uncompressed but is a direct dependency of kernel-core...
|
||||
run_root dnf -y install kernel grub2
|
||||
|
||||
# Add support for virtio block devices at boot time, configure bootloader.
|
||||
cat > /mnt/etc/dracut.conf.d/virtio-blk.conf <<EOF
|
||||
add_drivers="virtio-blk"
|
||||
EOF
|
||||
kernel_version=$(ls /mnt/boot | grep "vmlinuz.*.$ARCH" | cut -d- -f2-)
|
||||
luks_uuid=$(blkid -o value "${LOOPBACK_DEVICE}p2" | head -n 1)
|
||||
echo "cryptroot UUID=$luks_uuid luks,timeout=30" >> /mnt/etc/crypttab
|
||||
|
||||
run_root dracut -v --force --kver $kernel_version
|
||||
run_root grub2-install --target=i386-pc "${LOOPBACK_DEVICE}"
|
||||
run_root grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
|
||||
# Install en configure SSH daemon.
|
||||
run_root dnf -y install openssh-server
|
||||
run_root systemctl enable sshd
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid -o value "${LOOPBACK_DEVICE}p1" | head -n 1)
|
||||
root_uuid=$(blkid -o value "$LUKS_DEVICE" | head -n 1)
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
# Note: build logs could be removed as well.
|
||||
run_root dnf clean all
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
||||
|
||||
# Cleanup!
|
||||
cleanup
|
|
@ -1,171 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates Debian images for OpenNebula.
|
||||
#
|
||||
# Test image locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=bookworm # 12.X
|
||||
ARCH=amd64
|
||||
IMAGE_PATH=debian-$RELEASE-$(date --iso-8601).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd0
|
||||
HOSTNAME=debian
|
||||
|
||||
# TODO: find the package definition and built ourself, publish in some RPM repository.
|
||||
ONE_CONTEXT_DEB_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v6.6.1/one-context_6.6.1-1.deb"
|
||||
ONE_CONTEXT_DEB_PATH=/root/one-context.deb
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ $(lsb_release --short --id) != "Debian" ]; then
|
||||
echo "WARNING: this script has been designed to run on an Debian system." >&2
|
||||
echo "WARNING: Not running Debian. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Wait for qemu-nbd to settle.
|
||||
sleep 1
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
debootstrap \
|
||||
--arch=$ARCH $RELEASE \
|
||||
/mnt http://ftp.ch.debian.org/debian
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 $HOSTNAME localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 $HOSTNAME localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
run_root hostnamectl set-hostname $HOSTNAME
|
||||
|
||||
# Configure package sources and update package index.
|
||||
cat >/mnt/etc/apt/sources.list <<EOF
|
||||
# Stable
|
||||
deb http://ftp.ch.debian.org/debian $RELEASE main contrib non-free
|
||||
deb-src http://ftp.ch.debian.org/debian $RELEASE main contrib non-free
|
||||
|
||||
# Security updates
|
||||
deb http://ftp.ch.debian.org/debian $RELEASE-updates main contrib non-free
|
||||
deb-src http://ftp.ch.debian.org/debian $RELEASE-updates main contrib non-free
|
||||
|
||||
# Backports
|
||||
#deb http://ftp.ch.debian.org/debian $RELEASE-backports main
|
||||
#deb-src http://ftp.ch.debian.org/debian $RELEASE-backports main
|
||||
EOF
|
||||
run_root apt-get update
|
||||
|
||||
# Install (magic?) one-context DEB and hope things works as expected.
|
||||
curl -L "$ONE_CONTEXT_DEB_URL" > "/mnt$ONE_CONTEXT_DEB_PATH"
|
||||
run_root apt-get -y install "$ONE_CONTEXT_DEB_PATH"
|
||||
run_root rm "$ONE_CONTEXT_DEB_PATH"
|
||||
|
||||
# Manually install legacy network scripts used by one-context.
|
||||
run_root apt-get -y install ifupdown systemd-timesyncd.service
|
||||
|
||||
# Initalize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
run_root systemctl enable systemd-timesyncd.service
|
||||
|
||||
# Install kernel and bootloader. Do not autoconfigure grub.
|
||||
run_root 'echo "grub-pc grub-pc/install_devices_empty boolean true" | debconf-set-selections'
|
||||
run_root DEBIAN_FRONTEND=noninteractive apt-get -y install locales linux-image-amd64 grub-pc
|
||||
|
||||
# Configure grub.
|
||||
run_root grub-install --target=i386-pc "${NBD_DEVICE}"
|
||||
run_root grub-mkconfig -o /boot/grub/grub.cfg
|
||||
|
||||
# Install en configure SSH daemon.
|
||||
run_root apt-get -y install openssh-server
|
||||
|
||||
# Install haveged due to lack of entropy in ONE environment.
|
||||
run_root apt-get -y install haveged
|
||||
run_root systemctl enable haveged.service
|
||||
|
||||
# Generate locales.
|
||||
run_root 'sed -i "s/^# *\(en_GB.UTF-8\)/\1/" etc/locale.gen'
|
||||
run_root locale-gen
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
run_root apt-get clean
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,162 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates Devuan images for OpenNebula.
|
||||
#
|
||||
# Test image locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=daedalus
|
||||
ARCH=amd64
|
||||
IMAGE_PATH=devuan-$RELEASE-$(date --iso-8601).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd4
|
||||
HOSTNAME=devuan
|
||||
MIRROR=http://pkgmaster.devuan.org/merged
|
||||
|
||||
ONE_CONTEXT_DEB_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v6.6.1/one-context_6.6.1-1.deb"
|
||||
ONE_CONTEXT_DEB_PATH=/root/one-context.deb
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ $(lsb_release --short --id) != "Devuan" ]; then
|
||||
echo "WARNING: this script has been designed to run on a Devuan system." >&2
|
||||
echo "WARNING: Not running Debian. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Wait for qemu-nbd to settle.
|
||||
sleep 1
|
||||
|
||||
# Don't forget to cleanup, even if the script crash.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
debootstrap \
|
||||
--arch=$ARCH $RELEASE \
|
||||
/mnt $MIRROR
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 $HOSTNAME localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 $HOSTNAME localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
run_root hostname $HOSTNAME
|
||||
|
||||
# Configure package sources and update package index.
|
||||
cat >/mnt/etc/apt/sources.list <<EOF
|
||||
# Stable
|
||||
deb $MIRROR $RELEASE main contrib non-free
|
||||
deb-src $MIRROR $RELEASE main contrib non-free
|
||||
|
||||
# Security updates
|
||||
deb $MIRROR $RELEASE-updates main contrib non-free
|
||||
deb-src $MIRROR $RELEASE-updates main contrib non-free
|
||||
|
||||
# Backports
|
||||
#deb $MIRROR $RELEASE-backports main
|
||||
#deb-src $MIRROR $RELEASE-backports main
|
||||
EOF
|
||||
run_root apt-get update
|
||||
|
||||
# Install (magic?) one-context DEB and hope things works as expected.
|
||||
curl -L "$ONE_CONTEXT_DEB_URL" > "/mnt$ONE_CONTEXT_DEB_PATH"
|
||||
run_root apt-get -y install "$ONE_CONTEXT_DEB_PATH"
|
||||
run_root rm "$ONE_CONTEXT_DEB_PATH"
|
||||
|
||||
# Manually install legacy network scripts used by one-context.
|
||||
run_root apt-get -y install ifupdown
|
||||
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
|
||||
# Install kernel and bootloader. Do not autoconfigure grub.
|
||||
run_root 'echo "grub-pc grub-pc/install_devices_empty boolean true" | debconf-set-selections'
|
||||
run_root DEBIAN_FRONTEND=noninteractive apt-get -y install locales linux-image-amd64 grub-pc
|
||||
|
||||
# Configure grub.
|
||||
run_root grub-install --target=i386-pc "${NBD_DEVICE}"
|
||||
run_root grub-mkconfig -o /boot/grub/grub.cfg
|
||||
|
||||
# Install and configure SSH daemon.
|
||||
run_root apt-get -y install openssh-server
|
||||
|
||||
# Install haveged due to lack of entropy in ONE environment.
|
||||
run_root apt-get -y install haveged
|
||||
run_root update-rc.d haveged defaults
|
||||
|
||||
# Generate locales.
|
||||
run_root 'sed -i "s/^# *\(en_GB.UTF-8\)/\1/" etc/locale.gen'
|
||||
run_root locale-gen
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
run_root apt-get clean
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
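# As with the other image-build scripts in this repository, the resulting
# image can be test-booted locally (without network) with:
#   qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE_PATH,format=qcow2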
|
|
@ -1,223 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates FreeBSD images for OpenNebula, heavily inspired by
|
||||
# srht's FreeBSD build image definition. It assumes running on a FreeBSD host.
|
||||
# ZFS installation as documented by the FreeBSD project
|
||||
# https://wiki.freebsd.org/RootOnZFS/GPTZFSBoot
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=14.0-RELEASE
|
||||
ARCH=amd64
|
||||
IMAGE_PATH_ZFS="freebsd-zfs-$RELEASE-$(date -I).img.qcow2"
|
||||
IMAGE_PATH_UFS="freebsd-ufs-$RELEASE-$(date -I).img.qcow2"
|
||||
IMAGE_SIZE=10G
|
||||
|
||||
# Comment out to simply use latest version
|
||||
# Hash checking is disabled when specifying this
|
||||
#CLOUDSETUP_VERSION=1.2
|
||||
|
||||
DIST_BASE="https://download.freebsd.org/ftp/releases/$ARCH/$RELEASE"
|
||||
ZPOOL=zroot
|
||||
ZPOOL_TMP="zinstalling"
|
||||
|
||||
ZFSTARGET="$(mktemp -d /var/tmp/zfsbuild.XXXXX)"
|
||||
UFSTARGET="$(mktemp -d /var/tmp/ufsbuild.XXXXX)"
|
||||
|
||||
if zpool list -Ho name "$ZPOOL_TMP" 2>/dev/null; then
|
||||
echo "The pool $ZPOOL_TMP is already imported." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cleanup() {
|
||||
sync ||:
|
||||
umount "$UFSTARGET/dev" ||:
|
||||
umount "$UFSTARGET/tmp" ||:
|
||||
umount "$UFSTARGET/var/tmp" ||:
|
||||
umount "$UFSTARGET" ||:
|
||||
zpool export "$ZPOOL_TMP" ||:
|
||||
mdconfig -du md0 ||:
|
||||
mdconfig -du md1 ||:
|
||||
rm -rf "$CLOUDSETUP_WORK" ||:
|
||||
rmdir "$ZFSTARGET" ||:
|
||||
rmdir "$UFSTARGET" ||:
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v rsync >/dev/null
|
||||
then
|
||||
env ASSUME_ALWAYS_YES=YES pkg install -y rsync
|
||||
fi
|
||||
if ! command -v qemu-img >/dev/null
|
||||
then
|
||||
env ASSUME_ALWAYS_YES=YES pkg install -y qemu-tools
|
||||
fi
|
||||
|
||||
portsnap fetch
|
||||
if [ -f /usr/ports/README ]
|
||||
then
|
||||
portsnap update || portsnap extract
|
||||
else
|
||||
portsnap extract
|
||||
fi
|
||||
|
||||
if [ -n "$CLOUDSETUP_VERSION" ]
|
||||
then
|
||||
sed -i .bak -e '/^PORTVERSION=/ s/[0-9]*\.[0-9]*/'"$CLOUDSETUP_VERSION/" /usr/ports/sysutils/firstboot-cloudsetup/Makefile
|
||||
make -C /usr/ports/sysutils/firstboot-cloudsetup makesum
|
||||
fi
|
||||
make -C /usr/ports/sysutils/firstboot-cloudsetup clean package
|
||||
CLOUDSETUP_VERSION="$(fgrep VERSION /usr/ports/sysutils/firstboot-cloudsetup/Makefile | cut -f2- | tr -d \\t)"
|
||||
CLOUDSETUP_PKG="/usr/ports/sysutils/firstboot-cloudsetup/work/pkg/firstboot-cloudsetup-${CLOUDSETUP_VERSION}.pkg"
|
||||
tar -tzf "$CLOUDSETUP_PKG" >/dev/null # check that it's a valid tar, or we crash due to set -e
|
||||
# tar -t lists the contents of a tar file, but does not extract
|
||||
|
||||
make -C /usr/ports/sysutils/firstboot-freebsd-update clean package
|
||||
FBUPDATE_VERSION="$(fgrep VERSION /usr/ports/sysutils/firstboot-freebsd-update/Makefile | cut -f2- | tr -d \\t)"
|
||||
FBUPDATE_PKG="/usr/ports/sysutils/firstboot-freebsd-update/work/pkg/firstboot-freebsd-update-${FBUPDATE_VERSION}.pkg"
|
||||
tar -tzf "$FBUPDATE_PKG" >/dev/null # check that it's a valid tar, or we crash due to set -e
|
||||
|
||||
ufsdisk="$(mktemp /var/tmp/ufsdisk.XXXXX)"
|
||||
truncate -s 6G "$ufsdisk"
|
||||
mdconfig -a -t vnode -f "$ufsdisk" -u md1
|
||||
gpart create -s gpt /dev/md1
|
||||
#gpart add -t efi -l efiboot0 -s 260M md1
|
||||
gpart add -t freebsd-boot -l gptboot -b 40 -s 512K md1
|
||||
gpart bootcode -b /boot/pmbr -p /boot/gptboot -i 1 md1
|
||||
gpart add -t freebsd-ufs -l rootfs -b 1M -s 5G md1
|
||||
newfs -U /dev/md1p2
|
||||
|
||||
# Mount allocated image.
|
||||
mount /dev/md1p2 "$UFSTARGET"
|
||||
|
||||
# Allocate and partition/format disk image.
|
||||
# We use "legacy boot", aka BIOS boot
|
||||
# Preferably, we'd use EFI boot here, check the FreeBSD wiki link in the header
|
||||
# to see how to make that change, but make the EFI partition larger
|
||||
zfsdisk="$(mktemp /var/tmp/zfsdisk.XXXXX)"
|
||||
truncate -s 6G "$zfsdisk"
|
||||
mdconfig -a -t vnode -f "$zfsdisk" -u md0
|
||||
gpart create -s gpt /dev/md0
|
||||
#gpart add -t efi -l efiboot0 -s 260M md1
|
||||
gpart add -t freebsd-boot -l gptboot0 -b 40 -s 512K md0
|
||||
gpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 1 md0
|
||||
gpart add -t freebsd-zfs -l zfs0 -b 1M -s 5G md0
|
||||
zpool create -O compression=on -o ashift=12 -o "altroot=$ZFSTARGET" -m none -t "$ZPOOL_TMP" "$ZPOOL" md0p2
|
||||
|
||||
zfs create -o mountpoint=none "$ZPOOL_TMP/ROOT"
|
||||
# We set zstd-19 so our image will become smaller, at the cost of a longer build time.
|
||||
# At the end of the process, we disable zstd-19 again using zfs inherit compression,
|
||||
# but all files already written will remain zstd-19 compressed
|
||||
zfs create -o mountpoint=/ -o canmount=noauto "$ZPOOL_TMP/ROOT/default"
|
||||
mount -t zfs "$ZPOOL_TMP/ROOT/default" "$ZFSTARGET"
|
||||
zpool set "bootfs=$ZPOOL_TMP/ROOT/default" "$ZPOOL_TMP"
|
||||
|
||||
zfs create -o mountpoint=/tmp -o exec=on -o setuid=off "$ZPOOL_TMP/tmp"
|
||||
zfs create -o canmount=off -o mountpoint=/usr "$ZPOOL_TMP/usr"
|
||||
zfs create "$ZPOOL_TMP/usr/home"
|
||||
zfs create -o exec=off -o setuid=off "$ZPOOL_TMP/usr/src"
|
||||
zfs create -o mountpoint=/usr/ports -o setuid=off "$ZPOOL_TMP/usr/ports"
|
||||
zfs create -o canmount=off -o mountpoint=/var "$ZPOOL_TMP/var"
|
||||
zfs create -o exec=off -o setuid=off "$ZPOOL_TMP/var/audit"
|
||||
zfs create -o exec=off -o setuid=off "$ZPOOL_TMP/var/crash"
|
||||
zfs create -o exec=off -o setuid=off "$ZPOOL_TMP/var/log"
|
||||
zfs create -o atime=on -o exec=off -o setuid=off "$ZPOOL_TMP/var/mail"
|
||||
zfs create -o exec=on -o setuid=off "$ZPOOL_TMP/var/tmp"
|
||||
|
||||
ln -s /usr/home "$ZFSTARGET/home"
|
||||
chmod 1777 "$ZFSTARGET/var/tmp"
|
||||
chmod 1777 "$ZFSTARGET/tmp"
|
||||
|
||||
# Download and extract base system.
|
||||
dist_files="kernel.txz base.txz"
|
||||
dist_dir="/usr/freebsd-dist/$ARCH/$RELEASE"
|
||||
|
||||
mkdir -p "$dist_dir"
|
||||
for f in $dist_files
|
||||
do
|
||||
fetch -m -o "$dist_dir/$f" "$DIST_BASE/$f"
|
||||
tar -C "$UFSTARGET" -xJf "$dist_dir/$f"
|
||||
done
|
||||
|
||||
# Mount dev and tmp in chroot
|
||||
mount -t devfs devfs "$UFSTARGET/dev"
|
||||
mount_nullfs /tmp "$UFSTARGET/tmp"
|
||||
mount_nullfs /var/tmp "$UFSTARGET/var/tmp"
|
||||
|
||||
# Install the first-boot script that configures the network and ssh key
|
||||
# We must use --rootdir and not --chroot, because the file is read from within the chroot
|
||||
# --automatic means that the package is considered to be installed "automatically",
|
||||
# aka as a dependency of something, so pkg autoremove will remove it.
|
||||
# We do not run pkg autoremove ourselves, that's up to the administrator.
|
||||
pkg --rootdir "$UFSTARGET" add --automatic "$CLOUDSETUP_PKG" "$FBUPDATE_PKG"
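# For illustration only (hypothetical post-install steps, run inside the
# booted VM rather than in this build script): packages installed with
# --automatic can later be listed and removed once no longer wanted.
#   pkg query -e '%a = 1' '%n-%v'   # list automatically-installed packages
#   pkg autoremove                  # remove them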
|
||||
|
||||
# Configure new system.
|
||||
touch "$UFSTARGET/firstboot"
|
||||
sysrc -f "$UFSTARGET/boot/loader.conf" \
|
||||
zfs_load="YES" \
|
||||
autoboot_delay="-1"
|
||||
|
||||
sysrc -f "$UFSTARGET/etc/rc.conf" \
|
||||
zfs_enable="YES" \
|
||||
ntpd_enable="YES" \
|
||||
sshd_enable="YES" \
|
||||
growfs_enable="YES" \
|
||||
hostname="freebsd" \
|
||||
firstboot_cloudsetup_enable="YES" \
|
||||
firstboot_freebsd_update_enable="YES"
|
||||
|
||||
# The resolv.conf file is written by firstboot_cloudsetup
|
||||
#cp /etc/resolv.conf "$UFSTARGET/etc/resolv.conf"
|
||||
|
||||
tzsetup -s -C "$UFSTARGET" UTC
|
||||
|
||||
# Add PermitRootLogin without-password, unless PermitRootLogin yes was already set
|
||||
sed -i .orig -e '/^#PermitRootLogin[[:blank:]]/a\
|
||||
PermitRootLogin without-password
|
||||
' -e '/^PermitRootLogin[[:blank:]]*no/ s/\([[:blank:]]\).*$/\1without-password/' \
|
||||
"$UFSTARGET/etc/ssh/sshd_config"
|
||||
if ! grep -Eq '^PermitRootLogin (without-password|yes)' "$UFSTARGET/etc/ssh/sshd_config"
|
||||
then
|
||||
cat >>"$UFSTARGET/etc/ssh/sshd_config" <<EOF
|
||||
|
||||
# Added by Ungleich
|
||||
PermitRootLogin without-password
|
||||
EOF
|
||||
fi
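# Quick sanity check (a sketch, not required for the build): the resulting
# sshd_config should now contain exactly one PermitRootLogin directive.
#   grep -E '^PermitRootLogin' "$UFSTARGET/etc/ssh/sshd_config"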
|
||||
|
||||
# Set zstd-19 compression, copy all data to the pool, and then set compression to default again
|
||||
# This will make the base image smaller, at the cost of taking longer to generate, as zstd-19 is slow to write
|
||||
# Therefore, afterwards we restore compression to default, so written files stay zstd-19, which is fast to read,
|
||||
# but files written by the user afterwards will be written with the default compression algorithm.
|
||||
zfs set compression=zstd-19 "$ZPOOL_TMP/ROOT/default"
|
||||
umount "$UFSTARGET/dev" "$UFSTARGET/tmp" "$UFSTARGET/var/tmp"
|
||||
rsync -aH --fileflags --inplace "$UFSTARGET/." "$ZFSTARGET"
|
||||
|
||||
sysrc -f "$UFSTARGET/boot/loader.conf" -x zfs_load
|
||||
sysrc -f "$UFSTARGET/etc/rc.conf" -x zfs_enable
|
||||
printf '# Device\tMountpoint\tFStype\tOptions\t\tDump\tPass#\n' \
|
||||
>"$ZFSTARGET/etc/fstab"
|
||||
printf '# Device\tMountpoint\tFStype\tOptions\t\tDump\tPass#\n%s\t%s\t\t%s\t%s\t%s\t%s\n' \
|
||||
/dev/gpt/rootfs / ufs rw,noatime 1 1 \
|
||||
>"$UFSTARGET/etc/fstab"
|
||||
sync ||:
|
||||
zfs inherit compression "$ZPOOL_TMP/ROOT/default"
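# Optional check (sketch): confirm compression is back to the inherited
# default and see how well the already-copied data compressed.
#   zfs get compression,compressratio "$ZPOOL_TMP/ROOT/default"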
|
||||
|
||||
trap : EXIT
|
||||
cleanup
|
||||
|
||||
mkdir -p "$ARCH"
|
||||
qemu-img convert -f raw -O qcow2 "$zfsdisk" "$ARCH/$IMAGE_PATH_ZFS"
|
||||
qemu-img convert -f raw -O qcow2 "$ufsdisk" "$ARCH/$IMAGE_PATH_UFS"
|
||||
rm "$zfsdisk" "$ufsdisk"
|
||||
|
||||
# Filesystem will be enlarged by growfs(7) on next startup
|
||||
qemu-img resize "$ARCH/$IMAGE_PATH_ZFS" "$IMAGE_SIZE"
|
||||
qemu-img resize "$ARCH/$IMAGE_PATH_UFS" "$IMAGE_SIZE"
|
|
@ -1,144 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates OpenBSD images for OpenNebula, inspired by
|
||||
# srht's OpenBSD build image definition. It assumes running on an OpenBSD host.
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=7.5
|
||||
ARCH=amd64
|
||||
IMAGE_PATH="$(pwd)/openbsd-$RELEASE-$(date +"%Y-%m-%d").img"
|
||||
IMAGE_SIZE=10G
|
||||
VIRTUAL_DEVICE=vnd0
|
||||
|
||||
# Setup working directory.
|
||||
workdir="$(mktemp -d)"
|
||||
cd "${workdir:?}"
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
vnconfig -u "$VIRTUAL_DEVICE"
|
||||
rm -r "${workdir:?}"
|
||||
}
|
||||
|
||||
# Create base image.
|
||||
vmctl create -s "$IMAGE_SIZE" "$IMAGE_PATH"
|
||||
vnconfig "$VIRTUAL_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Don't forget to clean up, even if the script crashes.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Format disk, mount /mnt.
|
||||
fdisk -iy "${VIRTUAL_DEVICE}"
|
||||
cat > "${workdir}/partitions" <<EOF
|
||||
/ 1G-* 100%
|
||||
EOF
|
||||
disklabel -w -A -T "${workdir:?}/partitions" "${VIRTUAL_DEVICE}"
|
||||
|
||||
# Fetch base system.
|
||||
short_version="$(echo "$RELEASE" | tr -d .)"
|
||||
openbsd_sets="base comp xbase xshare"
|
||||
openbsd_kernel="bsd.mp"
|
||||
openbsd_installurl="${openbsd_installurl:-"https://cdn.openbsd.org/pub/OpenBSD"}"
|
||||
mirror_dir="${RELEASE}"
|
||||
openbsd_public_key="openbsd-${short_version}-base.pub"
|
||||
|
||||
set_files=""
|
||||
for s in $openbsd_sets; do
|
||||
set_files="$set_files ${s}${short_version}.tgz"
|
||||
done
|
||||
|
||||
for f in $set_files $openbsd_kernel SHA256.sig SHA256
|
||||
do
|
||||
test -f "$f" || ftp "${openbsd_installurl}/${mirror_dir}/${ARCH}/${f}"
|
||||
done
|
||||
|
||||
signify -Cp /etc/signify/"$openbsd_public_key" \
|
||||
-x SHA256.sig $set_files $openbsd_kernel
|
||||
|
||||
# Install base system.
|
||||
for f in $set_files
|
||||
do
|
||||
tar -zxphf "$f" -C /mnt
|
||||
done
|
||||
|
||||
tar -zxphf /mnt/var/sysmerge/etc.tgz -C /mnt
|
||||
tar -zxphf /mnt/var/sysmerge/xetc.tgz -C /mnt
|
||||
|
||||
cat > /mnt/etc/fstab <<EOF
|
||||
/dev/sd0a / ffs rw,wxallowed 1 1
|
||||
EOF
|
||||
|
||||
cd /mnt/dev
|
||||
sh MAKEDEV all
|
||||
cd "$workdir"
|
||||
|
||||
# Configure base system.
|
||||
cat >>/mnt/etc/ssh/sshd_config <<EOF
|
||||
PermitRootLogin yes
|
||||
EOF
|
||||
|
||||
echo "openbsd" > /mnt/etc/myname
|
||||
echo "nameserver 2606:4700:4700::1111" >> /mnt/etc/resolv.conf
|
||||
echo "nameserver 1.1.1.1" > /mnt/etc/resolv.conf
|
||||
echo "127.0.0.1 localhost.localdomain localhost" > /mnt/etc/hosts
|
||||
echo "::1 localhost.localdomain localhost" >> /mnt/etc/hosts
|
||||
echo "$openbsd_installurl" > /mnt/etc/installurl
|
||||
ln -sf /usr/share/zoneinfo/UTC /mnt/etc/localtime
|
||||
|
||||
|
||||
cat >>/mnt/etc/rc.conf.local <<EOF
|
||||
dhcpleased_flags=NO
|
||||
pf=NO
|
||||
pflogd_flags=NO
|
||||
resolvd_flags=NO
|
||||
slaacd_flags=NO
|
||||
sndiod_flags=NO
|
||||
EOF
|
||||
|
||||
# Install kernel
|
||||
cp "$openbsd_kernel" /mnt/bsd
|
||||
|
||||
cp SHA256.sig /mnt/var/db/installed.SHA256.sig
|
||||
sha256 /mnt/bsd | (umask 077; sed 's,/mnt,,' >/mnt/var/db/kernel.SHA256)
|
||||
rm -rf /mnt/usr/share/relink/kernel
|
||||
mkdir -m 700 /mnt/usr/share/relink/kernel
|
||||
tar -C /mnt/usr/share/relink/kernel -xzf /mnt/usr/share/relink/kernel.tgz GENERIC.MP
|
||||
rm -f /mnt/usr/share/relink/kernel.tgz
|
||||
echo "Relinking kernel"
|
||||
chroot /mnt /bin/ksh -e -c "cd /usr/share/relink/kernel/GENERIC.MP; make newbsd; make newinstall" > /dev/null
|
||||
|
||||
# Update and install utilities.
|
||||
chroot /mnt /usr/sbin/pkg_add $pkg_add_params -u
|
||||
chroot /mnt /usr/sbin/pkg_add $pkg_add_params bash cloud-agent
|
||||
echo '!/usr/local/libexec/cloud-agent "\$if"' > /mnt/etc/hostname.vio0
|
||||
|
||||
# Remove useless kernel object files. This saves about 300MB of space in the final image
|
||||
rm -rf /mnt/usr/share/relink/kernel/GENERIC.MP/
|
||||
|
||||
# Disable boot wait. Saves 5 seconds
|
||||
echo "boot" > /mnt/etc/boot.conf
|
||||
|
||||
# Dump root filesystem in OS image.
|
||||
makefs "/dev/${VIRTUAL_DEVICE}a" /mnt
|
||||
growfs -y "/dev/${VIRTUAL_DEVICE}a"
|
||||
fsck -y "/dev/${VIRTUAL_DEVICE}a"
|
||||
sync
|
||||
|
||||
# Setup bootloader.
|
||||
mount /dev/vnd0a /mnt
|
||||
installboot -vr /mnt ${VIRTUAL_DEVICE:?}
|
||||
umount /mnt
|
||||
|
||||
# Convert raw image to qcow.
|
||||
vmctl create -i "$IMAGE_PATH" "$IMAGE_PATH.qcow2"
|
|
@ -1,172 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates openSUSE images for OpenNebula.
|
||||
#
|
||||
# Run locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=leap
|
||||
RELEASE_VERSION=15.3
|
||||
IMAGE_PATH=opensuse-${RELEASE}${RELEASE_VERSION}-$(date -I).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd1
|
||||
|
||||
# TODO: find the package definition, build it ourselves and publish it in some RPM repository.
|
||||
ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/addon-context-linux/releases/download/v5.12.0.2/one-context-5.12.0.2-1.suse.noarch.rpm"
|
||||
ONE_CONTEXT_RPM_PATH=/root/one-context.rpm
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$(lsb_release --short --id)" != "openSUSE" ]; then
|
||||
echo "WARNING: this script has been designed to run on an openSUSE system." >&2
|
||||
echo "WARNING: Not running openSUSE. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
repo_addr=http://download.opensuse.org
|
||||
case "$RELEASE" in
|
||||
leap)
|
||||
distribution_slice=$RELEASE/$RELEASE_VERSION
|
||||
oss_repo_url="${repo_addr}/distribution/${distribution_slice}/repo/oss"
|
||||
;;
|
||||
tumbleweed)
|
||||
distribution_slice=$RELEASE
|
||||
oss_repo_url="${repo_addr}/${distribution_slice}/repo/oss"
|
||||
;;
|
||||
*)
|
||||
echo "Unkown openSUSE release: $RELEASE." >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
oss_update_repo_url="${repo_addr}/update/${distribution_slice}/oss"
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Don't forget to clean up, even if the script crashes.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
zypper --root /mnt addrepo $oss_repo_url opensuse-$RELEASE-oss
|
||||
zypper --root /mnt addrepo $oss_update_repo_url opensuse-$RELEASE-oss-update
|
||||
zypper --root /mnt --gpg-auto-import-keys refresh
|
||||
zypper --root /mnt --non-interactive install -t pattern base
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
# Note: as of writing, one-context does not support NetworkManager or
|
||||
# systemd-networkd.
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
#cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# Install (magic?) one-context RPM and hope things work as expected.
|
||||
curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH"
|
||||
run_root zypper -n --no-gpg-checks install "$ONE_CONTEXT_RPM_PATH"
|
||||
run_root rm "$ONE_CONTEXT_RPM_PATH"
|
||||
|
||||
# Install resize2fs, which is required to resize the root file-system.
|
||||
run_root zypper -n install e2fsprogs
|
||||
|
||||
# Initialize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
run_root systemctl enable systemd-timesyncd.service
|
||||
|
||||
# Install haveged due to lack of entropy in ONE environment.
|
||||
run_root zypper -n install haveged
|
||||
run_root systemctl enable haveged.service
|
||||
|
||||
# Install kernel and bootloader.
|
||||
run_root zypper -n install kernel-default grub2
|
||||
|
||||
# Add support for virtio block devices at boot time.
|
||||
cat > /mnt/etc/dracut.conf.d/virtio-blk.conf <<EOF
|
||||
add_drivers="virtio-blk"
|
||||
EOF
|
||||
kernel_version=$(ls /mnt/boot | grep "^vmlinuz-.*" | cut -d- -f2-)
|
||||
run_root dracut --force --kver $kernel_version
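# Optional check (sketch; the initrd path may differ depending on the
# dracut configuration): confirm the virtio_blk driver was included.
#   run_root "lsinitrd /boot/initrd-$kernel_version | grep virtio_blk"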
|
||||
|
||||
# Configure grub2.
|
||||
run_root grub2-install --target=i386-pc "${NBD_DEVICE}"
|
||||
run_root grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
|
||||
# Install and configure SSH daemon.
|
||||
run_root zypper -n install openssh
|
||||
run_root systemctl enable sshd
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
# Note: build logs could be removed as well.
|
||||
run_root zypper clean --all
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,178 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates Rocky Linux images for OpenNebula.
|
||||
|
||||
# Depends on the following packages (as of Fedora 31):
|
||||
# qemu-img util-linux coreutils dnf curl e2fsprogs
|
||||
|
||||
# Run locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=9.3
|
||||
ARCH=x86_64
|
||||
IMAGE_PATH=rocky-$RELEASE-$(date +%F).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd0
|
||||
|
||||
ONE_CONTEXT_RPM_URL="https://github.com/OpenNebula/one-apps/releases/download/v6.8.1/one-context-6.8.1-1.el9.noarch.rpm"
|
||||
ONE_CONTEXT_RPM_PATH=/root/one-context.rpm
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f '/etc/fedora-release' ]; then
|
||||
echo "WARNING: this script has been designed to run on a Fedora system." >&2
|
||||
echo "WARNING: Not running Fedora. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Don't forget to clean up, even if the script crashes.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
# Note: we could use the @Core package group but it pulls quite a lot of
|
||||
# 'unwanted' dependencies. Run `dnf group info Core` for details.
|
||||
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
# Add --setopt=reposdir=rpm-repositories if you do not run on CentOS 7.
|
||||
dnf -y \
|
||||
--releasever=$RELEASE \
|
||||
--setopt=reposdir=rpm-repositories/rocky/ \
|
||||
--installroot=/mnt \
|
||||
--disablerepo='*' \
|
||||
--enablerepo=baseos \
|
||||
--enablerepo=appstream \
|
||||
--enablerepo=extras \
|
||||
install \
|
||||
basesystem dnf systemd systemd-udev passwd glibc-langpack-en rocky-release
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# Required to resolve package mirror in chroot.
|
||||
cp /etc/resolv.conf /mnt/etc/resolv.conf
|
||||
|
||||
# Re-run dnf/install hooks that couldn't be executed in initial call.
|
||||
# Install a few extra dependencies.
|
||||
run_root dnf -y --releasever $RELEASE install rocky-release epel-release
|
||||
|
||||
# Set locale.
|
||||
run_root localectl set-locale LANG=en_GB.UTF-8
|
||||
|
||||
# Install and enable NetworkManager.
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/one-apps for details.
|
||||
run_root dnf install -y NetworkManager
|
||||
run_root systemctl enable NetworkManager
|
||||
|
||||
# Install (magic?) one-context RPM and hope things work as expected.
|
||||
curl -L "$ONE_CONTEXT_RPM_URL" > "/mnt$ONE_CONTEXT_RPM_PATH"
|
||||
run_root dnf -y install "$ONE_CONTEXT_RPM_PATH"
|
||||
run_root rm "$ONE_CONTEXT_RPM_PATH"
|
||||
|
||||
# Install resize2fs, which is required to resize the root file-system.
|
||||
run_root dnf -y install e2fsprogs
|
||||
|
||||
# Initialize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
#run_root systemctl enable systemd-timesyncd.service
|
||||
|
||||
# Install haveged due to lack of entropy in ONE environment.
|
||||
run_root dnf -y install haveged
|
||||
run_root systemctl enable haveged.service
|
||||
|
||||
# Install kernel and bootloader.
|
||||
# Note: linux-firmware is not required in our environment and takes almost 200M
|
||||
# uncompressed but is a direct dependency of kernel-core...
|
||||
run_root dnf -y install kernel grub2
|
||||
|
||||
# Add support for virtio block devices at boot time.
|
||||
cat > /mnt/etc/dracut.conf.d/virtio-blk.conf <<EOF
|
||||
add_drivers="virtio-blk"
|
||||
EOF
|
||||
kernel_version=$(ls /mnt/boot | grep "vmlinuz.*.$ARCH" | cut -d- -f2-)
|
||||
run_root dracut --force --kver $kernel_version
|
||||
|
||||
# Configure grub2.
|
||||
echo "GRUB_DISABLE_OS_PROBER=true" >> /mnt/etc/default/grub
|
||||
run_root grub2-install --target=i386-pc "${NBD_DEVICE}"
|
||||
run_root grub2-mkconfig -o /boot/grub2/grub.cfg
|
||||
|
||||
# Install and configure SSH daemon.
|
||||
run_root dnf -y install openssh-server
|
||||
run_root systemctl enable sshd
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
echo "rocky" > /mnt/etc/hostname
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
# Note: build logs could be removed as well.
|
||||
run_root dnf clean all
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,16 +0,0 @@
|
|||
[base]
|
||||
name=CentOS-$releasever - Base
|
||||
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra
|
||||
#baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
|
||||
gpgcheck=0
|
||||
enabled=0
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
|
||||
|
||||
#released updates
|
||||
[updates]
|
||||
name=CentOS-$releasever - Updates
|
||||
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra
|
||||
#baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/
|
||||
gpgcheck=0
|
||||
enabled=0
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
|
|
@ -1,7 +0,0 @@
|
|||
[extras]
|
||||
name=CentOS-$releasever - Extras
|
||||
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra
|
||||
#baseurl=http://mirror.centos.org/$contentdir/$releasever/extras/$basearch/os/
|
||||
gpgcheck=0
|
||||
enabled=0
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
|
|
@ -1,65 +0,0 @@
|
|||
# rocky-extras.repo
|
||||
#
|
||||
# The mirrorlist system uses the connecting IP address of the client and the
|
||||
# update status of each mirror to pick current mirrors that are geographically
|
||||
# close to the client. You should use this for Rocky updates unless you are
|
||||
# manually picking other mirrors.
|
||||
#
|
||||
# If the mirrorlist does not work for you, you can try the commented out
|
||||
# baseurl line instead.
|
||||
|
||||
[extras]
|
||||
name=Rocky Linux $releasever - Extras
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=extras-$releasever
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/extras/$basearch/os/
|
||||
gpgcheck=1
|
||||
enabled=1
|
||||
countme=1
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[extras-debug]
|
||||
name=Rocky Linux $releasever - Extras Debug
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=extras-$releasever-debug
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/extras/$basearch/debug/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[extras-source]
|
||||
name=Rocky Linux $releasever - Extras Source
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=extras-$releasever-source
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/extras/source/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[plus]
|
||||
name=Rocky Linux $releasever - Plus
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=plus-$releasever
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/plus/$basearch/os/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
countme=1
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[plus-debug]
|
||||
name=Rocky Linux $releasever - Plus - Debug
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=plus-$releasever-debug
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/plus/$basearch/debug/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[plus-source]
|
||||
name=Rocky Linux $releasever - Plus - Source
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=source&repo=plus-$releasever-source
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/plus/source/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
|
@ -1,93 +0,0 @@
|
|||
# rocky.repo
|
||||
#
|
||||
# The mirrorlist system uses the connecting IP address of the client and the
|
||||
# update status of each mirror to pick current mirrors that are geographically
|
||||
# close to the client. You should use this for Rocky updates unless you are
|
||||
# manually picking other mirrors.
|
||||
#
|
||||
# If the mirrorlist does not work for you, you can try the commented out
|
||||
# baseurl line instead.
|
||||
|
||||
[baseos]
|
||||
name=Rocky Linux $releasever - BaseOS
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=BaseOS-$releasever
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/BaseOS/$basearch/os/
|
||||
gpgcheck=1
|
||||
enabled=1
|
||||
countme=1
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[baseos-debug]
|
||||
name=Rocky Linux $releasever - BaseOS - Debug
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=BaseOS-$releasever-debug
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/BaseOS/$basearch/debug/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[baseos-source]
|
||||
name=Rocky Linux $releasever - BaseOS - Source
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=source&repo=BaseOS-$releasever-source
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/BaseOS/source/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[appstream]
|
||||
name=Rocky Linux $releasever - AppStream
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=AppStream-$releasever
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/AppStream/$basearch/os/
|
||||
gpgcheck=1
|
||||
enabled=1
|
||||
countme=1
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[appstream-debug]
|
||||
name=Rocky Linux $releasever - AppStream - Debug
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=AppStream-$releasever-debug
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/AppStream/$basearch/debug/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[appstream-source]
|
||||
name=Rocky Linux $releasever - AppStream - Source
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=source&repo=AppStream-$releasever-source
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/AppStream/source/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[crb]
|
||||
name=Rocky Linux $releasever - CRB
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=CRB-$releasever
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/CRB/$basearch/os/
|
||||
gpgcheck=1
|
||||
enabled=1
|
||||
countme=1
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[crb-debug]
|
||||
name=Rocky Linux $releasever - CRB - Debug
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo=CRB-$releasever-debug
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/CRB/$basearch/debug/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
||||
|
||||
[crb-source]
|
||||
name=Rocky Linux $releasever - CRB - Source
|
||||
mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=source&repo=CRB-$releasever-source
|
||||
#baseurl=http://dl.rockylinux.org/$contentdir/$releasever/CRB/source/tree/
|
||||
gpgcheck=1
|
||||
enabled=0
|
||||
metadata_expire=6h
|
||||
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9
|
|
@ -1,150 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script generates Ubuntu images for OpenNebula.
|
||||
#
|
||||
# Test image locally (without network) with:
|
||||
# qemu-system-x86_64 -enable-kvm -m 1G -drive file=$IMAGE,format=qcow2
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
# XXX: Handle command-line arguments?
|
||||
RELEASE=noble # 24.04 LTS
|
||||
ARCH=amd64
|
||||
IMAGE_PATH=ubuntu-$RELEASE-$(date --iso-8601).img.qcow2
|
||||
IMAGE_SIZE=10G
|
||||
NBD_DEVICE=/dev/nbd2
|
||||
|
||||
ONE_CONTEXT_DEB_URL="https://github.com/OpenNebula/one-apps/releases/download/v6.8.1/one-context_6.8.1-1.deb"
|
||||
ONE_CONTEXT_DEB_PATH=/root/one-context.deb
|
||||
|
||||
cleanup() {
|
||||
# The order here is important.
|
||||
umount /mnt/dev/pts 2>/dev/null || true
|
||||
umount /mnt/dev/shm 2>/dev/null || true
|
||||
umount /mnt/dev 2>/dev/null || true
|
||||
umount /mnt/proc 2>/dev/null || true
|
||||
umount /mnt/run 2>/dev/null || true
|
||||
umount /mnt/sys 2>/dev/null || true
|
||||
umount /mnt/boot 2>/dev/null || true
|
||||
umount /mnt 2>/dev/null || true
|
||||
qemu-nbd --disconnect "$NBD_DEVICE" || true
|
||||
}
|
||||
|
||||
run_root() {
|
||||
chroot /mnt /usr/bin/env \
|
||||
PATH=/sbin:/usr/sbin:/bin:/usr/bin \
|
||||
sh -c "$*"
|
||||
}
|
||||
|
||||
if [ "$(whoami)" != 'root' ]; then
|
||||
echo "This script must be run as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$(lsb_release --short --id)" != "Ubuntu" ]; then
|
||||
echo "WARNING: this script has been designed to run on an Ubuntu system." >&2
|
||||
echo "WARNING: Not running Ubuntu. Giving you 5 seconds to abort." >&2
|
||||
sleep 5
|
||||
fi
|
||||
|
||||
# Create base QCOW2 image.
|
||||
qemu-img create -f qcow2 "$IMAGE_PATH" "$IMAGE_SIZE"
|
||||
modprobe nbd max_part=16
|
||||
qemu-nbd --connect="$NBD_DEVICE" "$IMAGE_PATH"
|
||||
|
||||
# Wait for qemu-nbd to settle.
|
||||
sleep 1
|
||||
|
||||
# Don't forget to clean up, even if the script crashes.
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create partition table, format partitions.
|
||||
sfdisk --no-reread "$NBD_DEVICE" <<EOF
|
||||
1M,500M,L,*
|
||||
,,L
|
||||
EOF
|
||||
|
||||
mkfs.ext4 "${NBD_DEVICE}p1"
|
||||
mkfs.ext4 "${NBD_DEVICE}p2"
|
||||
|
||||
# Mount partitions, install base OS.
|
||||
mount "${NBD_DEVICE}p2" /mnt
|
||||
mkdir /mnt/boot
|
||||
mount "${NBD_DEVICE}p1" /mnt/boot
|
||||
|
||||
debootstrap \
|
||||
--arch=$ARCH $RELEASE \
|
||||
/mnt http://archive.ubuntu.com/ubuntu/
|
||||
|
||||
mount --bind /dev /mnt/dev
|
||||
mount --bind /dev/pts /mnt/dev/pts
|
||||
mount --bind /dev/shm /mnt/dev/shm
|
||||
mount --bind /proc /mnt/proc
|
||||
mount --bind /run /mnt/run
|
||||
mount --bind /sys /mnt/sys
|
||||
|
||||
# Guest networking is to be handled by the one-context package.
|
||||
# See https://github.com/OpenNebula/addon-context-linux for details.
|
||||
|
||||
# Initialize /etc/hosts.
|
||||
cat > /mnt/etc/hosts << EOF
|
||||
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
|
||||
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
|
||||
|
||||
EOF
|
||||
|
||||
# Configure package sources and update package index.
|
||||
cat >/mnt/etc/apt/sources.list <<EOF
|
||||
deb http://archive.ubuntu.com/ubuntu/ $RELEASE main restricted universe multiverse
|
||||
deb http://archive.ubuntu.com/ubuntu/ $RELEASE-security main restricted universe multiverse
|
||||
deb http://archive.ubuntu.com/ubuntu/ $RELEASE-updates main restricted universe multiverse
|
||||
deb http://archive.ubuntu.com/ubuntu/ $RELEASE-backports main restricted universe multiverse
|
||||
EOF
|
||||
run_root apt-get update
|
||||
|
||||
# Install (magic?) one-context DEB and hope things work as expected.
|
||||
curl -L "$ONE_CONTEXT_DEB_URL" > "/mnt$ONE_CONTEXT_DEB_PATH"
|
||||
run_root apt-get -y install "$ONE_CONTEXT_DEB_PATH"
|
||||
run_root rm "$ONE_CONTEXT_DEB_PATH"
|
||||
|
||||
# Manually install legacy network scripts used by one-context.
|
||||
run_root apt-get -y install ifupdown
|
||||
|
||||
# Initialize base services.
|
||||
run_root systemd-machine-id-setup
|
||||
|
||||
run_root ln -sf /usr/share/zoneinfo/UTC /etc/localtime
|
||||
run_root systemctl enable systemd-timesyncd.service
|
||||
|
||||
# Install kernel and bootloader. Do not autoconfigure grub.
|
||||
run_root "echo 'grub-pc grub-pc/install_devices_empty boolean true' | debconf-set-selections"
|
||||
run_root DEBIAN_FRONTEND=noninteractive apt-get -y install locales linux-base linux-image-generic grub-pc
|
||||
|
||||
# Configure grub.
|
||||
echo "GRUB_DISABLE_OS_PROBER=true" >> /mnt/etc/default/grub
|
||||
run_root grub-install --target=i386-pc "${NBD_DEVICE}"
|
||||
run_root grub-mkconfig -o /boot/grub/grub.cfg
|
||||
|
||||
# Install and configure SSH daemon.
|
||||
run_root apt-get -y install openssh-server
|
||||
|
||||
# Generate fstab file.
|
||||
boot_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p1")
|
||||
root_uuid=$(blkid --match-tag UUID --output value "${NBD_DEVICE}p2")
|
||||
cat >>/mnt/etc/fstab <<EOF
|
||||
UUID=$boot_uuid /boot ext4 rw,relatime,data=ordered 0 2
|
||||
UUID=$root_uuid / ext4 rw,relatime,data=ordered 0 1
|
||||
EOF
|
||||
|
||||
# Reset systemd's environment.
|
||||
run_root rm -f /etc/machine-id
|
||||
run_root touch /etc/machine-id
|
||||
rm -f /var/lib/systemd/random-seed
|
||||
echo "ubuntu" > /mnt/etc/hostname
|
||||
|
||||
# Remove temporary files and reclaim freed disk space.
|
||||
run_root apt-get clean
|
||||
|
||||
# Make sure everything is written to disk before exiting.
|
||||
sync
|
|
@ -1,12 +0,0 @@
|
|||
import configparser
|
||||
|
||||
from etcd_wrapper import EtcdWrapper
|
||||
|
||||
config = configparser.ConfigParser(allow_no_value=True)
|
||||
config.read('config-and-secrets.conf')
|
||||
|
||||
etcd_client = EtcdWrapper(
|
||||
host=config['etcd']['url'], port=config['etcd']['port'],
|
||||
ca_cert=config['etcd']['ca_cert'], cert_key=config['etcd']['cert_key'],
|
||||
cert_cert=config['etcd']['cert_cert']
|
||||
)
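# A minimal config-and-secrets.conf matching the keys read above might look
# like this (section and key names come from this repository's code; all
# values are hypothetical placeholders):
#
#   [etcd]
#   url = etcd1.example.com
#   port = 2379
#   ca_cert = /path/to/ca.pem
#   cert_key = /path/to/client.key
#   cert_cert = /path/to/client.crt
#
#   [oca]
#   client_secrets = oneadmin:secret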
|
|
@ -1,73 +0,0 @@
|
|||
import etcd3
|
||||
import json
|
||||
import logging
|
||||
|
||||
from functools import wraps
|
||||
|
||||
|
||||
class EtcdEntry:
|
||||
def __init__(self, meta_or_key, value, value_in_json=True):
|
||||
if hasattr(meta_or_key, 'key'):
|
||||
# if meta has attr 'key' then get it
|
||||
self.key = meta_or_key.key.decode('utf-8')
|
||||
else:
|
||||
# otherwise meta is the 'key'
|
||||
self.key = meta_or_key
|
||||
self.value = value.decode('utf-8')
|
||||
|
||||
if value_in_json:
|
||||
self.value = json.loads(self.value)
|
||||
|
||||
|
||||
def readable_errors(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except etcd3.exceptions.ConnectionFailedError:
|
||||
raise etcd3.exceptions.ConnectionFailedError('Cannot connect to etcd: is etcd running as configured?')
|
||||
except etcd3.exceptions.ConnectionTimeoutError as err:
|
||||
raise etcd3.exceptions.ConnectionTimeoutError('etcd connection timeout.') from err
|
||||
except Exception as err:
|
||||
logging.exception('Some etcd error occurred: %s. See syslog for details.', err)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class EtcdWrapper:
|
||||
@readable_errors
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.client = etcd3.client(*args, **kwargs)
|
||||
|
||||
@readable_errors
|
||||
def get(self, *args, value_in_json=True, **kwargs):
|
||||
_value, _key = self.client.get(*args, **kwargs)
|
||||
if _key is None or _value is None:
|
||||
return None
|
||||
return EtcdEntry(_key, _value, value_in_json=value_in_json)
|
||||
|
||||
@readable_errors
|
||||
def put(self, *args, value_in_json=True, **kwargs):
|
||||
_key, _value = args
|
||||
if value_in_json:
|
||||
_value = json.dumps(_value)
|
||||
|
||||
if not isinstance(_key, str):
|
||||
_key = _key.decode('utf-8')
|
||||
|
||||
return self.client.put(_key, _value, **kwargs)
|
||||
|
||||
@readable_errors
|
||||
def get_prefix(self, *args, value_in_json=True, **kwargs):
|
||||
event_iterator = self.client.get_prefix(*args, **kwargs)
|
||||
for e in event_iterator:
|
||||
yield EtcdEntry(*e[::-1], value_in_json=value_in_json)
|
||||
|
||||
@readable_errors
|
||||
def watch_prefix(self, key, value_in_json=True):
|
||||
event_iterator, cancel = self.client.watch_prefix(key)
|
||||
for e in event_iterator:
|
||||
if hasattr(e, '_event'):
|
||||
e = getattr(e, '_event')
|
||||
if e.type == e.PUT:
|
||||
yield EtcdEntry(e.kv.key, e.kv.value, value_in_json=value_in_json)
|
|
@ -1,98 +0,0 @@
|
|||
import json
|
||||
|
||||
from enum import IntEnum
|
||||
from xmlrpc.client import ServerProxy as RPCClient
|
||||
|
||||
from xmltodict import parse
|
||||
|
||||
from config import config, etcd_client
|
||||
|
||||
|
||||
# Constants
|
||||
ALL_VM_STATES = -1
|
||||
START_ID = -1 # First id whatever it is
|
||||
END_ID = -1 # Last id whatever it is
|
||||
|
||||
|
||||
def put_under_list(obj):
|
||||
if not isinstance(obj, list):
|
||||
return [obj]
|
||||
return obj
|
||||
|
||||
|
||||
class VMState(IntEnum):
|
||||
INIT = 0
|
||||
PENDING = 1
|
||||
HOLD = 2
|
||||
ACTIVE = 3
|
||||
STOPPED = 4
|
||||
SUSPENDED = 5
|
||||
DONE = 6
|
||||
FAILED = 7
|
||||
POWEROFF = 8
|
||||
UNDEPLOYED = 9
|
||||
CLONING = 10
|
||||
CLONING_FAILURE = 11
|
||||
|
||||
|
||||
class VmFilterFlag(IntEnum):
|
||||
UIDUserResources = 0 # UID User’s Resources
|
||||
UserAndItsGroupsResources = -1 # Resources belonging to the user and any of his groups
|
||||
AllResources = -2 # All resources
|
||||
UserResources = -3 # Resources belonging to the user
|
||||
UserPrimaryGroupResources = -4 # Resources belonging to the user’s primary group
|
||||
|
||||
|
||||
class VM:
|
||||
def __init__(self, vm: dict):
|
||||
self.id = vm.get('ID', None)
|
||||
self.owner = {
|
||||
'id': vm.get('UID', None),
|
||||
'name': vm.get('UNAME', None),
|
||||
'gname': vm.get('GNAME', None)
|
||||
}
|
||||
self.name = vm.get('NAME', None)
|
||||
self.status = vm.get('STATE', None)
|
||||
if self.status:
|
||||
self.status = VMState(int(self.status)).name.lower()
|
||||
|
||||
template = vm['TEMPLATE']
|
||||
|
||||
self.disk = put_under_list(template.get('DISK', []))
|
||||
self.graphics = template.get('GRAPHICS', {})
|
||||
self.memory = template.get('MEMORY', None)
|
||||
self.nic = put_under_list(template.get('NIC', []))
|
||||
self.vcpu = template.get('VCPU', None)
|
||||
self.host = {
|
||||
'name': ((vm.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HOSTNAME', None),
|
||||
'id': ((vm.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HID', None),
|
||||
}
|
||||
self.snapshots = put_under_list(vm.get('SNAPSHOTS', []))
|
||||
|
||||
def get_data(self):
|
||||
return {
|
||||
attr: getattr(self, attr)
|
||||
for attr in dir(self)
|
||||
if not attr.startswith('__') and not callable(getattr(self, attr))
|
||||
}
|
||||
|
||||
|
||||
def main():
|
||||
with RPCClient('https://opennebula.ungleich.ch:2634/RPC2') as rpc_client:
|
||||
success, response, *_ = rpc_client.one.vmpool.infoextended(
|
||||
config['oca']['client_secrets'], VmFilterFlag.AllResources.value, START_ID, END_ID, ALL_VM_STATES
|
||||
)
|
||||
if success:
|
||||
vms = json.loads(json.dumps(parse(response)))['VM_POOL']['VM']
|
||||
for i, vm in enumerate(vms):
|
||||
vm_id = vm['ID']
|
||||
etcd_client.put(f'/opennebula/vm/{vm_id}', vm)
|
||||
|
||||
parsed_vm = VM(vm)
|
||||
etcd_client.put(f'/opennebula/parsed_vm/{parsed_vm.id}', parsed_vm.get_data())
|
||||
else:
|
||||
print(response)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -1,56 +0,0 @@
|
|||
from pprint import pprint
|
||||
|
||||
from config import etcd_client
|
||||
|
||||
|
||||
def get_vm_by_ip(vms, ip, status='active'):
|
||||
vms_by_status = {
|
||||
vm_id: vm
|
||||
for vm_id, vm in vms.items()
|
||||
if vm['status'] == status
|
||||
}
|
||||
for vm_id, vm in vms_by_status.items():
|
||||
vm_ips = []
|
||||
for nic in vm.get('nic', []):
|
||||
global_ipv6 = nic.get('IP6_GLOBAL', None)
|
||||
local_ipv6 = nic.get('IP6_LINK', None)
|
||||
ipv4 = nic.get('IP', None)
|
||||
vm_ips += [global_ipv6, local_ipv6, ipv4]
|
||||
|
||||
if ip in vm_ips:
|
||||
return {vm_id: vm}
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
vm_prefix = '/opennebula/parsed_vm/'
|
||||
|
||||
vms = {
|
||||
int(vm.key.split('/')[-1]): vm.value
|
||||
for vm in etcd_client.get_prefix(vm_prefix)
|
||||
}
|
||||
|
||||
VM_ID = 10761  # One of Nico's VMs
|
||||
|
||||
# Get all data related to a VM
|
||||
pprint(vms.get(VM_ID))
|
||||
|
||||
# Get host of a VM
|
||||
print(vms.get(VM_ID).get('host').get('name'))
|
||||
|
||||
# Get VNC Port of a VM
|
||||
print(vms.get(VM_ID).get('graphics').get('PORT'))
|
||||
|
||||
# Get all disks attached with VM
|
||||
pprint(vms.get(VM_ID).get('disk'))
|
||||
|
||||
# Who is owner of a VM?
|
||||
print(vms.get(VM_ID).get('owner').get('name'))
|
||||
|
||||
# Get VM who has 2a0a:e5c0:0:5:0:78ff:fe11:d75f
|
||||
search_ungleich_ch = get_vm_by_ip(vms, '2a0a:e5c0:0:5:0:78ff:fe11:d75f')
|
||||
pprint(search_ungleich_ch)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,7 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
cd ~/osh/openstack-helm-infra
|
||||
helm upgrade --install ceph-adapter-rook \
|
||||
./ceph-adapter-rook/ \
|
||||
--namespace=rook-ceph \
|
||||
--values=$HOME/vcs/ungleich-tools/openstack/values/ceph-adapter-rook-ceph.yaml
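# Optional verification once the release has been applied (hypothetical
# step; assumes helm access to the same cluster):
#   helm status ceph-adapter-rook --namespace rook-ceph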
|
|
@ -1,7 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
echo "This script should be sourced for setting env variables, like venv activate"
|
||||
|
||||
export OPENSTACK_RELEASE=2023.2
|
||||
export CONTAINER_DISTRO_NAME=ubuntu
|
||||
export CONTAINER_DISTRO_VERSION=jammy
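# Example usage (the filename is hypothetical; source the script so the
# exported variables persist in the calling shell):
#   . ./openstack-env.sh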
|
|
@ -1,15 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
mkdir -p ~/osh
|
||||
cd ~/osh
|
||||
if [ ! -d openstack-helm ]; then
|
||||
git clone https://opendev.org/openstack/openstack-helm.git
|
||||
else
|
||||
cd openstack-helm && git pull
|
||||
fi
|
||||
|
||||
if [ ! -d openstack-helm-infra ]; then
|
||||
git clone https://opendev.org/openstack/openstack-helm-infra.git
|
||||
else
|
||||
cd openstack-helm-infra && git pull
|
||||
fi
|