Compare commits
186 commits
| SHA1 |
|---|
| e14ac947c1 |
| 3e69fb275f |
| 618fecb73f |
| e2cd44826b |
| 1758629ca1 |
| a759b8aa39 |
| 4c6a126d8b |
| 2b71c1807d |
| cbcaf63650 |
| 5d05e91335 |
| 8cc58726d0 |
| 5711bf4770 |
| ae3482cc71 |
| b1319d654a |
| 93d7a409b1 |
| 7e91f60c0a |
| 58daf8191e |
| b5409552d8 |
| 550937630c |
| 46a04048b5 |
| 3ddd27a08d |
| c881c7ce4d |
| d5a7f8ef59 |
| 0982927c1b |
| 8e839aeb44 |
| 8888f5d9f7 |
| bd9dbb12b7 |
| 30be791312 |
| 2b8831784a |
| b847260768 |
| 1b5a3f6d2e |
| 8a451ff4ff |
| bd03f95e99 |
| 26d5c91625 |
| b877ab13b3 |
| 12e8ccd01c |
| 8078ffae5a |
| 1b36c2f96f |
| c0e6d6a0d8 |
| 083ba43918 |
| 22531a7459 |
| b96e56b453 |
| 9f02b31b1b |
| 10c8dc85ba |
| 091131d350 |
| ab65349047 |
| c3b42aabc6 |
| e6d22a73c5 |
| 02526baaf9 |
| 3188787c2a |
| 94dad7c9b6 |
| 53c6a14d60 |
| 64ab011299 |
| b017df4879 |
| aaf29adcbb |
| 6d51e2a8c4 |
| c6b7152464 |
| 8544df8bad |
| 708e3ebb97 |
| 3b68a589d4 |
| 029ef36d62 |
| 3cf4807f7c |
| c1cabb7220 |
| 5d95f11b3d |
| 23d805f04f |
| 3825c7c210 |
| 7c9e3d747a |
| b9c9a5e0ec |
| ebcb1680d7 |
| cf4930ee84 |
| 00d876aea1 |
| e91fd9e24a |
| 469d03467d |
| ec66a756a0 |
| b4f47adb4f |
| 31ec024be6 |
| 82a69701ce |
| d9dd6b48dc |
| b7596e071a |
| 71fd0ca7d9 |
| 92f985c857 |
| feb334cf04 |
| 48efcdf08c |
| f8f790e7fc |
| 5a646aeac9 |
| 6046015c3d |
| b4292615de |
| 48cc37c438 |
| 6086fec633 |
| 388127bd11 |
| ef0f13534a |
| ec40d6b1e0 |
| b7f3ba1a34 |
| 6f51ddbb36 |
| 7fff280c79 |
| 6847a0d323 |
| 180f6f4989 |
| 344a957a3f |
| 3296e524cc |
| 50fb135726 |
| cd2f0aaa0d |
| 2afb37daca |
| b95037f624 |
| eb19b10333 |
| 2566e86f1e |
| e775570884 |
| 9662e02eb7 |
| 71c3f9d978 |
| 29dfacfadb |
| bff12ed930 |
| 1fba79ca31 |
| 4c7678618d |
| 6682f127f1 |
| 433a3b9817 |
| 7b6c02b3ab |
| 70c8da544e |
| 6a40a7f12f |
| 27e780b359 |
| 4b7d6d5099 |
| d13a4bcc37 |
| d2d6c6bf5c |
| 9963e9c62d |
| 52867614df |
| 9bdf4d2180 |
| 29e938dc74 |
| f980cdb464 |
| 808271f3e0 |
| ba515f0b48 |
| cd9d4cb78c |
| ec3cf49799 |
| f79097cae9 |
| 972bb5a920 |
| eea6c1568e |
| e4d2c98fb5 |
| 88b4d34e1a |
| 04993e4106 |
| bc58a6ed9c |
| 71279a968f |
| f919719b1e |
| 8afd524c55 |
| e79f1b4de9 |
| 23c7604a3e |
| 8e159c8be1 |
| 0283894ba2 |
| 9206d8ed1d |
| 5b44034602 |
| dee0a29c91 |
| 8b90755015 |
| bfbf08c7cd |
| 2a1e80dbc5 |
| 26aeb78f61 |
| 9ec9083c57 |
| c6fe2cb1c4 |
| 1e70d0183d |
| 34a6e99525 |
| fee1cfd4ff |
| b2de277244 |
| 00563c7dc2 |
| b235f0833c |
| 42bb7bc609 |
| 012f3cb3b5 |
| 0d38a66a34 |
| 72af426b3a |
| 871aa5347b |
| 608d1eb280 |
| 537a5b01f1 |
| c37bf19f92 |
| 431a6f6d9b |
| 6c56a7a7c6 |
| dd33b89941 |
| cff6a4021f |
| 76f63633ca |
| adddba518c |
| 787b236305 |
| c1b0c5301e |
| 7486fafbaa |
| cdbfb96e71 |
| a4bedb01f6 |
| 6d0ce65f5c |
| e459434b91 |
| cfb09c29de |
| 9517e73233 |
| f9dbdc730a |
| 2244b94fd8 |
| 9ae75f20e8 |
| 6d715e8348 |
126 changed files with 4187 additions and 2753 deletions
.gitignore (vendored): 8 changed lines

```diff
@@ -2,17 +2,19 @@
 .vscode
 
 
-ucloud/docs/build
+uncloud/docs/build
 logs.txt
 
-ucloud.egg-info
+uncloud.egg-info
 
 # run artefacts
 default.etcd
 __pycache__
 
 # build artefacts
-ucloud/version.py
+uncloud/version.py
 build/
 venv/
 dist/
 
+*.iso
```
bin/gen-version (new executable file, 29 lines)

```diff
@@ -0,0 +1,29 @@
+#!/bin/sh
+# -*- coding: utf-8 -*-
+#
+# 2019-2020 Nico Schottelius (nico-uncloud at schottelius.org)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+# Wrapper for real script to allow execution from checkout
+dir=${0%/*}
+
+# Ensure version is present - the bundled/shipped version contains a static version,
+# the git version contains a dynamic version
+printf "VERSION = \"%s\"\n" "$(git describe --tags --abbrev=0)" > ${dir}/../uncloud/version.py
```
```diff
@@ -25,9 +25,9 @@ dir=${0%/*}
 
 # Ensure version is present - the bundled/shipped version contains a static version,
 # the git version contains a dynamic version
-printf "VERSION = \"%s\"\n" "$(git describe)" > ${dir}/../ucloud/version.py
+${dir}/gen-version
 
 libdir=$(cd "${dir}/../" && pwd -P)
 export PYTHONPATH="${libdir}"
 
-"$dir/../scripts/ucloud" "$@"
+"$dir/../scripts/uncloud" "$@"
```
bin/uncloud-run-reinstall (new executable file, 29 lines)

```diff
@@ -0,0 +1,29 @@
+#!/bin/sh
+# -*- coding: utf-8 -*-
+#
+# 2012-2019 Nico Schottelius (nico-ucloud at schottelius.org)
+#
+# This file is part of ucloud.
+#
+# ucloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ucloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with ucloud. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+# Wrapper for real script to allow execution from checkout
+dir=${0%/*}
+
+${dir}/gen-version;
+pip uninstall -y uncloud >/dev/null
+python setup.py install >/dev/null
+${dir}/uncloud "$@"
```
conf/uncloud.conf (new file, 13 lines)

```diff
@@ -0,0 +1,13 @@
+[etcd]
+url = localhost
+port = 2379
+base_prefix = /
+ca_cert
+cert_cert
+cert_key
+
+[client]
+name = replace_me
+realm = replace_me
+seed = replace_me
+api_server = http://localhost:5000
```
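The keys without values (ca_cert, cert_cert, cert_key) are intentionally empty placeholders. Purely as an illustration (this is not necessarily how uncloud itself parses the file), the shipped config can be read with Python's standard configparser; allow_no_value=True is what makes the value-less keys legal:

```python
# Minimal sketch: read conf/uncloud.conf with the standard library.
# allow_no_value=True is required because ca_cert/cert_cert/cert_key
# appear without "= value".
import configparser

config = configparser.ConfigParser(allow_no_value=True)
config.read("conf/uncloud.conf")

etcd_url = config.get("etcd", "url", fallback="localhost")
etcd_port = config.getint("etcd", "port", fallback=2379)
api_server = config.get("client", "api_server")
print(etcd_url, etcd_port, api_server)
```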
```diff
@@ -7,7 +7,7 @@ SPHINXOPTS ?=
 SPHINXBUILD ?= sphinx-build
 SOURCEDIR = source/
 BUILDDIR = build/
-DESTINATION=root@staticweb.ungleich.ch:/home/services/www/ungleichstatic/staticcms.ungleich.ch/www/ucloud/
+DESTINATION=root@staticweb.ungleich.ch:/home/services/www/ungleichstatic/staticcms.ungleich.ch/www/uncloud/
 
 .PHONY: all build clean
```
docs/README.md (new file, 12 lines)

```diff
@@ -0,0 +1,12 @@
+# uncloud docs
+
+## Requirements
+1. Python3
+2. Sphinx
+
+## Usage
+Run `make build` to build docs.
+
+Run `make clean` to remove build directory.
+
+Run `make publish` to push build dir to https://ungleich.ch/ucloud/
```
```diff
@@ -56,40 +56,13 @@ To start host we created earlier, execute the following command
 
    ucloud host ungleich.ch
 
-Create OS Image
----------------
+File & image scanners
+--------------------------
 
-Create ucloud-init ready OS image (Optional)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This step is optional if you just want to test ucloud. However, sooner or later
-you want to create OS images with ucloud-init to properly
-contexualize VMs.
-
-1. Start a VM with OS image on which you want to install ucloud-init
-2. Execute the following command on the started VM
-
-.. code-block:: sh
-
-   apk add git
-   git clone https://code.ungleich.ch/ucloud/ucloud-init.git
-   cd ucloud-init
-   sh ./install.sh
-
-3. Congratulations. Your image is now ucloud-init ready.
-
-
-Upload Sample OS Image
-~~~~~~~~~~~~~~~~~~~~~~
-Execute the following to get the sample OS image file.
-
-.. code-block:: sh
-
-   mkdir /var/www/admin
-   (cd /var/www/admin && wget https://cloud.ungleich.ch/s/qTb5dFYW5ii8KsD/download)
-
-Run File Scanner and Image Scanner
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Currently, our uploaded file *alpine-untouched.qcow2* is not tracked by ucloud. We can only make
-images from tracked files. So, we need to track the file by running File Scanner
+Let's assume we have uploaded an *alpine-uploaded.qcow2* disk image to our
+uncloud server. Currently, our *alpine-untouched.qcow2* is not tracked by
+uncloud. We can only make images from tracked files. So, we need to track the
+file by running File Scanner
 
 .. code-block:: sh
```
```diff
@@ -17,9 +17,9 @@
 
 # -- Project information -----------------------------------------------------
 
-project = 'ucloud'
-copyright = '2019, ungleich'
-author = 'ungleich'
+project = "uncloud"
+copyright = "2019, ungleich"
+author = "ungleich"
 
 # -- General configuration ---------------------------------------------------
 
@@ -27,12 +27,12 @@ author = 'ungleich'
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx_rtd_theme',
+    "sphinx.ext.autodoc",
+    "sphinx_rtd_theme",
 ]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 
@@ -50,4 +50,4 @@ html_theme = "sphinx_rtd_theme"
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
```
docs/source/hacking.rst (new file, 36 lines)

```diff
@@ -0,0 +1,36 @@
+Hacking
+=======
+Using uncloud in hacking (aka development) mode.
+
+
+Get the code
+------------
+.. code-block:: sh
+   :linenos:
+
+   git clone https://code.ungleich.ch/uncloud/uncloud.git
+
+
+Install python requirements
+---------------------------
+You need to have python3 installed.
+
+.. code-block:: sh
+   :linenos:
+
+   cd uncloud
+   python -m venv venv
+   . ./venv/bin/activate
+   ./bin/uncloud-run-reinstall
+
+
+Install os requirements
+-----------------------
+Install the following software packages: **dnsmasq**.
+
+If you already have a working IPv6 SLAAC and DNS setup,
+this step can be skipped.
+
+Note that you need at least one /64 IPv6 network to run uncloud.
```
(binary image file changed: 37 KiB before, 37 KiB after)
```diff
@@ -11,12 +11,12 @@ Welcome to ucloud's documentation!
    :caption: Contents:
 
    introduction
-   user-guide
    setup-install
+   vm-images
+   user-guide
    admin-guide
-   user-guide/how-to-create-an-os-image-for-ucloud
    troubleshooting
+   hacking
 
 Indices and tables
 ==================
```
docs/source/vm-images.rst (new file, 66 lines)

```diff
@@ -0,0 +1,66 @@
+VM images
+==================================
+
+Overview
+---------
+
+ucloud tries to be least invasive towards VMs and only requires
+strictly necessary changes for running in a virtualised
+environment. This includes configurations for:
+
+* Configuring the network
+* Managing access via ssh keys
+* Resizing the attached disk(s)
+
+Upstream images
+---------------
+
+The 'official' uncloud images are defined in the `uncloud/images
+<https://code.ungleich.ch/uncloud/images>`_ repository.
+
+How to make your own Uncloud images
+-----------------------------------
+
+.. note::
+   It is fairly easy to create your own images for uncloud, as the common
+   operations (which are detailed below) can be automatically handled by the
+   `uncloud/uncloud-init <https://code.ungleich.ch/uncloud/uncloud-init>`_ tool.
+
+Network configuration
+~~~~~~~~~~~~~~~~~~~~~
+All VMs in ucloud are required to support IPv6. The primary network
+configuration is always done using SLAAC. A VM thus needs only to be
+configured to
+
+* accept router advertisements on all network interfaces
+* use the router advertisements to configure the network interfaces
+* accept the DNS entries from the router advertisements
+
+
+Configuring SSH keys
+~~~~~~~~~~~~~~~~~~~~
+
+To be able to access the VM, ucloud supports provisioning SSH keys.
+
+To accept ssh keys in your VM, request the URL
+*http://metadata/ssh_keys*. Add the content to the appropriate user's
+**authorized_keys** file. Below you find sample code to accomplish
+this task:
+
+.. code-block:: sh
+
+   tmp=$(mktemp)
+   curl -s http://metadata/ssh_keys > "$tmp"
+   touch ~/.ssh/authorized_keys # ensure it exists
+   cat ~/.ssh/authorized_keys >> "$tmp"
+   sort "$tmp" | uniq > ~/.ssh/authorized_keys
+
+
+Disk resize
+~~~~~~~~~~~
+In virtualised environments, the disk sizes might grow. The operating
+system should detect disks that are bigger than the existing partition
+table and resize accordingly. This task is OS specific.
+
+ucloud does not support shrinking disks due to the complexity and
+intra OS dependencies.
```
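For completeness, here is the same provisioning step sketched in Python. Only the *http://metadata/ssh_keys* endpoint described above is assumed; this helper is an illustration and not part of uncloud itself:

```python
# Hypothetical Python equivalent of the shell snippet above: fetch the
# provisioned keys from the metadata server and merge them (deduplicated)
# into the current user's authorized_keys file.
import os
import urllib.request

AUTHORIZED_KEYS = os.path.expanduser("~/.ssh/authorized_keys")

with urllib.request.urlopen("http://metadata/ssh_keys") as response:
    fetched = response.read().decode("utf-8").splitlines()

existing = []
if os.path.exists(AUTHORIZED_KEYS):
    with open(AUTHORIZED_KEYS) as fh:
        existing = fh.read().splitlines()

os.makedirs(os.path.dirname(AUTHORIZED_KEYS), exist_ok=True)
with open(AUTHORIZED_KEYS, "w") as fh:
    fh.write("\n".join(sorted(set(existing + fetched))) + "\n")
```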
```diff
@@ -1,59 +0,0 @@
-#!/usr/bin/env python3
-
-import argparse
-import multiprocessing as mp
-import logging
-
-from os.path import join as join_path
-from ucloud.sanity_checks import check
-
-if __name__ == "__main__":
-    arg_parser = argparse.ArgumentParser(prog='ucloud',
-                                         description='Open Source Cloud Management Software')
-    arg_parser.add_argument('component',
-                            choices=['api', 'scheduler', 'host',
-                                     'filescanner', 'imagescanner',
-                                     'metadata'])
-    arg_parser.add_argument('component_args', nargs='*')
-    args = arg_parser.parse_args()
-
-    logging.basicConfig(
-        level=logging.DEBUG,
-        filename=join_path("/", "etc", "ucloud", "log.txt"),
-        filemode="a",
-        format="%(name)s %(asctime)s: %(levelname)s - %(message)s",
-        datefmt="%d-%b-%y %H:%M:%S",
-    )
-    try:
-        check()
-
-        if args.component == 'api':
-            from ucloud.api.main import main
-
-            main()
-        elif args.component == 'host':
-            from ucloud.host.main import main
-
-            hostname = args.component_args
-            mp.set_start_method('spawn')
-            main(*hostname)
-        elif args.component == 'scheduler':
-            from ucloud.scheduler.main import main
-
-            main()
-        elif args.component == 'filescanner':
-            from ucloud.filescanner.main import main
-
-            main()
-        elif args.component == 'imagescanner':
-            from ucloud.imagescanner.main import main
-
-            main()
-        elif args.component == 'metadata':
-            from ucloud.metadata.main import main
-
-            main()
-
-    except Exception as e:
-        logging.exception(e)
-        print(e)
```
scripts/uncloud (new executable file, 88 lines)

```diff
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+import logging
+import sys
+import importlib
+import argparse
+import os
+
+from etcd3.exceptions import ConnectionFailedError
+
+from uncloud.common import settings
+from uncloud import UncloudException
+from uncloud.common.cli import resolve_otp_credentials
+
+# Components that use etcd
+ETCD_COMPONENTS = ['api', 'scheduler', 'host', 'filescanner',
+                   'imagescanner', 'metadata', 'configure', 'hack']
+
+ALL_COMPONENTS = ETCD_COMPONENTS.copy()
+ALL_COMPONENTS.append('oneshot')
+#ALL_COMPONENTS.append('cli')
+
+
+if __name__ == '__main__':
+    arg_parser = argparse.ArgumentParser()
+    subparsers = arg_parser.add_subparsers(dest='command')
+
+    parent_parser = argparse.ArgumentParser(add_help=False)
+    parent_parser.add_argument('--debug', '-d', action='store_true', default=False,
+                               help='More verbose logging')
+    parent_parser.add_argument('--conf-dir', '-c', help='Configuration directory',
+                               default=os.path.expanduser('~/uncloud'))
+
+    etcd_parser = argparse.ArgumentParser(add_help=False)
+    etcd_parser.add_argument('--etcd-host')
+    etcd_parser.add_argument('--etcd-port')
+    etcd_parser.add_argument('--etcd-ca-cert', help='CA that signed the etcd certificate')
+    etcd_parser.add_argument('--etcd-cert-cert', help='Path to client certificate')
+    etcd_parser.add_argument('--etcd-cert-key', help='Path to client certificate key')
+
+    for component in ALL_COMPONENTS:
+        mod = importlib.import_module('uncloud.{}.main'.format(component))
+        parser = getattr(mod, 'arg_parser')
+
+        if component in ETCD_COMPONENTS:
+            subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser, etcd_parser])
+        else:
+            subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser])
+
+    arguments = vars(arg_parser.parse_args())
+    etcd_arguments = [key for key, value in arguments.items() if key.startswith('etcd_') and value]
+    etcd_arguments = {
+        'etcd': {
+            key.replace('etcd_', ''): arguments[key]
+            for key in etcd_arguments
+        }
+    }
+    if not arguments['command']:
+        arg_parser.print_help()
+    else:
+        # Initializing Settings and resolving otp_credentials
+        # It is neccessary to resolve_otp_credentials after argument parsing is done because
+        # previously we were reading config file which was fixed to ~/uncloud/uncloud.conf and
+        # providing the default values for --name, --realm and --seed arguments from the values
+        # we read from file. But, now we are asking user about where the config file lives. So,
+        # to providing default value is not possible before parsing arguments. So, we are doing
+        # it after..
+        # settings.settings = settings.Settings(arguments['conf_dir'], seed_value=etcd_arguments)
+        # resolve_otp_credentials(arguments)
+
+        name = arguments.pop('command')
+        mod = importlib.import_module('uncloud.{}.main'.format(name))
+        main = getattr(mod, 'main')
+
+        if arguments['debug']:
+            logging.basicConfig(level=logging.DEBUG)
+        else:
+            logging.basicConfig(level=logging.INFO)
+
+        log = logging.getLogger()
+
+        try:
+            main(arguments)
+        except UncloudException as err:
+            log.error(err)
+        # except ConnectionFailedError as err:
+        #     log.error('Cannot connect to etcd: {}'.format(err))
+        except Exception as err:
+            log.exception(err)
```
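The wrapper above relies on a plugin-style convention: every component module uncloud.<name>.main exposes an arg_parser and a main(arguments) callable, and the wrapper assembles them into subcommands with argparse's parents mechanism. A self-contained sketch of that pattern, with the components defined inline instead of imported via importlib:

```python
# Sketch of the plugin-style CLI dispatch used above: each "component"
# exposes an arg_parser and a main(arguments) callable, and the wrapper
# stitches them together as subcommands.
import argparse

def make_component(prog):
    # add_help=False is required on parsers used as parents,
    # otherwise the -h option would conflict.
    parser = argparse.ArgumentParser(prog=prog, add_help=False)
    parser.add_argument('--dry-run', action='store_true')
    def main(arguments):
        print(prog, 'called with', arguments)
    return parser, main

COMPONENTS = {name: make_component(name) for name in ('api', 'scheduler')}

top = argparse.ArgumentParser()
subparsers = top.add_subparsers(dest='command')
for name, (parser, _) in COMPONENTS.items():
    # parents= copies the component's arguments onto the subcommand
    subparsers.add_parser(name=parser.prog, parents=[parser])

arguments = vars(top.parse_args(['api', '--dry-run']))
COMPONENTS[arguments.pop('command')][1](arguments)
```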
setup.py (75 changed lines)

```diff
@@ -1,35 +1,50 @@
+import os
+
 from setuptools import setup, find_packages
 
 with open("README.md", "r") as fh:
     long_description = fh.read()
 
-setup(name='ucloud',
-      version='0.0.1',
-      description='All ucloud server components.',
-      url='https://code.ungleich.ch/ucloud/ucloud',
-      long_description=long_description,
-      long_description_content_type='text/markdown',
-      classifiers=[
-          'Development Status :: 3 - Alpha',
-          'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
-          'Programming Language :: Python :: 3'
-      ],
-      author='ungleich',
-      author_email='technik@ungleich.ch',
-      packages=find_packages(),
-      install_requires=[
-          'requests',
-          'python-decouple',
-          'flask',
-          'flask-restful',
-          'bitmath',
-          'pyotp',
-          'sshtunnel',
-          'sphinx',
-          'pynetbox',
-          'sphinx-rtd-theme',
-          'etcd3_wrapper @ https://code.ungleich.ch/ungleich-public/etcd3_wrapper/repository/master/archive.tar.gz#egg=etcd3_wrapper',
-          'etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3',
-      ],
-      scripts=['scripts/ucloud'],
-      zip_safe=False)
+try:
+    import uncloud.version
+
+    version = uncloud.version.VERSION
+except:
+    import subprocess
+
+    c = subprocess.check_output(["git", "describe"])
+    version = c.decode("utf-8").strip()
+
+setup(
+    name="uncloud",
+    version=version,
+    description="uncloud cloud management",
+    url="https://code.ungleich.ch/uncloud/uncloud",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    classifiers=[
+        "Development Status :: 3 - Alpha",
+        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
+        "Programming Language :: Python :: 3",
+    ],
+    author="ungleich",
+    author_email="technik@ungleich.ch",
+    packages=find_packages(),
+    install_requires=[
+        "requests",
+        "Flask>=1.1.1",
+        "flask-restful",
+        "bitmath",
+        "pyotp",
+        "pynetbox",
+        "colorama",
+        "etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3",
+        "marshmallow"
+    ],
+    scripts=["scripts/uncloud"],
+    data_files=[
+        (os.path.expanduser("~/uncloud/"), ["conf/uncloud.conf"])
+    ],
+    zip_safe=False,
+)
```
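The try/except at the top of the new setup.py implements a two-tier version lookup: released tarballs ship a generated version module (written by bin/gen-version), while a git checkout falls back to git describe. A sketch of the pattern in isolation; the module name mypkg is hypothetical:

```python
# Sketch of the version-resolution pattern used in setup.py: prefer a
# generated version module (shipped in releases), fall back to git
# metadata in a development checkout.
import subprocess

def resolve_version():
    try:
        from mypkg.version import VERSION  # file written at release time
        return VERSION
    except ImportError:
        # Development checkout: derive the version from the latest git tag
        out = subprocess.check_output(["git", "describe"])
        return out.decode("utf-8").strip()

print(resolve_version())
```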
```diff
@@ -1,16 +0,0 @@
-import json
-import os
-
-from uuid import uuid4
-
-from ucloud.config import etcd_client, env_vars
-
-data = {
-    "is_public": True,
-    "type": "ceph",
-    "name": "images",
-    "description": "first ever public image-store",
-    "attributes": {"list": [], "key": [], "pool": "images"},
-}
-
-etcd_client.put(os.path.join(env_vars.get('IMAGE_STORE_PREFIX'), uuid4().hex), json.dumps(data))
```
```diff
@@ -1,517 +0,0 @@
-import json
-import pynetbox
-
-from uuid import uuid4
-from os.path import join as join_path
-
-from flask import Flask, request
-from flask_restful import Resource, Api
-
-from ucloud.common import counters
-from ucloud.common.vm import VMStatus
-from ucloud.common.request import RequestEntry, RequestType
-from ucloud.config import (etcd_client, request_pool, vm_pool, host_pool, env_vars, image_storage_handler)
-from . import schemas
-from .helper import generate_mac, mac2ipv6
-from . import logger
-
-app = Flask(__name__)
-api = Api(app)
-
-
-class CreateVM(Resource):
-    """API Request to Handle Creation of VM"""
-
-    @staticmethod
-    def post():
-        data = request.json
-        validator = schemas.CreateVMSchema(data)
-        if validator.is_valid():
-            vm_uuid = uuid4().hex
-            vm_key = join_path(env_vars.get("VM_PREFIX"), vm_uuid)
-            specs = {
-                "cpu": validator.specs["cpu"],
-                "ram": validator.specs["ram"],
-                "os-ssd": validator.specs["os-ssd"],
-                "hdd": validator.specs["hdd"],
-            }
-            macs = [generate_mac() for _ in range(len(data["network"]))]
-            tap_ids = [counters.increment_etcd_counter(etcd_client, "/v1/counter/tap")
-                       for _ in range(len(data["network"]))]
-            vm_entry = {
-                "name": data["vm_name"],
-                "owner": data["name"],
-                "owner_realm": data["realm"],
-                "specs": specs,
-                "hostname": "",
-                "status": VMStatus.stopped,
-                "image_uuid": validator.image_uuid,
-                "log": [],
-                "vnc_socket": "",
-                "network": list(zip(data["network"], macs, tap_ids)),
-                "metadata": {"ssh-keys": []},
-            }
-            etcd_client.put(vm_key, vm_entry, value_in_json=True)
-
-            # Create ScheduleVM Request
-            r = RequestEntry.from_scratch(
-                type=RequestType.ScheduleVM, uuid=vm_uuid,
-                request_prefix=env_vars.get("REQUEST_PREFIX")
-            )
-            request_pool.put(r)
-
-            return {"message": "VM Creation Queued"}, 200
-        return validator.get_errors(), 400
-
-
-class VmStatus(Resource):
-    @staticmethod
-    def get():
-        data = request.json
-        validator = schemas.VMStatusSchema(data)
-        if validator.is_valid():
-            vm = vm_pool.get(
-                join_path(env_vars.get("VM_PREFIX"), data["uuid"])
-            )
-            vm_value = vm.value.copy()
-            vm_value["ip"] = []
-            for network_mac_and_tap in vm.network:
-                network_name, mac, tap = network_mac_and_tap
-                network = etcd_client.get(
-                    join_path(
-                        env_vars.get("NETWORK_PREFIX"),
-                        data["name"],
-                        network_name,
-                    ),
-                    value_in_json=True,
-                )
-                ipv6_addr = network.value.get("ipv6").split("::")[0] + "::"
-                vm_value["ip"].append(mac2ipv6(mac, ipv6_addr))
-            vm.value = vm_value
-            return vm.value
-        else:
-            return validator.get_errors(), 400
-
-
-class CreateImage(Resource):
-    @staticmethod
-    def post():
-        data = request.json
-        validator = schemas.CreateImageSchema(data)
-        if validator.is_valid():
-            file_entry = etcd_client.get(
-                join_path(env_vars.get("FILE_PREFIX"), data["uuid"])
-            )
-            file_entry_value = json.loads(file_entry.value)
-
-            image_entry_json = {
-                "status": "TO_BE_CREATED",
-                "owner": file_entry_value["owner"],
-                "filename": file_entry_value["filename"],
-                "name": data["name"],
-                "store_name": data["image_store"],
-                "visibility": "public",
-            }
-            etcd_client.put(
-                join_path(env_vars.get("IMAGE_PREFIX"), data["uuid"]),
-                json.dumps(image_entry_json),
-            )
-
-            return {"message": "Image queued for creation."}
-        return validator.get_errors(), 400
-
-
-class ListPublicImages(Resource):
-    @staticmethod
-    def get():
-        images = etcd_client.get_prefix(
-            env_vars.get("IMAGE_PREFIX"), value_in_json=True
-        )
-        r = {
-            "images": []
-        }
-        for image in images:
-            image_key = "{}:{}".format(
-                image.value["store_name"], image.value["name"]
-            )
-            r["images"].append(
-                {"name": image_key, "status": image.value["status"]}
-            )
-        return r, 200
-
-
-class VMAction(Resource):
-    @staticmethod
-    def post():
-        data = request.json
-        validator = schemas.VmActionSchema(data)
-
-        if validator.is_valid():
-            vm_entry = vm_pool.get(
-                join_path(env_vars.get("VM_PREFIX"), data["uuid"])
-            )
-            action = data["action"]
-
-            if action == "start":
-                action = "schedule"
-
-            if action == "delete" and vm_entry.hostname == "":
-                if image_storage_handler.is_vm_image_exists(vm_entry.uuid):
-                    r_status = image_storage_handler.delete_vm_image(vm_entry.uuid)
-                    if r_status:
-                        etcd_client.client.delete(vm_entry.key)
-                        return {"message": "VM successfully deleted"}
-                    else:
-                        logger.error("Some Error Occurred while deleting VM")
-                        return {"message": "VM deletion unsuccessfull"}
-                else:
-                    etcd_client.client.delete(vm_entry.key)
-                    return {"message": "VM successfully deleted"}
-
-            r = RequestEntry.from_scratch(
-                type="{}VM".format(action.title()),
-                uuid=data["uuid"],
-                hostname=vm_entry.hostname,
-                request_prefix=env_vars.get("REQUEST_PREFIX")
-            )
-            request_pool.put(r)
-            return {"message": "VM {} Queued".format(action.title())}, 200
-        else:
-            return validator.get_errors(), 400
-
-
-class VMMigration(Resource):
-    @staticmethod
-    def post():
-        data = request.json
-        validator = schemas.VmMigrationSchema(data)
-
-        if validator.is_valid():
-            vm = vm_pool.get(data["uuid"])
-
-            r = RequestEntry.from_scratch(
-                type=RequestType.ScheduleVM,
-                uuid=vm.uuid,
-                destination=join_path(
-                    env_vars.get("HOST_PREFIX"), validator.destination.value
-                ),
-                migration=True,
-                request_prefix=env_vars.get("REQUEST_PREFIX")
-            )
-            request_pool.put(r)
-            return {"message": "VM Migration Initialization Queued"}, 200
-        else:
-            return validator.get_errors(), 400
-
-
-class ListUserVM(Resource):
-    @staticmethod
-    def get():
-        data = request.json
-        validator = schemas.OTPSchema(data)
-
-        if validator.is_valid():
-            vms = etcd_client.get_prefix(
-                env_vars.get("VM_PREFIX"), value_in_json=True
-            )
-            return_vms = []
-            user_vms = filter(lambda v: v.value["owner"] == data["name"], vms)
-            for vm in user_vms:
-                return_vms.append(
-                    {
-                        "name": vm.value["name"],
-                        "vm_uuid": vm.key.split("/")[-1],
-                        "specs": vm.value["specs"],
-                        "status": vm.value["status"],
-                        "hostname": vm.value["hostname"],
-                        # "mac": vm.value["mac"],
-                        "vnc_socket": None
-                        if vm.value.get("vnc_socket", None) is None
-                        else vm.value["vnc_socket"],
-                    }
-                )
-            if return_vms:
-                return {"message": return_vms}, 200
-            return {"message": "No VM found"}, 404
-
-        else:
-            return validator.get_errors(), 400
-
-
-class ListUserFiles(Resource):
-    @staticmethod
-    def get():
-        data = request.json
-        validator = schemas.OTPSchema(data)
-
-        if validator.is_valid():
-            files = etcd_client.get_prefix(
-                env_vars.get("FILE_PREFIX"), value_in_json=True
-            )
-            return_files = []
-            user_files = list(
-                filter(lambda f: f.value["owner"] == data["name"], files)
-            )
-            for file in user_files:
-                return_files.append(
-                    {
-                        "filename": file.value["filename"],
-                        "uuid": file.key.split("/")[-1],
-                    }
-                )
-            return {"message": return_files}, 200
-        else:
-            return validator.get_errors(), 400
-
-
-class CreateHost(Resource):
-    @staticmethod
-    def post():
-        data = request.json
-        validator = schemas.CreateHostSchema(data)
-        if validator.is_valid():
-            host_key = join_path(env_vars.get("HOST_PREFIX"), uuid4().hex)
-            host_entry = {
-                "specs": data["specs"],
-                "hostname": data["hostname"],
-                "status": "DEAD",
-                "last_heartbeat": "",
-            }
-            etcd_client.put(host_key, host_entry, value_in_json=True)
-
-            return {"message": "Host Created"}, 200
-
-        return validator.get_errors(), 400
-
-
-class ListHost(Resource):
-    @staticmethod
-    def get():
-        hosts = host_pool.hosts
-        r = {
-            host.key: {
-                "status": host.status,
-                "specs": host.specs,
-                "hostname": host.hostname,
-            }
-            for host in hosts
-        }
-        return r, 200
-
-
-class GetSSHKeys(Resource):
-    @staticmethod
-    def get():
-        data = request.json
-        validator = schemas.GetSSHSchema(data)
-        if validator.is_valid():
-            if not validator.key_name.value:
-
-                # {user_prefix}/{realm}/{name}/key/
-                etcd_key = join_path(
-                    env_vars.get('USER_PREFIX'),
-                    data["realm"],
-                    data["name"],
-                    "key",
-                )
-                etcd_entry = etcd_client.get_prefix(
-                    etcd_key, value_in_json=True
-                )
-
-                keys = {
-                    key.key.split("/")[-1]: key.value for key in etcd_entry
-                }
-                return {"keys": keys}
-            else:
-
-                # {user_prefix}/{realm}/{name}/key/{key_name}
-                etcd_key = join_path(
-                    env_vars.get('USER_PREFIX'),
-                    data["realm"],
-                    data["name"],
-                    "key",
-                    data["key_name"],
-                )
-                etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
-
-                if etcd_entry:
-                    return {
-                        "keys": {
-                            etcd_entry.key.split("/")[-1]: etcd_entry.value
-                        }
-                    }
-                else:
-                    return {"keys": {}}
-        else:
-            return validator.get_errors(), 400
-
-
-class AddSSHKey(Resource):
-    @staticmethod
-    def post():
-        data = request.json
-        validator = schemas.AddSSHSchema(data)
-        if validator.is_valid():
-
-            # {user_prefix}/{realm}/{name}/key/{key_name}
-            etcd_key = join_path(
-                env_vars.get("USER_PREFIX"),
-                data["realm"],
-                data["name"],
-                "key",
-                data["key_name"],
-            )
-            etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
-            if etcd_entry:
-                return {
-                    "message": "Key with name '{}' already exists".format(
-                        data["key_name"]
-                    )
-                }
-            else:
-                # Key Not Found. It implies user' haven't added any key yet.
-                etcd_client.put(etcd_key, data["key"], value_in_json=True)
-                return {"message": "Key added successfully"}
-        else:
-            return validator.get_errors(), 400
-
-
-class RemoveSSHKey(Resource):
-    @staticmethod
-    def get():
-        data = request.json
-        validator = schemas.RemoveSSHSchema(data)
-        if validator.is_valid():
-
-            # {user_prefix}/{realm}/{name}/key/{key_name}
-            etcd_key = join_path(
-                env_vars.get("USER_PREFIX"),
-                data["realm"],
-                data["name"],
-                "key",
-                data["key_name"],
-            )
-            etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
-            if etcd_entry:
-                etcd_client.client.delete(etcd_key)
-                return {"message": "Key successfully removed."}
-            else:
-                return {
-                    "message": "No Key with name '{}' Exists at all.".format(
-                        data["key_name"]
-                    )
-                }
-        else:
-            return validator.get_errors(), 400
-
-
-class CreateNetwork(Resource):
-    @staticmethod
-    def post():
-        data = request.json
-        validator = schemas.CreateNetwork(data)
-
-        if validator.is_valid():
-
-            network_entry = {
-                "id": counters.increment_etcd_counter(
-                    etcd_client, "/v1/counter/vxlan"
-                ),
-                "type": data["type"],
-            }
-            if validator.user.value:
-                nb = pynetbox.api(
-                    url=env_vars.get("NETBOX_URL"),
-                    token=env_vars.get("NETBOX_TOKEN"),
-                )
-                nb_prefix = nb.ipam.prefixes.get(
-                    prefix=env_vars.get("PREFIX")
-                )
-
-                prefix = nb_prefix.available_prefixes.create(
-                    data={
-                        "prefix_length": env_vars.get(
-                            "PREFIX_LENGTH", cast=int
-                        ),
-                        "description": '{}\'s network "{}"'.format(
-                            data["name"], data["network_name"]
-                        ),
-                        "is_pool": True,
-                    }
-                )
-                network_entry["ipv6"] = prefix["prefix"]
-            else:
-                network_entry["ipv6"] = "fd00::/64"
-
-            network_key = join_path(
-                env_vars.get("NETWORK_PREFIX"),
-                data["name"],
-                data["network_name"],
-            )
-            etcd_client.put(network_key, network_entry, value_in_json=True)
-            return {"message": "Network successfully added."}
-        else:
-            return validator.get_errors(), 400
-
-
-class ListUserNetwork(Resource):
-    @staticmethod
-    def get():
-        data = request.json
-        validator = schemas.OTPSchema(data)
-
-        if validator.is_valid():
-            prefix = join_path(
-                env_vars.get("NETWORK_PREFIX"), data["name"]
-            )
-            networks = etcd_client.get_prefix(prefix, value_in_json=True)
-            user_networks = []
-            for net in networks:
-                net.value["name"] = net.key.split("/")[-1]
-                user_networks.append(net.value)
-            return {"networks": user_networks}, 200
-        else:
-            return validator.get_errors(), 400
-
-
-api.add_resource(CreateVM, "/vm/create")
-api.add_resource(VmStatus, "/vm/status")
-
-api.add_resource(VMAction, "/vm/action")
-api.add_resource(VMMigration, "/vm/migrate")
-
-api.add_resource(CreateImage, "/image/create")
-api.add_resource(ListPublicImages, "/image/list-public")
-
-api.add_resource(ListUserVM, "/user/vms")
-api.add_resource(ListUserFiles, "/user/files")
-api.add_resource(ListUserNetwork, "/user/networks")
-
-api.add_resource(AddSSHKey, "/user/add-ssh")
-api.add_resource(RemoveSSHKey, "/user/remove-ssh")
-api.add_resource(GetSSHKeys, "/user/get-ssh")
-
-api.add_resource(CreateHost, "/host/create")
-api.add_resource(ListHost, "/host/list")
-
-api.add_resource(CreateNetwork, "/network/create")
-
-
-def main():
-    image_stores = list(etcd_client.get_prefix(env_vars.get('IMAGE_STORE_PREFIX'), value_in_json=True))
-    if len(image_stores) == 0:
-        data = {
-            "is_public": True,
-            "type": "ceph",
-            "name": "images",
-            "description": "first ever public image-store",
-            "attributes": {"list": [], "key": [], "pool": "images"},
-        }
-
-        etcd_client.put(join_path(env_vars.get('IMAGE_STORE_PREFIX'), uuid4().hex), json.dumps(data))
-
-    app.run(host="::", debug=True)
-
-
-if __name__ == "__main__":
-    main()
```
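The deleted VmStatus resource derives each VM's IPv6 address from its MAC and the network's prefix via mac2ipv6, whose implementation is not shown in this diff. A plausible sketch of such a helper is the standard EUI-64 derivation used by SLAAC; treat this as an illustration, not the project's actual code:

```python
# Hypothetical sketch of a mac2ipv6-style helper using the standard
# EUI-64 derivation: flip the universal/local bit of the MAC, insert
# ff:fe in the middle, and append the result to the /64 prefix.
import ipaddress

def mac2ipv6_sketch(mac: str, prefix: str) -> str:
    parts = [int(b, 16) for b in mac.split(":")]
    parts[0] ^= 0x02  # flip the universal/local bit
    eui64 = parts[:3] + [0xFF, 0xFE] + parts[3:]
    groups = ["{:02x}{:02x}".format(eui64[i], eui64[i + 1]) for i in range(0, 8, 2)]
    return str(ipaddress.ip_address(prefix + ":".join(groups)))

print(mac2ipv6_sketch("52:54:00:12:34:56", "fd00::"))
# -> fd00::5054:ff:fe12:3456
```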
```diff
@@ -1,54 +0,0 @@
-import logging
-import socket
-import requests
-import json
-
-from ipaddress import ip_address
-
-from os.path import join as join_path
-
-
-def create_package_loggers(packages, base_path, mode="a"):
-    loggers = {}
-    for pkg in packages:
-        logger = logging.getLogger(pkg)
-        logger_handler = logging.FileHandler(
-            join_path(base_path, "{}.txt".format(pkg)),
-            mode=mode
-        )
-        logger.setLevel(logging.DEBUG)
-        logger_handler.setFormatter(logging.Formatter(fmt="%(asctime)s: %(levelname)s - %(message)s",
-                                                      datefmt="%d-%b-%y %H:%M:%S"))
-        logger.addHandler(logger_handler)
-        loggers[pkg] = logger
-
-
-# TODO: Should be removed as soon as migration
-# mechanism is finalized inside ucloud
-def get_ipv4_address():
-    # If host is connected to internet
-    # Return IPv4 address of machine
-    # Otherwise, return 127.0.0.1
-    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
-        try:
-            s.connect(("8.8.8.8", 80))
-        except socket.timeout:
-            address = "127.0.0.1"
-        except Exception as e:
-            logging.getLogger().exception(e)
-            address = "127.0.0.1"
-        else:
-            address = s.getsockname()[0]
-
-    return address
-
-
-def get_ipv6_address():
-    try:
-        r = requests.get("https://api6.ipify.org?format=json")
-        content = json.loads(r.content.decode("utf-8"))
-        ip = ip_address(content["ip"]).exploded
-    except Exception as e:
-        logging.exception(e)
-    else:
-        return ip
```
```diff
@@ -1,41 +0,0 @@
-from etcd3_wrapper import Etcd3Wrapper
-
-from ucloud.common.host import HostPool
-from ucloud.common.request import RequestPool
-from ucloud.common.vm import VmPool
-from ucloud.common.storage_handlers import FileSystemBasedImageStorageHandler, CEPHBasedImageStorageHandler
-from decouple import Config, RepositoryEnv, RepositoryEmpty
-
-
-# Try importing config, but don't fail if it does not exist
-try:
-    env_vars = Config(RepositoryEnv('/etc/ucloud/ucloud.conf'))
-except FileNotFoundError:
-    env_vars = Config(RepositoryEmpty())
-
-
-etcd_wrapper_args = ()
-etcd_wrapper_kwargs = {
-    'host': env_vars.get('ETCD_URL', 'localhost'),
-    'port': env_vars.get('ETCD_PORT', 2379),
-    'ca_cert': env_vars.get('CA_CERT', None),
-    'cert_cert': env_vars.get('CERT_CERT', None),
-    'cert_key': env_vars.get('CERT_KEY', None)
-}
-
-etcd_client = Etcd3Wrapper(*etcd_wrapper_args, **etcd_wrapper_kwargs)
-
-host_pool = HostPool(etcd_client, env_vars.get('HOST_PREFIX'))
-vm_pool = VmPool(etcd_client, env_vars.get('VM_PREFIX'))
-request_pool = RequestPool(etcd_client, env_vars.get('REQUEST_PREFIX'))
-
-running_vms = []
-
-__storage_backend = env_vars.get("STORAGE_BACKEND")
-if __storage_backend == "filesystem":
-    image_storage_handler = FileSystemBasedImageStorageHandler(vm_base=env_vars.get("VM_DIR"),
-                                                               image_base=env_vars.get("IMAGE_DIR"))
-elif __storage_backend == "ceph":
-    image_storage_handler = CEPHBasedImageStorageHandler(vm_base="ssd", image_base="ssd")
-else:
-    raise Exception("Unknown Image Storage Handler")
```
```diff
@@ -1,126 +0,0 @@
-import glob
-import os
-import pathlib
-import subprocess as sp
-import time
-from uuid import uuid4
-
-from . import logger
-from ucloud.config import env_vars, etcd_client
-
-
-def getxattr(file, attr):
-    """Get specified user extended attribute (arg:attr) of a file (arg:file)"""
-    try:
-        attr = "user." + attr
-        value = sp.check_output(['getfattr', file,
-                                 '--name', attr,
-                                 '--only-values',
-                                 '--absolute-names'], stderr=sp.DEVNULL)
-        value = value.decode("utf-8")
-    except sp.CalledProcessError as e:
-        logger.exception(e)
-        value = None
-
-    return value
-
-
-def setxattr(file, attr, value):
-    """Set specified user extended attribute (arg:attr) equal to (arg:value)
-    of a file (arg:file)"""
-
-    attr = "user." + attr
-    sp.check_output(['setfattr', file,
-                     '--name', attr,
-                     '--value', str(value)])
-
-
-def sha512sum(file: str):
-    """Use sha512sum utility to compute sha512 sum of arg:file
-
-    IF arg:file does not exists:
-        raise FileNotFoundError exception
-    ELSE IF sum successfully computer:
-        return computed sha512 sum
-    ELSE:
-        return None
-    """
-    if not isinstance(file, str): raise TypeError
-    try:
-        output = sp.check_output(["sha512sum", file], stderr=sp.PIPE)
-    except sp.CalledProcessError as e:
-        error = e.stderr.decode("utf-8")
-        if "No such file or directory" in error:
-            raise FileNotFoundError from None
-    else:
-        output = output.decode("utf-8").strip()
-        output = output.split(" ")
-        return output[0]
-    return None
-
-
-try:
-    sp.check_output(['which', 'getfattr'])
-    sp.check_output(['which', 'setfattr'])
-except Exception as e:
-    logger.exception(e)
-    print('Make sure you have getfattr and setfattr available')
-    exit(1)
-
-
-def main():
-    BASE_DIR = env_vars.get("BASE_DIR")
-
-    FILE_PREFIX = env_vars.get("FILE_PREFIX")
-
-    # Recursively Get All Files and Folder below BASE_DIR
-    files = glob.glob("{}/**".format(BASE_DIR), recursive=True)
-
-    # Retain only Files
-    files = list(filter(os.path.isfile, files))
-
-    untracked_files = list(
-        filter(lambda f: not bool(getxattr(f, "user.utracked")), files)
-    )
-
-    tracked_files = list(
-        filter(lambda f: f not in untracked_files, files)
-    )
-    for file in untracked_files:
-        file_id = uuid4()
-
-        # Get Username
-        owner = pathlib.Path(file).parts[3]
-        # Get Creation Date of File
-        # Here, we are assuming that ctime is creation time
-        # which is mostly not true.
-        creation_date = time.ctime(os.stat(file).st_ctime)
-
-        # Get File Size
-        size = os.path.getsize(file)
-
-        # Compute sha512 sum
-        sha_sum = sha512sum(file)
-
-        # File Path excluding base and username
-        file_path = pathlib.Path(file).parts[4:]
-        file_path = os.path.join(*file_path)
-
-        # Create Entry
-        entry_key = os.path.join(FILE_PREFIX, str(file_id))
-        entry_value = {
-            "filename": file_path,
-            "owner": owner,
-            "sha512sum": sha_sum,
-            "creation_date": creation_date,
-            "size": size
-        }
-
-        print("Tracking {}".format(file))
-        # Insert Entry
-        etcd_client.put(entry_key, entry_value, value_in_json=True)
-        setxattr(file, "user.utracked", True)
-
-
-if __name__ == "__main__":
-    main()
```
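The scanner shells out to getfattr/setfattr to mark files as tracked via user extended attributes. On Linux, the same bookkeeping can be sketched with the standard library's os.getxattr/os.setxattr, shown here purely as an alternative illustration, not as the project's code:

```python
# Sketch: the "tracked" marker using Python's built-in xattr support
# (Linux only), avoiding the getfattr/setfattr subprocess calls above.
import os

MARKER = "user.utracked"

def is_tracked(path: str) -> bool:
    try:
        return os.getxattr(path, MARKER) == b"True"
    except OSError:  # attribute not set, or filesystem lacks xattr support
        return False

def mark_tracked(path: str) -> None:
    os.setxattr(path, MARKER, b"True")

# Example usage (requires an xattr-capable filesystem):
#   open("/tmp/example.qcow2", "w").close()
#   mark_tracked("/tmp/example.qcow2")
#   print(is_tracked("/tmp/example.qcow2"))
```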
```diff
@@ -1,13 +0,0 @@
-import socket
-
-from contextlib import closing
-
-
-def find_free_port():
-    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
-        try:
-            s.bind(('', 0))
-            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        except Exception:
-            return None
-        else:
-            return s.getsockname()[1]
```
@ -1,143 +0,0 @@
|
||||||
import argparse
|
|
||||||
import multiprocessing as mp
|
|
||||||
import time
|
|
||||||
|
|
||||||
from etcd3_wrapper import Etcd3Wrapper
|
|
||||||
|
|
||||||
from ucloud.common.request import RequestEntry, RequestType
|
|
||||||
from ucloud.config import (vm_pool, request_pool,
|
|
||||||
etcd_client, running_vms,
|
|
||||||
                           etcd_wrapper_args, etcd_wrapper_kwargs,
                           HostPool, env_vars)

from .helper import find_free_port
from . import virtualmachine
from ucloud.host import logger


def update_heartbeat(hostname):
    """Update the last heartbeat time for :param hostname: in etcd."""
    client = Etcd3Wrapper(*etcd_wrapper_args, **etcd_wrapper_kwargs)
    host_pool = HostPool(client, env_vars.get('HOST_PREFIX'))
    this_host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)

    while True:
        this_host.update_heartbeat()
        host_pool.put(this_host)
        time.sleep(10)


def maintenance(host):
    # Check the VMs that are running according to the running_vms list.

    # This is to detect a successful migration of a VM.
    # Suppose this host is running "vm1" and a user initiated a
    # request to migrate "vm1" to some other host. On successful
    # migration the destination host sets the VM's hostname to
    # itself. So, we check whether each of this host's VMs has
    # been migrated successfully; if yes, we shut down "vm1" on
    # this host.

    to_be_removed = []
    for running_vm in running_vms:
        with vm_pool.get_put(running_vm.key) as vm_entry:
            if vm_entry.hostname != host.key and not vm_entry.in_migration:
                running_vm.handle.shutdown()
                logger.info("VM migration not completed successfully.")
                to_be_removed.append(running_vm)

    for r in to_be_removed:
        running_vms.remove(r)

    # Check the VMs that are running according to the etcd entries.
    alleged_running_vms = vm_pool.by_status("RUNNING", vm_pool.by_host(host.key))

    for vm_entry in alleged_running_vms:
        _vm = virtualmachine.get_vm(running_vms, vm_entry.key)
        # Check whether the allegedly running VM is actually in our
        # running_vms list. If it is said to be running on this host
        # but in fact is not, we need to shut it down.

        # This catches a poweroff/shutdown initiated by a user inside
        # the VM, or a crash of the VM caused by some process a user
        # is running.
        if not _vm or not _vm.handle.is_running():
            logger.debug("_vm = %s, is_running() = %s",
                         _vm, _vm.handle.is_running() if _vm else None)
            vm_entry.add_log("""{} is not running but is said to be running.
So, shutting it down and declaring it killed""".format(vm_entry.key))
            vm_entry.declare_killed()
            vm_pool.put(vm_entry)
            if _vm:
                running_vms.remove(_vm)


def main(hostname):
    heartbeat_updating_process = mp.Process(target=update_heartbeat, args=(hostname,))

    host_pool = HostPool(etcd_client, env_vars.get('HOST_PREFIX'))
    host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
    assert host is not None, "No such host with name = {}".format(hostname)

    try:
        heartbeat_updating_process.start()
    except Exception as e:
        logger.info("No need to go further. Our heartbeat updating mechanism is not working.")
        logger.exception(e)
        exit(-1)

    logger.info("%s Session Started %s", '*' * 5, '*' * 5)

    # It has been observed that under heavy load the timeout event does
    # not arrive in a predictable manner (which is intentional, because
    # we give higher priority to customer requests). That delays the
    # heartbeat update, which in turn makes the scheduler believe the
    # host is dead when it is actually alive. So, to make sure we
    # update the heartbeat predictably, we run the heartbeat updating
    # mechanism in a separate process.

    for events_iterator in [
        etcd_client.get_prefix(env_vars.get('REQUEST_PREFIX'), value_in_json=True),
        etcd_client.watch_prefix(env_vars.get('REQUEST_PREFIX'), timeout=10, value_in_json=True),
    ]:
        for request_event in events_iterator:
            request_event = RequestEntry(request_event)

            if request_event.type == "TIMEOUT":
                maintenance(host)
                continue

            # If the event is directed toward me OR I am the destination of an InitVMMigration
            if request_event.hostname == host.key or request_event.destination == host.key:
                logger.debug("VM Request: %s", request_event)

                request_pool.client.client.delete(request_event.key)
                vm_entry = vm_pool.get(request_event.uuid)

                if vm_entry:
                    if request_event.type == RequestType.StartVM:
                        virtualmachine.start(vm_entry)

                    elif request_event.type == RequestType.StopVM:
                        virtualmachine.stop(vm_entry)

                    elif request_event.type == RequestType.DeleteVM:
                        virtualmachine.delete(vm_entry)

                    elif request_event.type == RequestType.InitVMMigration:
                        virtualmachine.start(vm_entry, host.key, find_free_port())

                    elif request_event.type == RequestType.TransferVM:
                        virtualmachine.transfer(request_event)
                else:
                    logger.info("VM entry missing")

                logger.info("Running VMs %s", running_vms)


if __name__ == "__main__":
    argparser = argparse.ArgumentParser()
    argparser.add_argument("hostname", help="Name of this host. e.g. /v1/host/1")
    args = argparser.parse_args()
    mp.set_start_method('spawn')
    main(args.hostname)
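Because the comment above stresses that the heartbeat must stay predictable under load, it runs in its own process rather than a thread. A minimal, self-contained sketch of that pattern (hypothetical names, independent of the ucloud modules):

import multiprocessing as mp
import time


def beat(interval):
    # Stand-in for update_heartbeat(); in ucloud this writes to etcd.
    while True:
        print("heartbeat")
        time.sleep(interval)


if __name__ == "__main__":
    # 'spawn' starts the child with a fresh interpreter, so it does not
    # inherit sockets or etcd client state from the parent.
    mp.set_start_method('spawn')
    mp.Process(target=beat, args=(10,), daemon=True).start()
    time.sleep(25)  # parent keeps working; the child beats independently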
@@ -1,537 +0,0 @@
# QEMU library
#
# Copyright (C) 2015-2016 Red Hat Inc.
# Copyright (C) 2012 IBM Corp.
#
# Authors:
#  Fam Zheng <famz@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.  See
# the COPYING file in the top-level directory.
#
# Based on qmp.py.
#

import errno
import logging
import os
import shutil
import socket
import subprocess
import tempfile

from . import qmp

LOG = logging.getLogger(__name__)

# Mapping host architecture to any additional architectures it can
# support which often includes its 32 bit cousin.
ADDITIONAL_ARCHES = {
    "x86_64": "i386",
    "aarch64": "armhf"
}


def kvm_available(target_arch=None):
    host_arch = os.uname()[4]
    if target_arch and target_arch != host_arch:
        if target_arch != ADDITIONAL_ARCHES.get(host_arch):
            return False
    return os.access("/dev/kvm", os.R_OK | os.W_OK)


class QEMUMachineError(Exception):
    """
    Exception raised when an error in QEMUMachine happens.
    """


class QEMUMachineAddDeviceError(QEMUMachineError):
    """
    Exception raised when a request to add a device can not be fulfilled

    The failures are caused by limitations, lack of information or conflicting
    requests on the QEMUMachine methods. This exception does not represent
    failures reported by the QEMU binary itself.
    """


class MonitorResponseError(qmp.QMPError):
    """
    Represents an erroneous QMP monitor reply
    """

    def __init__(self, reply):
        try:
            desc = reply["error"]["desc"]
        except KeyError:
            desc = reply
        super(MonitorResponseError, self).__init__(desc)
        self.reply = reply


class QEMUMachine(object):
    """
    A QEMU VM

    Use this object as a context manager to ensure the QEMU process terminates::

        with VM(binary) as vm:
            ...
        # vm is guaranteed to be shut down here
    """

    def __init__(self, binary, args=None, wrapper=None, name=None,
                 test_dir="/var/tmp", monitor_address=None,
                 socket_scm_helper=None):
        '''
        Initialize a QEMUMachine

        @param binary: path to the qemu binary
        @param args: list of extra arguments
        @param wrapper: list of arguments used as prefix to qemu binary
        @param name: prefix for socket and log file names (default: qemu-PID)
        @param test_dir: where to create socket and log file
        @param monitor_address: address for QMP monitor
        @param socket_scm_helper: helper program, required for send_fd_scm()
        @note: Qemu process is not started until launch() is used.
        '''
        if args is None:
            args = []
        if wrapper is None:
            wrapper = []
        if name is None:
            name = "qemu-%d" % os.getpid()
        self._name = name
        self._monitor_address = monitor_address
        self._vm_monitor = None
        self._qemu_log_path = None
        self._qemu_log_file = None
        self._popen = None
        self._binary = binary
        self._args = list(args)  # Force copy args in case we modify them
        self._wrapper = wrapper
        self._events = []
        self._iolog = None
        self._socket_scm_helper = socket_scm_helper
        self._qmp = None
        self._qemu_full_args = None
        self._test_dir = test_dir
        self._temp_dir = None
        self._launched = False
        self._machine = None
        self._console_set = False
        self._console_device_type = None
        self._console_address = None
        self._console_socket = None

        # just in case logging wasn't configured by the main script:
        logging.basicConfig(level=logging.DEBUG)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()
        return False

    # This can be used to add an unused monitor instance.
    def add_monitor_null(self):
        self._args.append('-monitor')
        self._args.append('null')

    def add_fd(self, fd, fdset, opaque, opts=''):
        """
        Pass a file descriptor to the VM
        """
        options = ['fd=%d' % fd,
                   'set=%d' % fdset,
                   'opaque=%s' % opaque]
        if opts:
            options.append(opts)

        # This did not exist before 3.4, but since then it is
        # mandatory for our purpose
        if hasattr(os, 'set_inheritable'):
            os.set_inheritable(fd, True)

        self._args.append('-add-fd')
        self._args.append(','.join(options))
        return self

    # Exactly one of fd and file_path must be given.
    # (If it is file_path, the helper will open that file and pass its
    # own fd)
    def send_fd_scm(self, fd=None, file_path=None):
        # In iotest.py, the qmp should always use unix socket.
        assert self._qmp.is_scm_available()
        if self._socket_scm_helper is None:
            raise QEMUMachineError("No path to socket_scm_helper set")
        if not os.path.exists(self._socket_scm_helper):
            raise QEMUMachineError("%s does not exist" %
                                   self._socket_scm_helper)

        # This did not exist before 3.4, but since then it is
        # mandatory for our purpose
        if hasattr(os, 'set_inheritable'):
            os.set_inheritable(self._qmp.get_sock_fd(), True)
            if fd is not None:
                os.set_inheritable(fd, True)

        fd_param = ["%s" % self._socket_scm_helper,
                    "%d" % self._qmp.get_sock_fd()]

        if file_path is not None:
            assert fd is None
            fd_param.append(file_path)
        else:
            assert fd is not None
            fd_param.append(str(fd))

        devnull = open(os.path.devnull, 'rb')
        proc = subprocess.Popen(fd_param, stdin=devnull, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, close_fds=False)
        output = proc.communicate()[0]
        if output:
            LOG.debug(output)

        return proc.returncode

    @staticmethod
    def _remove_if_exists(path):
        """
        Remove file object at path if it exists
        """
        try:
            os.remove(path)
        except OSError as exception:
            if exception.errno == errno.ENOENT:
                return
            raise

    def is_running(self):
        return self._popen is not None and self._popen.poll() is None

    def exitcode(self):
        if self._popen is None:
            return None
        return self._popen.poll()

    def get_pid(self):
        if not self.is_running():
            return None
        return self._popen.pid

    def _load_io_log(self):
        if self._qemu_log_path is not None:
            with open(self._qemu_log_path, "r") as iolog:
                self._iolog = iolog.read()

    def _base_args(self):
        if isinstance(self._monitor_address, tuple):
            moncdev = "socket,id=mon,host=%s,port=%s" % (
                self._monitor_address[0],
                self._monitor_address[1])
        else:
            moncdev = 'socket,id=mon,path=%s' % self._vm_monitor
        args = ['-chardev', moncdev,
                '-mon', 'chardev=mon,mode=control']
        if self._machine is not None:
            args.extend(['-machine', self._machine])
        if self._console_set:
            self._console_address = os.path.join(self._temp_dir,
                                                 self._name + "-console.sock")
            chardev = ('socket,id=console,path=%s,server,nowait' %
                       self._console_address)
            args.extend(['-chardev', chardev])
            if self._console_device_type is None:
                args.extend(['-serial', 'chardev:console'])
            else:
                device = '%s,chardev=console' % self._console_device_type
                args.extend(['-device', device])
        return args

    def _pre_launch(self):
        self._temp_dir = tempfile.mkdtemp(dir=self._test_dir)
        if self._monitor_address is not None:
            self._vm_monitor = self._monitor_address
        else:
            self._vm_monitor = os.path.join(self._temp_dir,
                                            self._name + "-monitor.sock")
        self._qemu_log_path = os.path.join(self._temp_dir, self._name + ".log")
        self._qemu_log_file = open(self._qemu_log_path, 'wb')

        self._qmp = qmp.QEMUMonitorProtocol(self._vm_monitor,
                                            server=True)

    def _post_launch(self):
        self._qmp.accept()

    def _post_shutdown(self):
        if self._qemu_log_file is not None:
            self._qemu_log_file.close()
            self._qemu_log_file = None

        self._qemu_log_path = None

        if self._console_socket is not None:
            self._console_socket.close()
            self._console_socket = None

        if self._temp_dir is not None:
            shutil.rmtree(self._temp_dir)
            self._temp_dir = None

    def launch(self):
        """
        Launch the VM and make sure we cleanup and expose the
        command line/output in case of exception
        """

        if self._launched:
            raise QEMUMachineError('VM already launched')

        self._iolog = None
        self._qemu_full_args = None
        try:
            self._launch()
            self._launched = True
        except:
            self.shutdown()

            LOG.debug('Error launching VM')
            if self._qemu_full_args:
                LOG.debug('Command: %r', ' '.join(self._qemu_full_args))
            if self._iolog:
                LOG.debug('Output: %r', self._iolog)
                raise Exception(self._iolog)
            raise

    def _launch(self):
        """
        Launch the VM and establish a QMP connection
        """
        devnull = open(os.path.devnull, 'rb')
        self._pre_launch()
        self._qemu_full_args = (self._wrapper + [self._binary] +
                                self._base_args() + self._args)
        LOG.debug('VM launch command: %r', ' '.join(self._qemu_full_args))
        self._popen = subprocess.Popen(self._qemu_full_args,
                                       stdin=devnull,
                                       stdout=self._qemu_log_file,
                                       stderr=subprocess.STDOUT,
                                       shell=False,
                                       close_fds=False)
        self._post_launch()

    def wait(self):
        """
        Wait for the VM to power off
        """
        self._popen.wait()
        self._qmp.close()
        self._load_io_log()
        self._post_shutdown()

    def shutdown(self):
        """
        Terminate the VM and clean up
        """
        if self.is_running():
            try:
                self._qmp.cmd('quit')
                self._qmp.close()
            except:
                self._popen.kill()
            self._popen.wait()

        self._load_io_log()
        self._post_shutdown()

        exitcode = self.exitcode()
        if exitcode is not None and exitcode < 0:
            msg = 'qemu received signal %i: %s'
            if self._qemu_full_args:
                command = ' '.join(self._qemu_full_args)
            else:
                command = ''
            LOG.warn(msg, -exitcode, command)

        self._launched = False

    def qmp(self, cmd, conv_keys=True, **args):
        """
        Invoke a QMP command and return the response dict
        """
        qmp_args = dict()
        for key, value in args.items():
            if conv_keys:
                qmp_args[key.replace('_', '-')] = value
            else:
                qmp_args[key] = value

        return self._qmp.cmd(cmd, args=qmp_args)

    def command(self, cmd, conv_keys=True, **args):
        """
        Invoke a QMP command.
        On success return the response dict.
        On failure raise an exception.
        """
        reply = self.qmp(cmd, conv_keys, **args)
        if reply is None:
            raise qmp.QMPError("Monitor is closed")
        if "error" in reply:
            raise MonitorResponseError(reply)
        return reply["return"]

    def get_qmp_event(self, wait=False):
        """
        Poll for one queued QMP event and return it
        """
        if len(self._events) > 0:
            return self._events.pop(0)
        return self._qmp.pull_event(wait=wait)

    def get_qmp_events(self, wait=False):
        """
        Poll for queued QMP events and return a list of dicts
        """
        events = self._qmp.get_events(wait=wait)
        events.extend(self._events)
        del self._events[:]
        self._qmp.clear_events()
        return events

    @staticmethod
    def event_match(event, match=None):
        """
        Check if an event matches optional match criteria.

        The match criteria takes the form of a matching subdict. The event is
        checked to be a superset of the subdict, recursively, with matching
        values whenever the subdict values are not None.

        This has a limitation that you cannot explicitly check for None values.

        Examples, with the subdict queries on the left:
         - None matches any object.
         - {"foo": None} matches {"foo": {"bar": 1}}
         - {"foo": None} matches {"foo": 5}
         - {"foo": {"abc": None}} does not match {"foo": {"bar": 1}}
         - {"foo": {"rab": 2}} matches {"foo": {"bar": 1, "rab": 2}}
        """
        if match is None:
            return True

        try:
            for key in match:
                if key in event:
                    if not QEMUMachine.event_match(event[key], match[key]):
                        return False
                else:
                    return False
            return True
        except TypeError:
            # either match or event wasn't iterable (not a dict)
            return match == event

    def event_wait(self, name, timeout=60.0, match=None):
        """
        event_wait waits for and returns a named event from QMP with a timeout.

        name: The event to wait for.
        timeout: QEMUMonitorProtocol.pull_event timeout parameter.
        match: Optional match criteria. See event_match for details.
        """
        return self.events_wait([(name, match)], timeout)

    def events_wait(self, events, timeout=60.0):
        """
        events_wait waits for and returns a named event from QMP with a timeout.

        events: a sequence of (name, match_criteria) tuples.
                The match criteria are optional and may be None.
                See event_match for details.
        timeout: QEMUMonitorProtocol.pull_event timeout parameter.
        """

        def _match(event):
            for name, match in events:
                if (event['event'] == name and
                        self.event_match(event, match)):
                    return True
            return False

        # Search cached events
        for event in self._events:
            if _match(event):
                self._events.remove(event)
                return event

        # Poll for new events
        while True:
            event = self._qmp.pull_event(wait=timeout)
            if _match(event):
                return event
            self._events.append(event)

        return None

    def get_log(self):
        """
        After self.shutdown or failed qemu execution, this returns the output
        of the qemu process.
        """
        return self._iolog

    def add_args(self, *args):
        """
        Adds to the list of extra arguments to be given to the QEMU binary
        """
        self._args.extend(args)

    def set_machine(self, machine_type):
        """
        Sets the machine type

        If set, the machine type will be added to the base arguments
        of the resulting QEMU command line.
        """
        self._machine = machine_type

    def set_console(self, device_type=None):
        """
        Sets the device type for a console device

        If set, the console device and a backing character device will
        be added to the base arguments of the resulting QEMU command
        line.

        This is a convenience method that will either use the provided
        device type, or default to a "-serial chardev:console" command
        line argument.

        The actual setting of command line arguments will be done at
        machine launch time, as it depends on the temporary directory
        to be created.

        @param device_type: the device type, such as "isa-serial". If
                            None is given (the default value) a "-serial
                            chardev:console" command line argument will
                            be used instead, resorting to the machine's
                            default device type.
        """
        self._console_set = True
        self._console_device_type = device_type

    @property
    def console_socket(self):
        """
        Returns a socket connected to the console
        """
        if self._console_socket is None:
            self._console_socket = socket.socket(socket.AF_UNIX,
                                                 socket.SOCK_STREAM)
            self._console_socket.connect(self._console_address)
        return self._console_socket
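For reference, a typical way to drive the class above looks like the following sketch (the binary path and extra arguments are placeholders):

# A minimal sketch, assuming a local qemu-system-x86_64 binary.
with QEMUMachine('/usr/bin/qemu-system-x86_64',
                 args=['-m', '128', '-display', 'none']) as vm:
    vm.launch()
    status = vm.command('query-status')  # raises MonitorResponseError on an error reply
    print(status['status'])              # e.g. 'running'
# leaving the context manager calls shutdown(), which quits QEMU via QMP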
@@ -1,255 +0,0 @@
# QEMU Monitor Protocol Python class
#
# Copyright (C) 2009, 2010 Red Hat Inc.
#
# Authors:
#  Luiz Capitulino <lcapitulino@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.  See
# the COPYING file in the top-level directory.

import errno
import json
import logging
import socket


class QMPError(Exception):
    pass


class QMPConnectError(QMPError):
    pass


class QMPCapabilitiesError(QMPError):
    pass


class QMPTimeoutError(QMPError):
    pass


class QEMUMonitorProtocol(object):
    #: Logger object for debugging messages
    logger = logging.getLogger('QMP')
    #: Socket's error class
    error = socket.error
    #: Socket's timeout
    timeout = socket.timeout

    def __init__(self, address, server=False):
        """
        Create a QEMUMonitorProtocol class.

        @param address: QEMU address, can be either a unix socket path (string)
                        or a tuple in the form ( address, port ) for a TCP
                        connection
        @param server: server mode listens on the socket (bool)
        @raise socket.error on socket connection errors
        @note No connection is established, this is done by the connect() or
              accept() methods
        """
        self.__events = []
        self.__address = address
        self.__sock = self.__get_sock()
        self.__sockfile = None
        if server:
            self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.__sock.bind(self.__address)
            self.__sock.listen(1)

    def __get_sock(self):
        if isinstance(self.__address, tuple):
            family = socket.AF_INET
        else:
            family = socket.AF_UNIX
        return socket.socket(family, socket.SOCK_STREAM)

    def __negotiate_capabilities(self):
        greeting = self.__json_read()
        if greeting is None or "QMP" not in greeting:
            raise QMPConnectError
        # Greeting seems ok, negotiate capabilities
        resp = self.cmd('qmp_capabilities')
        if "return" in resp:
            return greeting
        raise QMPCapabilitiesError

    def __json_read(self, only_event=False):
        while True:
            data = self.__sockfile.readline()
            if not data:
                return
            resp = json.loads(data)
            if 'event' in resp:
                self.logger.debug("<<< %s", resp)
                self.__events.append(resp)
                if not only_event:
                    continue
            return resp

    def __get_events(self, wait=False):
        """
        Check for new events in the stream and cache them in __events.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
                                period elapses.
        @raise QMPConnectError: If wait is True but no events could be
                                retrieved or if some other error occurred.
        """

        # Check for new events regardless and pull them into the cache:
        self.__sock.setblocking(0)
        try:
            self.__json_read()
        except socket.error as err:
            if err.errno == errno.EAGAIN:
                # No data available
                pass
        self.__sock.setblocking(1)

        # Wait for new events, if needed.
        # if wait is 0.0, this means "no wait" and is also implicitly false.
        if not self.__events and wait:
            if isinstance(wait, float):
                self.__sock.settimeout(wait)
            try:
                ret = self.__json_read(only_event=True)
            except socket.timeout:
                raise QMPTimeoutError("Timeout waiting for event")
            except:
                raise QMPConnectError("Error while reading from socket")
            if ret is None:
                raise QMPConnectError("Error while reading from socket")
            self.__sock.settimeout(None)

    def connect(self, negotiate=True):
        """
        Connect to the QMP Monitor and perform capabilities negotiation.

        @return QMP greeting dict
        @raise socket.error on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities
        """
        self.__sock.connect(self.__address)
        self.__sockfile = self.__sock.makefile()
        if negotiate:
            return self.__negotiate_capabilities()

    def accept(self):
        """
        Await connection from QMP Monitor and perform capabilities negotiation.

        @return QMP greeting dict
        @raise socket.error on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities
        """
        self.__sock.settimeout(15)
        self.__sock, _ = self.__sock.accept()
        self.__sockfile = self.__sock.makefile()
        return self.__negotiate_capabilities()

    def cmd_obj(self, qmp_cmd):
        """
        Send a QMP command to the QMP Monitor.

        @param qmp_cmd: QMP command to be sent as a Python dict
        @return QMP response as a Python dict or None if the connection has
                been closed
        """
        self.logger.debug(">>> %s", qmp_cmd)
        try:
            self.__sock.sendall(json.dumps(qmp_cmd).encode('utf-8'))
        except socket.error as err:
            if err.errno == errno.EPIPE:
                return
            raise
        resp = self.__json_read()
        self.logger.debug("<<< %s", resp)
        return resp

    def cmd(self, name, args=None, cmd_id=None):
        """
        Build a QMP command and send it to the QMP Monitor.

        @param name: command name (string)
        @param args: command arguments (dict)
        @param cmd_id: command id (dict, list, string or int)
        """
        qmp_cmd = {'execute': name}
        if args:
            qmp_cmd['arguments'] = args
        if cmd_id:
            qmp_cmd['id'] = cmd_id
        return self.cmd_obj(qmp_cmd)

    def command(self, cmd, **kwds):
        """
        Build and send a QMP command to the monitor, report errors if any
        """
        ret = self.cmd(cmd, kwds)
        if "error" in ret:
            raise Exception(ret['error']['desc'])
        return ret['return']

    def pull_event(self, wait=False):
        """
        Pulls a single event.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
                                period elapses.
        @raise QMPConnectError: If wait is True but no events could be
                                retrieved or if some other error occurred.

        @return The first available QMP event, or None.
        """
        self.__get_events(wait)

        if self.__events:
            return self.__events.pop(0)
        return None

    def get_events(self, wait=False):
        """
        Get a list of available QMP events.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
                                period elapses.
        @raise QMPConnectError: If wait is True but no events could be
                                retrieved or if some other error occurred.

        @return The list of available QMP events.
        """
        self.__get_events(wait)
        return self.__events

    def clear_events(self):
        """
        Clear current list of pending events.
        """
        self.__events = []

    def close(self):
        self.__sock.close()
        self.__sockfile.close()

    def settimeout(self, timeout):
        self.__sock.settimeout(timeout)

    def get_sock_fd(self):
        return self.__sock.fileno()

    def is_scm_available(self):
        return self.__sock.family == socket.AF_UNIX
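As a usage sketch, this is how a client connects to a QEMU instance started with a QMP socket (the socket path is hypothetical, e.g. from -qmp unix:/tmp/qmp.sock,server,nowait):

mon = QEMUMonitorProtocol('/tmp/qmp.sock')
mon.connect()                        # reads the greeting, negotiates qmp_capabilities
print(mon.command('query-version'))  # raises if QEMU returns an 'error' reply
mon.close()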
@@ -1,384 +0,0 @@
# QEMU Manual
# https://qemu.weilnetz.de/doc/qemu-doc.html

# For QEMU Monitor Protocol Commands Information, See
# https://qemu.weilnetz.de/doc/qemu-doc.html#pcsys_005fmonitor

import os
import random
import subprocess as sp
import tempfile
import time

from functools import wraps
from string import Template
from typing import Union
from os.path import join as join_path

import bitmath
import sshtunnel

from ucloud.common.helpers import get_ipv6_address
from ucloud.common.request import RequestEntry, RequestType
from ucloud.common.vm import VMEntry, VMStatus
from ucloud.config import (etcd_client, request_pool,
                           running_vms, vm_pool, env_vars,
                           image_storage_handler)
from . import qmp
from ucloud.host import logger


class VM:
    def __init__(self, key, handle, vnc_socket_file):
        self.key = key  # type: str
        self.handle = handle  # type: qmp.QEMUMachine
        self.vnc_socket_file = vnc_socket_file  # type: tempfile.NamedTemporaryFile

    def __repr__(self):
        return "VM({})".format(self.key)


def delete_network_interface(iface):
    try:
        sp.check_output(['ip', 'link', 'del', iface])
    except Exception:
        pass


def resolve_network(network_name, network_owner):
    network = etcd_client.get(join_path(env_vars.get("NETWORK_PREFIX"),
                                        network_owner,
                                        network_name),
                              value_in_json=True)
    return network


def delete_vm_network(vm_entry):
    try:
        for network in vm_entry.network:
            network_name = network[0]
            tap_mac = network[1]
            tap_id = network[2]

            delete_network_interface('tap{}'.format(tap_id))

            owners_vms = vm_pool.by_owner(vm_entry.owner)
            owners_running_vms = vm_pool.by_status(VMStatus.running,
                                                   _vms=owners_vms)

            networks = map(lambda n: n[0],
                           map(lambda vm: vm.network, owners_running_vms)
                           )
            networks_in_use_by_user_vms = [vm[0] for vm in networks]
            if network_name not in networks_in_use_by_user_vms:
                network_entry = resolve_network(network[0], vm_entry.owner)
                if network_entry:
                    network_type = network_entry.value["type"]
                    network_id = network_entry.value["id"]
                    if network_type == "vxlan":
                        delete_network_interface('br{}'.format(network_id))
                        delete_network_interface('vxlan{}'.format(network_id))
    except Exception:
        logger.exception("Exception in network interface deletion")


def create_dev(script, _id, dev, ip=None):
    command = [script, _id, dev]
    if ip:
        command.append(ip)
    try:
        output = sp.check_output(command, stderr=sp.PIPE)
    except Exception as e:
        print(e.stderr)
        return None
    else:
        return output.decode("utf-8").strip()


def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
    network_script_base = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'network')
    vxlan = create_dev(script=os.path.join(network_script_base, 'create-vxlan.sh'),
                       _id=_id, dev=_dev)
    if vxlan:
        bridge = create_dev(script=os.path.join(network_script_base, 'create-bridge.sh'),
                            _id=_id, dev=vxlan, ip=ip)
        if bridge:
            tap = create_dev(script=os.path.join(network_script_base, 'create-tap.sh'),
                             _id=str(tap_id), dev=bridge)
            if tap:
                return tap


def random_bytes(num=6):
    return [random.randrange(256) for _ in range(num)]


def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='%02x'):
    mac = random_bytes()
    if oui:
        if type(oui) == str:
            oui = [int(chunk) for chunk in oui.split(separator)]
        mac = oui + random_bytes(num=6 - len(oui))
    else:
        if multicast:
            mac[0] |= 1  # set bit 0
        else:
            mac[0] &= ~1  # clear bit 0
        if uaa:
            mac[0] &= ~(1 << 1)  # clear bit 1
        else:
            mac[0] |= 1 << 1  # set bit 1
    return separator.join(byte_fmt % b for b in mac)
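In the branch above, bit 0 of the first octet selects multicast and bit 1 selects local administration, so the defaults always yield a locally administered unicast MAC. A quick check of that property:

mac = generate_mac()
first_octet = int(mac.split(':')[0], 16)
assert first_octet & 0b01 == 0  # bit 0 clear: unicast
assert first_octet & 0b10 != 0  # bit 1 set: locally administered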
def update_radvd_conf(etcd_client):
    network_script_base = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'network')

    networks = {
        net.value['ipv6']: net.value['id']
        for net in etcd_client.get_prefix('/v1/network/', value_in_json=True)
        if net.value.get('ipv6')
    }
    radvd_template = open(os.path.join(network_script_base,
                                       'radvd-template.conf'), 'r').read()
    radvd_template = Template(radvd_template)

    content = [radvd_template.safe_substitute(bridge='br{}'.format(networks[net]),
                                              prefix=net)
               for net in networks if networks.get(net)]

    with open('/etc/radvd.conf', 'w') as radvd_conf:
        radvd_conf.writelines(content)
    try:
        sp.check_output(['systemctl', 'restart', 'radvd'])
    except Exception:
        sp.check_output(['service', 'radvd', 'restart'])


def get_start_command_args(vm_entry, vnc_sock_filename: str, migration=False, migration_port=None):
    threads_per_core = 1
    vm_memory = int(bitmath.parse_string_unsafe(vm_entry.specs["ram"]).to_MB())
    vm_cpus = int(vm_entry.specs["cpu"])
    vm_uuid = vm_entry.uuid
    vm_networks = vm_entry.network

    command = "-name {}_{}".format(vm_entry.owner, vm_entry.name)

    command += " -drive file={},format=raw,if=virtio,cache=none".format(
        image_storage_handler.qemu_path_string(vm_uuid)
    )
    command += " -device virtio-rng-pci -vnc unix:{}".format(vnc_sock_filename)
    command += " -m {} -smp cores={},threads={}".format(
        vm_memory, vm_cpus, threads_per_core
    )

    if migration:
        command += " -incoming tcp:[::]:{}".format(migration_port)

    tap = None
    for network_mac_and_tap in vm_networks:
        network_name, mac, tap = network_mac_and_tap

        _key = os.path.join(env_vars.get('NETWORK_PREFIX'), vm_entry.owner, network_name)
        network = etcd_client.get(_key, value_in_json=True)
        network_type = network.value["type"]
        network_id = str(network.value["id"])
        network_ipv6 = network.value["ipv6"]

        if network_type == "vxlan":
            tap = create_vxlan_br_tap(_id=network_id,
                                      _dev=env_vars.get("VXLAN_PHY_DEV"),
                                      tap_id=tap,
                                      ip=network_ipv6)
            update_radvd_conf(etcd_client)

        command += " -netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no" \
                   " -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}" \
            .format(tap=tap, net_id=network_id, mac=mac)

    return command.split(" ")


def create_vm_object(vm_entry, migration=False, migration_port=None):
    # NOTE: If migration suddenly stops working, having different
    #       VNC unix socket filenames on the source and destination
    #       host can be a possible cause of it.

    # REQUIREMENT: Use a Unix socket instead of a TCP port for VNC
    vnc_sock_file = tempfile.NamedTemporaryFile()

    qemu_args = get_start_command_args(
        vm_entry=vm_entry,
        vnc_sock_filename=vnc_sock_file.name,
        migration=migration,
        migration_port=migration_port,
    )
    qemu_machine = qmp.QEMUMachine("/usr/bin/qemu-system-x86_64", args=qemu_args)
    return VM(vm_entry.key, qemu_machine, vnc_sock_file)


def get_vm(vm_list: list, vm_key) -> Union[VM, None]:
    return next((vm for vm in vm_list if vm.key == vm_key), None)


def need_running_vm(func):
    @wraps(func)
    def wrapper(e):
        vm = get_vm(running_vms, e.key)
        if vm:
            try:
                status = vm.handle.command("query-status")
                logger.debug("VM Status Check - %s", status)
            except Exception as exception:
                logger.info("%s failed - VM %s %s", func.__name__, e, exception)
            else:
                return func(e)

            return None
        else:
            logger.info("%s failed because VM %s is not running", func.__name__, e.key)
            return None

    return wrapper


def create(vm_entry: VMEntry):
    if image_storage_handler.is_vm_image_exists(vm_entry.uuid):
        # File already exists; no problem, continue.
        logger.debug("Image for vm %s exists", vm_entry.uuid)
    else:
        vm_hdd = int(bitmath.parse_string_unsafe(vm_entry.specs["os-ssd"]).to_MB())
        if image_storage_handler.make_vm_image(src=vm_entry.image_uuid, dest=vm_entry.uuid):
            if not image_storage_handler.resize_vm_image(path=vm_entry.uuid, size=vm_hdd):
                vm_entry.status = VMStatus.error
            else:
                logger.info("New VM Created")


def start(vm_entry: VMEntry, destination_host_key=None, migration_port=None):
    _vm = get_vm(running_vms, vm_entry.key)

    # VM already running. No need to proceed further.
    if _vm:
        logger.info("VM %s already running", vm_entry.uuid)
        return
    else:
        logger.info("Trying to start %s", vm_entry.uuid)
        if destination_host_key:
            launch_vm(vm_entry, migration=True, migration_port=migration_port,
                      destination_host_key=destination_host_key)
        else:
            create(vm_entry)
            launch_vm(vm_entry)


@need_running_vm
def stop(vm_entry):
    vm = get_vm(running_vms, vm_entry.key)
    vm.handle.shutdown()
    if not vm.handle.is_running():
        vm_entry.add_log("Shutdown successfully")
        vm_entry.declare_stopped()
        vm_pool.put(vm_entry)
        running_vms.remove(vm)
        delete_vm_network(vm_entry)


def delete(vm_entry):
    logger.info("Deleting VM | %s", vm_entry)
    stop(vm_entry)

    if image_storage_handler.is_vm_image_exists(vm_entry.uuid):
        r_status = image_storage_handler.delete_vm_image(vm_entry.uuid)
        if r_status:
            etcd_client.client.delete(vm_entry.key)
    else:
        etcd_client.client.delete(vm_entry.key)


def transfer(request_event):
    # This function runs on the source host, i.e. the host on which the
    # VM is running initially. This host is responsible for transferring
    # the VM state to the destination host.

    _host, _port = request_event.parameters["host"], request_event.parameters["port"]
    _uuid = request_event.uuid
    _destination = request_event.destination_host_key
    vm = get_vm(running_vms, join_path(env_vars.get('VM_PREFIX'), _uuid))

    if vm:
        tunnel = sshtunnel.SSHTunnelForwarder(
            _host,
            ssh_username=env_vars.get("ssh_username"),
            ssh_pkey=env_vars.get("ssh_pkey"),
            remote_bind_address=("127.0.0.1", _port),
            ssh_proxy_enabled=True,
            ssh_proxy=(_host, 22)
        )
        try:
            tunnel.start()
        except sshtunnel.BaseSSHTunnelForwarderError:
            logger.exception("Couldn't establish connection to (%s, 22)", _host)
        else:
            vm.handle.command(
                "migrate", uri="tcp:0.0.0.0:{}".format(tunnel.local_bind_port)
            )

            status = vm.handle.command("query-migrate")["status"]
            while status not in ["failed", "completed"]:
                time.sleep(2)
                status = vm.handle.command("query-migrate")["status"]

            with vm_pool.get_put(request_event.uuid) as source_vm:
                if status == "failed":
                    source_vm.add_log("Migration Failed")
                elif status == "completed":
                    # If the VM is successfully migrated then shut down the VM
                    # on this host and update its hostname to the destination host key
                    source_vm.add_log("Successfully migrated")
                    source_vm.hostname = _destination
                    running_vms.remove(vm)
                    vm.handle.shutdown()
                source_vm.in_migration = False  # VM transfer finished
        finally:
            tunnel.close()


def launch_vm(vm_entry, migration=False, migration_port=None, destination_host_key=None):
    logger.info("Starting %s", vm_entry.key)

    vm = create_vm_object(vm_entry, migration=migration, migration_port=migration_port)
    try:
        vm.handle.launch()
    except Exception:
        logger.exception("Error occurred while starting VM")
        vm.handle.shutdown()

        if migration:
            # We don't care whether MachineError or any other error occurred
            pass
        else:
            # Error during typical launch of a VM
            vm.handle.shutdown()
            vm_entry.declare_killed()
            vm_pool.put(vm_entry)
    else:
        vm_entry.vnc_socket = vm.vnc_socket_file.name
        running_vms.append(vm)

        if migration:
            vm_entry.in_migration = True
            r = RequestEntry.from_scratch(
                type=RequestType.TransferVM,
                hostname=vm_entry.hostname,
                parameters={"host": get_ipv6_address(), "port": migration_port},
                uuid=vm_entry.uuid,
                destination_host_key=destination_host_key,
                request_prefix=env_vars.get("REQUEST_PREFIX")
            )
            request_pool.put(r)
        else:
            # Typical launching of a VM
            vm_entry.status = VMStatus.running
            vm_entry.add_log("Started successfully")

        vm_pool.put(vm_entry)
@@ -1,78 +0,0 @@
import json
import os
import subprocess

from os.path import join as join_path
from ucloud.config import etcd_client, env_vars, image_storage_handler
from ucloud.imagescanner import logger


def qemu_img_type(path):
    qemu_img_info_command = ["qemu-img", "info", "--output", "json", path]
    try:
        qemu_img_info = subprocess.check_output(qemu_img_info_command)
    except Exception as e:
        logger.exception(e)
        return None
    else:
        qemu_img_info = json.loads(qemu_img_info.decode("utf-8"))
        return qemu_img_info["format"]
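The helper simply shells out to qemu-img info --output json and returns the reported format string; a short usage sketch with a hypothetical path:

fmt = qemu_img_type('/var/images/alpine.qcow2')  # hypothetical path
if fmt == 'qcow2':
    print('image needs conversion to raw before import')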
def main():
    # We want to get the image entries that request images to be created
    images = etcd_client.get_prefix(env_vars.get('IMAGE_PREFIX'), value_in_json=True)
    images_to_be_created = list(filter(lambda im: im.value['status'] == 'TO_BE_CREATED', images))

    for image in images_to_be_created:
        try:
            image_uuid = image.key.split('/')[-1]
            image_owner = image.value['owner']
            image_filename = image.value['filename']
            image_store_name = image.value['store_name']
            image_full_path = join_path(env_vars.get('BASE_DIR'), image_owner, image_filename)

            image_stores = etcd_client.get_prefix(env_vars.get('IMAGE_STORE_PREFIX'), value_in_json=True)
            user_image_store = next(filter(
                lambda s, store_name=image_store_name: s.value["name"] == store_name,
                image_stores
            ))

            image_store_pool = user_image_store.value['attributes']['pool']

        except Exception as e:
            logger.exception(e)
        else:
            # At least our basic data is available
            qemu_img_convert_command = ["qemu-img", "convert", "-f", "qcow2",
                                        "-O", "raw", image_full_path, "image.raw"]

            if qemu_img_type(image_full_path) == "qcow2":
                try:
                    # Convert .qcow2 to .raw
                    subprocess.check_output(qemu_img_convert_command)
                except Exception as e:
                    logger.exception(e)
                else:
                    # Import and protect
                    r_status = image_storage_handler.import_image(src="image.raw",
                                                                  dest=image_uuid,
                                                                  protect=True)
                    if r_status:
                        # Everything succeeded
                        image.value["status"] = "CREATED"
                        etcd_client.put(image.key, json.dumps(image.value))

            else:
                # The user-provided image is either not found or of an invalid format
                image.value["status"] = "INVALID_IMAGE"
                etcd_client.put(image.key, json.dumps(image.value))

            try:
                os.remove("image.raw")
            except Exception:
                pass


if __name__ == "__main__":
    main()
@@ -1,91 +0,0 @@
import os

from flask import Flask, request
from flask_restful import Resource, Api

from ucloud.config import etcd_client, env_vars, vm_pool

app = Flask(__name__)
api = Api(app)


def get_vm_entry(mac_addr):
    return next(filter(lambda vm: mac_addr in list(zip(*vm.network))[1], vm_pool.vms), None)


# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
def ipv62mac(ipv6):
    # remove subnet info if given
    subnet_index = ipv6.find('/')
    if subnet_index != -1:
        ipv6 = ipv6[:subnet_index]

    ipv6_parts = ipv6.split(':')
    mac_parts = list()
    for ipv6_part in ipv6_parts[-4:]:
        while len(ipv6_part) < 4:
            ipv6_part = '0' + ipv6_part
        mac_parts.append(ipv6_part[:2])
        mac_parts.append(ipv6_part[-2:])

    # modify parts to match MAC value
    mac_parts[0] = '%02x' % (int(mac_parts[0], 16) ^ 2)
    del mac_parts[4]
    del mac_parts[3]
    return ':'.join(mac_parts)
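This reverses the EUI-64 derivation used for link-local addresses: flip the universal/local bit in the first octet and drop the ff:fe filler in the middle. For example:

assert ipv62mac('fe80::5054:ff:fe12:3456') == '52:54:00:12:34:56'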
class Root(Resource):
    @staticmethod
    def get():
        data = get_vm_entry(ipv62mac(request.remote_addr))

        if not data:
            return {'message': 'Metadata for such VM does not exist.'}, 404
        else:
            # {env_vars.get('USER_PREFIX')}/{realm}/{name}/key
            etcd_key = os.path.join(env_vars.get('USER_PREFIX'), data.value['owner_realm'],
                                    data.value['owner'], 'key')
            etcd_entry = etcd_client.get_prefix(etcd_key, value_in_json=True)
            user_personal_ssh_keys = [key.value for key in etcd_entry]
            data.value['metadata']['ssh-keys'] += user_personal_ssh_keys
            return data.value['metadata'], 200

    @staticmethod
    def post():
        return {'message': 'Previous implementation is deprecated.'}
        # data = etcd_client.get("/v1/metadata/{}".format(request.remote_addr), value_in_json=True)
        # print(data)
        # if data:
        #     for k in request.json:
        #         if k not in data.value:
        #             data.value[k] = request.json[k]
        #             if k.endswith("-list"):
        #                 data.value[k] = [request.json[k]]
        #         else:
        #             if k.endswith("-list"):
        #                 data.value[k].append(request.json[k])
        #             else:
        #                 data.value[k] = request.json[k]
        #     etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
        #                     data.value, value_in_json=True)
        # else:
        #     data = {}
        #     for k in request.json:
        #         data[k] = request.json[k]
        #         if k.endswith("-list"):
        #             data[k] = [request.json[k]]
        #     etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
        #                     data, value_in_json=True)


api.add_resource(Root, '/')


def main():
    app.run(debug=True, host="::", port="80")


if __name__ == '__main__':
    main()
@@ -1,33 +0,0 @@
import sys
import subprocess as sp

from os.path import isdir
from ucloud.config import env_vars


def check():
    ########################
    # ucloud-image-scanner #
    ########################
    if env_vars.get('STORAGE_BACKEND') == 'filesystem' and not isdir(env_vars.get('IMAGE_DIR')):
        print("You have set STORAGE_BACKEND to filesystem, so"
              " {} must exist. But it doesn't.".format(env_vars.get('IMAGE_DIR')))
        sys.exit(1)

    try:
        sp.check_output(['which', 'qemu-img'])
    except Exception:
        print("qemu-img missing")
        sys.exit(1)

    ###############
    # ucloud-host #
    ###############

    if env_vars.get('STORAGE_BACKEND') == 'filesystem' and not isdir(env_vars.get('VM_DIR')):
        print("You have set STORAGE_BACKEND to filesystem, so the vm directory mentioned"
              " in the .env file must exist. But it doesn't.")
        sys.exit(1)


if __name__ == "__main__":
    check()
@@ -1,3 +0,0 @@
import logging

logger = logging.getLogger(__name__)
@ -1,93 +0,0 @@
|
||||||
# TODO
|
|
||||||
# 1. send an email to an email address defined by env['admin-email']
|
|
||||||
# if resources are finished
|
|
||||||
# 2. Introduce a status endpoint of the scheduler -
|
|
||||||
# maybe expose a prometheus compatible output
|
|
||||||
|
|
||||||
from ucloud.common.request import RequestEntry, RequestType
|
|
||||||
from ucloud.config import etcd_client
|
|
||||||
from ucloud.config import host_pool, request_pool, vm_pool, env_vars
|
|
||||||
from .helper import (get_suitable_host, dead_host_mitigation, dead_host_detection,
|
|
||||||
assign_host, NoSuitableHostFound)
|
|
||||||
from . import logger
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
logger.info("%s SESSION STARTED %s", '*' * 5, '*' * 5)
|
|
||||||
|
|
||||||
pending_vms = []
|
|
||||||
|
|
||||||
for request_iterator in [
|
|
||||||
etcd_client.get_prefix(env_vars.get('REQUEST_PREFIX'), value_in_json=True),
|
|
||||||
etcd_client.watch_prefix(env_vars.get('REQUEST_PREFIX'), timeout=5, value_in_json=True),
|
|
||||||
]:
|
|
||||||
for request_event in request_iterator:
|
|
||||||
request_entry = RequestEntry(request_event)
|
|
||||||
# Never Run time critical mechanism inside timeout
|
|
||||||
# mechanism because timeout mechanism only comes
|
|
||||||
# when no other event is happening. It means under
|
|
||||||
# heavy load there would not be a timeout event.
|
|
||||||
if request_entry.type == "TIMEOUT":
|
|
||||||
|
|
||||||
# Detect hosts that are dead and set their status
|
|
||||||
# to "DEAD", and their VMs' status to "KILLED"
|
|
||||||
dead_hosts = dead_host_detection()
|
|
||||||
if dead_hosts:
|
|
||||||
logger.debug("Dead hosts: %s", dead_hosts)
|
|
||||||
dead_host_mitigation(dead_hosts)
|
|
||||||
|
|
||||||
# If there are VMs that weren't assigned a host
|
|
||||||
# because there wasn't a host available which
|
|
||||||
# meets requirement of that VM then we would
|
|
||||||
# create a new ScheduleVM request for that VM
|
|
||||||
# on our behalf.
|
|
||||||
while pending_vms:
|
|
||||||
pending_vm_entry = pending_vms.pop()
|
|
||||||
r = RequestEntry.from_scratch(type="ScheduleVM",
|
|
||||||
uuid=pending_vm_entry.uuid,
|
|
||||||
hostname=pending_vm_entry.hostname,
|
|
||||||
request_prefix=env_vars.get("REQUEST_PREFIX"))
|
|
||||||
request_pool.put(r)
|
|
||||||
|
|
||||||
elif request_entry.type == RequestType.ScheduleVM:
|
|
||||||
logger.debug("%s, %s", request_entry.key, request_entry.value)
|
|
||||||
|
|
||||||
vm_entry = vm_pool.get(request_entry.uuid)
|
|
||||||
if vm_entry is None:
|
|
||||||
logger.info("Trying to act on {} but it is deleted".format(request_entry.uuid))
|
|
||||||
continue
|
|
||||||
etcd_client.client.delete(request_entry.key) # consume Request
|
|
||||||
|
|
||||||
# If the Request is about a VM which is labelled as "migration"
|
|
||||||
# and has a destination
|
|
||||||
if hasattr(request_entry, "migration") and request_entry.migration \
|
|
||||||
and hasattr(request_entry, "destination") and request_entry.destination:
|
|
||||||
try:
|
|
||||||
get_suitable_host(vm_specs=vm_entry.specs,
|
|
||||||
hosts=[host_pool.get(request_entry.destination)])
|
|
||||||
except NoSuitableHostFound:
|
|
||||||
logger.info("Requested destination host doesn't have enough capacity"
|
|
||||||
"to hold %s" % vm_entry.uuid)
|
|
||||||
else:
|
|
||||||
r = RequestEntry.from_scratch(type=RequestType.InitVMMigration,
|
|
||||||
uuid=request_entry.uuid,
|
|
||||||
destination=request_entry.destination,
|
|
||||||
request_prefix=env_vars.get("REQUEST_PREFIX"))
|
|
||||||
request_pool.put(r)
|
|
||||||
|
|
||||||
# If the Request is about a VM that just want to get started/created
|
|
||||||
else:
|
|
||||||
# assign_host only returns None when we couldn't be able to assign
|
|
||||||
# a host to a VM because of resource constraints
|
|
||||||
try:
|
|
||||||
assign_host(vm_entry)
|
|
||||||
except NoSuitableHostFound:
|
|
||||||
vm_entry.add_log("Can't schedule VM. No Resource Left.")
|
|
||||||
vm_pool.put(vm_entry)
|
|
||||||
|
|
||||||
pending_vms.append(vm_entry)
|
|
||||||
logger.info("No Resource Left. Emailing admin....")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
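The deleted scheduler entry point above follows a common etcd consumption pattern: first replay every request that already exists under the request prefix with get_prefix, then block on watch_prefix so new requests are handled as they arrive (with a periodic timeout for housekeeping). A minimal sketch of that pattern against a plain python-etcd3 client; the prefix is a placeholder, and ucloud's wrapper additionally decodes JSON values and synthesizes the TIMEOUT events seen above:

```python
# Replay-then-watch sketch with python-etcd3; ucloud's etcd wrapper layers
# JSON decoding and synthetic TIMEOUT events on top of this basic shape.
import etcd3

client = etcd3.client(host='localhost', port=2379)
prefix = '/v1/request/'  # hypothetical request prefix

# 1. Replay: process requests that were written while we were offline.
for value, metadata in client.get_prefix(prefix):
    print('existing request:', metadata.key, value)

# 2. Watch: block and process new requests as they arrive.
events, cancel = client.watch_prefix(prefix)
for event in events:
    print('incoming event:', event.key, event.value)
```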
2 uncloud/__init__.py Normal file

@@ -0,0 +1,2 @@
+class UncloudException(Exception):
+    pass
@@ -1,6 +1,6 @@
 import os

-from ucloud.config import etcd_client, env_vars
+from uncloud.common.shared import shared


 class Optional:
@@ -19,12 +19,16 @@ class Field:

     def is_valid(self):
         if self.value == KeyError:
-            self.add_error("'{}' field is a required field".format(self.name))
+            self.add_error(
+                "'{}' field is a required field".format(self.name)
+            )
         else:
             if isinstance(self.value, Optional):
                 pass
             elif not isinstance(self.value, self.type):
-                self.add_error("Incorrect Type for '{}' field".format(self.name))
+                self.add_error(
+                    "Incorrect Type for '{}' field".format(self.name)
+                )
             else:
                 self.validation()

@@ -48,6 +52,8 @@ class VmUUIDField(Field):
         self.validation = self.vm_uuid_validation

     def vm_uuid_validation(self):
-        r = etcd_client.get(os.path.join(env_vars.get('VM_PREFIX'), self.uuid))
+        r = shared.etcd_client.get(
+            os.path.join(shared.settings["etcd"]["vm_prefix"], self.uuid)
+        )
         if not r:
             self.add_error("VM with uuid {} does not exists".format(self.uuid))
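A side note on the convention this file relies on: Field receives data.get(name, KeyError), so the KeyError class itself acts as a "missing" sentinel, which lets is_valid() distinguish an absent field from a present but wrongly typed one. A small illustration, assuming only the behavior visible in the hunks above (the payload is invented):

```python
# KeyError-as-sentinel illustration for Field.is_valid(); payload invented.
from uncloud.api.common_fields import Field

data = {'specs': 'not-a-dict'}  # hypothetical request body

specs = Field('specs', dict, data.get('specs', KeyError))
hostname = Field('hostname', str, data.get('hostname', KeyError))

specs.is_valid()     # records "Incorrect Type for 'specs' field"
hostname.is_valid()  # records "'hostname' field is a required field"
```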
19 uncloud/api/create_image_store.py Executable file

@@ -0,0 +1,19 @@
+import json
+import os
+
+from uuid import uuid4
+
+from uncloud.common.shared import shared
+
+data = {
+    'is_public': True,
+    'type': 'ceph',
+    'name': 'images',
+    'description': 'first ever public image-store',
+    'attributes': {'list': [], 'key': [], 'pool': 'images'},
+}
+
+shared.etcd_client.put(
+    os.path.join(shared.settings['etcd']['image_store_prefix'], uuid4().hex),
+    json.dumps(data),
+)
@@ -1,48 +1,51 @@
 import binascii
 import ipaddress
 import random
-import subprocess as sp
+import logging

 import requests

 from pyotp import TOTP

-from ucloud.config import vm_pool, env_vars
+from uncloud.common.shared import shared
+
+logger = logging.getLogger(__name__)


 def check_otp(name, realm, token):
     try:
         data = {
-            "auth_name": env_vars.get("AUTH_NAME"),
-            "auth_token": TOTP(env_vars.get("AUTH_SEED")).now(),
-            "auth_realm": env_vars.get("AUTH_REALM"),
+            "auth_name": shared.settings["otp"]["auth_name"],
+            "auth_token": TOTP(shared.settings["otp"]["auth_seed"]).now(),
+            "auth_realm": shared.settings["otp"]["auth_realm"],
             "name": name,
             "realm": realm,
             "token": token,
         }
-    except binascii.Error:
+    except binascii.Error as err:
+        logger.error(
+            "Cannot compute OTP for seed: {}".format(
+                shared.settings["otp"]["auth_seed"]
+            )
+        )
         return 400

     response = requests.post(
-        "{OTP_SERVER}{OTP_VERIFY_ENDPOINT}".format(
-            OTP_SERVER=env_vars.get("OTP_SERVER", ""),
-            OTP_VERIFY_ENDPOINT=env_vars.get("OTP_VERIFY_ENDPOINT", "verify/"),
-        ),
-        json=data,
+        shared.settings["otp"]["verification_controller_url"], json=data
     )
     return response.status_code


 def resolve_vm_name(name, owner):
     """Return UUID of Virtual Machine of name == name and owner == owner

     Input: name of vm, owner of vm.
     Output: uuid of vm if found otherwise None
     """
     result = next(
         filter(
-            lambda vm: vm.value["owner"] == owner and vm.value["name"] == name,
-            vm_pool.vms,
+            lambda vm: vm.value["owner"] == owner
+            and vm.value["name"] == name,
+            shared.vm_pool.vms,
         ),
         None,
     )
@@ -54,7 +57,7 @@ def resolve_vm_name(name, owner):

 def resolve_image_name(name, etcd_client):
     """Return image uuid given its name and its store

     * If the provided name is not in correct format
       i.e {store_name}:{image_name} return ValueError
     * If no such image found then return KeyError

@@ -70,26 +73,35 @@ def resolve_image_name(name, etcd_client):
         """
         Examples, where it would work and where it would raise exception
         "images:alpine" --> ["images", "alpine"]

         "images" --> ["images"] it would raise Exception as not enough values to unpack

         "images:alpine:meow" --> ["images", "alpine", "meow"] it would raise Exception
         as too many values to unpack
         """
         store_name, image_name = store_name_and_image_name
     except Exception:
-        raise ValueError("Image name not in correct format i.e {store_name}:{image_name}")
+        raise ValueError(
+            "Image name not in correct format i.e {store_name}:{image_name}"
+        )

-    images = etcd_client.get_prefix(env_vars.get('IMAGE_PREFIX'), value_in_json=True)
+    images = etcd_client.get_prefix(
+        shared.settings["etcd"]["image_prefix"], value_in_json=True
+    )

     # Try to find image with name == image_name and store_name == store_name
     try:
-        image = next(filter(lambda im: im.value['name'] == image_name
-                            and im.value['store_name'] == store_name, images))
+        image = next(
+            filter(
+                lambda im: im.value["name"] == image_name
+                and im.value["store_name"] == store_name,
+                images,
+            )
+        )
     except StopIteration:
         raise KeyError("No image with name {} found.".format(name))
     else:
-        image_uuid = image.key.split('/')[-1]
+        image_uuid = image.key.split("/")[-1]

     return image_uuid

@@ -98,7 +110,7 @@ def random_bytes(num=6):
     return [random.randrange(256) for _ in range(num)]


-def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='%02x'):
+def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"):
     mac = random_bytes()
     if oui:
         if type(oui) == str:
@@ -116,36 +128,6 @@ def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"):
     return separator.join(byte_fmt % b for b in mac)


-def get_ip_addr(mac_address, device):
-    """Return IP address of a device provided its mac address / link local address
-    and the device with which it is connected.
-
-    For Example, if we call get_ip_addr(mac_address="52:54:00:12:34:56", device="br0")
-    the following two scenarios can happen
-    1. It would return None if we can't find a device whose mac_address is equal
-       to the arg:mac_address, or the mentioned arg:device does not exist, or the
-       ip address we found is local.
-    2. It would return ip_address of the device whose mac_address is equal to
-       arg:mac_address and is connected/neighbor of arg:device
-    """
-    try:
-        output = sp.check_output(['ip', '-6', 'neigh', 'show', 'dev', device], stderr=sp.PIPE)
-    except sp.CalledProcessError:
-        return None
-    else:
-        result = []
-        output = output.strip().decode("utf-8")
-        output = output.split("\n")
-        for entry in output:
-            entry = entry.split()
-            if entry:
-                ip = ipaddress.ip_address(entry[0])
-                mac = entry[2]
-                if ip.is_global and mac_address == mac:
-                    result.append(ip)
-        return result
-
-
 def mac2ipv6(mac, prefix):
     # only accept MACs separated by a colon
     parts = mac.split(":")
@@ -158,8 +140,9 @@ def mac2ipv6(mac, prefix):
     # format output
     ipv6_parts = [str(0)] * 4
     for i in range(0, len(parts), 2):
-        ipv6_parts.append("".join(parts[i:i + 2]))
+        ipv6_parts.append("".join(parts[i : i + 2]))

     lower_part = ipaddress.IPv6Address(":".join(ipv6_parts))
     prefix = ipaddress.IPv6Address(prefix)
     return str(prefix + int(lower_part))
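The slice reformatting in the last hunk is purely cosmetic, but the grouping it touches is worth spelling out. A worked example of mac2ipv6's "format output" step, assuming the elided middle of the function has already applied the usual EUI-64 expansion (inserting the ff:fe filler, plus any universal/local bit flip) so that parts holds eight two-hex-digit strings:

```python
# Worked example of the "format output" loop in mac2ipv6. The eight-element
# `parts` list is an assumption: the elided middle of the function is taken
# to have inserted the EUI-64 ff:fe filler into the six MAC bytes already.
import ipaddress

parts = ['52', '54', '00', 'ff', 'fe', '12', '34', '56']

ipv6_parts = [str(0)] * 4              # four leading zero groups
for i in range(0, len(parts), 2):
    ipv6_parts.append(''.join(parts[i : i + 2]))
# ipv6_parts == ['0', '0', '0', '0', '5254', '00ff', 'fe12', '3456']

lower_part = ipaddress.IPv6Address(':'.join(ipv6_parts))
prefix = ipaddress.IPv6Address('fd00::')
print(prefix + int(lower_part))        # fd00::5254:ff:fe12:3456
```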
600 uncloud/api/main.py Normal file

@@ -0,0 +1,600 @@
+import json
+import pynetbox
+import logging
+import argparse
+
+from uuid import uuid4
+from os.path import join as join_path
+
+from flask import Flask, request
+from flask_restful import Resource, Api
+from werkzeug.exceptions import HTTPException
+
+from uncloud.common.shared import shared
+
+from uncloud.common import counters
+from uncloud.common.vm import VMStatus
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.api import schemas
+from uncloud.api.helper import generate_mac, mac2ipv6
+from uncloud import UncloudException
+
+logger = logging.getLogger(__name__)
+
+app = Flask(__name__)
+api = Api(app)
+app.logger.handlers.clear()
+
+arg_parser = argparse.ArgumentParser('api', add_help=False)
+arg_parser.add_argument('--port', '-p')
+
+
+@app.errorhandler(Exception)
+def handle_exception(e):
+    app.logger.error(e)
+    # pass through HTTP errors
+    if isinstance(e, HTTPException):
+        return e
+
+    # now you're handling non-HTTP exceptions only
+    return {'message': 'Server Error'}, 500
+
+
+class CreateVM(Resource):
+    """API Request to Handle Creation of VM"""
+
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.CreateVMSchema(data)
+        if validator.is_valid():
+            vm_uuid = uuid4().hex
+            vm_key = join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
+            specs = {
+                'cpu': validator.specs['cpu'],
+                'ram': validator.specs['ram'],
+                'os-ssd': validator.specs['os-ssd'],
+                'hdd': validator.specs['hdd'],
+            }
+            macs = [generate_mac() for _ in range(len(data['network']))]
+            tap_ids = [
+                counters.increment_etcd_counter(
+                    shared.etcd_client, shared.settings['etcd']['tap_counter']
+                )
+                for _ in range(len(data['network']))
+            ]
+            vm_entry = {
+                'name': data['vm_name'],
+                'owner': data['name'],
+                'owner_realm': data['realm'],
+                'specs': specs,
+                'hostname': '',
+                'status': VMStatus.stopped,
+                'image_uuid': validator.image_uuid,
+                'log': [],
+                'vnc_socket': '',
+                'network': list(zip(data['network'], macs, tap_ids)),
+                'metadata': {'ssh-keys': []},
+                'in_migration': False,
+            }
+            shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
+
+            # Create ScheduleVM Request
+            r = RequestEntry.from_scratch(
+                type=RequestType.ScheduleVM,
+                uuid=vm_uuid,
+                request_prefix=shared.settings['etcd']['request_prefix'],
+            )
+            shared.request_pool.put(r)
+
+            return {'message': 'VM Creation Queued'}, 200
+        return validator.get_errors(), 400
+
+
+class VmStatus(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.VMStatusSchema(data)
+        if validator.is_valid():
+            vm = shared.vm_pool.get(
+                join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
+            )
+            vm_value = vm.value.copy()
+            vm_value['ip'] = []
+            for network_mac_and_tap in vm.network:
+                network_name, mac, tap = network_mac_and_tap
+                network = shared.etcd_client.get(
+                    join_path(
+                        shared.settings['etcd']['network_prefix'],
+                        data['name'],
+                        network_name,
+                    ),
+                    value_in_json=True,
+                )
+                ipv6_addr = (
+                    network.value.get('ipv6').split('::')[0] + '::'
+                )
+                vm_value['ip'].append(mac2ipv6(mac, ipv6_addr))
+            vm.value = vm_value
+            return vm.value
+        else:
+            return validator.get_errors(), 400
+
+
+class CreateImage(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.CreateImageSchema(data)
+        if validator.is_valid():
+            file_entry = shared.etcd_client.get(
+                join_path(shared.settings['etcd']['file_prefix'], data['uuid'])
+            )
+            file_entry_value = json.loads(file_entry.value)
+
+            image_entry_json = {
+                'status': 'TO_BE_CREATED',
+                'owner': file_entry_value['owner'],
+                'filename': file_entry_value['filename'],
+                'name': data['name'],
+                'store_name': data['image_store'],
+                'visibility': 'public',
+            }
+            shared.etcd_client.put(
+                join_path(
+                    shared.settings['etcd']['image_prefix'], data['uuid']
+                ),
+                json.dumps(image_entry_json),
+            )
+
+            return {'message': 'Image queued for creation.'}
+        return validator.get_errors(), 400
+
+
+class ListPublicImages(Resource):
+    @staticmethod
+    def get():
+        images = shared.etcd_client.get_prefix(
+            shared.settings['etcd']['image_prefix'], value_in_json=True
+        )
+        r = {'images': []}
+        for image in images:
+            image_key = '{}:{}'.format(
+                image.value['store_name'], image.value['name']
+            )
+            r['images'].append(
+                {'name': image_key, 'status': image.value['status']}
+            )
+        return r, 200
+
+
+class VMAction(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.VmActionSchema(data)
+
+        if validator.is_valid():
+            vm_entry = shared.vm_pool.get(
+                join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
+            )
+            action = data['action']
+
+            if action == 'start':
+                action = 'schedule'
+
+            if action == 'delete' and vm_entry.hostname == '':
+                if shared.storage_handler.is_vm_image_exists(
+                    vm_entry.uuid
+                ):
+                    r_status = shared.storage_handler.delete_vm_image(
+                        vm_entry.uuid
+                    )
+                    if r_status:
+                        shared.etcd_client.client.delete(vm_entry.key)
+                        return {'message': 'VM successfully deleted'}
+                    else:
+                        logger.error(
+                            'Some Error Occurred while deleting VM'
+                        )
+                        return {'message': 'VM deletion unsuccessfull'}
+                else:
+                    shared.etcd_client.client.delete(vm_entry.key)
+                    return {'message': 'VM successfully deleted'}
+
+            r = RequestEntry.from_scratch(
+                type='{}VM'.format(action.title()),
+                uuid=data['uuid'],
+                hostname=vm_entry.hostname,
+                request_prefix=shared.settings['etcd']['request_prefix'],
+            )
+            shared.request_pool.put(r)
+            return (
+                {'message': 'VM {} Queued'.format(action.title())},
+                200,
+            )
+        else:
+            return validator.get_errors(), 400
+
+
+class VMMigration(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.VmMigrationSchema(data)
+
+        if validator.is_valid():
+            vm = shared.vm_pool.get(data['uuid'])
+            r = RequestEntry.from_scratch(
+                type=RequestType.InitVMMigration,
+                uuid=vm.uuid,
+                hostname=join_path(
+                    shared.settings['etcd']['host_prefix'],
+                    validator.destination.value,
+                ),
+                request_prefix=shared.settings['etcd']['request_prefix'],
+            )
+
+            shared.request_pool.put(r)
+            return (
+                {'message': 'VM Migration Initialization Queued'},
+                200,
+            )
+        else:
+            return validator.get_errors(), 400
+
+
+class ListUserVM(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.OTPSchema(data)
+
+        if validator.is_valid():
+            vms = shared.etcd_client.get_prefix(
+                shared.settings['etcd']['vm_prefix'], value_in_json=True
+            )
+            return_vms = []
+            user_vms = filter(
+                lambda v: v.value['owner'] == data['name'], vms
+            )
+            for vm in user_vms:
+                return_vms.append(
+                    {
+                        'name': vm.value['name'],
+                        'vm_uuid': vm.key.split('/')[-1],
+                        'specs': vm.value['specs'],
+                        'status': vm.value['status'],
+                        'hostname': vm.value['hostname'],
+                        'vnc_socket': vm.value.get('vnc_socket', None),
+                    }
+                )
+            if return_vms:
+                return {'message': return_vms}, 200
+            return {'message': 'No VM found'}, 404
+
+        else:
+            return validator.get_errors(), 400
+
+
+class ListUserFiles(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.OTPSchema(data)
+
+        if validator.is_valid():
+            files = shared.etcd_client.get_prefix(
+                shared.settings['etcd']['file_prefix'], value_in_json=True
+            )
+            return_files = []
+            user_files = [f for f in files if f.value['owner'] == data['name']]
+            for file in user_files:
+                file_uuid = file.key.split('/')[-1]
+                file = file.value
+                file['uuid'] = file_uuid
+
+                file.pop('sha512sum', None)
+                file.pop('owner', None)
+
+                return_files.append(file)
+            return {'message': return_files}, 200
+        else:
+            return validator.get_errors(), 400
+
+
+class CreateHost(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.CreateHostSchema(data)
+        if validator.is_valid():
+            host_key = join_path(
+                shared.settings['etcd']['host_prefix'], uuid4().hex
+            )
+            host_entry = {
+                'specs': data['specs'],
+                'hostname': data['hostname'],
+                'status': 'DEAD',
+                'last_heartbeat': '',
+            }
+            shared.etcd_client.put(
+                host_key, host_entry, value_in_json=True
+            )
+
+            return {'message': 'Host Created'}, 200
+
+        return validator.get_errors(), 400
+
+
+class ListHost(Resource):
+    @staticmethod
+    def get():
+        hosts = shared.host_pool.hosts
+        r = {
+            host.key: {
+                'status': host.status,
+                'specs': host.specs,
+                'hostname': host.hostname,
+            }
+            for host in hosts
+        }
+        return r, 200
+
+
+class GetSSHKeys(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.GetSSHSchema(data)
+        if validator.is_valid():
+            if not validator.key_name.value:
+
+                # {user_prefix}/{realm}/{name}/key/
+                etcd_key = join_path(
+                    shared.settings['etcd']['user_prefix'],
+                    data['realm'],
+                    data['name'],
+                    'key',
+                )
+                etcd_entry = shared.etcd_client.get_prefix(
+                    etcd_key, value_in_json=True
+                )
+
+                keys = {
+                    key.key.split('/')[-1]: key.value
+                    for key in etcd_entry
+                }
+                return {'keys': keys}
+            else:
+
+                # {user_prefix}/{realm}/{name}/key/{key_name}
+                etcd_key = join_path(
+                    shared.settings['etcd']['user_prefix'],
+                    data['realm'],
+                    data['name'],
+                    'key',
+                    data['key_name'],
+                )
+                etcd_entry = shared.etcd_client.get(
+                    etcd_key, value_in_json=True
+                )
+
+                if etcd_entry:
+                    return {
+                        'keys': {
+                            etcd_entry.key.split('/')[
+                                -1
+                            ]: etcd_entry.value
+                        }
+                    }
+                else:
+                    return {'keys': {}}
+        else:
+            return validator.get_errors(), 400
+
+
+class AddSSHKey(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.AddSSHSchema(data)
+        if validator.is_valid():
+
+            # {user_prefix}/{realm}/{name}/key/{key_name}
+            etcd_key = join_path(
+                shared.settings['etcd']['user_prefix'],
+                data['realm'],
+                data['name'],
+                'key',
+                data['key_name'],
+            )
+            etcd_entry = shared.etcd_client.get(
+                etcd_key, value_in_json=True
+            )
+            if etcd_entry:
+                return {
+                    'message': 'Key with name "{}" already exists'.format(
+                        data['key_name']
+                    )
+                }
+            else:
+                # Key Not Found. It implies the user hasn't added any key yet.
+                shared.etcd_client.put(
+                    etcd_key, data['key'], value_in_json=True
+                )
+                return {'message': 'Key added successfully'}
+        else:
+            return validator.get_errors(), 400
+
+
+class RemoveSSHKey(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.RemoveSSHSchema(data)
+        if validator.is_valid():
+
+            # {user_prefix}/{realm}/{name}/key/{key_name}
+            etcd_key = join_path(
+                shared.settings['etcd']['user_prefix'],
+                data['realm'],
+                data['name'],
+                'key',
+                data['key_name'],
+            )
+            etcd_entry = shared.etcd_client.get(
+                etcd_key, value_in_json=True
+            )
+            if etcd_entry:
+                shared.etcd_client.client.delete(etcd_key)
+                return {'message': 'Key successfully removed.'}
+            else:
+                return {
+                    'message': 'No Key with name "{}" Exists at all.'.format(
+                        data['key_name']
+                    )
+                }
+        else:
+            return validator.get_errors(), 400
+
+
+class CreateNetwork(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.CreateNetwork(data)
+
+        if validator.is_valid():
+
+            network_entry = {
+                'id': counters.increment_etcd_counter(
+                    shared.etcd_client, shared.settings['etcd']['vxlan_counter']
+                ),
+                'type': data['type'],
+            }
+            if validator.user.value:
+                try:
+                    nb = pynetbox.api(
+                        url=shared.settings['netbox']['url'],
+                        token=shared.settings['netbox']['token'],
+                    )
+                    nb_prefix = nb.ipam.prefixes.get(
+                        prefix=shared.settings['network']['prefix']
+                    )
+                    prefix = nb_prefix.available_prefixes.create(
+                        data={
+                            'prefix_length': int(
+                                shared.settings['network']['prefix_length']
+                            ),
+                            'description': '{}\'s network "{}"'.format(
+                                data['name'], data['network_name']
+                            ),
+                            'is_pool': True,
+                        }
+                    )
+                except Exception as err:
+                    app.logger.error(err)
+                    return {
+                        'message': 'Error occured while creating network.'
+                    }
+                else:
+                    network_entry['ipv6'] = prefix['prefix']
+            else:
+                network_entry['ipv6'] = 'fd00::/64'
+
+            network_key = join_path(
+                shared.settings['etcd']['network_prefix'],
+                data['name'],
+                data['network_name'],
+            )
+            shared.etcd_client.put(
+                network_key, network_entry, value_in_json=True
+            )
+            return {'message': 'Network successfully added.'}
+        else:
+            return validator.get_errors(), 400
+
+
+class ListUserNetwork(Resource):
+    @staticmethod
+    def post():
+        data = request.json
+        validator = schemas.OTPSchema(data)
+
+        if validator.is_valid():
+            prefix = join_path(
+                shared.settings['etcd']['network_prefix'], data['name']
+            )
+            networks = shared.etcd_client.get_prefix(
+                prefix, value_in_json=True
+            )
+            user_networks = []
+            for net in networks:
+                net.value['name'] = net.key.split('/')[-1]
+                user_networks.append(net.value)
+            return {'networks': user_networks}, 200
+        else:
+            return validator.get_errors(), 400
+
+
+api.add_resource(CreateVM, '/vm/create')
+api.add_resource(VmStatus, '/vm/status')
+
+api.add_resource(VMAction, '/vm/action')
+api.add_resource(VMMigration, '/vm/migrate')
+
+api.add_resource(CreateImage, '/image/create')
+api.add_resource(ListPublicImages, '/image/list-public')
+
+api.add_resource(ListUserVM, '/user/vms')
+api.add_resource(ListUserFiles, '/user/files')
+api.add_resource(ListUserNetwork, '/user/networks')
+
+api.add_resource(AddSSHKey, '/user/add-ssh')
+api.add_resource(RemoveSSHKey, '/user/remove-ssh')
+api.add_resource(GetSSHKeys, '/user/get-ssh')
+
+api.add_resource(CreateHost, '/host/create')
+api.add_resource(ListHost, '/host/list')
+
+api.add_resource(CreateNetwork, '/network/create')
+
+
+def main(arguments):
+    debug = arguments['debug']
+    port = arguments['port']
+
+    try:
+        image_stores = list(
+            shared.etcd_client.get_prefix(
+                shared.settings['etcd']['image_store_prefix'], value_in_json=True
+            )
+        )
+    except KeyError:
+        image_stores = False
+
+    # Do not inject default values that might be very wrong
+    # fail when required, not before
+    #
+    # if not image_stores:
+    #     data = {
+    #         'is_public': True,
+    #         'type': 'ceph',
+    #         'name': 'images',
+    #         'description': 'first ever public image-store',
+    #         'attributes': {'list': [], 'key': [], 'pool': 'images'},
+    #     }
+
+    #     shared.etcd_client.put(
+    #         join_path(
+    #             shared.settings['etcd']['image_store_prefix'], uuid4().hex
+    #         ),
+    #         json.dumps(data),
+    #     )
+
+    try:
+        app.run(host='::', port=port, debug=debug)
+    except OSError as e:
+        raise UncloudException('Failed to start Flask: {}'.format(e))
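Since uncloud/api/main.py is entirely new in this diff, a request sketch may help review. CreateVM expects an OTP-authenticated JSON body whose field names come from CreateVMSchema/OTPSchema (reworked below); the server address and the TOTP seed here are placeholders, not project defaults:

```python
# Hedged example call against the new /vm/create endpoint. Field names
# follow CreateVMSchema/OTPSchema in this diff; host, port and the TOTP
# seed are placeholders.
import requests
from pyotp import TOTP

payload = {
    'name': 'meow',                           # user name (OTPSchema)
    'realm': 'ungleich',                      # auth realm (OTPSchema)
    'token': TOTP('BASE32SEED234567').now(),  # current one-time token
    'vm_name': 'alpine-1',
    'image': 'images:alpine',                 # {store_name}:{image_name}
    'network': ['mynet'],
    'specs': {'cpu': 1, 'ram': '1GB', 'os-ssd': '10GB', 'hdd': []},
}
r = requests.post('http://[::1]:5000/vm/create', json=payload)
print(r.status_code, r.json())  # 200 / 'VM Creation Queued' on success
```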
@@ -1,6 +1,6 @@
 """
 This module contains classes that validate and intercept/modify
-data coming from ucloud-cli (user)
+data coming from uncloud-cli (user)

 It was primarily developed as an alternative to argument parser
 of Flask_Restful which is going to be deprecated. I also tried
@@ -19,10 +19,10 @@ import os

 import bitmath

-from ucloud.common.host import HostStatus
-from ucloud.common.vm import VMStatus
-from ucloud.config import etcd_client, env_vars, vm_pool, host_pool
-from . import helper
+from uncloud.common.host import HostStatus
+from uncloud.common.vm import VMStatus
+from uncloud.common.shared import shared
+from . import helper, logger
 from .common_fields import Field, VmUUIDField
 from .helper import check_otp, resolve_vm_name
@@ -79,7 +79,12 @@ class OTPSchema(BaseSchema):
         super().__init__(data=data, fields=_fields)

     def validation(self):
-        if check_otp(self.name.value, self.realm.value, self.token.value) != 200:
+        if (
+            check_otp(
+                self.name.value, self.realm.value, self.token.value
+            )
+            != 200
+        ):
             self.add_error("Wrong Credentials")
@@ -91,7 +96,9 @@ class CreateImageSchema(BaseSchema):
         # Fields
         self.uuid = Field("uuid", str, data.get("uuid", KeyError))
         self.name = Field("name", str, data.get("name", KeyError))
-        self.image_store = Field("image_store", str, data.get("image_store", KeyError))
+        self.image_store = Field(
+            "image_store", str, data.get("image_store", KeyError)
+        )

         # Validations
         self.uuid.validation = self.file_uuid_validation
@@ -102,34 +109,51 @@ class CreateImageSchema(BaseSchema):
         super().__init__(data, fields)

     def file_uuid_validation(self):
-        file_entry = etcd_client.get(os.path.join(env_vars.get('FILE_PREFIX'), self.uuid.value))
+        file_entry = shared.etcd_client.get(
+            os.path.join(
+                shared.settings["etcd"]["file_prefix"], self.uuid.value
+            )
+        )
         if file_entry is None:
             self.add_error(
-                "Image File with uuid '{}' Not Found".format(self.uuid.value)
+                "Image File with uuid '{}' Not Found".format(
+                    self.uuid.value
+                )
             )

     def image_store_name_validation(self):
-        image_stores = list(etcd_client.get_prefix(env_vars.get('IMAGE_STORE_PREFIX')))
+        image_stores = list(
+            shared.etcd_client.get_prefix(
+                shared.settings["etcd"]["image_store_prefix"]
+            )
+        )

         image_store = next(
             filter(
-                lambda s: json.loads(s.value)["name"] == self.image_store.value,
+                lambda s: json.loads(s.value)["name"]
+                == self.image_store.value,
                 image_stores,
             ),
             None,
         )
         if not image_store:
-            self.add_error("Store '{}' does not exists".format(self.image_store.value))
+            self.add_error(
+                "Store '{}' does not exists".format(
+                    self.image_store.value
+                )
+            )


 # Host Operations


 class CreateHostSchema(OTPSchema):
     def __init__(self, data):
-        self.parsed_specs = {}
         # Fields
         self.specs = Field("specs", dict, data.get("specs", KeyError))
-        self.hostname = Field("hostname", str, data.get("hostname", KeyError))
+        self.hostname = Field(
+            "hostname", str, data.get("hostname", KeyError)
+        )

         # Validation
         self.specs.validation = self.specs_validation
@@ -141,22 +165,28 @@ class CreateHostSchema(OTPSchema):
     def specs_validation(self):
         ALLOWED_BASE = 10

-        _cpu = self.specs.value.get('cpu', KeyError)
-        _ram = self.specs.value.get('ram', KeyError)
-        _os_ssd = self.specs.value.get('os-ssd', KeyError)
-        _hdd = self.specs.value.get('hdd', KeyError)
+        _cpu = self.specs.value.get("cpu", KeyError)
+        _ram = self.specs.value.get("ram", KeyError)
+        _os_ssd = self.specs.value.get("os-ssd", KeyError)
+        _hdd = self.specs.value.get("hdd", KeyError)

         if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
-            self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
+            self.add_error(
+                "You must specify CPU, RAM and OS-SSD in your specs"
+            )
             return None
         try:
             parsed_ram = bitmath.parse_string_unsafe(_ram)
             parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)

             if parsed_ram.base != ALLOWED_BASE:
-                self.add_error("Your specified RAM is not in correct units")
+                self.add_error(
+                    "Your specified RAM is not in correct units"
+                )
             if parsed_os_ssd.base != ALLOWED_BASE:
-                self.add_error("Your specified OS-SSD is not in correct units")
+                self.add_error(
+                    "Your specified OS-SSD is not in correct units"
+                )

             if _cpu < 1:
                 self.add_error("CPU must be atleast 1")
@@ -171,7 +201,9 @@ class CreateHostSchema(OTPSchema):
             for hdd in _hdd:
                 _parsed_hdd = bitmath.parse_string_unsafe(hdd)
                 if _parsed_hdd.base != ALLOWED_BASE:
-                    self.add_error("Your specified HDD is not in correct units")
+                    self.add_error(
+                        "Your specified HDD is not in correct units"
+                    )
                     break
                 else:
                     parsed_hdd.append(str(_parsed_hdd))
@@ -182,15 +214,17 @@ class CreateHostSchema(OTPSchema):
         else:
             if self.get_errors():
                 self.specs = {
-                    'cpu': _cpu,
-                    'ram': str(parsed_ram),
-                    'os-ssd': str(parsed_os_ssd),
-                    'hdd': parsed_hdd
+                    "cpu": _cpu,
+                    "ram": str(parsed_ram),
+                    "os-ssd": str(parsed_os_ssd),
+                    "hdd": parsed_hdd,
                 }

     def validation(self):
         if self.realm.value != "ungleich-admin":
-            self.add_error("Invalid Credentials/Insufficient Permission")
+            self.add_error(
+                "Invalid Credentials/Insufficient Permission"
+            )


 # VM Operations
@@ -198,13 +232,15 @@ class CreateHostSchema(OTPSchema):

 class CreateVMSchema(OTPSchema):
     def __init__(self, data):
-        self.parsed_specs = {}
-
         # Fields
         self.specs = Field("specs", dict, data.get("specs", KeyError))
-        self.vm_name = Field("vm_name", str, data.get("vm_name", KeyError))
+        self.vm_name = Field(
+            "vm_name", str, data.get("vm_name", KeyError)
+        )
         self.image = Field("image", str, data.get("image", KeyError))
-        self.network = Field("network", list, data.get("network", KeyError))
+        self.network = Field(
+            "network", list, data.get("network", KeyError)
+        )

         # Validation
         self.image.validation = self.image_validation
@@ -218,16 +254,25 @@ class CreateVMSchema(OTPSchema):

     def image_validation(self):
         try:
-            image_uuid = helper.resolve_image_name(self.image.value, etcd_client)
+            image_uuid = helper.resolve_image_name(
+                self.image.value, shared.etcd_client
+            )
         except Exception as e:
+            logger.exception(
+                "Cannot resolve image name = %s", self.image.value
+            )
             self.add_error(str(e))
         else:
             self.image_uuid = image_uuid

     def vm_name_validation(self):
-        if resolve_vm_name(name=self.vm_name.value, owner=self.name.value):
+        if resolve_vm_name(
+            name=self.vm_name.value, owner=self.name.value
+        ):
             self.add_error(
-                'VM with same name "{}" already exists'.format(self.vm_name.value)
+                'VM with same name "{}" already exists'.format(
+                    self.vm_name.value
+                )
             )

     def network_validation(self):
@@ -235,34 +280,48 @@ class CreateVMSchema(OTPSchema):

         if _network:
             for net in _network:
-                network = etcd_client.get(os.path.join(env_vars.get('NETWORK_PREFIX'),
-                                                       self.name.value,
-                                                       net), value_in_json=True)
+                network = shared.etcd_client.get(
+                    os.path.join(
+                        shared.settings["etcd"]["network_prefix"],
+                        self.name.value,
+                        net,
+                    ),
+                    value_in_json=True,
+                )
                 if not network:
-                    self.add_error("Network with name {} does not exists" \
-                                   .format(net))
+                    self.add_error(
+                        "Network with name {} does not exists".format(
+                            net
+                        )
+                    )

     def specs_validation(self):
         ALLOWED_BASE = 10

-        _cpu = self.specs.value.get('cpu', KeyError)
-        _ram = self.specs.value.get('ram', KeyError)
-        _os_ssd = self.specs.value.get('os-ssd', KeyError)
-        _hdd = self.specs.value.get('hdd', KeyError)
+        _cpu = self.specs.value.get("cpu", KeyError)
+        _ram = self.specs.value.get("ram", KeyError)
+        _os_ssd = self.specs.value.get("os-ssd", KeyError)
+        _hdd = self.specs.value.get("hdd", KeyError)

         if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
-            self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
+            self.add_error(
+                "You must specify CPU, RAM and OS-SSD in your specs"
+            )
             return None
         try:
             parsed_ram = bitmath.parse_string_unsafe(_ram)
             parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)

             if parsed_ram.base != ALLOWED_BASE:
-                self.add_error("Your specified RAM is not in correct units")
+                self.add_error(
+                    "Your specified RAM is not in correct units"
+                )
             if parsed_os_ssd.base != ALLOWED_BASE:
-                self.add_error("Your specified OS-SSD is not in correct units")
+                self.add_error(
+                    "Your specified OS-SSD is not in correct units"
+                )

-            if _cpu < 1:
+            if int(_cpu) < 1:
                 self.add_error("CPU must be atleast 1")

             if parsed_ram < bitmath.GB(1):
@@ -275,7 +334,9 @@ class CreateVMSchema(OTPSchema):
             for hdd in _hdd:
                 _parsed_hdd = bitmath.parse_string_unsafe(hdd)
                 if _parsed_hdd.base != ALLOWED_BASE:
-                    self.add_error("Your specified HDD is not in correct units")
+                    self.add_error(
+                        "Your specified HDD is not in correct units"
+                    )
                     break
                 else:
                     parsed_hdd.append(str(_parsed_hdd))
@@ -286,21 +347,24 @@ class CreateVMSchema(OTPSchema):
         else:
             if self.get_errors():
                 self.specs = {
-                    'cpu': _cpu,
-                    'ram': str(parsed_ram),
-                    'os-ssd': str(parsed_os_ssd),
-                    'hdd': parsed_hdd
+                    "cpu": _cpu,
+                    "ram": str(parsed_ram),
+                    "os-ssd": str(parsed_os_ssd),
+                    "hdd": parsed_hdd,
                 }


 class VMStatusSchema(OTPSchema):
     def __init__(self, data):
         data["uuid"] = (
             resolve_vm_name(
                 name=data.get("vm_name", None),
-                owner=(data.get("in_support_of", None) or data.get("name", None)),
-            )
+                owner=(
+                    data.get("in_support_of", None)
+                    or data.get("name", None)
+                ),
+            )
             or KeyError
         )
         self.uuid = VmUUIDField(data)
@@ -309,9 +373,10 @@ class VMStatusSchema(OTPSchema):
         super().__init__(data, fields)

     def validation(self):
-        vm = vm_pool.get(self.uuid.value)
+        vm = shared.vm_pool.get(self.uuid.value)
         if not (
-            vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
+            vm.value["owner"] == self.name.value
+            or self.realm.value == "ungleich-admin"
         ):
             self.add_error("Invalid User")
@@ -319,11 +384,14 @@ class VMStatusSchema(OTPSchema):
 class VmActionSchema(OTPSchema):
     def __init__(self, data):
         data["uuid"] = (
             resolve_vm_name(
                 name=data.get("vm_name", None),
-                owner=(data.get("in_support_of", None) or data.get("name", None)),
-            )
+                owner=(
+                    data.get("in_support_of", None)
+                    or data.get("name", None)
+                ),
+            )
             or KeyError
         )
         self.uuid = VmUUIDField(data)
         self.action = Field("action", str, data.get("action", KeyError))
@@ -338,20 +406,23 @@ class VmActionSchema(OTPSchema):
         allowed_actions = ["start", "stop", "delete"]
         if self.action.value not in allowed_actions:
             self.add_error(
-                "Invalid Action. Allowed Actions are {}".format(allowed_actions)
+                "Invalid Action. Allowed Actions are {}".format(
+                    allowed_actions
+                )
             )

     def validation(self):
-        vm = vm_pool.get(self.uuid.value)
+        vm = shared.vm_pool.get(self.uuid.value)
         if not (
-            vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
+            vm.value["owner"] == self.name.value
+            or self.realm.value == "ungleich-admin"
         ):
             self.add_error("Invalid User")

         if (
             self.action.value == "start"
             and vm.status == VMStatus.running
             and vm.hostname != ""
         ):
             self.add_error("VM Already Running")
@@ -365,15 +436,20 @@ class VmActionSchema(OTPSchema):
 class VmMigrationSchema(OTPSchema):
     def __init__(self, data):
         data["uuid"] = (
             resolve_vm_name(
                 name=data.get("vm_name", None),
-                owner=(data.get("in_support_of", None) or data.get("name", None)),
-            )
+                owner=(
+                    data.get("in_support_of", None)
+                    or data.get("name", None)
+                ),
+            )
             or KeyError
         )

         self.uuid = VmUUIDField(data)
-        self.destination = Field("destination", str, data.get("destination", KeyError))
+        self.destination = Field(
+            "destination", str, data.get("destination", KeyError)
+        )

         self.destination.validation = self.destination_validation
@@ -382,31 +458,47 @@ class VmMigrationSchema(OTPSchema):

     def destination_validation(self):
         hostname = self.destination.value
-        host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
+        host = next(
+            filter(
+                lambda h: h.hostname == hostname, shared.host_pool.hosts
+            ),
+            None,
+        )
         if not host:
-            self.add_error("No Such Host ({}) exists".format(self.destination.value))
+            self.add_error(
+                "No Such Host ({}) exists".format(
+                    self.destination.value
+                )
+            )
         elif host.status != HostStatus.alive:
             self.add_error("Destination Host is dead")
         else:
             self.destination.value = host.key

     def validation(self):
-        vm = vm_pool.get(self.uuid.value)
+        vm = shared.vm_pool.get(self.uuid.value)
         if not (
-            vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
+            vm.value["owner"] == self.name.value
+            or self.realm.value == "ungleich-admin"
        ):
             self.add_error("Invalid User")

         if vm.status != VMStatus.running:
             self.add_error("Can't migrate non-running VM")

-        if vm.hostname == os.path.join(env_vars.get('HOST_PREFIX'), self.destination.value):
-            self.add_error("Destination host couldn't be same as Source Host")
+        if vm.hostname == os.path.join(
+            shared.settings["etcd"]["host_prefix"], self.destination.value
+        ):
+            self.add_error(
+                "Destination host couldn't be same as Source Host"
+            )


 class AddSSHSchema(OTPSchema):
     def __init__(self, data):
-        self.key_name = Field("key_name", str, data.get("key_name", KeyError))
+        self.key_name = Field(
+            "key_name", str, data.get("key_name", KeyError)
+        )
         self.key = Field("key", str, data.get("key_name", KeyError))

         fields = [self.key_name, self.key]
@@ -415,7 +507,9 @@ class AddSSHSchema(OTPSchema):

 class RemoveSSHSchema(OTPSchema):
     def __init__(self, data):
-        self.key_name = Field("key_name", str, data.get("key_name", KeyError))
+        self.key_name = Field(
+            "key_name", str, data.get("key_name", KeyError)
+        )

         fields = [self.key_name]
         super().__init__(data=data, fields=fields)
@@ -423,7 +517,9 @@ class RemoveSSHSchema(OTPSchema):

 class GetSSHSchema(OTPSchema):
     def __init__(self, data):
-        self.key_name = Field("key_name", str, data.get("key_name", None))
+        self.key_name = Field(
+            "key_name", str, data.get("key_name", None)
+        )

         fields = [self.key_name]
         super().__init__(data=data, fields=fields)
@@ -442,15 +538,20 @@ class CreateNetwork(OTPSchema):
         super().__init__(data, fields=fields)

     def network_name_validation(self):
-        network = etcd_client.get(os.path.join(env_vars.get('NETWORK_PREFIX'),
-                                               self.name.value,
-                                               self.network_name.value),
-                                  value_in_json=True)
+        key = os.path.join(shared.settings["etcd"]["network_prefix"], self.name.value, self.network_name.value)
+        network = shared.etcd_client.get(key, value_in_json=True)
         if network:
-            self.add_error("Network with name {} already exists" \
-                           .format(self.network_name.value))
+            self.add_error(
+                "Network with name {} already exists".format(
+                    self.network_name.value
+                )
+            )

     def network_type_validation(self):
         supported_network_types = ["vxlan"]
         if self.type.value not in supported_network_types:
-            self.add_error("Unsupported Network Type. Supported network types are {}".format(supported_network_types))
+            self.add_error(
+                "Unsupported Network Type. Supported network types are {}".format(
+                    supported_network_types
+                )
+            )
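All the schema classes reworked above share one call flow, which the resources in main.py drive: construct the schema with the request body, call is_valid() (which runs each Field's validation and the schema-level validation()), then read get_errors() on failure. A minimal sketch under invented credentials:

```python
# Sketch of the validate-then-act flow the API resources use; the payload,
# including the OTP token, is invented for illustration. Validation talks
# to etcd through the `shared` singleton.
from uncloud.api import schemas

payload = {
    'name': 'admin', 'realm': 'ungleich-admin', 'token': '123456',
    'hostname': 'server1.place10',
    'specs': {'cpu': 16, 'ram': '64GB', 'os-ssd': '960GB', 'hdd': []},
}
validator = schemas.CreateHostSchema(payload)
if validator.is_valid():
    print('accepted; normalized specs:', validator.specs)
else:
    print(validator.get_errors())  # e.g. ['Wrong Credentials']
```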
46 uncloud/cli/helper.py Normal file

@@ -0,0 +1,46 @@
+import requests
+import json
+import argparse
+import binascii
+
+from pyotp import TOTP
+from os.path import join as join_path
+from uncloud.common.shared import shared
+
+
+def get_otp_parser():
+    otp_parser = argparse.ArgumentParser('otp')
+    otp_parser.add_argument('--name')
+    otp_parser.add_argument('--realm')
+    otp_parser.add_argument('--seed', type=get_token, dest='token', metavar='SEED')
+
+    return otp_parser
+
+
+def load_dump_pretty(content):
+    if isinstance(content, bytes):
+        content = content.decode('utf-8')
+    parsed = json.loads(content)
+    return json.dumps(parsed, indent=4, sort_keys=True)
+
+
+def make_request(*args, data=None, request_method=requests.post):
+    try:
+        r = request_method(join_path(shared.settings['client']['api_server'], *args), json=data)
+    except requests.exceptions.RequestException:
+        print('Error occurred while connecting to API server.')
+    else:
+        try:
+            print(load_dump_pretty(r.content))
+        except Exception:
+            print('Error occurred while getting output from api server.')
+
+
+def get_token(seed):
+    if seed is not None:
+        try:
+            token = TOTP(seed).now()
+        except binascii.Error:
+            raise argparse.ArgumentTypeError('Invalid seed')
+        else:
+            return token
45 uncloud/cli/host.py Normal file

@@ -0,0 +1,45 @@
+import requests
+
+from uncloud.cli.helper import make_request, get_otp_parser
+from uncloud.common.parser import BaseParser
+
+
+class HostParser(BaseParser):
+    def __init__(self):
+        super().__init__('host')
+
+    def create(self, **kwargs):
+        p = self.subparser.add_parser('create', parents=[get_otp_parser()], **kwargs)
+        p.add_argument('--hostname', required=True)
+        p.add_argument('--cpu', required=True, type=int)
+        p.add_argument('--ram', required=True)
+        p.add_argument('--os-ssd', required=True)
+        p.add_argument('--hdd', default=list())
+
+    def list(self, **kwargs):
+        self.subparser.add_parser('list', **kwargs)
+
+
+parser = HostParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+    subcommand = kwargs.pop('host_subcommand')
+    if not subcommand:
+        arg_parser.print_help()
+    else:
+        request_method = requests.post
+        data = None
+        if subcommand == 'create':
+            kwargs['specs'] = {
+                'cpu': kwargs.pop('cpu'),
+                'ram': kwargs.pop('ram'),
+                'os-ssd': kwargs.pop('os_ssd'),
+                'hdd': kwargs.pop('hdd')
+            }
+            data = kwargs
+        elif subcommand == 'list':
+            request_method = requests.get
+
+        make_request('host', subcommand, data=data, request_method=request_method)
38 uncloud/cli/image.py Normal file
@ -0,0 +1,38 @@
import requests

from uncloud.cli.helper import make_request
from uncloud.common.parser import BaseParser


class ImageParser(BaseParser):
    def __init__(self):
        super().__init__('image')

    def create(self, **kwargs):
        p = self.subparser.add_parser('create', **kwargs)
        p.add_argument('--name', required=True)
        p.add_argument('--uuid', required=True)
        p.add_argument('--image-store', required=True, dest='image_store')

    def list(self, **kwargs):
        self.subparser.add_parser('list', **kwargs)


parser = ImageParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    subcommand = kwargs.pop('image_subcommand')
    if not subcommand:
        arg_parser.print_help()
    else:
        data = None
        request_method = requests.post
        if subcommand == 'list':
            subcommand = 'list-public'
            request_method = requests.get
        elif subcommand == 'create':
            data = kwargs

        make_request('image', subcommand, data=data, request_method=request_method)
23 uncloud/cli/main.py Normal file
@ -0,0 +1,23 @@
#!/usr/bin/env python3

import argparse
import importlib

arg_parser = argparse.ArgumentParser('cli', add_help=False)
subparser = arg_parser.add_subparsers(dest='subcommand')

for component in ['user', 'host', 'image', 'network', 'vm']:
    module = importlib.import_module('uncloud.cli.{}'.format(component))
    parser = getattr(module, 'arg_parser')
    subparser.add_parser(name=parser.prog, parents=[parser])


def main(arguments):
    if not arguments['subcommand']:
        arg_parser.print_help()
    else:
        name = arguments.pop('subcommand')
        arguments.pop('debug')
        mod = importlib.import_module('uncloud.cli.{}'.format(name))
        _main = getattr(mod, 'main')
        _main(**arguments)
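The pattern here: each component module exposes a module-level `arg_parser` and a `main(**kwargs)`, and this top-level parser mounts them by `prog` name and re-dispatches via importlib. A sketch of the resulting flow, assuming a configured uncloud.conf (make_request reads the API server address from settings) and noting that the 'debug' key popped above is assumed to be injected by the outer uncloud entry point, so it is supplied by hand here:

    from uncloud.cli.main import arg_parser, main

    arguments = vars(arg_parser.parse_args(['image', 'list']))
    arguments['debug'] = False   # normally added by the uncloud launcher
    main(arguments)              # dispatches to uncloud.cli.image.main(image_subcommand='list')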
32 uncloud/cli/network.py Normal file
@ -0,0 +1,32 @@
import requests

from uncloud.cli.helper import make_request, get_otp_parser
from uncloud.common.parser import BaseParser


class NetworkParser(BaseParser):
    def __init__(self):
        super().__init__('network')

    def create(self, **kwargs):
        p = self.subparser.add_parser('create', parents=[get_otp_parser()], **kwargs)
        p.add_argument('--network-name', required=True)
        p.add_argument('--network-type', required=True, dest='type')
        p.add_argument('--user', action='store_true')


parser = NetworkParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    subcommand = kwargs.pop('network_subcommand')
    if not subcommand:
        arg_parser.print_help()
    else:
        data = None
        request_method = requests.post
        if subcommand == 'create':
            data = kwargs

        make_request('network', subcommand, data=data, request_method=request_method)
41 uncloud/cli/user.py Executable file
@ -0,0 +1,41 @@
from uncloud.cli.helper import make_request, get_otp_parser
from uncloud.common.parser import BaseParser


class UserParser(BaseParser):
    def __init__(self):
        super().__init__('user')

    def files(self, **kwargs):
        self.subparser.add_parser('files', parents=[get_otp_parser()], **kwargs)

    def vms(self, **kwargs):
        self.subparser.add_parser('vms', parents=[get_otp_parser()], **kwargs)

    def networks(self, **kwargs):
        self.subparser.add_parser('networks', parents=[get_otp_parser()], **kwargs)

    def add_ssh(self, **kwargs):
        p = self.subparser.add_parser('add-ssh', parents=[get_otp_parser()], **kwargs)
        p.add_argument('--key-name', required=True)
        p.add_argument('--key', required=True)

    def get_ssh(self, **kwargs):
        p = self.subparser.add_parser('get-ssh', parents=[get_otp_parser()], **kwargs)
        p.add_argument('--key-name', default='')

    def remove_ssh(self, **kwargs):
        p = self.subparser.add_parser('remove-ssh', parents=[get_otp_parser()], **kwargs)
        p.add_argument('--key-name', required=True)


parser = UserParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    subcommand = kwargs.pop('user_subcommand')
    if not subcommand:
        arg_parser.print_help()
    else:
        make_request('user', subcommand, data=kwargs)
62 uncloud/cli/vm.py Normal file
@ -0,0 +1,62 @@
from uncloud.common.parser import BaseParser
from uncloud.cli.helper import make_request, get_otp_parser


class VMParser(BaseParser):
    def __init__(self):
        super().__init__('vm')

    def start(self, **args):
        p = self.subparser.add_parser('start', parents=[get_otp_parser()], **args)
        p.add_argument('--vm-name', required=True)

    def stop(self, **args):
        p = self.subparser.add_parser('stop', parents=[get_otp_parser()], **args)
        p.add_argument('--vm-name', required=True)

    def status(self, **args):
        p = self.subparser.add_parser('status', parents=[get_otp_parser()], **args)
        p.add_argument('--vm-name', required=True)

    def delete(self, **args):
        p = self.subparser.add_parser('delete', parents=[get_otp_parser()], **args)
        p.add_argument('--vm-name', required=True)

    def migrate(self, **args):
        p = self.subparser.add_parser('migrate', parents=[get_otp_parser()], **args)
        p.add_argument('--vm-name', required=True)
        p.add_argument('--destination', required=True)

    def create(self, **args):
        p = self.subparser.add_parser('create', parents=[get_otp_parser()], **args)
        p.add_argument('--cpu', required=True)
        p.add_argument('--ram', required=True)
        p.add_argument('--os-ssd', required=True)
        p.add_argument('--hdd', action='append', default=list())
        p.add_argument('--image', required=True)
        p.add_argument('--network', action='append', default=[])
        p.add_argument('--vm-name', required=True)


parser = VMParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    subcommand = kwargs.pop('vm_subcommand')
    if not subcommand:
        arg_parser.print_help()
    else:
        data = kwargs
        endpoint = subcommand
        if subcommand in ['start', 'stop', 'delete']:
            endpoint = 'action'
            data['action'] = subcommand
        elif subcommand == 'create':
            kwargs['specs'] = {
                'cpu': kwargs.pop('cpu'),
                'ram': kwargs.pop('ram'),
                'os-ssd': kwargs.pop('os_ssd'),
                'hdd': kwargs.pop('hdd')
            }
        make_request('vm', endpoint, data=data)
uncloud/common/classes.py
@ -1,4 +1,4 @@
-from etcd3_wrapper import EtcdEntry
+from .etcd_wrapper import EtcdEntry


 class SpecificEtcdEntryBase:
26 uncloud/common/cli.py Normal file
@ -0,0 +1,26 @@
from uncloud.common.shared import shared
from pyotp import TOTP


def get_token(seed):
    if seed is not None:
        try:
            token = TOTP(seed).now()
        except Exception:
            raise Exception('Invalid seed')
        else:
            return token


def resolve_otp_credentials(kwargs):
    d = {
        'name': shared.settings['client']['name'],
        'realm': shared.settings['client']['realm'],
        'token': get_token(shared.settings['client']['seed'])
    }

    for k, v in d.items():
        if k in kwargs and kwargs[k] is None:
            kwargs.update({k: v})

    return d
uncloud/common/counters.py
@ -1,4 +1,4 @@
-from etcd3_wrapper import Etcd3Wrapper
+from .etcd_wrapper import Etcd3Wrapper


 def increment_etcd_counter(etcd_client: Etcd3Wrapper, key):
75 uncloud/common/etcd_wrapper.py Normal file
@ -0,0 +1,75 @@
import etcd3
import json

from functools import wraps

from uncloud import UncloudException
from uncloud.common import logger


class EtcdEntry:
    def __init__(self, meta_or_key, value, value_in_json=False):
        if hasattr(meta_or_key, 'key'):
            # if meta has attr 'key' then get it
            self.key = meta_or_key.key.decode('utf-8')
        else:
            # otherwise meta is the 'key'
            self.key = meta_or_key
        self.value = value.decode('utf-8')

        if value_in_json:
            self.value = json.loads(self.value)


def readable_errors(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except etcd3.exceptions.ConnectionFailedError:
            raise UncloudException('Cannot connect to etcd: is etcd running as configured in uncloud.conf?')
        except etcd3.exceptions.ConnectionTimeoutError as err:
            raise etcd3.exceptions.ConnectionTimeoutError('etcd connection timeout.') from err
        except Exception:
            logger.exception('Some etcd error occured. See syslog for details.')

    return wrapper


class Etcd3Wrapper:
    @readable_errors
    def __init__(self, *args, **kwargs):
        self.client = etcd3.client(*args, **kwargs)

    @readable_errors
    def get(self, *args, value_in_json=False, **kwargs):
        _value, _key = self.client.get(*args, **kwargs)
        if _key is None or _value is None:
            return None
        return EtcdEntry(_key, _value, value_in_json=value_in_json)

    @readable_errors
    def put(self, *args, value_in_json=False, **kwargs):
        _key, _value = args
        if value_in_json:
            _value = json.dumps(_value)

        if not isinstance(_key, str):
            _key = _key.decode('utf-8')

        return self.client.put(_key, _value, **kwargs)

    @readable_errors
    def get_prefix(self, *args, value_in_json=False, raise_exception=True, **kwargs):
        event_iterator = self.client.get_prefix(*args, **kwargs)
        for e in event_iterator:
            yield EtcdEntry(*e[::-1], value_in_json=value_in_json)

    @readable_errors
    def watch_prefix(self, key, raise_exception=True, value_in_json=False):
        event_iterator, cancel = self.client.watch_prefix(key)
        for e in event_iterator:
            if hasattr(e, '_event'):
                e = e._event
            if e.type == e.PUT:
                yield EtcdEntry(e.kv.key, e.kv.value, value_in_json=value_in_json)
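A usage sketch for the wrapper, assuming an etcd instance reachable on localhost:2379 (host and port here are placeholders):

    from uncloud.common.etcd_wrapper import Etcd3Wrapper

    etcd = Etcd3Wrapper(host='localhost', port=2379)
    etcd.put('/demo/key', {'hello': 'world'}, value_in_json=True)
    entry = etcd.get('/demo/key', value_in_json=True)
    if entry:                      # get() returns None for a missing key
        print(entry.key, entry.value['hello'])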
uncloud/common/host.py
@ -7,7 +7,7 @@ from .classes import SpecificEtcdEntryBase


 class HostStatus:
-    """Possible Statuses of ucloud host."""
+    """Possible Statuses of uncloud host."""

     alive = "ALIVE"
     dead = "DEAD"

@ -26,11 +26,13 @@ class HostEntry(SpecificEtcdEntryBase):

     def update_heartbeat(self):
         self.status = HostStatus.alive
-        self.last_heartbeat = time.strftime("%Y-%m-%d %H:%M:%S")
+        self.last_heartbeat = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")

     def is_alive(self):
-        last_heartbeat = datetime.strptime(self.last_heartbeat, "%Y-%m-%d %H:%M:%S")
-        delta = datetime.now() - last_heartbeat
+        last_heartbeat = datetime.strptime(
+            self.last_heartbeat, "%Y-%m-%d %H:%M:%S"
+        )
+        delta = datetime.utcnow() - last_heartbeat
         if delta.total_seconds() > 60:
             return False
         return True
70 uncloud/common/network.py Normal file
@ -0,0 +1,70 @@
import subprocess as sp
import random
import logging

logger = logging.getLogger(__name__)


def random_bytes(num=6):
    return [random.randrange(256) for _ in range(num)]


def generate_mac(
    uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
):
    mac = random_bytes()
    if oui:
        if type(oui) == str:
            oui = [int(chunk) for chunk in oui.split(separator)]
        mac = oui + random_bytes(num=6 - len(oui))
    else:
        if multicast:
            mac[0] |= 1  # set bit 0
        else:
            mac[0] &= ~1  # clear bit 0
        if uaa:
            mac[0] &= ~(1 << 1)  # clear bit 1
        else:
            mac[0] |= 1 << 1  # set bit 1
    return separator.join(byte_fmt % b for b in mac)


def create_dev(script, _id, dev, ip=None):
    command = [
        "sudo",
        "-p",
        "Enter password to create network devices for vm: ",
        script,
        str(_id),
        dev,
    ]
    if ip:
        command.append(ip)
    try:
        output = sp.check_output(command, stderr=sp.PIPE)
    except Exception:
        logger.exception("Creation of interface %s failed.", dev)
        return None
    else:
        return output.decode("utf-8").strip()


def delete_network_interface(iface):
    try:
        sp.check_output(
            [
                "sudo",
                "-p",
                "Enter password to remove {} network device: ".format(
                    iface
                ),
                "ip",
                "link",
                "del",
                iface,
            ],
            stderr=sp.PIPE,
        )
    except Exception:
        logger.exception("Interface %s Deletion failed", iface)
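The bit fiddling in generate_mac() follows the IEEE 802 convention: bit 0 of the first octet is the multicast flag and bit 1 the locally-administered flag, so the default result is a unicast, locally-administered address. A quick check:

    from uncloud.common.network import generate_mac

    print(generate_mac())                  # random MAC with the local bit set
    print(generate_mac(oui=[0x02, 0x00]))  # fixed 02:00 prefix, random remainder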
13 uncloud/common/parser.py Normal file
@ -0,0 +1,13 @@
import argparse


class BaseParser:
    def __init__(self, command):
        self.arg_parser = argparse.ArgumentParser(command, add_help=False)
        self.subparser = self.arg_parser.add_subparsers(dest='{}_subcommand'.format(command))
        self.common_args = {'add_help': False}

        methods = [attr for attr in dir(self) if not attr.startswith('__')
                   and type(getattr(self, attr)).__name__ == 'method']
        for method in methods:
            getattr(self, method)(**self.common_args)
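BaseParser registers subcommands by introspection: every public method on a subclass is called once at construction time with add_help=False, so defining a method named after a subcommand is enough to wire it up. A minimal sketch with a made-up 'ping' subcommand:

    from uncloud.common.parser import BaseParser

    class DemoParser(BaseParser):
        def ping(self, **kwargs):
            self.subparser.add_parser('ping', **kwargs)

    args = DemoParser('demo').arg_parser.parse_args(['ping'])
    print(args.demo_subcommand)   # 'ping'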
uncloud/common/request.py
@ -2,9 +2,8 @@ import json
 from os.path import join
 from uuid import uuid4

-from etcd3_wrapper.etcd3_wrapper import PsuedoEtcdEntry
-from .classes import SpecificEtcdEntryBase
+from uncloud.common.etcd_wrapper import EtcdEntry
+from uncloud.common.classes import SpecificEtcdEntryBase


 class RequestType:

@ -18,8 +17,9 @@ class RequestType:


 class RequestEntry(SpecificEtcdEntryBase):

     def __init__(self, e):
+        self.destination_sock_path = None
+        self.destination_host_key = None
         self.type = None  # type: str
         self.migration = None  # type: bool
         self.destination = None  # type: str

@ -29,8 +29,8 @@ class RequestEntry(SpecificEtcdEntryBase):

     @classmethod
     def from_scratch(cls, request_prefix, **kwargs):
-        e = PsuedoEtcdEntry(join(request_prefix, uuid4().hex),
-                            value=json.dumps(kwargs).encode("utf-8"), value_in_json=True)
+        e = EtcdEntry(meta_or_key=join(request_prefix, uuid4().hex),
+                      value=json.dumps(kwargs).encode('utf-8'), value_in_json=True)
         return cls(e)
41 uncloud/common/schemas.py Normal file
@ -0,0 +1,41 @@
import bitmath

from marshmallow import fields, Schema


class StorageUnit(fields.Field):
    def _serialize(self, value, attr, obj, **kwargs):
        return str(value)

    def _deserialize(self, value, attr, data, **kwargs):
        return bitmath.parse_string_unsafe(value)


class SpecsSchema(Schema):
    cpu = fields.Int()
    ram = StorageUnit()
    os_ssd = StorageUnit(data_key="os-ssd", attribute="os-ssd")
    hdd = fields.List(StorageUnit())


class VMSchema(Schema):
    name = fields.Str()
    owner = fields.Str()
    owner_realm = fields.Str()
    specs = fields.Nested(SpecsSchema)
    status = fields.Str()
    log = fields.List(fields.Str())
    vnc_socket = fields.Str()
    image_uuid = fields.Str()
    hostname = fields.Str()
    metadata = fields.Dict()
    network = fields.List(
        fields.Tuple((fields.Str(), fields.Str(), fields.Int()))
    )
    in_migration = fields.Bool()


class NetworkSchema(Schema):
    _id = fields.Int(data_key="id", attribute="id")
    _type = fields.Str(data_key="type", attribute="type")
    ipv6 = fields.Str()
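A sketch of what the StorageUnit field buys, assuming marshmallow 3.x load() semantics: human-readable sizes come out as bitmath objects.

    from uncloud.common.schemas import SpecsSchema

    specs = SpecsSchema().load(
        {'cpu': 2, 'ram': '4GB', 'os-ssd': '10GB', 'hdd': ['100GB']}
    )
    print(specs['os-ssd'])   # bitmath object parsed from '10GB'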
136 uncloud/common/settings.py Normal file
@ -0,0 +1,136 @@
import configparser
import logging
import sys
import os

from datetime import datetime
from uncloud.common.etcd_wrapper import Etcd3Wrapper
from os.path import join as join_path

logger = logging.getLogger(__name__)
settings = None


class CustomConfigParser(configparser.RawConfigParser):
    def __getitem__(self, key):
        try:
            result = super().__getitem__(key)
        except KeyError as err:
            raise KeyError(
                'Key \'{}\' not found in configuration. Make sure you configure uncloud.'.format(
                    key
                )
            ) from err
        else:
            return result


class Settings(object):
    def __init__(self, conf_dir, seed_value=None):
        conf_name = 'uncloud.conf'
        self.config_file = join_path(conf_dir, conf_name)

        # this is used to cache config from etcd for 1 minutes. Without this we
        # would make a lot of requests to etcd which slows down everything.
        self.last_config_update = datetime.fromtimestamp(0)

        self.config_parser = CustomConfigParser(allow_no_value=True)
        self.config_parser.add_section('etcd')
        self.config_parser.set('etcd', 'base_prefix', '/')

        if os.access(self.config_file, os.R_OK):
            self.config_parser.read(self.config_file)
        else:
            raise FileNotFoundError('Config file %s not found!', self.config_file)
        self.config_key = join_path(self['etcd']['base_prefix'] + 'uncloud/config/')

        self.read_internal_values()

        if seed_value is None:
            seed_value = dict()

        self.config_parser.read_dict(seed_value)

    def get_etcd_client(self):
        args = tuple()
        try:
            kwargs = {
                'host': self.config_parser.get('etcd', 'url'),
                'port': self.config_parser.get('etcd', 'port'),
                'ca_cert': self.config_parser.get('etcd', 'ca_cert'),
                'cert_cert': self.config_parser.get('etcd', 'cert_cert'),
                'cert_key': self.config_parser.get('etcd', 'cert_key'),
            }
        except configparser.Error as err:
            raise configparser.Error(
                '{} in config file {}'.format(
                    err.message, self.config_file
                )
            ) from err
        else:
            try:
                wrapper = Etcd3Wrapper(*args, **kwargs)
            except Exception as err:
                logger.error(
                    'etcd connection not successfull. Please check your config file.'
                    '\nDetails: %s\netcd connection parameters: %s',
                    err,
                    kwargs,
                )
                sys.exit(1)
            else:
                return wrapper

    def read_internal_values(self):
        base_prefix = self['etcd']['base_prefix']
        self.config_parser.read_dict(
            {
                'etcd': {
                    'file_prefix': join_path(base_prefix, 'files/'),
                    'host_prefix': join_path(base_prefix, 'hosts/'),
                    'image_prefix': join_path(base_prefix, 'images/'),
                    'image_store_prefix': join_path(base_prefix, 'imagestore/'),
                    'network_prefix': join_path(base_prefix, 'networks/'),
                    'request_prefix': join_path(base_prefix, 'requests/'),
                    'user_prefix': join_path(base_prefix, 'users/'),
                    'vm_prefix': join_path(base_prefix, 'vms/'),
                    'vxlan_counter': join_path(base_prefix, 'counters/vxlan'),
                    'tap_counter': join_path(base_prefix, 'counters/tap')
                }
            }
        )

    def read_config_file_values(self, config_file):
        try:
            # Trying to read configuration file
            with open(config_file) as config_file_handle:
                self.config_parser.read_file(config_file_handle)
        except FileNotFoundError:
            sys.exit('Configuration file {} not found!'.format(config_file))
        except Exception as err:
            logger.exception(err)
            sys.exit('Error occurred while reading configuration file')

    def read_values_from_etcd(self):
        etcd_client = self.get_etcd_client()
        if (datetime.utcnow() - self.last_config_update).total_seconds() > 60:
            config_from_etcd = etcd_client.get(self.config_key, value_in_json=True)
            if config_from_etcd:
                self.config_parser.read_dict(config_from_etcd.value)
                self.last_config_update = datetime.utcnow()
            else:
                raise KeyError('Key \'{}\' not found in etcd. Please configure uncloud.'.format(self.config_key))

    def __getitem__(self, key):
        # Allow failing to read from etcd if we have
        # it locally
        if key not in self.config_parser.sections():
            try:
                self.read_values_from_etcd()
            except KeyError:
                pass
        return self.config_parser[key]


def get_settings():
    return settings
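For reference, a hypothetical minimal uncloud.conf covering the keys the Settings class reads above (all values are placeholders; allow_no_value=True permits leaving the TLS keys bare):

    [etcd]
    url = localhost
    port = 2379
    ca_cert
    cert_cert
    cert_key

    [client]
    name = admin
    realm = uncloud
    seed = JBSWY3DPEHPK3PXP
    api_server = http://localhost:5000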
34 uncloud/common/shared.py Normal file
@ -0,0 +1,34 @@
from uncloud.common.settings import get_settings
from uncloud.common.vm import VmPool
from uncloud.common.host import HostPool
from uncloud.common.request import RequestPool
import uncloud.common.storage_handlers as storage_handlers


class Shared:
    @property
    def settings(self):
        return get_settings()

    @property
    def etcd_client(self):
        return self.settings.get_etcd_client()

    @property
    def host_pool(self):
        return HostPool(self.etcd_client, self.settings["etcd"]["host_prefix"])

    @property
    def vm_pool(self):
        return VmPool(self.etcd_client, self.settings["etcd"]["vm_prefix"])

    @property
    def request_pool(self):
        return RequestPool(self.etcd_client, self.settings["etcd"]["request_prefix"])

    @property
    def storage_handler(self):
        return storage_handlers.get_storage_handler()


shared = Shared()
uncloud/common/storage_handlers.py
@ -6,17 +6,20 @@ import stat
 from abc import ABC
 from . import logger
 from os.path import join as join_path
+import uncloud.common.shared as shared


 class ImageStorageHandler(ABC):
+    handler_name = "base"
+
     def __init__(self, image_base, vm_base):
         self.image_base = image_base
         self.vm_base = vm_base

     def import_image(self, image_src, image_dest, protect=False):
         """Put an image at the destination
-        :param src: An Image file
-        :param dest: A path where :param src: is to be put.
+        :param image_src: An Image file
+        :param image_dest: A path where :param src: is to be put.
         :param protect: If protect is true then the dest is protect (readonly etc)
         The obj must exist on filesystem.
         """

@ -26,8 +29,8 @@ class ImageStorageHandler(ABC):
     def make_vm_image(self, image_path, path):
         """Copy image from src to dest

-        :param src: A path
-        :param dest: A path
+        :param image_path: A path
+        :param path: A path

         src and destination must be on same storage system i.e both on file system or both on CEPH etc.
         """

@ -43,14 +46,17 @@ class ImageStorageHandler(ABC):
     def delete_vm_image(self, path):
         raise NotImplementedError()

-    def execute_command(self, command, report=True):
+    def execute_command(self, command, report=True, error_origin=None):
+        if not error_origin:
+            error_origin = self.handler_name
+
         command = list(map(str, command))
         try:
-            output = sp.check_output(command, stderr=sp.PIPE)
-        except Exception as e:
+            sp.check_output(command, stderr=sp.PIPE)
+        except sp.CalledProcessError as e:
+            _stderr = e.stderr.decode("utf-8").strip()
             if report:
-                print(e)
-                logger.exception(e)
+                logger.exception("%s:- %s", error_origin, _stderr)
             return False
         return True

@ -65,12 +71,16 @@ class ImageStorageHandler(ABC):


 class FileSystemBasedImageStorageHandler(ImageStorageHandler):
+    handler_name = "Filesystem"
+
     def import_image(self, src, dest, protect=False):
         dest = join_path(self.image_base, dest)
         try:
             shutil.copy(src, dest)
             if protect:
-                os.chmod(dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
+                os.chmod(
+                    dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
+                )
         except Exception as e:
             logger.exception(e)
             return False

@ -80,7 +90,7 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):
         src = join_path(self.image_base, src)
         dest = join_path(self.vm_base, dest)
         try:
-            shutil.copy(src, dest)
+            shutil.copyfile(src, dest)
         except Exception as e:
             logger.exception(e)
             return False

@ -88,7 +98,14 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):

     def resize_vm_image(self, path, size):
         path = join_path(self.vm_base, path)
-        command = ["qemu-img", "resize", "-f", "raw", path, "{}M".format(size)]
+        command = [
+            "qemu-img",
+            "resize",
+            "-f",
+            "raw",
+            path,
+            "{}M".format(size),
+        ]
         if self.execute_command(command):
             return True
         else:

@ -117,17 +134,33 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):


 class CEPHBasedImageStorageHandler(ImageStorageHandler):
+    handler_name = "Ceph"
+
     def import_image(self, src, dest, protect=False):
         dest = join_path(self.image_base, dest)
-        command = ["rbd", "import", src, dest]
+        import_command = ["rbd", "import", src, dest]
+        commands = [import_command]
         if protect:
-            snap_create_command = ["rbd", "snap", "create", "{}@protected".format(dest)]
-            snap_protect_command = ["rbd", "snap", "protect", "{}@protected".format(dest)]
+            snap_create_command = [
+                "rbd",
+                "snap",
+                "create",
+                "{}@protected".format(dest),
+            ]
+            snap_protect_command = [
+                "rbd",
+                "snap",
+                "protect",
+                "{}@protected".format(dest),
+            ]
+            commands.append(snap_create_command)
+            commands.append(snap_protect_command)

-            return self.execute_command(command) and self.execute_command(snap_create_command) and\
-                   self.execute_command(snap_protect_command)
+        result = True
+        for command in commands:
+            result = result and self.execute_command(command)

-        return self.execute_command(command)
+        return result

     def make_vm_image(self, src, dest):
         src = join_path(self.image_base, src)

@ -156,3 +189,19 @@ class CEPHBasedImageStorageHandler(ImageStorageHandler):
         path = join_path(self.vm_base, path)
         command = ["rbd", "info", path]
         return self.execute_command(command, report=False)
+
+
+def get_storage_handler():
+    __storage_backend = shared.shared.settings["storage"]["storage_backend"]
+    if __storage_backend == "filesystem":
+        return FileSystemBasedImageStorageHandler(
+            vm_base=shared.shared.settings["storage"]["vm_dir"],
+            image_base=shared.shared.settings["storage"]["image_dir"],
+        )
+    elif __storage_backend == "ceph":
+        return CEPHBasedImageStorageHandler(
+            vm_base=shared.shared.settings["storage"]["ceph_vm_pool"],
+            image_base=shared.shared.settings["storage"]["ceph_image_pool"],
+        )
+    else:
+        raise Exception("Unknown Image Storage Handler")
uncloud/common/vm.py
@ -12,8 +12,13 @@ class VMStatus:
     error = "ERROR"  # An error occurred that cannot be resolved automatically


+def declare_stopped(vm):
+    vm["hostname"] = ""
+    vm["in_migration"] = False
+    vm["status"] = VMStatus.stopped
+
+
 class VMEntry(SpecificEtcdEntryBase):
     def __init__(self, e):
         self.owner = None  # type: str
         self.specs = None  # type: dict

@ -42,7 +47,9 @@ class VMEntry(SpecificEtcdEntryBase):

     def add_log(self, msg):
         self.log = self.log[:5]
-        self.log.append("{} - {}".format(datetime.now().isoformat(), msg))
+        self.log.append(
+            "{} - {}".format(datetime.now().isoformat(), msg)
+        )


 class VmPool:
57 uncloud/configure/main.py Normal file
@ -0,0 +1,57 @@
import os
import argparse

from uncloud.common.shared import shared

arg_parser = argparse.ArgumentParser('configure', add_help=False)
configure_subparsers = arg_parser.add_subparsers(dest='subcommand')

otp_parser = configure_subparsers.add_parser('otp')
otp_parser.add_argument('--verification-controller-url', required=True, metavar='URL')
otp_parser.add_argument('--auth-name', required=True, metavar='OTP-NAME')
otp_parser.add_argument('--auth-realm', required=True, metavar='OTP-REALM')
otp_parser.add_argument('--auth-seed', required=True, metavar='OTP-SEED')

network_parser = configure_subparsers.add_parser('network')
network_parser.add_argument('--prefix-length', required=True, type=int)
network_parser.add_argument('--prefix', required=True)
network_parser.add_argument('--vxlan-phy-dev', required=True)

netbox_parser = configure_subparsers.add_parser('netbox')
netbox_parser.add_argument('--url', required=True)
netbox_parser.add_argument('--token', required=True)

ssh_parser = configure_subparsers.add_parser('ssh')
ssh_parser.add_argument('--username', default='root')
ssh_parser.add_argument('--private-key-path', default=os.path.expanduser('~/.ssh/id_rsa'),)

storage_parser = configure_subparsers.add_parser('storage')
storage_parser.add_argument('--file-dir', required=True)
storage_parser_subparsers = storage_parser.add_subparsers(dest='storage_backend')

filesystem_storage_parser = storage_parser_subparsers.add_parser('filesystem')
filesystem_storage_parser.add_argument('--vm-dir', required=True)
filesystem_storage_parser.add_argument('--image-dir', required=True)

ceph_storage_parser = storage_parser_subparsers.add_parser('ceph')
ceph_storage_parser.add_argument('--ceph-vm-pool', required=True)
ceph_storage_parser.add_argument('--ceph-image-pool', required=True)


def update_config(section, kwargs):
    uncloud_config = shared.etcd_client.get(shared.settings.config_key, value_in_json=True)
    if not uncloud_config:
        uncloud_config = {}
    else:
        uncloud_config = uncloud_config.value

    uncloud_config[section] = kwargs
    shared.etcd_client.put(shared.settings.config_key, uncloud_config, value_in_json=True)


def main(arguments):
    subcommand = arguments['subcommand']
    if not subcommand:
        arg_parser.print_help()
    else:
        update_config(subcommand, arguments)
85 uncloud/filescanner/main.py Executable file
@ -0,0 +1,85 @@
import glob
import os
import pathlib
import subprocess as sp
import time
import argparse
import bitmath

from uuid import uuid4

from . import logger
from uncloud.common.shared import shared

arg_parser = argparse.ArgumentParser('filescanner', add_help=False)
arg_parser.add_argument('--hostname', required=True)


def sha512sum(file: str):
    """Use the sha512sum utility to compute the sha512 sum of arg:file

    IF arg:file does not exist:
        raise FileNotFoundError exception
    ELSE IF sum successfully computed:
        return computed sha512 sum
    ELSE:
        return None
    """
    if not isinstance(file, str):
        raise TypeError
    try:
        output = sp.check_output(['sha512sum', file], stderr=sp.PIPE)
    except sp.CalledProcessError as e:
        error = e.stderr.decode('utf-8')
        if 'No such file or directory' in error:
            raise FileNotFoundError from None
    else:
        output = output.decode('utf-8').strip()
        output = output.split(' ')
        return output[0]
    return None


def track_file(file, base_dir, host):
    file_path = file.relative_to(base_dir)
    file_str = str(file)
    # Get Username
    try:
        owner = file_path.parts[0]
    except IndexError:
        pass
    else:
        file_path = file_path.relative_to(owner)
        creation_date = time.ctime(os.stat(file_str).st_ctime)

        entry_key = os.path.join(shared.settings['etcd']['file_prefix'], str(uuid4()))
        entry_value = {
            'filename': str(file_path),
            'owner': owner,
            'sha512sum': sha512sum(file_str),
            'creation_date': creation_date,
            'size': str(bitmath.Byte(os.path.getsize(file_str)).to_MB()),
            'host': host
        }

        logger.info('Tracking %s', file_str)

        shared.etcd_client.put(entry_key, entry_value, value_in_json=True)


def main(arguments):
    hostname = arguments['hostname']
    base_dir = shared.settings['storage']['file_dir']
    # Recursively Get All Files and Folder below BASE_DIR
    files = glob.glob('{}/**'.format(base_dir), recursive=True)
    files = [pathlib.Path(f) for f in files if pathlib.Path(f).is_file()]

    # Files that are already tracked
    tracked_files = [
        pathlib.Path(os.path.join(base_dir, f.value['owner'], f.value['filename']))
        for f in shared.etcd_client.get_prefix(shared.settings['etcd']['file_prefix'], value_in_json=True)
        if f.value['host'] == hostname
    ]
    untracked_files = set(files) - set(tracked_files)
    for file in untracked_files:
        track_file(file, base_dir, hostname)
39 uncloud/hack/config.py Normal file
@ -0,0 +1,39 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
#
# This file is part of uncloud.
#
# uncloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# uncloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
#
#

class Config(object):
    def __init__(self, arguments):
        """ read arguments dicts as a base """

        self.arguments = arguments

        # Split them so *etcd_args can be used and we can
        # iterate over etcd_hosts
        self.etcd_hosts = [arguments['etcd_host']]
        self.etcd_args = {
            'ca_cert': arguments['etcd_ca_cert'],
            'cert_cert': arguments['etcd_cert_cert'],
            'cert_key': arguments['etcd_cert_key'],
            # 'user': None,
            # 'password': None
        }
        self.etcd_prefix = '/nicohack/'
113 uncloud/hack/db.py Normal file
@ -0,0 +1,113 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
#
# This file is part of uncloud.
#
# uncloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# uncloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
#
#

import etcd3
import json
import logging

from functools import wraps
from uncloud import UncloudException

log = logging.getLogger(__name__)


def readable_errors(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except etcd3.exceptions.ConnectionFailedError as e:
            raise UncloudException('Cannot connect to etcd: is etcd running and reachable? {}'.format(e))
        except etcd3.exceptions.ConnectionTimeoutError as e:
            raise UncloudException('etcd connection timeout. {}'.format(e))

    return wrapper


class DB(object):
    def __init__(self, config, prefix="/"):
        self.config = config

        # Root for everything
        self.base_prefix = '/nicohack'

        # Can be set from outside
        self.prefix = prefix

        self.connect()

    @readable_errors
    def connect(self):
        self._db_clients = []
        for endpoint in self.config.etcd_hosts:
            client = etcd3.client(host=endpoint, **self.config.etcd_args)
            self._db_clients.append(client)

    def realkey(self, key):
        return "{}{}/{}".format(self.base_prefix,
                                self.prefix,
                                key)

    @readable_errors
    def get(self, key, as_json=False, **kwargs):
        value, _ = self._db_clients[0].get(self.realkey(key), **kwargs)

        if as_json:
            value = json.loads(value)

        return value

    @readable_errors
    def set(self, key, value, as_json=False, **kwargs):
        if as_json:
            value = json.dumps(value)

        # FIXME: iterate over clients in case of failure ?
        return self._db_clients[0].put(self.realkey(key), value, **kwargs)

    @readable_errors
    def increment(self, key, **kwargs):
        print(self.realkey(key))

        print("prelock")
        lock = self._db_clients[0].lock('/nicohack/foo')
        print("prelockacq")
        lock.acquire()
        print("prelockrelease")
        lock.release()

        with self._db_clients[0].lock("/nicohack/mac/last_used_index") as lock:
            print("in lock")
            pass

        # with self._db_clients[0].lock(self.realkey(key)) as lock:# value = int(self.get(self.realkey(key), **kwargs))
        #     self.set(self.realkey(key), str(value + 1), **kwargs)


if __name__ == '__main__':
    endpoints = [ "https://etcd1.ungleich.ch:2379",
                  "https://etcd2.ungleich.ch:2379",
                  "https://etcd3.ungleich.ch:2379" ]

    db = DB(url=endpoints)
3 uncloud/hack/hackcloud/.gitignore vendored Normal file
@ -0,0 +1,3 @@
*.iso
radvdpid
foo
1 uncloud/hack/hackcloud/__init__.py Normal file
@ -0,0 +1 @@

6 uncloud/hack/hackcloud/etcd-client.sh Normal file
@ -0,0 +1,6 @@
#!/bin/sh

etcdctl --cert=$HOME/vcs/ungleich-dot-cdist/files/etcd/nico.pem \
    --key=/home/nico/vcs/ungleich-dot-cdist/files/etcd/nico-key.pem \
    --cacert=$HOME/vcs/ungleich-dot-cdist/files/etcd/ca.pem \
    --endpoints https://etcd1.ungleich.ch:2379,https://etcd2.ungleich.ch:2379,https://etcd3.ungleich.ch:2379 "$@"
3 uncloud/hack/hackcloud/ifdown.sh Executable file
@ -0,0 +1,3 @@
#!/bin/sh

echo $@
7 uncloud/hack/hackcloud/ifup.sh Executable file
@ -0,0 +1,7 @@
#!/bin/sh

dev=$1; shift

# bridge is setup from outside
ip link set dev "$dev" master ${bridge}
ip link set dev "$dev" up
1 uncloud/hack/hackcloud/mac-last Normal file
@ -0,0 +1 @@
000000000252
1 uncloud/hack/hackcloud/mac-prefix Normal file
@ -0,0 +1 @@
02:00
29 uncloud/hack/hackcloud/net.sh Executable file
@ -0,0 +1,29 @@
#!/bin/sh

set -x

netid=100
dev=wlp2s0
dev=wlp0s20f3
#dev=wlan0

ip=2a0a:e5c1:111:888::48/64
vxlandev=vxlan${netid}
bridgedev=br${netid}

ip -6 link add ${vxlandev} type vxlan \
    id ${netid} \
    dstport 4789 \
    group ff05::${netid} \
    dev ${dev} \
    ttl 5

ip link set ${vxlandev} up


ip link add ${bridgedev} type bridge
ip link set ${bridgedev} up

ip link set ${vxlandev} master ${bridgedev} up

ip addr add ${ip} dev ${bridgedev}
31 uncloud/hack/hackcloud/nftrules Normal file
@ -0,0 +1,31 @@
flush ruleset

table bridge filter {
    chain prerouting {
        type filter hook prerouting priority 0;
        policy accept;

        ibrname br100 jump br100
    }

    chain br100 {
        # Allow all incoming traffic from outside
        iifname vxlan100 accept

        # Default blocks: router advertisements, dhcpv6, dhcpv4
        icmpv6 type nd-router-advert drop
        ip6 version 6 udp sport 547 drop
        ip version 4 udp sport 67 drop

        jump br100_vmlist
        drop
    }
    chain br100_vmlist {
        # VM1
        iifname tap1 ether saddr 02:00:f0:a9:c4:4e ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44e accept

        # VM2
        iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44f accept
        iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:1234::/64 accept
    }
}
13 uncloud/hack/hackcloud/radvd.conf Normal file
@ -0,0 +1,13 @@
interface br100
{
    AdvSendAdvert on;
    MinRtrAdvInterval 3;
    MaxRtrAdvInterval 5;
    AdvDefaultLifetime 3600;

    prefix 2a0a:e5c1:111:888::/64 {
    };

    RDNSS 2a0a:e5c0::3 2a0a:e5c0::4 { AdvRDNSSLifetime 6000; };
    DNSSL place7.ungleich.ch { AdvDNSSLLifetime 6000; } ;
};
3 uncloud/hack/hackcloud/radvd.sh Normal file
@ -0,0 +1,3 @@
#!/bin/sh

radvd -C ./radvd.conf -n -p ./radvdpid
29 uncloud/hack/hackcloud/vm.sh Executable file
@ -0,0 +1,29 @@
#!/bin/sh

# if [ $# -ne 1 ]; then
#    echo "$0: owner"
#    exit 1
# fi

qemu=/usr/bin/qemu-system-x86_64

accel=kvm
#accel=tcg

memory=1024
cores=2
uuid=$(uuidgen)
mac=$(./mac-gen.py)
owner=nico

export bridge=br100

set -x
$qemu -name "uncloud-${uuid}" \
    -machine pc,accel=${accel} \
    -m ${memory} \
    -smp ${cores} \
    -uuid ${uuid} \
    -drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
    -netdev tap,id=netmain,script=./ifup.sh,downscript=./ifdown.sh \
    -device virtio-net-pci,netdev=netmain,id=net0,mac=${mac}
102
uncloud/hack/mac.py
Executable file
102
uncloud/hack/mac.py
Executable file
|
|
@ -0,0 +1,102 @@
|
||||||
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2012 Nico Schottelius (nico-cinv at schottelius.org)
#
# This file is part of cinv.
#
# cinv is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cinv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cinv. If not, see <http://www.gnu.org/licenses/>.
#
#

import argparse
import logging
import os.path
import os
import re
import json

from uncloud import UncloudException
from uncloud.hack.db import DB

log = logging.getLogger(__name__)


class MAC(object):
    def __init__(self, config):
        self.config = config
        self.no_db = self.config.arguments['no_db']
        if not self.no_db:
            self.db = DB(config, prefix="/mac")

        self.prefix = 0x420000000000
        self._number = 0            # Not set by default

    @staticmethod
    def validate_mac(mac):
        if not re.match(r'([0-9A-F]{2}[-:]){5}[0-9A-F]{2}$', mac, re.I):
            raise UncloudException("Not a valid mac address: %s" % mac)

    @staticmethod
    def int_to_mac(number):
        # Helper required by last_used_mac(): render an integer as colon-separated hex.
        b = number.to_bytes(6, byteorder="big")
        return ':'.join(format(s, '02x') for s in b)

    def last_used_index(self):
        if not self.no_db:
            value = self.db.get("last_used_index")
            if not value:
                self.db.set("last_used_index", "0")
                value = self.db.get("last_used_index")
        else:
            value = "0"

        return int(value)

    def last_used_mac(self):
        return self.int_to_mac(self.prefix + self.last_used_index())

    def to_colon_format(self):
        b = self._number.to_bytes(6, byteorder="big")
        return ':'.join(format(s, '02x') for s in b)

    def to_str_format(self):
        b = self._number.to_bytes(6, byteorder="big")
        return ''.join(format(s, '02x') for s in b)

    def create(self):
        last_number = self.last_used_index()

        if last_number == int('0xffffffff', 16):
            raise UncloudException("Exhausted all possible mac addresses - try to free some")

        next_number = last_number + 1
        self._number = self.prefix + next_number

        #next_number_string = "{:012x}".format(next_number)
        #next_mac = self.int_to_mac(next_mac_number)
        # db_entry = {}
        # db_entry['vm_uuid'] = vmuuid
        # db_entry['index'] = next_number
        # db_entry['mac_address'] = next_mac

        # should be one transaction
        # self.db.increment("last_used_index")
        # self.db.set("used/{}".format(next_mac),
        #             db_entry, as_json=True)

    def get_next(self):
        # Used by main.py's --get-new-mac: allocate the next number and render it.
        self.create()
        return self.to_colon_format()

    def __int__(self):
        return self._number

    def __repr__(self):
        return self.to_str_format()

    def __str__(self):
        return self.to_colon_format()
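A quick usage sketch for the MAC class above. The FakeConfig stub is hypothetical and only mimics the arguments dict that the real Config carries, with no_db set the way --no-db would set it:

from uncloud.hack.mac import MAC

class FakeConfig:                # hypothetical stand-in, only for this sketch
    arguments = {'no_db': True}

m = MAC(FakeConfig())
m.create()           # without etcd, last_used_index() is 0, so index 1 is allocated
print(int(m))        # 72567767433217 == 0x420000000001
print(str(m))        # 42:00:00:00:00:01  (colon format via __str__)
print(repr(m))       # 420000000001       (flat hex via __repr__)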
92  uncloud/hack/main.py  Normal file
@@ -0,0 +1,92 @@
import argparse
import logging

from uncloud.hack.vm import VM
from uncloud.hack.config import Config
from uncloud.hack.mac import MAC
from uncloud.hack.net import VXLANBridge, DNSRA

from uncloud import UncloudException

arg_parser = argparse.ArgumentParser('hack', add_help=False)
#description="Commands that are unfinished - use at own risk")
arg_parser.add_argument('--last-used-mac', action='store_true')
arg_parser.add_argument('--get-new-mac', action='store_true')

arg_parser.add_argument('--init-network', help="Initialise networking", action='store_true')
arg_parser.add_argument('--create-vxlan', help="Create a VXLAN device and its bridge", action='store_true')
arg_parser.add_argument('--network', help="/64 IPv6 network")
arg_parser.add_argument('--vxlan-uplink-device', help="The VXLAN underlay device, i.e. eth0")
arg_parser.add_argument('--vni', help="VXLAN ID (decimal)", type=int)
arg_parser.add_argument('--run-dns-ra', action='store_true',
                        help="Provide router advertisements and DNS resolution via dnsmasq")
arg_parser.add_argument('--use-sudo', help="Use sudo for commands requiring root", action='store_true')

arg_parser.add_argument('--create-vm', action='store_true')
arg_parser.add_argument('--destroy-vm', action='store_true')
arg_parser.add_argument('--get-vm-status', action='store_true')
arg_parser.add_argument('--get-vm-vnc', action='store_true')
arg_parser.add_argument('--list-vms', action='store_true')
arg_parser.add_argument('--memory', help="Size of memory (GB)", type=int)
arg_parser.add_argument('--cores', help="Amount of CPU cores", type=int)
arg_parser.add_argument('--image', help="Path (under hackprefix) to OS image")
arg_parser.add_argument('--uuid', help="VM UUID")

arg_parser.add_argument('--no-db', help="Disable connection to etcd. For local testing only!", action='store_true')
arg_parser.add_argument('--hackprefix', help="hackprefix, if you need it you know it (it's where the iso and ifup/down.sh are located)")


log = logging.getLogger(__name__)


def main(arguments):
    config = Config(arguments)

    if arguments['create_vm']:
        vm = VM(config)
        vm.create()

    if arguments['destroy_vm']:
        vm = VM(config)
        vm.stop()

    if arguments['get_vm_status']:
        vm = VM(config)
        vm.status()

    if arguments['get_vm_vnc']:
        vm = VM(config)
        vm.vnc_addr()

    if arguments['list_vms']:
        vm = VM(config)
        vm.list()

    if arguments['last_used_mac']:
        m = MAC(config)
        print(m.last_used_mac())

    if arguments['get_new_mac']:
        print(MAC(config).get_next())

    #if arguments['init_network']:
    if arguments['create_vxlan']:
        if not arguments['network'] or not arguments['vni'] or not arguments['vxlan_uplink_device']:
            raise UncloudException("Initialising the network requires an IPv6 network, a VNI and an uplink device. You can use fd00::/64 and vni=1 for testing (non production!)")
        vb = VXLANBridge(vni=arguments['vni'],
                         route=arguments['network'],
                         uplinkdev=arguments['vxlan_uplink_device'],
                         use_sudo=arguments['use_sudo'])
        vb._setup_vxlan()
        vb._setup_bridge()
        vb._add_vxlan_to_bridge()
        vb._route_network()

    if arguments['run_dns_ra']:
        if not arguments['network'] or not arguments['vni']:
            raise UncloudException("Providing DNS/RAs requires a /64 IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")

        dnsra = DNSRA(route=arguments['network'],
                      vni=arguments['vni'],
                      use_sudo=arguments['use_sudo'])
        dnsra._setup_dnsmasq()
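Because main() takes a plain dict, it can be exercised without the argparse wiring. A sketch, assuming Config needs nothing beyond the arguments dict (its definition is not part of this hunk); keys are spelled the way argparse emits them, with dashes turned into underscores:

from uncloud.hack.main import main

# Every key main() touches, all defaulted except the one being tested.
arguments = {
    'create_vm': False, 'destroy_vm': False,
    'get_vm_status': False, 'get_vm_vnc': False, 'list_vms': False,
    'last_used_mac': True, 'get_new_mac': False,
    'create_vxlan': False, 'run_dns_ra': False,
    'network': None, 'vni': None, 'vxlan_uplink_device': None,
    'use_sudo': False, 'no_db': True, 'hackprefix': None,
    'memory': None, 'cores': None, 'image': None, 'uuid': None,
}
main(arguments)   # prints 42:00:00:00:00:00 on a fresh, db-less setup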
116  uncloud/hack/net.py  Normal file
@@ -0,0 +1,116 @@
import subprocess
import ipaddress
import logging


from uncloud import UncloudException

log = logging.getLogger(__name__)


class VXLANBridge(object):
    cmd_create_vxlan = "{sudo}ip -6 link add {vxlandev} type vxlan id {vni_dec} dstport 4789 group {multicast_address} dev {uplinkdev} ttl 5"
    cmd_up_dev = "{sudo}ip link set {dev} up"
    cmd_create_bridge = "{sudo}ip link add {bridgedev} type bridge"
    cmd_add_to_bridge = "{sudo}ip link set {vxlandev} master {bridgedev} up"
    cmd_add_addr = "{sudo}ip addr add {ip} dev {bridgedev}"
    cmd_add_route_dev = "{sudo}ip route add {route} dev {bridgedev}"

    # VXLAN ids are at maximum 24 bit - use a /104
    multicast_network = ipaddress.IPv6Network("ff05::/104")
    max_vni = (2**24)-1

    def __init__(self,
                 vni,
                 uplinkdev,
                 route=None,
                 use_sudo=False):
        self.config = {}

        if vni > self.max_vni:
            raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))

        if use_sudo:
            self.config['sudo'] = 'sudo '
        else:
            self.config['sudo'] = ''

        self.config['vni_dec'] = vni
        self.config['vni_hex'] = "{:x}".format(vni)
        self.config['multicast_address'] = self.multicast_network[vni]

        self.config['route_network'] = ipaddress.IPv6Network(route)
        self.config['route'] = route

        self.config['uplinkdev'] = uplinkdev
        self.config['vxlandev'] = "vx{}".format(self.config['vni_hex'])
        self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])

    def setup_networking(self):
        pass

    def _setup_vxlan(self):
        self._execute_cmd(self.cmd_create_vxlan)
        self._execute_cmd(self.cmd_up_dev, dev=self.config['vxlandev'])

    def _setup_bridge(self):
        self._execute_cmd(self.cmd_create_bridge)
        self._execute_cmd(self.cmd_up_dev, dev=self.config['bridgedev'])

    def _route_network(self):
        self._execute_cmd(self.cmd_add_route_dev)

    def _add_vxlan_to_bridge(self):
        self._execute_cmd(self.cmd_add_to_bridge)

    def _execute_cmd(self, cmd_string, **kwargs):
        cmd = cmd_string.format(**self.config, **kwargs)
        log.info("Executing: {}".format(cmd))
        subprocess.run(cmd.split())
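Everything about the overlay is derived from the VNI: the device names use its hex form and the multicast group is the VNI-th address of ff05::/104. Rendering the first command for vni=1 on eth0 illustrates the derivation (nothing is executed here):

import ipaddress
from uncloud.hack.net import VXLANBridge

vni = 1
vni_hex = "{:x}".format(vni)                          # '1'
multicast = ipaddress.IPv6Network("ff05::/104")[vni]  # ff05::1

print(VXLANBridge.cmd_create_vxlan.format(
    sudo='', vxlandev="vx" + vni_hex, vni_dec=vni,
    multicast_address=multicast, uplinkdev='eth0'))
# -> ip -6 link add vx1 type vxlan id 1 dstport 4789 group ff05::1 dev eth0 ttl 5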
class ManagementBridge(VXLANBridge):
    pass


class DNSRA(object):
    # VXLAN ids are at maximum 24 bit
    max_vni = (2**24)-1

    # Command to start dnsmasq
    cmd_start_dnsmasq = "{sudo}dnsmasq --interface={bridgedev} --bind-interfaces --dhcp-range={route},ra-only,infinite --enable-ra"

    def __init__(self,
                 vni,
                 route=None,
                 use_sudo=False):
        self.config = {}

        if vni > self.max_vni:
            raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))

        if use_sudo:
            self.config['sudo'] = 'sudo '
        else:
            self.config['sudo'] = ''

        #TODO: remove if not needed
        #self.config['vni_dec'] = vni
        self.config['vni_hex'] = "{:x}".format(vni)

        # dnsmasq only wants the network without the prefix, therefore cut it off
        self.config['route'] = ipaddress.IPv6Network(route).network_address
        self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])

    def _setup_dnsmasq(self):
        self._execute_cmd(self.cmd_start_dnsmasq)

    def _execute_cmd(self, cmd_string, **kwargs):
        cmd = cmd_string.format(**self.config, **kwargs)
        log.info("Executing: {}".format(cmd))
        subprocess.run(cmd.split())


class Firewall(object):
    pass
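For the same test parameters, the dnsmasq invocation that DNSRA would log can be rendered the same way; note how only the bare network address fd00:: ends up in --dhcp-range:

import ipaddress
from uncloud.hack.net import DNSRA

route = ipaddress.IPv6Network("fd00::/64").network_address  # fd00::
print(DNSRA.cmd_start_dnsmasq.format(sudo='', bridgedev='br1', route=route))
# -> dnsmasq --interface=br1 --bind-interfaces --dhcp-range=fd00::,ra-only,infinite --enable-ra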
Some files were not shown because too many files have changed in this diff.