diff --git a/.gitignore b/.gitignore
index 82146fa..6f0d9df 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,17 +2,19 @@
.vscode
-ucloud/docs/build
+uncloud/docs/build
logs.txt
-ucloud.egg-info
+uncloud.egg-info
# run artefacts
default.etcd
__pycache__
# build artefacts
-ucloud/version.py
+uncloud/version.py
build/
venv/
dist/
+
+*.iso
diff --git a/bin/gen-version b/bin/gen-version
new file mode 100755
index 0000000..06c3e22
--- /dev/null
+++ b/bin/gen-version
@@ -0,0 +1,29 @@
+#!/bin/sh
+# -*- coding: utf-8 -*-
+#
+# 2019-2020 Nico Schottelius (nico-uncloud at schottelius.org)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+
+# Wrapper for real script to allow execution from checkout
+dir=${0%/*}
+
+# Ensure version is present - the bundled/shipped version contains a static version,
+# the git version contains a dynamic version
+printf "VERSION = \"%s\"\n" "$(git describe --tags --abbrev=0)" > ${dir}/../uncloud/version.py
diff --git a/bin/ucloud b/bin/uncloud
similarity index 90%
rename from bin/ucloud
rename to bin/uncloud
index e178413..1c572d5 100755
--- a/bin/ucloud
+++ b/bin/uncloud
@@ -25,9 +25,9 @@ dir=${0%/*}
# Ensure version is present - the bundled/shipped version contains a static version,
# the git version contains a dynamic version
-printf "VERSION = \"%s\"\n" "$(git describe)" > ${dir}/../ucloud/version.py
+${dir}/gen-version
libdir=$(cd "${dir}/../" && pwd -P)
export PYTHONPATH="${libdir}"
-"$dir/../scripts/ucloud" "$@"
+"$dir/../scripts/uncloud" "$@"
diff --git a/bin/uncloud-run-reinstall b/bin/uncloud-run-reinstall
new file mode 100755
index 0000000..b211613
--- /dev/null
+++ b/bin/uncloud-run-reinstall
@@ -0,0 +1,29 @@
+#!/bin/sh
+# -*- coding: utf-8 -*-
+#
+# 2012-2019 Nico Schottelius (nico-uncloud at schottelius.org)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+# Wrapper for real script to allow execution from checkout
+dir=${0%/*}
+
+${dir}/gen-version;
+pip uninstall -y uncloud >/dev/null
+python setup.py install >/dev/null
+${dir}/uncloud "$@"
diff --git a/conf/uncloud.conf b/conf/uncloud.conf
new file mode 100644
index 0000000..6a1b500
--- /dev/null
+++ b/conf/uncloud.conf
@@ -0,0 +1,13 @@
+[etcd]
+url = localhost
+port = 2379
+base_prefix = /
+ca_cert
+cert_cert
+cert_key
+
+[client]
+name = replace_me
+realm = replace_me
+seed = replace_me
+api_server = http://localhost:5000
\ No newline at end of file
diff --git a/ucloud/docs/Makefile b/docs/Makefile
similarity index 93%
rename from ucloud/docs/Makefile
rename to docs/Makefile
index 5e7ea85..246b56c 100644
--- a/ucloud/docs/Makefile
+++ b/docs/Makefile
@@ -7,7 +7,7 @@ SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source/
BUILDDIR = build/
-DESTINATION=root@staticweb.ungleich.ch:/home/services/www/ungleichstatic/staticcms.ungleich.ch/www/ucloud/
+DESTINATION=root@staticweb.ungleich.ch:/home/services/www/ungleichstatic/staticcms.ungleich.ch/www/uncloud/
.PHONY: all build clean
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..a5afbaa
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,12 @@
+# uncloud docs
+
+## Requirements
+1. Python3
+2. Sphinx
+
+## Usage
+Run `make build` to build docs.
+
+Run `make clean` to remove build directory.
+
+Run `make publish` to push build dir to https://ungleich.ch/uncloud/
\ No newline at end of file
diff --git a/ucloud/__init__.py b/docs/__init__.py
similarity index 100%
rename from ucloud/__init__.py
rename to docs/__init__.py
diff --git a/ucloud/docs/__init__.py b/docs/source/__init__.py
similarity index 100%
rename from ucloud/docs/__init__.py
rename to docs/source/__init__.py
diff --git a/ucloud/docs/source/admin-guide b/docs/source/admin-guide.rst
similarity index 72%
rename from ucloud/docs/source/admin-guide
rename to docs/source/admin-guide.rst
index ec6597d..b62808d 100644
--- a/ucloud/docs/source/admin-guide
+++ b/docs/source/admin-guide.rst
@@ -56,40 +56,13 @@ To start host we created earlier, execute the following command
ucloud host ungleich.ch
-Create OS Image
----------------
+File & image scanners
+--------------------------
-Create ucloud-init ready OS image (Optional)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This step is optional if you just want to test ucloud. However, sooner or later
-you want to create OS images with ucloud-init to properly
-contexualize VMs.
-
-1. Start a VM with OS image on which you want to install ucloud-init
-2. Execute the following command on the started VM
-
- .. code-block:: sh
-
- apk add git
- git clone https://code.ungleich.ch/ucloud/ucloud-init.git
- cd ucloud-init
- sh ./install.sh
-3. Congratulations. Your image is now ucloud-init ready.
-
-
-Upload Sample OS Image
-~~~~~~~~~~~~~~~~~~~~~~
-Execute the following to get the sample OS image file.
-
-.. code-block:: sh
-
- mkdir /var/www/admin
- (cd /var/www/admin && wget https://cloud.ungleich.ch/s/qTb5dFYW5ii8KsD/download)
-
-Run File Scanner and Image Scanner
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Currently, our uploaded file *alpine-untouched.qcow2* is not tracked by ucloud. We can only make
-images from tracked files. So, we need to track the file by running File Scanner
+Let's assume we have uploaded an *alpine-untouched.qcow2* disk image to our
+uncloud server. Currently, our *alpine-untouched.qcow2* is not tracked by
+uncloud. We can only make images from tracked files. So, we need to track the
+file by running File Scanner
.. code-block:: sh
diff --git a/ucloud/docs/source/conf.py b/docs/source/conf.py
similarity index 90%
rename from ucloud/docs/source/conf.py
rename to docs/source/conf.py
index 9b133f9..c8138a7 100644
--- a/ucloud/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -17,9 +17,9 @@
# -- Project information -----------------------------------------------------
-project = 'ucloud'
-copyright = '2019, ungleich'
-author = 'ungleich'
+project = "uncloud"
+copyright = "2019, ungleich"
+author = "ungleich"
# -- General configuration ---------------------------------------------------
@@ -27,12 +27,12 @@ author = 'ungleich'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- 'sphinx.ext.autodoc',
- 'sphinx_rtd_theme',
+ "sphinx.ext.autodoc",
+ "sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -50,4 +50,4 @@ html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
diff --git a/ucloud/docs/source/diagram-code/ucloud b/docs/source/diagram-code/ucloud
similarity index 100%
rename from ucloud/docs/source/diagram-code/ucloud
rename to docs/source/diagram-code/ucloud
diff --git a/docs/source/hacking.rst b/docs/source/hacking.rst
new file mode 100644
index 0000000..1c750d6
--- /dev/null
+++ b/docs/source/hacking.rst
@@ -0,0 +1,36 @@
+Hacking
+=======
+Using uncloud in hacking (aka development) mode.
+
+
+Get the code
+------------
+.. code-block:: sh
+ :linenos:
+
+ git clone https://code.ungleich.ch/uncloud/uncloud.git
+
+
+
+Install python requirements
+---------------------------
+You need to have python3 installed.
+
+.. code-block:: sh
+ :linenos:
+
+    cd uncloud
+ python -m venv venv
+ . ./venv/bin/activate
+ ./bin/uncloud-run-reinstall
+
+
+
+Install os requirements
+-----------------------
+Install the following software packages: **dnsmasq**.
+
+If you already have a working IPv6 SLAAC and DNS setup,
+this step can be skipped.
+
+Note that you need at least one /64 IPv6 network to run uncloud.
diff --git a/ucloud/docs/source/images/ucloud.svg b/docs/source/images/ucloud.svg
similarity index 100%
rename from ucloud/docs/source/images/ucloud.svg
rename to docs/source/images/ucloud.svg
diff --git a/ucloud/docs/source/index.rst b/docs/source/index.rst
similarity index 90%
rename from ucloud/docs/source/index.rst
rename to docs/source/index.rst
index 879ac32..fad1f88 100644
--- a/ucloud/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -11,12 +11,12 @@ Welcome to ucloud's documentation!
:caption: Contents:
introduction
- user-guide
setup-install
+ vm-images
+ user-guide
admin-guide
- user-guide/how-to-create-an-os-image-for-ucloud
troubleshooting
-
+ hacking
Indices and tables
==================
diff --git a/ucloud/docs/source/introduction.rst b/docs/source/introduction.rst
similarity index 100%
rename from ucloud/docs/source/introduction.rst
rename to docs/source/introduction.rst
diff --git a/ucloud/docs/source/misc/todo.rst b/docs/source/misc/todo.rst
similarity index 100%
rename from ucloud/docs/source/misc/todo.rst
rename to docs/source/misc/todo.rst
diff --git a/ucloud/docs/source/setup-install.rst b/docs/source/setup-install.rst
similarity index 100%
rename from ucloud/docs/source/setup-install.rst
rename to docs/source/setup-install.rst
diff --git a/ucloud/docs/source/theory/summary.rst b/docs/source/theory/summary.rst
similarity index 100%
rename from ucloud/docs/source/theory/summary.rst
rename to docs/source/theory/summary.rst
diff --git a/ucloud/docs/source/troubleshooting.rst b/docs/source/troubleshooting.rst
similarity index 100%
rename from ucloud/docs/source/troubleshooting.rst
rename to docs/source/troubleshooting.rst
diff --git a/ucloud/docs/source/user-guide.rst b/docs/source/user-guide.rst
similarity index 100%
rename from ucloud/docs/source/user-guide.rst
rename to docs/source/user-guide.rst
diff --git a/ucloud/docs/source/user-guide/how-to-create-an-os-image-for-ucloud.rst b/docs/source/user-guide/how-to-create-an-os-image-for-ucloud.rst
similarity index 100%
rename from ucloud/docs/source/user-guide/how-to-create-an-os-image-for-ucloud.rst
rename to docs/source/user-guide/how-to-create-an-os-image-for-ucloud.rst
diff --git a/docs/source/vm-images.rst b/docs/source/vm-images.rst
new file mode 100644
index 0000000..4b2758a
--- /dev/null
+++ b/docs/source/vm-images.rst
@@ -0,0 +1,66 @@
+VM images
+==================================
+
+Overview
+---------
+
+ucloud tries to be least invasive towards VMs and only requires
+strictly necessary changes for running in a virtualised
+environment. This includes configurations for:
+
+* Configuring the network
+* Managing access via ssh keys
+* Resizing the attached disk(s)
+
+Upstream images
+---------------
+
+The 'official' uncloud images are defined in the `uncloud/images
+`_ repository.
+
+How to make your own uncloud images
+-----------------------------------
+
+.. note::
+ It is fairly easy to create your own images for uncloud, as the common
+ operations (which are detailed below) can be automatically handled by the
+ `uncloud/uncloud-init `_ tool.
+
+Network configuration
+~~~~~~~~~~~~~~~~~~~~~
+All VMs in ucloud are required to support IPv6. The primary network
+configuration is always done using SLAAC. A VM thus needs only to be
+configured to
+
+* accept router advertisements on all network interfaces
+* use the router advertisements to configure the network interfaces
+* accept the DNS entries from the router advertisements
+
+
+Configuring SSH keys
+~~~~~~~~~~~~~~~~~~~~
+
+To be able to access the VM, ucloud supports provisioning SSH keys.
+
+To accept ssh keys in your VM, request the URL
+*http://metadata/ssh_keys*. Add the content to the appropriate user's
+**authorized_keys** file. Below you find sample code to accomplish
+this task:
+
+.. code-block:: sh
+
+ tmp=$(mktemp)
+    curl -s http://metadata/ssh_keys > "$tmp"
+ touch ~/.ssh/authorized_keys # ensure it exists
+ cat ~/.ssh/authorized_keys >> "$tmp"
+ sort "$tmp" | uniq > ~/.ssh/authorized_keys
+
+
+Disk resize
+~~~~~~~~~~~
+In virtualised environments, the disk sizes might grow. The operating
+system should detect disks that are bigger than the existing partition
+table and resize accordingly. This task is OS specific.
+
+ucloud does not support shrinking disks due to the complexity and
+intra OS dependencies.
diff --git a/scripts/ucloud b/scripts/ucloud
deleted file mode 100755
index 7be6b24..0000000
--- a/scripts/ucloud
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python3
-
-import argparse
-import multiprocessing as mp
-import logging
-
-from os.path import join as join_path
-from ucloud.sanity_checks import check
-
-if __name__ == "__main__":
- arg_parser = argparse.ArgumentParser(prog='ucloud',
- description='Open Source Cloud Management Software')
- arg_parser.add_argument('component',
- choices=['api', 'scheduler', 'host',
- 'filescanner', 'imagescanner',
- 'metadata'])
- arg_parser.add_argument('component_args', nargs='*')
- args = arg_parser.parse_args()
-
- logging.basicConfig(
- level=logging.DEBUG,
- filename=join_path("/", "etc", "ucloud", "log.txt"),
- filemode="a",
- format="%(name)s %(asctime)s: %(levelname)s - %(message)s",
- datefmt="%d-%b-%y %H:%M:%S",
- )
- try:
- check()
-
- if args.component == 'api':
- from ucloud.api.main import main
-
- main()
- elif args.component == 'host':
- from ucloud.host.main import main
-
- hostname = args.component_args
- mp.set_start_method('spawn')
- main(*hostname)
- elif args.component == 'scheduler':
- from ucloud.scheduler.main import main
-
- main()
- elif args.component == 'filescanner':
- from ucloud.filescanner.main import main
-
- main()
- elif args.component == 'imagescanner':
- from ucloud.imagescanner.main import main
-
- main()
- elif args.component == 'metadata':
- from ucloud.metadata.main import main
-
- main()
-
- except Exception as e:
- logging.exception(e)
- print(e)
\ No newline at end of file
diff --git a/scripts/uncloud b/scripts/uncloud
new file mode 100755
index 0000000..7d38e42
--- /dev/null
+++ b/scripts/uncloud
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+import logging
+import sys
+import importlib
+import argparse
+import os
+
+from etcd3.exceptions import ConnectionFailedError
+
+from uncloud.common import settings
+from uncloud import UncloudException
+from uncloud.common.cli import resolve_otp_credentials
+
+# Components that use etcd
+ETCD_COMPONENTS = ['api', 'scheduler', 'host', 'filescanner',
+ 'imagescanner', 'metadata', 'configure', 'hack']
+
+ALL_COMPONENTS = ETCD_COMPONENTS.copy()
+ALL_COMPONENTS.append('oneshot')
+#ALL_COMPONENTS.append('cli')
+
+
+if __name__ == '__main__':
+ arg_parser = argparse.ArgumentParser()
+ subparsers = arg_parser.add_subparsers(dest='command')
+
+ parent_parser = argparse.ArgumentParser(add_help=False)
+ parent_parser.add_argument('--debug', '-d', action='store_true', default=False,
+ help='More verbose logging')
+ parent_parser.add_argument('--conf-dir', '-c', help='Configuration directory',
+ default=os.path.expanduser('~/uncloud'))
+
+ etcd_parser = argparse.ArgumentParser(add_help=False)
+ etcd_parser.add_argument('--etcd-host')
+ etcd_parser.add_argument('--etcd-port')
+ etcd_parser.add_argument('--etcd-ca-cert', help='CA that signed the etcd certificate')
+ etcd_parser.add_argument('--etcd-cert-cert', help='Path to client certificate')
+ etcd_parser.add_argument('--etcd-cert-key', help='Path to client certificate key')
+
+ for component in ALL_COMPONENTS:
+ mod = importlib.import_module('uncloud.{}.main'.format(component))
+ parser = getattr(mod, 'arg_parser')
+
+ if component in ETCD_COMPONENTS:
+ subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser, etcd_parser])
+ else:
+ subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser])
+
+ arguments = vars(arg_parser.parse_args())
+ etcd_arguments = [key for key, value in arguments.items() if key.startswith('etcd_') and value]
+ etcd_arguments = {
+ 'etcd': {
+ key.replace('etcd_', ''): arguments[key]
+ for key in etcd_arguments
+ }
+ }
+ if not arguments['command']:
+ arg_parser.print_help()
+ else:
+ # Initializing Settings and resolving otp_credentials
+ # It is neccessary to resolve_otp_credentials after argument parsing is done because
+ # previously we were reading config file which was fixed to ~/uncloud/uncloud.conf and
+ # providing the default values for --name, --realm and --seed arguments from the values
+ # we read from file. But, now we are asking user about where the config file lives. So,
+ # to providing default value is not possible before parsing arguments. So, we are doing
+ # it after..
+# settings.settings = settings.Settings(arguments['conf_dir'], seed_value=etcd_arguments)
+# resolve_otp_credentials(arguments)
+
+ name = arguments.pop('command')
+ mod = importlib.import_module('uncloud.{}.main'.format(name))
+ main = getattr(mod, 'main')
+
+ if arguments['debug']:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ log = logging.getLogger()
+
+ try:
+ main(arguments)
+ except UncloudException as err:
+ log.error(err)
+# except ConnectionFailedError as err:
+# log.error('Cannot connect to etcd: {}'.format(err))
+ except Exception as err:
+ log.exception(err)
diff --git a/setup.py b/setup.py
index 9a35f27..12da6b8 100644
--- a/setup.py
+++ b/setup.py
@@ -1,35 +1,50 @@
+import os
+
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
-setup(name='ucloud',
- version='0.0.1',
- description='All ucloud server components.',
- url='https://code.ungleich.ch/ucloud/ucloud',
- long_description=long_description,
- long_description_content_type='text/markdown',
- classifiers=[
- 'Development Status :: 3 - Alpha',
- 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
- 'Programming Language :: Python :: 3'
- ],
- author='ungleich',
- author_email='technik@ungleich.ch',
- packages=find_packages(),
- install_requires=[
- 'requests',
- 'python-decouple',
- 'flask',
- 'flask-restful',
- 'bitmath',
- 'pyotp',
- 'sshtunnel',
- 'sphinx',
- 'pynetbox',
- 'sphinx-rtd-theme',
- 'etcd3_wrapper @ https://code.ungleich.ch/ungleich-public/etcd3_wrapper/repository/master/archive.tar.gz#egg=etcd3_wrapper',
- 'etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3',
- ],
- scripts=['scripts/ucloud'],
- zip_safe=False)
+try:
+ import uncloud.version
+
+ version = uncloud.version.VERSION
+except:
+ import subprocess
+
+ c = subprocess.check_output(["git", "describe"])
+ version = c.decode("utf-8").strip()
+
+
+setup(
+ name="uncloud",
+ version=version,
+ description="uncloud cloud management",
+ url="https://code.ungleich.ch/uncloud/uncloud",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ classifiers=[
+ "Development Status :: 3 - Alpha",
+ "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
+ "Programming Language :: Python :: 3",
+ ],
+ author="ungleich",
+ author_email="technik@ungleich.ch",
+ packages=find_packages(),
+ install_requires=[
+ "requests",
+ "Flask>=1.1.1",
+ "flask-restful",
+ "bitmath",
+ "pyotp",
+ "pynetbox",
+ "colorama",
+ "etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3",
+ "marshmallow"
+ ],
+ scripts=["scripts/uncloud"],
+ data_files=[
+ (os.path.expanduser("~/uncloud/"), ["conf/uncloud.conf"])
+ ],
+ zip_safe=False,
+)
diff --git a/ucloud/api/create_image_store.py b/ucloud/api/create_image_store.py
deleted file mode 100755
index 17fa63c..0000000
--- a/ucloud/api/create_image_store.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import json
-import os
-
-from uuid import uuid4
-
-from ucloud.config import etcd_client, env_vars
-
-data = {
- "is_public": True,
- "type": "ceph",
- "name": "images",
- "description": "first ever public image-store",
- "attributes": {"list": [], "key": [], "pool": "images"},
-}
-
-etcd_client.put(os.path.join(env_vars.get('IMAGE_STORE_PREFIX'), uuid4().hex), json.dumps(data))
diff --git a/ucloud/api/main.py b/ucloud/api/main.py
deleted file mode 100644
index 1475fb0..0000000
--- a/ucloud/api/main.py
+++ /dev/null
@@ -1,517 +0,0 @@
-import json
-import pynetbox
-
-from uuid import uuid4
-from os.path import join as join_path
-
-from flask import Flask, request
-from flask_restful import Resource, Api
-
-from ucloud.common import counters
-from ucloud.common.vm import VMStatus
-from ucloud.common.request import RequestEntry, RequestType
-from ucloud.config import (etcd_client, request_pool, vm_pool, host_pool, env_vars, image_storage_handler)
-from . import schemas
-from .helper import generate_mac, mac2ipv6
-from . import logger
-
-app = Flask(__name__)
-api = Api(app)
-
-
-class CreateVM(Resource):
- """API Request to Handle Creation of VM"""
-
- @staticmethod
- def post():
- data = request.json
- validator = schemas.CreateVMSchema(data)
- if validator.is_valid():
- vm_uuid = uuid4().hex
- vm_key = join_path(env_vars.get("VM_PREFIX"), vm_uuid)
- specs = {
- "cpu": validator.specs["cpu"],
- "ram": validator.specs["ram"],
- "os-ssd": validator.specs["os-ssd"],
- "hdd": validator.specs["hdd"],
- }
- macs = [generate_mac() for _ in range(len(data["network"]))]
- tap_ids = [counters.increment_etcd_counter(etcd_client, "/v1/counter/tap")
- for _ in range(len(data["network"]))]
- vm_entry = {
- "name": data["vm_name"],
- "owner": data["name"],
- "owner_realm": data["realm"],
- "specs": specs,
- "hostname": "",
- "status": VMStatus.stopped,
- "image_uuid": validator.image_uuid,
- "log": [],
- "vnc_socket": "",
- "network": list(zip(data["network"], macs, tap_ids)),
- "metadata": {"ssh-keys": []},
- }
- etcd_client.put(vm_key, vm_entry, value_in_json=True)
-
- # Create ScheduleVM Request
- r = RequestEntry.from_scratch(
- type=RequestType.ScheduleVM, uuid=vm_uuid,
- request_prefix=env_vars.get("REQUEST_PREFIX")
- )
- request_pool.put(r)
-
- return {"message": "VM Creation Queued"}, 200
- return validator.get_errors(), 400
-
-
-class VmStatus(Resource):
- @staticmethod
- def get():
- data = request.json
- validator = schemas.VMStatusSchema(data)
- if validator.is_valid():
- vm = vm_pool.get(
- join_path(env_vars.get("VM_PREFIX"), data["uuid"])
- )
- vm_value = vm.value.copy()
- vm_value["ip"] = []
- for network_mac_and_tap in vm.network:
- network_name, mac, tap = network_mac_and_tap
- network = etcd_client.get(
- join_path(
- env_vars.get("NETWORK_PREFIX"),
- data["name"],
- network_name,
- ),
- value_in_json=True,
- )
- ipv6_addr = network.value.get("ipv6").split("::")[0] + "::"
- vm_value["ip"].append(mac2ipv6(mac, ipv6_addr))
- vm.value = vm_value
- return vm.value
- else:
- return validator.get_errors(), 400
-
-
-class CreateImage(Resource):
- @staticmethod
- def post():
- data = request.json
- validator = schemas.CreateImageSchema(data)
- if validator.is_valid():
- file_entry = etcd_client.get(
- join_path(env_vars.get("FILE_PREFIX"), data["uuid"])
- )
- file_entry_value = json.loads(file_entry.value)
-
- image_entry_json = {
- "status": "TO_BE_CREATED",
- "owner": file_entry_value["owner"],
- "filename": file_entry_value["filename"],
- "name": data["name"],
- "store_name": data["image_store"],
- "visibility": "public",
- }
- etcd_client.put(
- join_path(env_vars.get("IMAGE_PREFIX"), data["uuid"]),
- json.dumps(image_entry_json),
- )
-
- return {"message": "Image queued for creation."}
- return validator.get_errors(), 400
-
-
-class ListPublicImages(Resource):
- @staticmethod
- def get():
- images = etcd_client.get_prefix(
- env_vars.get("IMAGE_PREFIX"), value_in_json=True
- )
- r = {
- "images": []
- }
- for image in images:
- image_key = "{}:{}".format(
- image.value["store_name"], image.value["name"]
- )
- r["images"].append(
- {"name": image_key, "status": image.value["status"]}
- )
- return r, 200
-
-
-class VMAction(Resource):
- @staticmethod
- def post():
- data = request.json
- validator = schemas.VmActionSchema(data)
-
- if validator.is_valid():
- vm_entry = vm_pool.get(
- join_path(env_vars.get("VM_PREFIX"), data["uuid"])
- )
- action = data["action"]
-
- if action == "start":
- action = "schedule"
-
- if action == "delete" and vm_entry.hostname == "":
- if image_storage_handler.is_vm_image_exists(vm_entry.uuid):
- r_status = image_storage_handler.delete_vm_image(vm_entry.uuid)
- if r_status:
- etcd_client.client.delete(vm_entry.key)
- return {"message": "VM successfully deleted"}
- else:
- logger.error("Some Error Occurred while deleting VM")
- return {"message": "VM deletion unsuccessfull"}
- else:
- etcd_client.client.delete(vm_entry.key)
- return {"message": "VM successfully deleted"}
-
- r = RequestEntry.from_scratch(
- type="{}VM".format(action.title()),
- uuid=data["uuid"],
- hostname=vm_entry.hostname,
- request_prefix=env_vars.get("REQUEST_PREFIX")
- )
- request_pool.put(r)
- return {"message": "VM {} Queued".format(action.title())}, 200
- else:
- return validator.get_errors(), 400
-
-
-class VMMigration(Resource):
- @staticmethod
- def post():
- data = request.json
- validator = schemas.VmMigrationSchema(data)
-
- if validator.is_valid():
- vm = vm_pool.get(data["uuid"])
-
- r = RequestEntry.from_scratch(
- type=RequestType.ScheduleVM,
- uuid=vm.uuid,
- destination=join_path(
- env_vars.get("HOST_PREFIX"), validator.destination.value
- ),
- migration=True,
- request_prefix=env_vars.get("REQUEST_PREFIX")
- )
- request_pool.put(r)
- return {"message": "VM Migration Initialization Queued"}, 200
- else:
- return validator.get_errors(), 400
-
-
-class ListUserVM(Resource):
- @staticmethod
- def get():
- data = request.json
- validator = schemas.OTPSchema(data)
-
- if validator.is_valid():
- vms = etcd_client.get_prefix(
- env_vars.get("VM_PREFIX"), value_in_json=True
- )
- return_vms = []
- user_vms = filter(lambda v: v.value["owner"] == data["name"], vms)
- for vm in user_vms:
- return_vms.append(
- {
- "name": vm.value["name"],
- "vm_uuid": vm.key.split("/")[-1],
- "specs": vm.value["specs"],
- "status": vm.value["status"],
- "hostname": vm.value["hostname"],
- # "mac": vm.value["mac"],
- "vnc_socket": None
- if vm.value.get("vnc_socket", None) is None
- else vm.value["vnc_socket"],
- }
- )
- if return_vms:
- return {"message": return_vms}, 200
- return {"message": "No VM found"}, 404
-
- else:
- return validator.get_errors(), 400
-
-
-class ListUserFiles(Resource):
- @staticmethod
- def get():
- data = request.json
- validator = schemas.OTPSchema(data)
-
- if validator.is_valid():
- files = etcd_client.get_prefix(
- env_vars.get("FILE_PREFIX"), value_in_json=True
- )
- return_files = []
- user_files = list(
- filter(lambda f: f.value["owner"] == data["name"], files)
- )
- for file in user_files:
- return_files.append(
- {
- "filename": file.value["filename"],
- "uuid": file.key.split("/")[-1],
- }
- )
- return {"message": return_files}, 200
- else:
- return validator.get_errors(), 400
-
-
-class CreateHost(Resource):
- @staticmethod
- def post():
- data = request.json
- validator = schemas.CreateHostSchema(data)
- if validator.is_valid():
- host_key = join_path(env_vars.get("HOST_PREFIX"), uuid4().hex)
- host_entry = {
- "specs": data["specs"],
- "hostname": data["hostname"],
- "status": "DEAD",
- "last_heartbeat": "",
- }
- etcd_client.put(host_key, host_entry, value_in_json=True)
-
- return {"message": "Host Created"}, 200
-
- return validator.get_errors(), 400
-
-
-class ListHost(Resource):
- @staticmethod
- def get():
- hosts = host_pool.hosts
- r = {
- host.key: {
- "status": host.status,
- "specs": host.specs,
- "hostname": host.hostname,
- }
- for host in hosts
- }
- return r, 200
-
-
-class GetSSHKeys(Resource):
- @staticmethod
- def get():
- data = request.json
- validator = schemas.GetSSHSchema(data)
- if validator.is_valid():
- if not validator.key_name.value:
-
- # {user_prefix}/{realm}/{name}/key/
- etcd_key = join_path(
- env_vars.get('USER_PREFIX'),
- data["realm"],
- data["name"],
- "key",
- )
- etcd_entry = etcd_client.get_prefix(
- etcd_key, value_in_json=True
- )
-
- keys = {
- key.key.split("/")[-1]: key.value for key in etcd_entry
- }
- return {"keys": keys}
- else:
-
- # {user_prefix}/{realm}/{name}/key/{key_name}
- etcd_key = join_path(
- env_vars.get('USER_PREFIX'),
- data["realm"],
- data["name"],
- "key",
- data["key_name"],
- )
- etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
-
- if etcd_entry:
- return {
- "keys": {
- etcd_entry.key.split("/")[-1]: etcd_entry.value
- }
- }
- else:
- return {"keys": {}}
- else:
- return validator.get_errors(), 400
-
-
-class AddSSHKey(Resource):
- @staticmethod
- def post():
- data = request.json
- validator = schemas.AddSSHSchema(data)
- if validator.is_valid():
-
- # {user_prefix}/{realm}/{name}/key/{key_name}
- etcd_key = join_path(
- env_vars.get("USER_PREFIX"),
- data["realm"],
- data["name"],
- "key",
- data["key_name"],
- )
- etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
- if etcd_entry:
- return {
- "message": "Key with name '{}' already exists".format(
- data["key_name"]
- )
- }
- else:
- # Key Not Found. It implies user' haven't added any key yet.
- etcd_client.put(etcd_key, data["key"], value_in_json=True)
- return {"message": "Key added successfully"}
- else:
- return validator.get_errors(), 400
-
-
-class RemoveSSHKey(Resource):
- @staticmethod
- def get():
- data = request.json
- validator = schemas.RemoveSSHSchema(data)
- if validator.is_valid():
-
- # {user_prefix}/{realm}/{name}/key/{key_name}
- etcd_key = join_path(
- env_vars.get("USER_PREFIX"),
- data["realm"],
- data["name"],
- "key",
- data["key_name"],
- )
- etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
- if etcd_entry:
- etcd_client.client.delete(etcd_key)
- return {"message": "Key successfully removed."}
- else:
- return {
- "message": "No Key with name '{}' Exists at all.".format(
- data["key_name"]
- )
- }
- else:
- return validator.get_errors(), 400
-
-
-class CreateNetwork(Resource):
- @staticmethod
- def post():
- data = request.json
- validator = schemas.CreateNetwork(data)
-
- if validator.is_valid():
-
- network_entry = {
- "id": counters.increment_etcd_counter(
- etcd_client, "/v1/counter/vxlan"
- ),
- "type": data["type"],
- }
- if validator.user.value:
- nb = pynetbox.api(
- url=env_vars.get("NETBOX_URL"),
- token=env_vars.get("NETBOX_TOKEN"),
- )
- nb_prefix = nb.ipam.prefixes.get(
- prefix=env_vars.get("PREFIX")
- )
-
- prefix = nb_prefix.available_prefixes.create(
- data={
- "prefix_length": env_vars.get(
- "PREFIX_LENGTH", cast=int
- ),
- "description": '{}\'s network "{}"'.format(
- data["name"], data["network_name"]
- ),
- "is_pool": True,
- }
- )
- network_entry["ipv6"] = prefix["prefix"]
- else:
- network_entry["ipv6"] = "fd00::/64"
-
- network_key = join_path(
- env_vars.get("NETWORK_PREFIX"),
- data["name"],
- data["network_name"],
- )
- etcd_client.put(network_key, network_entry, value_in_json=True)
- return {"message": "Network successfully added."}
- else:
- return validator.get_errors(), 400
-
-
-class ListUserNetwork(Resource):
- @staticmethod
- def get():
- data = request.json
- validator = schemas.OTPSchema(data)
-
- if validator.is_valid():
- prefix = join_path(
- env_vars.get("NETWORK_PREFIX"), data["name"]
- )
- networks = etcd_client.get_prefix(prefix, value_in_json=True)
- user_networks = []
- for net in networks:
- net.value["name"] = net.key.split("/")[-1]
- user_networks.append(net.value)
- return {"networks": user_networks}, 200
- else:
- return validator.get_errors(), 400
-
-
-api.add_resource(CreateVM, "/vm/create")
-api.add_resource(VmStatus, "/vm/status")
-
-api.add_resource(VMAction, "/vm/action")
-api.add_resource(VMMigration, "/vm/migrate")
-
-api.add_resource(CreateImage, "/image/create")
-api.add_resource(ListPublicImages, "/image/list-public")
-
-api.add_resource(ListUserVM, "/user/vms")
-api.add_resource(ListUserFiles, "/user/files")
-api.add_resource(ListUserNetwork, "/user/networks")
-
-api.add_resource(AddSSHKey, "/user/add-ssh")
-api.add_resource(RemoveSSHKey, "/user/remove-ssh")
-api.add_resource(GetSSHKeys, "/user/get-ssh")
-
-api.add_resource(CreateHost, "/host/create")
-api.add_resource(ListHost, "/host/list")
-
-api.add_resource(CreateNetwork, "/network/create")
-
-
-def main():
- image_stores = list(etcd_client.get_prefix(env_vars.get('IMAGE_STORE_PREFIX'), value_in_json=True))
- if len(image_stores) == 0:
- data = {
- "is_public": True,
- "type": "ceph",
- "name": "images",
- "description": "first ever public image-store",
- "attributes": {"list": [], "key": [], "pool": "images"},
- }
-
- etcd_client.put(join_path(env_vars.get('IMAGE_STORE_PREFIX'), uuid4().hex), json.dumps(data))
-
- app.run(host="::", debug=True)
-
-
-if __name__ == "__main__":
- main()
diff --git a/ucloud/common/helpers.py b/ucloud/common/helpers.py
deleted file mode 100644
index 1bdf0b4..0000000
--- a/ucloud/common/helpers.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import logging
-import socket
-import requests
-import json
-
-from ipaddress import ip_address
-
-from os.path import join as join_path
-
-
-def create_package_loggers(packages, base_path, mode="a"):
- loggers = {}
- for pkg in packages:
- logger = logging.getLogger(pkg)
- logger_handler = logging.FileHandler(
- join_path(base_path, "{}.txt".format(pkg)),
- mode=mode
- )
- logger.setLevel(logging.DEBUG)
- logger_handler.setFormatter(logging.Formatter(fmt="%(asctime)s: %(levelname)s - %(message)s",
- datefmt="%d-%b-%y %H:%M:%S"))
- logger.addHandler(logger_handler)
- loggers[pkg] = logger
-
-
-# TODO: Should be removed as soon as migration
-# mechanism is finalized inside ucloud
-def get_ipv4_address():
- # If host is connected to internet
- # Return IPv4 address of machine
- # Otherwise, return 127.0.0.1
- with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
- try:
- s.connect(("8.8.8.8", 80))
- except socket.timeout:
- address = "127.0.0.1"
- except Exception as e:
- logging.getLogger().exception(e)
- address = "127.0.0.1"
- else:
- address = s.getsockname()[0]
-
- return address
-
-
-def get_ipv6_address():
- try:
- r = requests.get("https://api6.ipify.org?format=json")
- content = json.loads(r.content.decode("utf-8"))
- ip = ip_address(content["ip"]).exploded
- except Exception as e:
- logging.exception(e)
- else:
- return ip
diff --git a/ucloud/config.py b/ucloud/config.py
deleted file mode 100644
index 7c141a3..0000000
--- a/ucloud/config.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from etcd3_wrapper import Etcd3Wrapper
-
-from ucloud.common.host import HostPool
-from ucloud.common.request import RequestPool
-from ucloud.common.vm import VmPool
-from ucloud.common.storage_handlers import FileSystemBasedImageStorageHandler, CEPHBasedImageStorageHandler
-from decouple import Config, RepositoryEnv, RepositoryEmpty
-
-
-# Try importing config, but don't fail if it does not exist
-try:
- env_vars = Config(RepositoryEnv('/etc/ucloud/ucloud.conf'))
-except FileNotFoundError:
- env_vars = Config(RepositoryEmpty())
-
-
-etcd_wrapper_args = ()
-etcd_wrapper_kwargs = {
- 'host': env_vars.get('ETCD_URL', 'localhost'),
- 'port': env_vars.get('ETCD_PORT', 2379),
- 'ca_cert': env_vars.get('CA_CERT', None),
- 'cert_cert': env_vars.get('CERT_CERT', None),
- 'cert_key': env_vars.get('CERT_KEY', None)
-}
-
-etcd_client = Etcd3Wrapper(*etcd_wrapper_args, **etcd_wrapper_kwargs)
-
-host_pool = HostPool(etcd_client, env_vars.get('HOST_PREFIX'))
-vm_pool = VmPool(etcd_client, env_vars.get('VM_PREFIX'))
-request_pool = RequestPool(etcd_client, env_vars.get('REQUEST_PREFIX'))
-
-running_vms = []
-
-__storage_backend = env_vars.get("STORAGE_BACKEND")
-if __storage_backend == "filesystem":
- image_storage_handler = FileSystemBasedImageStorageHandler(vm_base=env_vars.get("VM_DIR"),
- image_base=env_vars.get("IMAGE_DIR"))
-elif __storage_backend == "ceph":
- image_storage_handler = CEPHBasedImageStorageHandler(vm_base="ssd", image_base="ssd")
-else:
- raise Exception("Unknown Image Storage Handler")
diff --git a/ucloud/filescanner/main.py b/ucloud/filescanner/main.py
deleted file mode 100755
index b70cb5b..0000000
--- a/ucloud/filescanner/main.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import glob
-import os
-import pathlib
-import subprocess as sp
-import time
-from uuid import uuid4
-
-from . import logger
-from ucloud.config import env_vars, etcd_client
-
-
-def getxattr(file, attr):
- """Get specified user extended attribute (arg:attr) of a file (arg:file)"""
- try:
- attr = "user." + attr
- value = sp.check_output(['getfattr', file,
- '--name', attr,
- '--only-values',
- '--absolute-names'], stderr=sp.DEVNULL)
- value = value.decode("utf-8")
- except sp.CalledProcessError as e:
- logger.exception(e)
- value = None
-
- return value
-
-
-def setxattr(file, attr, value):
- """Set specified user extended attribute (arg:attr) equal to (arg:value)
- of a file (arg:file)"""
-
- attr = "user." + attr
- sp.check_output(['setfattr', file,
- '--name', attr,
- '--value', str(value)])
-
-
-def sha512sum(file: str):
- """Use sha512sum utility to compute sha512 sum of arg:file
-
- IF arg:file does not exists:
- raise FileNotFoundError exception
- ELSE IF sum successfully computer:
- return computed sha512 sum
- ELSE:
- return None
- """
- if not isinstance(file, str): raise TypeError
- try:
- output = sp.check_output(["sha512sum", file], stderr=sp.PIPE)
- except sp.CalledProcessError as e:
- error = e.stderr.decode("utf-8")
- if "No such file or directory" in error:
- raise FileNotFoundError from None
- else:
- output = output.decode("utf-8").strip()
- output = output.split(" ")
- return output[0]
- return None
-
-
-try:
- sp.check_output(['which', 'getfattr'])
- sp.check_output(['which', 'setfattr'])
-except Exception as e:
- logger.exception(e)
- print('Make sure you have getfattr and setfattr available')
- exit(1)
-
-
-def main():
- BASE_DIR = env_vars.get("BASE_DIR")
-
- FILE_PREFIX = env_vars.get("FILE_PREFIX")
-
- # Recursively Get All Files and Folder below BASE_DIR
- files = glob.glob("{}/**".format(BASE_DIR), recursive=True)
-
- # Retain only Files
- files = list(filter(os.path.isfile, files))
-
- untracked_files = list(
- filter(lambda f: not bool(getxattr(f, "user.utracked")), files)
- )
-
- tracked_files = list(
- filter(lambda f: f not in untracked_files, files)
- )
- for file in untracked_files:
- file_id = uuid4()
-
- # Get Username
- owner = pathlib.Path(file).parts[3]
- # Get Creation Date of File
- # Here, we are assuming that ctime is creation time
- # which is mostly not true.
- creation_date = time.ctime(os.stat(file).st_ctime)
-
- # Get File Size
- size = os.path.getsize(file)
-
- # Compute sha512 sum
- sha_sum = sha512sum(file)
-
- # File Path excluding base and username
- file_path = pathlib.Path(file).parts[4:]
- file_path = os.path.join(*file_path)
-
- # Create Entry
- entry_key = os.path.join(FILE_PREFIX, str(file_id))
- entry_value = {
- "filename": file_path,
- "owner": owner,
- "sha512sum": sha_sum,
- "creation_date": creation_date,
- "size": size
- }
-
- print("Tracking {}".format(file))
- # Insert Entry
- etcd_client.put(entry_key, entry_value, value_in_json=True)
- setxattr(file, "user.utracked", True)
-
-
-if __name__ == "__main__":
- main()
diff --git a/ucloud/host/helper.py b/ucloud/host/helper.py
deleted file mode 100644
index edcb82d..0000000
--- a/ucloud/host/helper.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import socket
-from contextlib import closing
-
-
-def find_free_port():
- with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
- try:
- s.bind(('', 0))
- s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except Exception:
- return None
- else:
- return s.getsockname()[1]
diff --git a/ucloud/host/main.py b/ucloud/host/main.py
deleted file mode 100755
index ccf0a8d..0000000
--- a/ucloud/host/main.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import argparse
-import multiprocessing as mp
-import time
-
-from etcd3_wrapper import Etcd3Wrapper
-
-from ucloud.common.request import RequestEntry, RequestType
-from ucloud.config import (vm_pool, request_pool,
- etcd_client, running_vms,
- etcd_wrapper_args, etcd_wrapper_kwargs,
- HostPool, env_vars)
-
-from .helper import find_free_port
-from . import virtualmachine
-from ucloud.host import logger
-
-
-def update_heartbeat(hostname):
- """Update Last HeartBeat Time for :param hostname: in etcd"""
- client = Etcd3Wrapper(*etcd_wrapper_args, **etcd_wrapper_kwargs)
- host_pool = HostPool(client, env_vars.get('HOST_PREFIX'))
- this_host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
-
- while True:
- this_host.update_heartbeat()
- host_pool.put(this_host)
- time.sleep(10)
-
-
-def maintenance(host):
- # To capture vm running according to running_vms list
-
- # This is to capture successful migration of a VM.
- # Suppose, this host is running "vm1" and user initiated
- # request to migrate this "vm1" to some other host. On,
- # successful migration the destination host would set
- # the vm hostname to itself. Thus, we are checking
- # whether this host vm is successfully migrated. If yes
- # then we shutdown "vm1" on this host.
-
- to_be_removed = []
- for running_vm in running_vms:
- with vm_pool.get_put(running_vm.key) as vm_entry:
- if vm_entry.hostname != host.key and not vm_entry.in_migration:
- running_vm.handle.shutdown()
- logger.info("VM migration not completed successfully.")
- to_be_removed.append(running_vm)
-
- for r in to_be_removed:
- running_vms.remove(r)
-
- # To check vm running according to etcd entries
- alleged_running_vms = vm_pool.by_status("RUNNING", vm_pool.by_host(host.key))
-
- for vm_entry in alleged_running_vms:
- _vm = virtualmachine.get_vm(running_vms, vm_entry.key)
- # Whether, the allegedly running vm is in our
- # running_vms list or not if it is said to be
- # running on this host but it is not then we
- # need to shut it down
-
- # This is to capture poweroff/shutdown of a VM
- # initiated by user inside VM. OR crash of VM by some
- # user running process
- if (_vm and not _vm.handle.is_running()) or not _vm:
- logger.debug("_vm = %s, is_running() = %s" % (_vm, _vm.handle.is_running()))
- vm_entry.add_log("""{} is not running but is said to be running.
- So, shutting it down and declare it killed""".format(vm_entry.key))
- vm_entry.declare_killed()
- vm_pool.put(vm_entry)
- if _vm:
- running_vms.remove(_vm)
-
-
-def main(hostname):
- heartbeat_updating_process = mp.Process(target=update_heartbeat, args=(hostname,))
-
- host_pool = HostPool(etcd_client, env_vars.get('HOST_PREFIX'))
- host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
- assert host is not None, "No such host with name = {}".format(hostname)
-
- try:
- heartbeat_updating_process.start()
- except Exception as e:
- logger.info("No Need To Go Further. Our heartbeat updating mechanism is not working")
- logger.exception(e)
- exit(-1)
-
- logger.info("%s Session Started %s", '*' * 5, '*' * 5)
-
- # It is seen that under heavy load, timeout event doesn't come
- # in a predictive manner (which is intentional because we give
- # higher priority to customer's requests) which delays heart
- # beat update which in turn misunderstood by scheduler that the
- # host is dead when it is actually alive. So, to ensure that we
- # update the heart beat in a predictive manner we start Heart
- # beat updating mechanism in separated thread
-
- for events_iterator in [
- etcd_client.get_prefix(env_vars.get('REQUEST_PREFIX'), value_in_json=True),
- etcd_client.watch_prefix(env_vars.get('REQUEST_PREFIX'), timeout=10, value_in_json=True),
- ]:
- for request_event in events_iterator:
- request_event = RequestEntry(request_event)
-
- if request_event.type == "TIMEOUT":
- maintenance(host)
- continue
-
- # If the event is directed toward me OR I am destination of a InitVMMigration
- if request_event.hostname == host.key or request_event.destination == host.key:
- logger.debug("VM Request: %s", request_event)
-
- request_pool.client.client.delete(request_event.key)
- vm_entry = vm_pool.get(request_event.uuid)
-
- if vm_entry:
- if request_event.type == RequestType.StartVM:
- virtualmachine.start(vm_entry)
-
- elif request_event.type == RequestType.StopVM:
- virtualmachine.stop(vm_entry)
-
- elif request_event.type == RequestType.DeleteVM:
- virtualmachine.delete(vm_entry)
-
- elif request_event.type == RequestType.InitVMMigration:
- virtualmachine.start(vm_entry, host.key, find_free_port())
-
- elif request_event.type == RequestType.TransferVM:
- virtualmachine.transfer(request_event)
- else:
- logger.info("VM Entry missing")
-
- logger.info("Running VMs %s", running_vms)
-
-
-if __name__ == "__main__":
- argparser = argparse.ArgumentParser()
- argparser.add_argument("hostname", help="Name of this host. e.g /v1/host/1")
- args = argparser.parse_args()
- mp.set_start_method('spawn')
- main(args.hostname)
diff --git a/ucloud/host/qmp/__init__.py b/ucloud/host/qmp/__init__.py
deleted file mode 100755
index 775b397..0000000
--- a/ucloud/host/qmp/__init__.py
+++ /dev/null
@@ -1,537 +0,0 @@
-# QEMU library
-#
-# Copyright (C) 2015-2016 Red Hat Inc.
-# Copyright (C) 2012 IBM Corp.
-#
-# Authors:
-# Fam Zheng
-#
-# This work is licensed under the terms of the GNU GPL, version 2. See
-# the COPYING file in the top-level directory.
-#
-# Based on qmp.py.
-#
-
-import errno
-import logging
-import os
-import shutil
-import socket
-import subprocess
-import tempfile
-
-from . import qmp
-
-LOG = logging.getLogger(__name__)
-
-# Mapping host architecture to any additional architectures it can
-# support which often includes its 32 bit cousin.
-ADDITIONAL_ARCHES = {
- "x86_64": "i386",
- "aarch64": "armhf"
-}
-
-
-def kvm_available(target_arch=None):
- host_arch = os.uname()[4]
- if target_arch and target_arch != host_arch:
- if target_arch != ADDITIONAL_ARCHES.get(host_arch):
- return False
- return os.access("/dev/kvm", os.R_OK | os.W_OK)
-
-
-class QEMUMachineError(Exception):
- """
- Exception called when an error in QEMUMachine happens.
- """
-
-
-class QEMUMachineAddDeviceError(QEMUMachineError):
- """
- Exception raised when a request to add a device can not be fulfilled
-
- The failures are caused by limitations, lack of information or conflicting
- requests on the QEMUMachine methods. This exception does not represent
- failures reported by the QEMU binary itself.
- """
-
-
-class MonitorResponseError(qmp.QMPError):
- """
- Represents erroneous QMP monitor reply
- """
-
- def __init__(self, reply):
- try:
- desc = reply["error"]["desc"]
- except KeyError:
- desc = reply
- super(MonitorResponseError, self).__init__(desc)
- self.reply = reply
-
-
-class QEMUMachine(object):
- """
- A QEMU VM
-
- Use this object as a context manager to ensure the QEMU process terminates::
-
- with VM(binary) as vm:
- ...
- # vm is guaranteed to be shut down here
- """
-
- def __init__(self, binary, args=None, wrapper=None, name=None,
- test_dir="/var/tmp", monitor_address=None,
- socket_scm_helper=None):
- '''
- Initialize a QEMUMachine
-
- @param binary: path to the qemu binary
- @param args: list of extra arguments
- @param wrapper: list of arguments used as prefix to qemu binary
- @param name: prefix for socket and log file names (default: qemu-PID)
- @param test_dir: where to create socket and log file
- @param monitor_address: address for QMP monitor
- @param socket_scm_helper: helper program, required for send_fd_scm()
- @note: Qemu process is not started until launch() is used.
- '''
- if args is None:
- args = []
- if wrapper is None:
- wrapper = []
- if name is None:
- name = "qemu-%d" % os.getpid()
- self._name = name
- self._monitor_address = monitor_address
- self._vm_monitor = None
- self._qemu_log_path = None
- self._qemu_log_file = None
- self._popen = None
- self._binary = binary
- self._args = list(args) # Force copy args in case we modify them
- self._wrapper = wrapper
- self._events = []
- self._iolog = None
- self._socket_scm_helper = socket_scm_helper
- self._qmp = None
- self._qemu_full_args = None
- self._test_dir = test_dir
- self._temp_dir = None
- self._launched = False
- self._machine = None
- self._console_set = False
- self._console_device_type = None
- self._console_address = None
- self._console_socket = None
-
- # just in case logging wasn't configured by the main script:
- logging.basicConfig(level=logging.DEBUG)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.shutdown()
- return False
-
- # This can be used to add an unused monitor instance.
- def add_monitor_null(self):
- self._args.append('-monitor')
- self._args.append('null')
-
- def add_fd(self, fd, fdset, opaque, opts=''):
- """
- Pass a file descriptor to the VM
- """
- options = ['fd=%d' % fd,
- 'set=%d' % fdset,
- 'opaque=%s' % opaque]
- if opts:
- options.append(opts)
-
- # This did not exist before 3.4, but since then it is
- # mandatory for our purpose
- if hasattr(os, 'set_inheritable'):
- os.set_inheritable(fd, True)
-
- self._args.append('-add-fd')
- self._args.append(','.join(options))
- return self
-
- # Exactly one of fd and file_path must be given.
- # (If it is file_path, the helper will open that file and pass its
- # own fd)
- def send_fd_scm(self, fd=None, file_path=None):
- # In iotest.py, the qmp should always use unix socket.
- assert self._qmp.is_scm_available()
- if self._socket_scm_helper is None:
- raise QEMUMachineError("No path to socket_scm_helper set")
- if not os.path.exists(self._socket_scm_helper):
- raise QEMUMachineError("%s does not exist" %
- self._socket_scm_helper)
-
- # This did not exist before 3.4, but since then it is
- # mandatory for our purpose
- if hasattr(os, 'set_inheritable'):
- os.set_inheritable(self._qmp.get_sock_fd(), True)
- if fd is not None:
- os.set_inheritable(fd, True)
-
- fd_param = ["%s" % self._socket_scm_helper,
- "%d" % self._qmp.get_sock_fd()]
-
- if file_path is not None:
- assert fd is None
- fd_param.append(file_path)
- else:
- assert fd is not None
- fd_param.append(str(fd))
-
- devnull = open(os.path.devnull, 'rb')
- proc = subprocess.Popen(fd_param, stdin=devnull, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, close_fds=False)
- output = proc.communicate()[0]
- if output:
- LOG.debug(output)
-
- return proc.returncode
-
- @staticmethod
- def _remove_if_exists(path):
- """
- Remove file object at path if it exists
- """
- try:
- os.remove(path)
- except OSError as exception:
- if exception.errno == errno.ENOENT:
- return
- raise
-
- def is_running(self):
- return self._popen is not None and self._popen.poll() is None
-
- def exitcode(self):
- if self._popen is None:
- return None
- return self._popen.poll()
-
- def get_pid(self):
- if not self.is_running():
- return None
- return self._popen.pid
-
- def _load_io_log(self):
- if self._qemu_log_path is not None:
- with open(self._qemu_log_path, "r") as iolog:
- self._iolog = iolog.read()
-
- def _base_args(self):
- if isinstance(self._monitor_address, tuple):
- moncdev = "socket,id=mon,host=%s,port=%s" % (
- self._monitor_address[0],
- self._monitor_address[1])
- else:
- moncdev = 'socket,id=mon,path=%s' % self._vm_monitor
- args = ['-chardev', moncdev,
- '-mon', 'chardev=mon,mode=control']
- if self._machine is not None:
- args.extend(['-machine', self._machine])
- if self._console_set:
- self._console_address = os.path.join(self._temp_dir,
- self._name + "-console.sock")
- chardev = ('socket,id=console,path=%s,server,nowait' %
- self._console_address)
- args.extend(['-chardev', chardev])
- if self._console_device_type is None:
- args.extend(['-serial', 'chardev:console'])
- else:
- device = '%s,chardev=console' % self._console_device_type
- args.extend(['-device', device])
- return args
-
- def _pre_launch(self):
- self._temp_dir = tempfile.mkdtemp(dir=self._test_dir)
- if self._monitor_address is not None:
- self._vm_monitor = self._monitor_address
- else:
- self._vm_monitor = os.path.join(self._temp_dir,
- self._name + "-monitor.sock")
- self._qemu_log_path = os.path.join(self._temp_dir, self._name + ".log")
- self._qemu_log_file = open(self._qemu_log_path, 'wb')
-
- self._qmp = qmp.QEMUMonitorProtocol(self._vm_monitor,
- server=True)
-
- def _post_launch(self):
- self._qmp.accept()
-
- def _post_shutdown(self):
- if self._qemu_log_file is not None:
- self._qemu_log_file.close()
- self._qemu_log_file = None
-
- self._qemu_log_path = None
-
- if self._console_socket is not None:
- self._console_socket.close()
- self._console_socket = None
-
- if self._temp_dir is not None:
- shutil.rmtree(self._temp_dir)
- self._temp_dir = None
-
- def launch(self):
- """
- Launch the VM and make sure we cleanup and expose the
- command line/output in case of exception
- """
-
- if self._launched:
- raise QEMUMachineError('VM already launched')
-
- self._iolog = None
- self._qemu_full_args = None
- try:
- self._launch()
- self._launched = True
- except:
- self.shutdown()
-
- LOG.debug('Error launching VM')
- if self._qemu_full_args:
- LOG.debug('Command: %r', ' '.join(self._qemu_full_args))
- if self._iolog:
- LOG.debug('Output: %r', self._iolog)
- raise Exception(self._iolog)
- raise
-
- def _launch(self):
- """
- Launch the VM and establish a QMP connection
- """
- devnull = open(os.path.devnull, 'rb')
- self._pre_launch()
- self._qemu_full_args = (self._wrapper + [self._binary] +
- self._base_args() + self._args)
- LOG.debug('VM launch command: %r', ' '.join(self._qemu_full_args))
- self._popen = subprocess.Popen(self._qemu_full_args,
- stdin=devnull,
- stdout=self._qemu_log_file,
- stderr=subprocess.STDOUT,
- shell=False,
- close_fds=False)
- self._post_launch()
-
- def wait(self):
- """
- Wait for the VM to power off
- """
- self._popen.wait()
- self._qmp.close()
- self._load_io_log()
- self._post_shutdown()
-
- def shutdown(self):
- """
- Terminate the VM and clean up
- """
- if self.is_running():
- try:
- self._qmp.cmd('quit')
- self._qmp.close()
- except:
- self._popen.kill()
- self._popen.wait()
-
- self._load_io_log()
- self._post_shutdown()
-
- exitcode = self.exitcode()
- if exitcode is not None and exitcode < 0:
- msg = 'qemu received signal %i: %s'
- if self._qemu_full_args:
- command = ' '.join(self._qemu_full_args)
- else:
- command = ''
- LOG.warn(msg, -exitcode, command)
-
- self._launched = False
-
- def qmp(self, cmd, conv_keys=True, **args):
- """
- Invoke a QMP command and return the response dict
- """
- qmp_args = dict()
- for key, value in args.items():
- if conv_keys:
- qmp_args[key.replace('_', '-')] = value
- else:
- qmp_args[key] = value
-
- return self._qmp.cmd(cmd, args=qmp_args)
-
- def command(self, cmd, conv_keys=True, **args):
- """
- Invoke a QMP command.
- On success return the response dict.
- On failure raise an exception.
- """
- reply = self.qmp(cmd, conv_keys, **args)
- if reply is None:
- raise qmp.QMPError("Monitor is closed")
- if "error" in reply:
- raise MonitorResponseError(reply)
- return reply["return"]
-
- def get_qmp_event(self, wait=False):
- """
- Poll for one queued QMP events and return it
- """
- if len(self._events) > 0:
- return self._events.pop(0)
- return self._qmp.pull_event(wait=wait)
-
- def get_qmp_events(self, wait=False):
- """
- Poll for queued QMP events and return a list of dicts
- """
- events = self._qmp.get_events(wait=wait)
- events.extend(self._events)
- del self._events[:]
- self._qmp.clear_events()
- return events
-
- @staticmethod
- def event_match(event, match=None):
- """
- Check if an event matches optional match criteria.
-
- The match criteria takes the form of a matching subdict. The event is
- checked to be a superset of the subdict, recursively, with matching
- values whenever the subdict values are not None.
-
- This has a limitation that you cannot explicitly check for None values.
-
- Examples, with the subdict queries on the left:
- - None matches any object.
- - {"foo": None} matches {"foo": {"bar": 1}}
- - {"foo": None} matches {"foo": 5}
- - {"foo": {"abc": None}} does not match {"foo": {"bar": 1}}
- - {"foo": {"rab": 2}} matches {"foo": {"bar": 1, "rab": 2}}
- """
- if match is None:
- return True
-
- try:
- for key in match:
- if key in event:
- if not QEMUMachine.event_match(event[key], match[key]):
- return False
- else:
- return False
- return True
- except TypeError:
- # either match or event wasn't iterable (not a dict)
- return match == event
-
- def event_wait(self, name, timeout=60.0, match=None):
- """
- event_wait waits for and returns a named event from QMP with a timeout.
-
- name: The event to wait for.
- timeout: QEMUMonitorProtocol.pull_event timeout parameter.
- match: Optional match criteria. See event_match for details.
- """
- return self.events_wait([(name, match)], timeout)
-
- def events_wait(self, events, timeout=60.0):
- """
- events_wait waits for and returns a named event from QMP with a timeout.
-
- events: a sequence of (name, match_criteria) tuples.
- The match criteria are optional and may be None.
- See event_match for details.
- timeout: QEMUMonitorProtocol.pull_event timeout parameter.
- """
-
- def _match(event):
- for name, match in events:
- if (event['event'] == name and
- self.event_match(event, match)):
- return True
- return False
-
- # Search cached events
- for event in self._events:
- if _match(event):
- self._events.remove(event)
- return event
-
- # Poll for new events
- while True:
- event = self._qmp.pull_event(wait=timeout)
- if _match(event):
- return event
- self._events.append(event)
-
- return None
-
- def get_log(self):
- """
- After self.shutdown or failed qemu execution, this returns the output
- of the qemu process.
- """
- return self._iolog
-
- def add_args(self, *args):
- """
- Adds to the list of extra arguments to be given to the QEMU binary
- """
- self._args.extend(args)
-
- def set_machine(self, machine_type):
- """
- Sets the machine type
-
- If set, the machine type will be added to the base arguments
- of the resulting QEMU command line.
- """
- self._machine = machine_type
-
- def set_console(self, device_type=None):
- """
- Sets the device type for a console device
-
- If set, the console device and a backing character device will
- be added to the base arguments of the resulting QEMU command
- line.
-
- This is a convenience method that will either use the provided
- device type, or default to a "-serial chardev:console" command
- line argument.
-
- The actual setting of command line arguments will be be done at
- machine launch time, as it depends on the temporary directory
- to be created.
-
- @param device_type: the device type, such as "isa-serial". If
- None is given (the default value) a "-serial
- chardev:console" command line argument will
- be used instead, resorting to the machine's
- default device type.
- """
- self._console_set = True
- self._console_device_type = device_type
-
- @property
- def console_socket(self):
- """
- Returns a socket connected to the console
- """
- if self._console_socket is None:
- self._console_socket = socket.socket(socket.AF_UNIX,
- socket.SOCK_STREAM)
- self._console_socket.connect(self._console_address)
- return self._console_socket
diff --git a/ucloud/host/qmp/qmp.py b/ucloud/host/qmp/qmp.py
deleted file mode 100755
index bf35d71..0000000
--- a/ucloud/host/qmp/qmp.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# QEMU Monitor Protocol Python class
-#
-# Copyright (C) 2009, 2010 Red Hat Inc.
-#
-# Authors:
-# Luiz Capitulino
-#
-# This work is licensed under the terms of the GNU GPL, version 2. See
-# the COPYING file in the top-level directory.
-
-import errno
-import json
-import logging
-import socket
-
-
-class QMPError(Exception):
- pass
-
-
-class QMPConnectError(QMPError):
- pass
-
-
-class QMPCapabilitiesError(QMPError):
- pass
-
-
-class QMPTimeoutError(QMPError):
- pass
-
-
-class QEMUMonitorProtocol(object):
- #: Logger object for debugging messages
- logger = logging.getLogger('QMP')
- #: Socket's error class
- error = socket.error
- #: Socket's timeout
- timeout = socket.timeout
-
- def __init__(self, address, server=False):
- """
- Create a QEMUMonitorProtocol class.
-
- @param address: QEMU address, can be either a unix socket path (string)
- or a tuple in the form ( address, port ) for a TCP
- connection
- @param server: server mode listens on the socket (bool)
- @raise socket.error on socket connection errors
- @note No connection is established, this is done by the connect() or
- accept() methods
- """
- self.__events = []
- self.__address = address
- self.__sock = self.__get_sock()
- self.__sockfile = None
- if server:
- self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- self.__sock.bind(self.__address)
- self.__sock.listen(1)
-
- def __get_sock(self):
- if isinstance(self.__address, tuple):
- family = socket.AF_INET
- else:
- family = socket.AF_UNIX
- return socket.socket(family, socket.SOCK_STREAM)
-
- def __negotiate_capabilities(self):
- greeting = self.__json_read()
- if greeting is None or "QMP" not in greeting:
- raise QMPConnectError
- # Greeting seems ok, negotiate capabilities
- resp = self.cmd('qmp_capabilities')
- if "return" in resp:
- return greeting
- raise QMPCapabilitiesError
-
- def __json_read(self, only_event=False):
- while True:
- data = self.__sockfile.readline()
- if not data:
- return
- resp = json.loads(data)
- if 'event' in resp:
- self.logger.debug("<<< %s", resp)
- self.__events.append(resp)
- if not only_event:
- continue
- return resp
-
- def __get_events(self, wait=False):
- """
- Check for new events in the stream and cache them in __events.
-
- @param wait (bool): block until an event is available.
- @param wait (float): If wait is a float, treat it as a timeout value.
-
- @raise QMPTimeoutError: If a timeout float is provided and the timeout
- period elapses.
- @raise QMPConnectError: If wait is True but no events could be
- retrieved or if some other error occurred.
- """
-
- # Check for new events regardless and pull them into the cache:
- self.__sock.setblocking(0)
- try:
- self.__json_read()
- except socket.error as err:
- if err[0] == errno.EAGAIN:
- # No data available
- pass
- self.__sock.setblocking(1)
-
- # Wait for new events, if needed.
- # if wait is 0.0, this means "no wait" and is also implicitly false.
- if not self.__events and wait:
- if isinstance(wait, float):
- self.__sock.settimeout(wait)
- try:
- ret = self.__json_read(only_event=True)
- except socket.timeout:
- raise QMPTimeoutError("Timeout waiting for event")
- except:
- raise QMPConnectError("Error while reading from socket")
- if ret is None:
- raise QMPConnectError("Error while reading from socket")
- self.__sock.settimeout(None)
-
- def connect(self, negotiate=True):
- """
- Connect to the QMP Monitor and perform capabilities negotiation.
-
- @return QMP greeting dict
- @raise socket.error on socket connection errors
- @raise QMPConnectError if the greeting is not received
- @raise QMPCapabilitiesError if fails to negotiate capabilities
- """
- self.__sock.connect(self.__address)
- self.__sockfile = self.__sock.makefile()
- if negotiate:
- return self.__negotiate_capabilities()
-
- def accept(self):
- """
- Await connection from QMP Monitor and perform capabilities negotiation.
-
- @return QMP greeting dict
- @raise socket.error on socket connection errors
- @raise QMPConnectError if the greeting is not received
- @raise QMPCapabilitiesError if fails to negotiate capabilities
- """
- self.__sock.settimeout(15)
- self.__sock, _ = self.__sock.accept()
- self.__sockfile = self.__sock.makefile()
- return self.__negotiate_capabilities()
-
- def cmd_obj(self, qmp_cmd):
- """
- Send a QMP command to the QMP Monitor.
-
- @param qmp_cmd: QMP command to be sent as a Python dict
- @return QMP response as a Python dict or None if the connection has
- been closed
- """
- self.logger.debug(">>> %s", qmp_cmd)
- try:
- self.__sock.sendall(json.dumps(qmp_cmd).encode('utf-8'))
- except socket.error as err:
- if err[0] == errno.EPIPE:
- return
- raise socket.error(err)
- resp = self.__json_read()
- self.logger.debug("<<< %s", resp)
- return resp
-
- def cmd(self, name, args=None, cmd_id=None):
- """
- Build a QMP command and send it to the QMP Monitor.
-
- @param name: command name (string)
- @param args: command arguments (dict)
- @param cmd_id: command id (dict, list, string or int)
- """
- qmp_cmd = {'execute': name}
- if args:
- qmp_cmd['arguments'] = args
- if cmd_id:
- qmp_cmd['id'] = cmd_id
- return self.cmd_obj(qmp_cmd)
-
- def command(self, cmd, **kwds):
- """
- Build and send a QMP command to the monitor, report errors if any
- """
- ret = self.cmd(cmd, kwds)
- if "error" in ret:
- raise Exception(ret['error']['desc'])
- return ret['return']
-
- def pull_event(self, wait=False):
- """
- Pulls a single event.
-
- @param wait (bool): block until an event is available.
- @param wait (float): If wait is a float, treat it as a timeout value.
-
- @raise QMPTimeoutError: If a timeout float is provided and the timeout
- period elapses.
- @raise QMPConnectError: If wait is True but no events could be
- retrieved or if some other error occurred.
-
- @return The first available QMP event, or None.
- """
- self.__get_events(wait)
-
- if self.__events:
- return self.__events.pop(0)
- return None
-
- def get_events(self, wait=False):
- """
- Get a list of available QMP events.
-
- @param wait (bool): block until an event is available.
- @param wait (float): If wait is a float, treat it as a timeout value.
-
- @raise QMPTimeoutError: If a timeout float is provided and the timeout
- period elapses.
- @raise QMPConnectError: If wait is True but no events could be
- retrieved or if some other error occurred.
-
- @return The list of available QMP events.
- """
- self.__get_events(wait)
- return self.__events
-
- def clear_events(self):
- """
- Clear current list of pending events.
- """
- self.__events = []
-
- def close(self):
- self.__sock.close()
- self.__sockfile.close()
-
- def settimeout(self, timeout):
- self.__sock.settimeout(timeout)
-
- def get_sock_fd(self):
- return self.__sock.fileno()
-
- def is_scm_available(self):
- return self.__sock.family == socket.AF_UNIX
diff --git a/ucloud/host/virtualmachine.py b/ucloud/host/virtualmachine.py
deleted file mode 100755
index 7524083..0000000
--- a/ucloud/host/virtualmachine.py
+++ /dev/null
@@ -1,384 +0,0 @@
-# QEMU Manual
-# https://qemu.weilnetz.de/doc/qemu-doc.html
-
-# For QEMU Monitor Protocol Commands Information, See
-# https://qemu.weilnetz.de/doc/qemu-doc.html#pcsys_005fmonitor
-
-import os
-import random
-import subprocess as sp
-import tempfile
-import time
-
-from functools import wraps
-from string import Template
-from typing import Union
-from os.path import join as join_path
-
-import bitmath
-import sshtunnel
-
-from ucloud.common.helpers import get_ipv6_address
-from ucloud.common.request import RequestEntry, RequestType
-from ucloud.common.vm import VMEntry, VMStatus
-from ucloud.config import (etcd_client, request_pool,
- running_vms, vm_pool, env_vars,
- image_storage_handler)
-from . import qmp
-from ucloud.host import logger
-
-
-class VM:
- def __init__(self, key, handle, vnc_socket_file):
- self.key = key # type: str
- self.handle = handle # type: qmp.QEMUMachine
- self.vnc_socket_file = vnc_socket_file # type: tempfile.NamedTemporaryFile
-
- def __repr__(self):
- return "VM({})".format(self.key)
-
-
-def delete_network_interface(iface):
- try:
- sp.check_output(['ip', 'link', 'del', iface])
- except Exception:
- pass
-
-
-def resolve_network(network_name, network_owner):
- network = etcd_client.get(join_path(env_vars.get("NETWORK_PREFIX"),
- network_owner,
- network_name),
- value_in_json=True)
- return network
-
-
-def delete_vm_network(vm_entry):
- try:
- for network in vm_entry.network:
- network_name = network[0]
- tap_mac = network[1]
- tap_id = network[2]
-
- delete_network_interface('tap{}'.format(tap_id))
-
- owners_vms = vm_pool.by_owner(vm_entry.owner)
- owners_running_vms = vm_pool.by_status(VMStatus.running,
- _vms=owners_vms)
-
- networks = map(lambda n: n[0],
- map(lambda vm: vm.network, owners_running_vms)
- )
- networks_in_use_by_user_vms = [vm[0] for vm in networks]
- if network_name not in networks_in_use_by_user_vms:
- network_entry = resolve_network(network[0], vm_entry.owner)
- if network_entry:
- network_type = network_entry.value["type"]
- network_id = network_entry.value["id"]
- if network_type == "vxlan":
- delete_network_interface('br{}'.format(network_id))
- delete_network_interface('vxlan{}'.format(network_id))
- except Exception:
- logger.exception("Exception in network interface deletion")
-
-
-def create_dev(script, _id, dev, ip=None):
- command = [script, _id, dev]
- if ip:
- command.append(ip)
- try:
- output = sp.check_output(command, stderr=sp.PIPE)
- except Exception as e:
- print(e.stderr)
- return None
- else:
- return output.decode("utf-8").strip()
-
-
-def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
- network_script_base = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'network')
- vxlan = create_dev(script=os.path.join(network_script_base, 'create-vxlan.sh'),
- _id=_id, dev=_dev)
- if vxlan:
- bridge = create_dev(script=os.path.join(network_script_base, 'create-bridge.sh'),
- _id=_id, dev=vxlan, ip=ip)
- if bridge:
- tap = create_dev(script=os.path.join(network_script_base, 'create-tap.sh'),
- _id=str(tap_id), dev=bridge)
- if tap:
- return tap
-
-
-def random_bytes(num=6):
- return [random.randrange(256) for _ in range(num)]
-
-
-def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='%02x'):
- mac = random_bytes()
- if oui:
- if type(oui) == str:
- oui = [int(chunk) for chunk in oui.split(separator)]
- mac = oui + random_bytes(num=6 - len(oui))
- else:
- if multicast:
- mac[0] |= 1 # set bit 0
- else:
- mac[0] &= ~1 # clear bit 0
- if uaa:
- mac[0] &= ~(1 << 1) # clear bit 1
- else:
- mac[0] |= 1 << 1 # set bit 1
- return separator.join(byte_fmt % b for b in mac)
-
-
-def update_radvd_conf(etcd_client):
- network_script_base = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'network')
-
- networks = {
- net.value['ipv6']: net.value['id']
- for net in etcd_client.get_prefix('/v1/network/', value_in_json=True)
- if net.value.get('ipv6')
- }
- radvd_template = open(os.path.join(network_script_base,
- 'radvd-template.conf'), 'r').read()
- radvd_template = Template(radvd_template)
-
- content = [radvd_template.safe_substitute(bridge='br{}'.format(networks[net]),
- prefix=net)
- for net in networks if networks.get(net)]
-
- with open('/etc/radvd.conf', 'w') as radvd_conf:
- radvd_conf.writelines(content)
- try:
- sp.check_output(['systemctl', 'restart', 'radvd'])
- except Exception:
- sp.check_output(['service', 'radvd', 'restart'])
-
-
-def get_start_command_args(vm_entry, vnc_sock_filename: str, migration=False, migration_port=None):
- threads_per_core = 1
- vm_memory = int(bitmath.parse_string_unsafe(vm_entry.specs["ram"]).to_MB())
- vm_cpus = int(vm_entry.specs["cpu"])
- vm_uuid = vm_entry.uuid
- vm_networks = vm_entry.network
-
- command = "-name {}_{}".format(vm_entry.owner, vm_entry.name)
-
- command += " -drive file={},format=raw,if=virtio,cache=none".format(
- image_storage_handler.qemu_path_string(vm_uuid)
- )
- command += " -device virtio-rng-pci -vnc unix:{}".format(vnc_sock_filename)
- command += " -m {} -smp cores={},threads={}".format(
- vm_memory, vm_cpus, threads_per_core
- )
-
- if migration:
- command += " -incoming tcp:[::]:{}".format(migration_port)
-
- tap = None
- for network_mac_and_tap in vm_networks:
- network_name, mac, tap = network_mac_and_tap
-
- _key = os.path.join(env_vars.get('NETWORK_PREFIX'), vm_entry.owner, network_name)
- network = etcd_client.get(_key, value_in_json=True)
- network_type = network.value["type"]
- network_id = str(network.value["id"])
- network_ipv6 = network.value["ipv6"]
-
- if network_type == "vxlan":
- tap = create_vxlan_br_tap(_id=network_id,
- _dev=env_vars.get("VXLAN_PHY_DEV"),
- tap_id=tap,
- ip=network_ipv6)
- update_radvd_conf(etcd_client)
-
- command += " -netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no" \
- " -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}" \
- .format(tap=tap, net_id=network_id, mac=mac)
-
- return command.split(" ")
-
-
-def create_vm_object(vm_entry, migration=False, migration_port=None):
- # NOTE: If migration suddenly stop working, having different
- # VNC unix filename on source and destination host can
- # be a possible cause of it.
-
- # REQUIREMENT: Use Unix Socket instead of TCP Port for VNC
- vnc_sock_file = tempfile.NamedTemporaryFile()
-
- qemu_args = get_start_command_args(
- vm_entry=vm_entry,
- vnc_sock_filename=vnc_sock_file.name,
- migration=migration,
- migration_port=migration_port,
- )
- qemu_machine = qmp.QEMUMachine("/usr/bin/qemu-system-x86_64", args=qemu_args)
- return VM(vm_entry.key, qemu_machine, vnc_sock_file)
-
-
-def get_vm(vm_list: list, vm_key) -> Union[VM, None]:
- return next((vm for vm in vm_list if vm.key == vm_key), None)
-
-
-def need_running_vm(func):
- @wraps(func)
- def wrapper(e):
- vm = get_vm(running_vms, e.key)
- if vm:
- try:
- status = vm.handle.command("query-status")
- logger.debug("VM Status Check - %s", status)
- except Exception as exception:
- logger.info("%s failed - VM %s %s", func.__name__, e, exception)
- else:
- return func(e)
-
- return None
- else:
- logger.info("%s failed because VM %s is not running", func.__name__, e.key)
- return None
-
- return wrapper
-
-
-def create(vm_entry: VMEntry):
- if image_storage_handler.is_vm_image_exists(vm_entry.uuid):
- # File Already exists. No Problem Continue
- logger.debug("Image for vm %s exists", vm_entry.uuid)
- else:
- vm_hdd = int(bitmath.parse_string_unsafe(vm_entry.specs["os-ssd"]).to_MB())
- if image_storage_handler.make_vm_image(src=vm_entry.image_uuid, dest=vm_entry.uuid):
- if not image_storage_handler.resize_vm_image(path=vm_entry.uuid, size=vm_hdd):
- vm_entry.status = VMStatus.error
- else:
- logger.info("New VM Created")
-
-
-def start(vm_entry: VMEntry, destination_host_key=None, migration_port=None):
- _vm = get_vm(running_vms, vm_entry.key)
-
- # VM already running. No need to proceed further.
- if _vm:
- logger.info("VM %s already running" % vm_entry.uuid)
- return
- else:
- logger.info("Trying to start %s" % vm_entry.uuid)
- if destination_host_key:
- launch_vm(vm_entry, migration=True, migration_port=migration_port,
- destination_host_key=destination_host_key)
- else:
- create(vm_entry)
- launch_vm(vm_entry)
-
-
-@need_running_vm
-def stop(vm_entry):
- vm = get_vm(running_vms, vm_entry.key)
- vm.handle.shutdown()
- if not vm.handle.is_running():
- vm_entry.add_log("Shutdown successfully")
- vm_entry.declare_stopped()
- vm_pool.put(vm_entry)
- running_vms.remove(vm)
- delete_vm_network(vm_entry)
-
-
-def delete(vm_entry):
- logger.info("Deleting VM | %s", vm_entry)
- stop(vm_entry)
-
- if image_storage_handler.is_vm_image_exists(vm_entry.uuid):
- r_status = image_storage_handler.delete_vm_image(vm_entry.uuid)
- if r_status:
- etcd_client.client.delete(vm_entry.key)
- else:
- etcd_client.client.delete(vm_entry.key)
-
-def transfer(request_event):
- # This function would run on source host i.e host on which the vm
- # is running initially. This host would be responsible for transferring
- # vm state to destination host.
-
- _host, _port = request_event.parameters["host"], request_event.parameters["port"]
- _uuid = request_event.uuid
- _destination = request_event.destination_host_key
- vm = get_vm(running_vms, join_path(env_vars.get('VM_PREFIX'), _uuid))
-
- if vm:
- tunnel = sshtunnel.SSHTunnelForwarder(
- _host,
- ssh_username=env_vars.get("ssh_username"),
- ssh_pkey=env_vars.get("ssh_pkey"),
- remote_bind_address=("127.0.0.1", _port),
- ssh_proxy_enabled=True,
- ssh_proxy=(_host, 22)
- )
- try:
- tunnel.start()
- except sshtunnel.BaseSSHTunnelForwarderError:
- logger.exception("Couldn't establish connection to (%s, 22)", _host)
- else:
- vm.handle.command(
- "migrate", uri="tcp:0.0.0.0:{}".format(tunnel.local_bind_port)
- )
-
- status = vm.handle.command("query-migrate")["status"]
- while status not in ["failed", "completed"]:
- time.sleep(2)
- status = vm.handle.command("query-migrate")["status"]
-
- with vm_pool.get_put(request_event.uuid) as source_vm:
- if status == "failed":
- source_vm.add_log("Migration Failed")
- elif status == "completed":
- # If VM is successfully migrated then shutdown the VM
- # on this host and update hostname to destination host key
- source_vm.add_log("Successfully migrated")
- source_vm.hostname = _destination
- running_vms.remove(vm)
- vm.handle.shutdown()
- source_vm.in_migration = False # VM transfer finished
- finally:
- tunnel.close()
-
-
-def launch_vm(vm_entry, migration=False, migration_port=None, destination_host_key=None):
- logger.info("Starting %s" % vm_entry.key)
-
- vm = create_vm_object(vm_entry, migration=migration, migration_port=migration_port)
- try:
- vm.handle.launch()
- except Exception:
- logger.exception("Error Occured while starting VM")
- vm.handle.shutdown()
-
- if migration:
- # We don't care whether MachineError or any other error occurred
- pass
- else:
- # Error during typical launch of a vm
- vm.handle.shutdown()
- vm_entry.declare_killed()
- vm_pool.put(vm_entry)
- else:
- vm_entry.vnc_socket = vm.vnc_socket_file.name
- running_vms.append(vm)
-
- if migration:
- vm_entry.in_migration = True
- r = RequestEntry.from_scratch(
- type=RequestType.TransferVM,
- hostname=vm_entry.hostname,
- parameters={"host": get_ipv6_address(), "port": migration_port},
- uuid=vm_entry.uuid,
- destination_host_key=destination_host_key,
- request_prefix=env_vars.get("REQUEST_PREFIX")
- )
- request_pool.put(r)
- else:
- # Typical launching of a vm
- vm_entry.status = VMStatus.running
- vm_entry.add_log("Started successfully")
-
- vm_pool.put(vm_entry)
diff --git a/ucloud/imagescanner/main.py b/ucloud/imagescanner/main.py
deleted file mode 100755
index 20ce9d5..0000000
--- a/ucloud/imagescanner/main.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import json
-import os
-import subprocess
-
-from os.path import join as join_path
-from ucloud.config import etcd_client, env_vars, image_storage_handler
-from ucloud.imagescanner import logger
-
-
-def qemu_img_type(path):
- qemu_img_info_command = ["qemu-img", "info", "--output", "json", path]
- try:
- qemu_img_info = subprocess.check_output(qemu_img_info_command)
- except Exception as e:
- logger.exception(e)
- return None
- else:
- qemu_img_info = json.loads(qemu_img_info.decode("utf-8"))
- return qemu_img_info["format"]
-
-
-def main():
- # We want to get images entries that requests images to be created
- images = etcd_client.get_prefix(env_vars.get('IMAGE_PREFIX'), value_in_json=True)
- images_to_be_created = list(filter(lambda im: im.value['status'] == 'TO_BE_CREATED', images))
-
- for image in images_to_be_created:
- try:
- image_uuid = image.key.split('/')[-1]
- image_owner = image.value['owner']
- image_filename = image.value['filename']
- image_store_name = image.value['store_name']
- image_full_path = join_path(env_vars.get('BASE_DIR'), image_owner, image_filename)
-
- image_stores = etcd_client.get_prefix(env_vars.get('IMAGE_STORE_PREFIX'), value_in_json=True)
- user_image_store = next(filter(
- lambda s, store_name=image_store_name: s.value["name"] == store_name,
- image_stores
- ))
-
- image_store_pool = user_image_store.value['attributes']['pool']
-
- except Exception as e:
- logger.exception(e)
- else:
- # At least our basic data is available
- qemu_img_convert_command = ["qemu-img", "convert", "-f", "qcow2",
- "-O", "raw", image_full_path, "image.raw"]
-
- if qemu_img_type(image_full_path) == "qcow2":
- try:
- # Convert .qcow2 to .raw
- subprocess.check_output(qemu_img_convert_command)
- except Exception as e:
- logger.exception(e)
- else:
- # Import and Protect
- r_status = image_storage_handler.import_image(src="image.raw",
- dest=image_uuid,
- protect=True)
- if r_status:
- # Everything is successfully done
- image.value["status"] = "CREATED"
- etcd_client.put(image.key, json.dumps(image.value))
-
- else:
- # The user provided image is either not found or of invalid format
- image.value["status"] = "INVALID_IMAGE"
- etcd_client.put(image.key, json.dumps(image.value))
-
- try:
- os.remove("image.raw")
- except Exception:
- pass
-
-
-if __name__ == "__main__":
- main()
diff --git a/ucloud/metadata/main.py b/ucloud/metadata/main.py
deleted file mode 100644
index e7cb33b..0000000
--- a/ucloud/metadata/main.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import os
-
-from flask import Flask, request
-from flask_restful import Resource, Api
-
-from ucloud.config import etcd_client, env_vars, vm_pool
-
-app = Flask(__name__)
-api = Api(app)
-
-
-def get_vm_entry(mac_addr):
- return next(filter(lambda vm: mac_addr in list(zip(*vm.network))[1], vm_pool.vms), None)
-
-
-# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
-def ipv62mac(ipv6):
- # remove subnet info if given
- subnet_index = ipv6.find('/')
- if subnet_index != -1:
- ipv6 = ipv6[:subnet_index]
-
- ipv6_parts = ipv6.split(':')
- mac_parts = list()
- for ipv6_part in ipv6_parts[-4:]:
- while len(ipv6_part) < 4:
- ipv6_part = '0' + ipv6_part
- mac_parts.append(ipv6_part[:2])
- mac_parts.append(ipv6_part[-2:])
-
- # modify parts to match MAC value
- mac_parts[0] = '%02x' % (int(mac_parts[0], 16) ^ 2)
- del mac_parts[4]
- del mac_parts[3]
- return ':'.join(mac_parts)
-
-
-class Root(Resource):
- @staticmethod
- def get():
- data = get_vm_entry(ipv62mac(request.remote_addr))
-
- if not data:
- return {'message': 'Metadata for such VM does not exists.'}, 404
- else:
-
- # {env_vars.get('USER_PREFIX')}/{realm}/{name}/key
- etcd_key = os.path.join(env_vars.get('USER_PREFIX'), data.value['owner_realm'],
- data.value['owner'], 'key')
- etcd_entry = etcd_client.get_prefix(etcd_key, value_in_json=True)
- user_personal_ssh_keys = [key.value for key in etcd_entry]
- data.value['metadata']['ssh-keys'] += user_personal_ssh_keys
- return data.value['metadata'], 200
-
- @staticmethod
- def post():
- return {'message': 'Previous Implementation is deprecated.'}
- # data = etcd_client.get("/v1/metadata/{}".format(request.remote_addr), value_in_json=True)
- # print(data)
- # if data:
- # for k in request.json:
- # if k not in data.value:
- # data.value[k] = request.json[k]
- # if k.endswith("-list"):
- # data.value[k] = [request.json[k]]
- # else:
- # if k.endswith("-list"):
- # data.value[k].append(request.json[k])
- # else:
- # data.value[k] = request.json[k]
- # etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
- # data.value, value_in_json=True)
- # else:
- # data = {}
- # for k in request.json:
- # data[k] = request.json[k]
- # if k.endswith("-list"):
- # data[k] = [request.json[k]]
- # etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
- # data, value_in_json=True)
-
-
-api.add_resource(Root, '/')
-
-
-def main():
- app.run(debug=True, host="::", port="80")
-
-
-if __name__ == '__main__':
- main()
diff --git a/ucloud/sanity_checks.py b/ucloud/sanity_checks.py
deleted file mode 100644
index 143f767..0000000
--- a/ucloud/sanity_checks.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import sys
-import subprocess as sp
-
-from os.path import isdir
-from ucloud.config import env_vars
-
-
-def check():
- #########################
- # ucloud-image-scanner #
- #########################
- if env_vars.get('STORAGE_BACKEND') == 'filesystem' and not isdir(env_vars.get('IMAGE_DIR')):
- print("You have set STORAGE_BACKEND to filesystem. So,"
- "the {} must exists. But, it don't".format(env_vars.get('IMAGE_DIR')))
- sys.exit(1)
-
- try:
- sp.check_output(['which', 'qemu-img'])
- except Exception:
- print("qemu-img missing")
- sys.exit(1)
-
- ###############
- # ucloud-host #
- ###############
-
- if env_vars.get('STORAGE_BACKEND') == 'filesystem' and not isdir(env_vars.get('VM_DIR')):
- print("You have set STORAGE_BACKEND to filesystem. So, the vm directory mentioned"
- " in .env file must exists. But, it don't.")
- sys.exit(1)
-
-if __name__ == "__main__":
- check()
\ No newline at end of file
diff --git a/ucloud/scheduler/__init__.py b/ucloud/scheduler/__init__.py
deleted file mode 100644
index 95e1be0..0000000
--- a/ucloud/scheduler/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import logging
-
-logger = logging.getLogger(__name__)
\ No newline at end of file
diff --git a/ucloud/scheduler/main.py b/ucloud/scheduler/main.py
deleted file mode 100755
index e2c975a..0000000
--- a/ucloud/scheduler/main.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# TODO
-# 1. send an email to an email address defined by env['admin-email']
-# if resources are finished
-# 2. Introduce a status endpoint of the scheduler -
-# maybe expose a prometheus compatible output
-
-from ucloud.common.request import RequestEntry, RequestType
-from ucloud.config import etcd_client
-from ucloud.config import host_pool, request_pool, vm_pool, env_vars
-from .helper import (get_suitable_host, dead_host_mitigation, dead_host_detection,
- assign_host, NoSuitableHostFound)
-from . import logger
-
-
-def main():
- logger.info("%s SESSION STARTED %s", '*' * 5, '*' * 5)
-
- pending_vms = []
-
- for request_iterator in [
- etcd_client.get_prefix(env_vars.get('REQUEST_PREFIX'), value_in_json=True),
- etcd_client.watch_prefix(env_vars.get('REQUEST_PREFIX'), timeout=5, value_in_json=True),
- ]:
- for request_event in request_iterator:
- request_entry = RequestEntry(request_event)
- # Never Run time critical mechanism inside timeout
- # mechanism because timeout mechanism only comes
- # when no other event is happening. It means under
- # heavy load there would not be a timeout event.
- if request_entry.type == "TIMEOUT":
-
- # Detect hosts that are dead and set their status
- # to "DEAD", and their VMs' status to "KILLED"
- dead_hosts = dead_host_detection()
- if dead_hosts:
- logger.debug("Dead hosts: %s", dead_hosts)
- dead_host_mitigation(dead_hosts)
-
- # If there are VMs that weren't assigned a host
- # because there wasn't a host available which
- # meets requirement of that VM then we would
- # create a new ScheduleVM request for that VM
- # on our behalf.
- while pending_vms:
- pending_vm_entry = pending_vms.pop()
- r = RequestEntry.from_scratch(type="ScheduleVM",
- uuid=pending_vm_entry.uuid,
- hostname=pending_vm_entry.hostname,
- request_prefix=env_vars.get("REQUEST_PREFIX"))
- request_pool.put(r)
-
- elif request_entry.type == RequestType.ScheduleVM:
- logger.debug("%s, %s", request_entry.key, request_entry.value)
-
- vm_entry = vm_pool.get(request_entry.uuid)
- if vm_entry is None:
- logger.info("Trying to act on {} but it is deleted".format(request_entry.uuid))
- continue
- etcd_client.client.delete(request_entry.key) # consume Request
-
- # If the Request is about a VM which is labelled as "migration"
- # and has a destination
- if hasattr(request_entry, "migration") and request_entry.migration \
- and hasattr(request_entry, "destination") and request_entry.destination:
- try:
- get_suitable_host(vm_specs=vm_entry.specs,
- hosts=[host_pool.get(request_entry.destination)])
- except NoSuitableHostFound:
- logger.info("Requested destination host doesn't have enough capacity"
- "to hold %s" % vm_entry.uuid)
- else:
- r = RequestEntry.from_scratch(type=RequestType.InitVMMigration,
- uuid=request_entry.uuid,
- destination=request_entry.destination,
- request_prefix=env_vars.get("REQUEST_PREFIX"))
- request_pool.put(r)
-
- # If the Request is about a VM that just want to get started/created
- else:
- # assign_host only returns None when we couldn't be able to assign
- # a host to a VM because of resource constraints
- try:
- assign_host(vm_entry)
- except NoSuitableHostFound:
- vm_entry.add_log("Can't schedule VM. No Resource Left.")
- vm_pool.put(vm_entry)
-
- pending_vms.append(vm_entry)
- logger.info("No Resource Left. Emailing admin....")
-
-
-if __name__ == "__main__":
- main()
diff --git a/uncloud/__init__.py b/uncloud/__init__.py
new file mode 100644
index 0000000..2920f47
--- /dev/null
+++ b/uncloud/__init__.py
@@ -0,0 +1,2 @@
+class UncloudException(Exception):
+ pass
diff --git a/ucloud/api/README.md b/uncloud/api/README.md
similarity index 100%
rename from ucloud/api/README.md
rename to uncloud/api/README.md
diff --git a/ucloud/api/__init__.py b/uncloud/api/__init__.py
similarity index 100%
rename from ucloud/api/__init__.py
rename to uncloud/api/__init__.py
diff --git a/ucloud/api/common_fields.py b/uncloud/api/common_fields.py
similarity index 72%
rename from ucloud/api/common_fields.py
rename to uncloud/api/common_fields.py
index e9903ac..ba9fb37 100755
--- a/ucloud/api/common_fields.py
+++ b/uncloud/api/common_fields.py
@@ -1,6 +1,6 @@
import os
-from ucloud.config import etcd_client, env_vars
+from uncloud.common.shared import shared
class Optional:
@@ -19,12 +19,16 @@ class Field:
def is_valid(self):
if self.value == KeyError:
- self.add_error("'{}' field is a required field".format(self.name))
+ self.add_error(
+ "'{}' field is a required field".format(self.name)
+ )
else:
if isinstance(self.value, Optional):
pass
elif not isinstance(self.value, self.type):
- self.add_error("Incorrect Type for '{}' field".format(self.name))
+ self.add_error(
+ "Incorrect Type for '{}' field".format(self.name)
+ )
else:
self.validation()
@@ -48,6 +52,8 @@ class VmUUIDField(Field):
self.validation = self.vm_uuid_validation
def vm_uuid_validation(self):
- r = etcd_client.get(os.path.join(env_vars.get('VM_PREFIX'), self.uuid))
+ r = shared.etcd_client.get(
+ os.path.join(shared.settings["etcd"]["vm_prefix"], self.uuid)
+ )
if not r:
self.add_error("VM with uuid {} does not exists".format(self.uuid))
diff --git a/uncloud/api/create_image_store.py b/uncloud/api/create_image_store.py
new file mode 100755
index 0000000..90e0f92
--- /dev/null
+++ b/uncloud/api/create_image_store.py
@@ -0,0 +1,19 @@
+import json
+import os
+
+from uuid import uuid4
+
+from uncloud.common.shared import shared
+
+data = {
+ 'is_public': True,
+ 'type': 'ceph',
+ 'name': 'images',
+ 'description': 'first ever public image-store',
+ 'attributes': {'list': [], 'key': [], 'pool': 'images'},
+}
+
+shared.etcd_client.put(
+ os.path.join(shared.settings['etcd']['image_store_prefix'], uuid4().hex),
+ json.dumps(data),
+)
diff --git a/ucloud/api/helper.py b/uncloud/api/helper.py
similarity index 55%
rename from ucloud/api/helper.py
rename to uncloud/api/helper.py
index 63d2f90..8ceb3a6 100755
--- a/ucloud/api/helper.py
+++ b/uncloud/api/helper.py
@@ -1,48 +1,51 @@
import binascii
import ipaddress
import random
-import subprocess as sp
-
+import logging
import requests
from pyotp import TOTP
-from ucloud.config import vm_pool, env_vars
+from uncloud.common.shared import shared
+
+logger = logging.getLogger(__name__)
def check_otp(name, realm, token):
try:
data = {
- "auth_name": env_vars.get("AUTH_NAME"),
- "auth_token": TOTP(env_vars.get("AUTH_SEED")).now(),
- "auth_realm": env_vars.get("AUTH_REALM"),
+ "auth_name": shared.settings["otp"]["auth_name"],
+ "auth_token": TOTP(shared.settings["otp"]["auth_seed"]).now(),
+ "auth_realm": shared.settings["otp"]["auth_realm"],
"name": name,
"realm": realm,
"token": token,
}
- except binascii.Error:
+ except binascii.Error as err:
+ logger.error(
+ "Cannot compute OTP for seed: {}".format(
+ shared.settings["otp"]["auth_seed"]
+ )
+ )
return 400
response = requests.post(
- "{OTP_SERVER}{OTP_VERIFY_ENDPOINT}".format(
- OTP_SERVER=env_vars.get("OTP_SERVER", ""),
- OTP_VERIFY_ENDPOINT=env_vars.get("OTP_VERIFY_ENDPOINT", "verify/"),
- ),
- json=data,
+ shared.settings["otp"]["verification_controller_url"], json=data
)
return response.status_code
def resolve_vm_name(name, owner):
"""Return UUID of Virtual Machine of name == name and owner == owner
-
+
Input: name of vm, owner of vm.
Output: uuid of vm if found otherwise None
"""
result = next(
filter(
- lambda vm: vm.value["owner"] == owner and vm.value["name"] == name,
- vm_pool.vms,
+ lambda vm: vm.value["owner"] == owner
+ and vm.value["name"] == name,
+ shared.vm_pool.vms,
),
None,
)
@@ -54,7 +57,7 @@ def resolve_vm_name(name, owner):
def resolve_image_name(name, etcd_client):
"""Return image uuid given its name and its store
-
+
* If the provided name is not in correct format
i.e {store_name}:{image_name} return ValueError
* If no such image found then return KeyError
@@ -70,26 +73,35 @@ def resolve_image_name(name, etcd_client):
"""
Examples, where it would work and where it would raise exception
"images:alpine" --> ["images", "alpine"]
-
+
"images" --> ["images"] it would raise Exception as non enough value to unpack
-
+
"images:alpine:meow" --> ["images", "alpine", "meow"] it would raise Exception
as too many values to unpack
"""
store_name, image_name = store_name_and_image_name
except Exception:
- raise ValueError("Image name not in correct format i.e {store_name}:{image_name}")
+ raise ValueError(
+ "Image name not in correct format i.e {store_name}:{image_name}"
+ )
- images = etcd_client.get_prefix(env_vars.get('IMAGE_PREFIX'), value_in_json=True)
+ images = etcd_client.get_prefix(
+ shared.settings["etcd"]["image_prefix"], value_in_json=True
+ )
# Try to find image with name == image_name and store_name == store_name
try:
- image = next(filter(lambda im: im.value['name'] == image_name
- and im.value['store_name'] == store_name, images))
+ image = next(
+ filter(
+ lambda im: im.value["name"] == image_name
+ and im.value["store_name"] == store_name,
+ images,
+ )
+ )
except StopIteration:
raise KeyError("No image with name {} found.".format(name))
else:
- image_uuid = image.key.split('/')[-1]
+ image_uuid = image.key.split("/")[-1]
return image_uuid
@@ -98,7 +110,7 @@ def random_bytes(num=6):
return [random.randrange(256) for _ in range(num)]
-def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='%02x'):
+def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"):
mac = random_bytes()
if oui:
if type(oui) == str:
@@ -116,36 +128,6 @@ def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='
return separator.join(byte_fmt % b for b in mac)
-def get_ip_addr(mac_address, device):
- """Return IP address of a device provided its mac address / link local address
- and the device with which it is connected.
-
- For Example, if we call get_ip_addr(mac_address="52:54:00:12:34:56", device="br0")
- the following two scenarios can happen
- 1. It would return None if we can't be able to find device whose mac_address is equal
- to the arg:mac_address or the mentioned arg:device does not exists or the ip address
- we found is local.
- 2. It would return ip_address of device whose mac_address is equal to arg:mac_address
- and is connected/neighbor of arg:device
- """
- try:
- output = sp.check_output(['ip', '-6', 'neigh', 'show', 'dev', device], stderr=sp.PIPE)
- except sp.CalledProcessError:
- return None
- else:
- result = []
- output = output.strip().decode("utf-8")
- output = output.split("\n")
- for entry in output:
- entry = entry.split()
- if entry:
- ip = ipaddress.ip_address(entry[0])
- mac = entry[2]
- if ip.is_global and mac_address == mac:
- result.append(ip)
- return result
-
-
def mac2ipv6(mac, prefix):
# only accept MACs separated by a colon
parts = mac.split(":")
@@ -158,8 +140,9 @@ def mac2ipv6(mac, prefix):
# format output
ipv6_parts = [str(0)] * 4
for i in range(0, len(parts), 2):
- ipv6_parts.append("".join(parts[i:i + 2]))
+ ipv6_parts.append("".join(parts[i : i + 2]))
lower_part = ipaddress.IPv6Address(":".join(ipv6_parts))
prefix = ipaddress.IPv6Address(prefix)
return str(prefix + int(lower_part))
+
diff --git a/uncloud/api/main.py b/uncloud/api/main.py
new file mode 100644
index 0000000..73e8e21
--- /dev/null
+++ b/uncloud/api/main.py
@@ -0,0 +1,600 @@
+import json
+import pynetbox
+import logging
+import argparse
+
+from uuid import uuid4
+from os.path import join as join_path
+
+from flask import Flask, request
+from flask_restful import Resource, Api
+from werkzeug.exceptions import HTTPException
+
+from uncloud.common.shared import shared
+
+from uncloud.common import counters
+from uncloud.common.vm import VMStatus
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.api import schemas
+from uncloud.api.helper import generate_mac, mac2ipv6
+from uncloud import UncloudException
+
+logger = logging.getLogger(__name__)
+
+app = Flask(__name__)
+api = Api(app)
+app.logger.handlers.clear()
+
+arg_parser = argparse.ArgumentParser('api', add_help=False)
+arg_parser.add_argument('--port', '-p')
+
+
+@app.errorhandler(Exception)
+def handle_exception(e):
+ app.logger.error(e)
+ # pass through HTTP errors
+ if isinstance(e, HTTPException):
+ return e
+
+ # now you're handling non-HTTP exceptions only
+ return {'message': 'Server Error'}, 500
+
+
+class CreateVM(Resource):
+ """API Request to Handle Creation of VM"""
+
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateVMSchema(data)
+ if validator.is_valid():
+ vm_uuid = uuid4().hex
+ vm_key = join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
+ specs = {
+ 'cpu': validator.specs['cpu'],
+ 'ram': validator.specs['ram'],
+ 'os-ssd': validator.specs['os-ssd'],
+ 'hdd': validator.specs['hdd'],
+ }
+ macs = [generate_mac() for _ in range(len(data['network']))]
+ tap_ids = [
+ counters.increment_etcd_counter(
+ shared.etcd_client, shared.settings['etcd']['tap_counter']
+ )
+ for _ in range(len(data['network']))
+ ]
+ vm_entry = {
+ 'name': data['vm_name'],
+ 'owner': data['name'],
+ 'owner_realm': data['realm'],
+ 'specs': specs,
+ 'hostname': '',
+ 'status': VMStatus.stopped,
+ 'image_uuid': validator.image_uuid,
+ 'log': [],
+ 'vnc_socket': '',
+ 'network': list(zip(data['network'], macs, tap_ids)),
+ 'metadata': {'ssh-keys': []},
+ 'in_migration': False,
+ }
+ shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
+
+ # Create ScheduleVM Request
+ r = RequestEntry.from_scratch(
+ type=RequestType.ScheduleVM,
+ uuid=vm_uuid,
+ request_prefix=shared.settings['etcd']['request_prefix'],
+ )
+ shared.request_pool.put(r)
+
+ return {'message': 'VM Creation Queued'}, 200
+ return validator.get_errors(), 400
+
+
+class VmStatus(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.VMStatusSchema(data)
+ if validator.is_valid():
+ vm = shared.vm_pool.get(
+ join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
+ )
+ vm_value = vm.value.copy()
+ vm_value['ip'] = []
+ for network_mac_and_tap in vm.network:
+ network_name, mac, tap = network_mac_and_tap
+ network = shared.etcd_client.get(
+ join_path(
+ shared.settings['etcd']['network_prefix'],
+ data['name'],
+ network_name,
+ ),
+ value_in_json=True,
+ )
+ ipv6_addr = (
+ network.value.get('ipv6').split('::')[0] + '::'
+ )
+ vm_value['ip'].append(mac2ipv6(mac, ipv6_addr))
+ vm.value = vm_value
+ return vm.value
+ else:
+ return validator.get_errors(), 400
+
+
+class CreateImage(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateImageSchema(data)
+ if validator.is_valid():
+ file_entry = shared.etcd_client.get(
+ join_path(shared.settings['etcd']['file_prefix'], data['uuid'])
+ )
+ file_entry_value = json.loads(file_entry.value)
+
+ image_entry_json = {
+ 'status': 'TO_BE_CREATED',
+ 'owner': file_entry_value['owner'],
+ 'filename': file_entry_value['filename'],
+ 'name': data['name'],
+ 'store_name': data['image_store'],
+ 'visibility': 'public',
+ }
+ shared.etcd_client.put(
+ join_path(
+ shared.settings['etcd']['image_prefix'], data['uuid']
+ ),
+ json.dumps(image_entry_json),
+ )
+
+ return {'message': 'Image queued for creation.'}
+ return validator.get_errors(), 400
+
+
+class ListPublicImages(Resource):
+ @staticmethod
+ def get():
+ images = shared.etcd_client.get_prefix(
+ shared.settings['etcd']['image_prefix'], value_in_json=True
+ )
+ r = {'images': []}
+ for image in images:
+ image_key = '{}:{}'.format(
+ image.value['store_name'], image.value['name']
+ )
+ r['images'].append(
+ {'name': image_key, 'status': image.value['status']}
+ )
+ return r, 200
+
+
+class VMAction(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.VmActionSchema(data)
+
+ if validator.is_valid():
+ vm_entry = shared.vm_pool.get(
+ join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
+ )
+ action = data['action']
+
+ if action == 'start':
+ action = 'schedule'
+
+ if action == 'delete' and vm_entry.hostname == '':
+ if shared.storage_handler.is_vm_image_exists(
+ vm_entry.uuid
+ ):
+ r_status = shared.storage_handler.delete_vm_image(
+ vm_entry.uuid
+ )
+ if r_status:
+ shared.etcd_client.client.delete(vm_entry.key)
+ return {'message': 'VM successfully deleted'}
+ else:
+ logger.error(
+ 'Some Error Occurred while deleting VM'
+ )
+                    return {'message': 'VM deletion unsuccessful'}
+ else:
+ shared.etcd_client.client.delete(vm_entry.key)
+ return {'message': 'VM successfully deleted'}
+
+ r = RequestEntry.from_scratch(
+ type='{}VM'.format(action.title()),
+ uuid=data['uuid'],
+ hostname=vm_entry.hostname,
+ request_prefix=shared.settings['etcd']['request_prefix'],
+ )
+ shared.request_pool.put(r)
+ return (
+ {'message': 'VM {} Queued'.format(action.title())},
+ 200,
+ )
+ else:
+ return validator.get_errors(), 400
+
+
+class VMMigration(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.VmMigrationSchema(data)
+
+ if validator.is_valid():
+ vm = shared.vm_pool.get(data['uuid'])
+ r = RequestEntry.from_scratch(
+ type=RequestType.InitVMMigration,
+ uuid=vm.uuid,
+ hostname=join_path(
+ shared.settings['etcd']['host_prefix'],
+ validator.destination.value,
+ ),
+ request_prefix=shared.settings['etcd']['request_prefix'],
+ )
+
+ shared.request_pool.put(r)
+ return (
+ {'message': 'VM Migration Initialization Queued'},
+ 200,
+ )
+ else:
+ return validator.get_errors(), 400
+
+
+class ListUserVM(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.OTPSchema(data)
+
+ if validator.is_valid():
+ vms = shared.etcd_client.get_prefix(
+ shared.settings['etcd']['vm_prefix'], value_in_json=True
+ )
+ return_vms = []
+ user_vms = filter(
+ lambda v: v.value['owner'] == data['name'], vms
+ )
+ for vm in user_vms:
+ return_vms.append(
+ {
+ 'name': vm.value['name'],
+ 'vm_uuid': vm.key.split('/')[-1],
+ 'specs': vm.value['specs'],
+ 'status': vm.value['status'],
+ 'hostname': vm.value['hostname'],
+ 'vnc_socket': vm.value.get('vnc_socket', None),
+ }
+ )
+ if return_vms:
+ return {'message': return_vms}, 200
+ return {'message': 'No VM found'}, 404
+
+ else:
+ return validator.get_errors(), 400
+
+
+class ListUserFiles(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.OTPSchema(data)
+
+ if validator.is_valid():
+ files = shared.etcd_client.get_prefix(
+ shared.settings['etcd']['file_prefix'], value_in_json=True
+ )
+ return_files = []
+ user_files = [f for f in files if f.value['owner'] == data['name']]
+ for file in user_files:
+ file_uuid = file.key.split('/')[-1]
+ file = file.value
+ file['uuid'] = file_uuid
+
+ file.pop('sha512sum', None)
+ file.pop('owner', None)
+
+ return_files.append(file)
+ return {'message': return_files}, 200
+ else:
+ return validator.get_errors(), 400
+
+
+class CreateHost(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateHostSchema(data)
+ if validator.is_valid():
+ host_key = join_path(
+ shared.settings['etcd']['host_prefix'], uuid4().hex
+ )
+ host_entry = {
+ 'specs': data['specs'],
+ 'hostname': data['hostname'],
+ 'status': 'DEAD',
+ 'last_heartbeat': '',
+ }
+ shared.etcd_client.put(
+ host_key, host_entry, value_in_json=True
+ )
+
+ return {'message': 'Host Created'}, 200
+
+ return validator.get_errors(), 400
+
+
+class ListHost(Resource):
+ @staticmethod
+ def get():
+ hosts = shared.host_pool.hosts
+ r = {
+ host.key: {
+ 'status': host.status,
+ 'specs': host.specs,
+ 'hostname': host.hostname,
+ }
+ for host in hosts
+ }
+ return r, 200
+
+
+class GetSSHKeys(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.GetSSHSchema(data)
+ if validator.is_valid():
+ if not validator.key_name.value:
+
+ # {user_prefix}/{realm}/{name}/key/
+ etcd_key = join_path(
+ shared.settings['etcd']['user_prefix'],
+ data['realm'],
+ data['name'],
+ 'key',
+ )
+ etcd_entry = shared.etcd_client.get_prefix(
+ etcd_key, value_in_json=True
+ )
+
+ keys = {
+ key.key.split('/')[-1]: key.value
+ for key in etcd_entry
+ }
+ return {'keys': keys}
+ else:
+
+ # {user_prefix}/{realm}/{name}/key/{key_name}
+ etcd_key = join_path(
+ shared.settings['etcd']['user_prefix'],
+ data['realm'],
+ data['name'],
+ 'key',
+ data['key_name'],
+ )
+ etcd_entry = shared.etcd_client.get(
+ etcd_key, value_in_json=True
+ )
+
+ if etcd_entry:
+ return {
+ 'keys': {
+ etcd_entry.key.split('/')[
+ -1
+ ]: etcd_entry.value
+ }
+ }
+ else:
+ return {'keys': {}}
+ else:
+ return validator.get_errors(), 400
+
+
+class AddSSHKey(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.AddSSHSchema(data)
+ if validator.is_valid():
+
+ # {user_prefix}/{realm}/{name}/key/{key_name}
+ etcd_key = join_path(
+ shared.settings['etcd']['user_prefix'],
+ data['realm'],
+ data['name'],
+ 'key',
+ data['key_name'],
+ )
+ etcd_entry = shared.etcd_client.get(
+ etcd_key, value_in_json=True
+ )
+ if etcd_entry:
+ return {
+ 'message': 'Key with name "{}" already exists'.format(
+ data['key_name']
+ )
+ }
+ else:
+                # Key not found. It implies the user hasn't added any key yet.
+ shared.etcd_client.put(
+ etcd_key, data['key'], value_in_json=True
+ )
+ return {'message': 'Key added successfully'}
+ else:
+ return validator.get_errors(), 400
+
+
+class RemoveSSHKey(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.RemoveSSHSchema(data)
+ if validator.is_valid():
+
+ # {user_prefix}/{realm}/{name}/key/{key_name}
+ etcd_key = join_path(
+ shared.settings['etcd']['user_prefix'],
+ data['realm'],
+ data['name'],
+ 'key',
+ data['key_name'],
+ )
+ etcd_entry = shared.etcd_client.get(
+ etcd_key, value_in_json=True
+ )
+ if etcd_entry:
+ shared.etcd_client.client.delete(etcd_key)
+ return {'message': 'Key successfully removed.'}
+ else:
+ return {
+ 'message': 'No Key with name "{}" Exists at all.'.format(
+ data['key_name']
+ )
+ }
+ else:
+ return validator.get_errors(), 400
+
+
+class CreateNetwork(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateNetwork(data)
+
+ if validator.is_valid():
+
+ network_entry = {
+ 'id': counters.increment_etcd_counter(
+ shared.etcd_client, shared.settings['etcd']['vxlan_counter']
+ ),
+ 'type': data['type'],
+ }
+ if validator.user.value:
+ try:
+ nb = pynetbox.api(
+ url=shared.settings['netbox']['url'],
+ token=shared.settings['netbox']['token'],
+ )
+ nb_prefix = nb.ipam.prefixes.get(
+ prefix=shared.settings['network']['prefix']
+ )
+ prefix = nb_prefix.available_prefixes.create(
+ data={
+ 'prefix_length': int(
+ shared.settings['network']['prefix_length']
+ ),
+ 'description': '{}\'s network "{}"'.format(
+ data['name'], data['network_name']
+ ),
+ 'is_pool': True,
+ }
+ )
+ except Exception as err:
+ app.logger.error(err)
+ return {
+                        'message': 'Error occurred while creating network.'
+ }
+ else:
+ network_entry['ipv6'] = prefix['prefix']
+ else:
+ network_entry['ipv6'] = 'fd00::/64'
+
+ network_key = join_path(
+ shared.settings['etcd']['network_prefix'],
+ data['name'],
+ data['network_name'],
+ )
+ shared.etcd_client.put(
+ network_key, network_entry, value_in_json=True
+ )
+ return {'message': 'Network successfully added.'}
+ else:
+ return validator.get_errors(), 400
+
+
+class ListUserNetwork(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.OTPSchema(data)
+
+ if validator.is_valid():
+ prefix = join_path(
+ shared.settings['etcd']['network_prefix'], data['name']
+ )
+ networks = shared.etcd_client.get_prefix(
+ prefix, value_in_json=True
+ )
+ user_networks = []
+ for net in networks:
+ net.value['name'] = net.key.split('/')[-1]
+ user_networks.append(net.value)
+ return {'networks': user_networks}, 200
+ else:
+ return validator.get_errors(), 400
+
+
+api.add_resource(CreateVM, '/vm/create')
+api.add_resource(VmStatus, '/vm/status')
+
+api.add_resource(VMAction, '/vm/action')
+api.add_resource(VMMigration, '/vm/migrate')
+
+api.add_resource(CreateImage, '/image/create')
+api.add_resource(ListPublicImages, '/image/list-public')
+
+api.add_resource(ListUserVM, '/user/vms')
+api.add_resource(ListUserFiles, '/user/files')
+api.add_resource(ListUserNetwork, '/user/networks')
+
+api.add_resource(AddSSHKey, '/user/add-ssh')
+api.add_resource(RemoveSSHKey, '/user/remove-ssh')
+api.add_resource(GetSSHKeys, '/user/get-ssh')
+
+api.add_resource(CreateHost, '/host/create')
+api.add_resource(ListHost, '/host/list')
+
+api.add_resource(CreateNetwork, '/network/create')
+
+
+def main(arguments):
+ debug = arguments['debug']
+ port = arguments['port']
+
+ try:
+ image_stores = list(
+ shared.etcd_client.get_prefix(
+ shared.settings['etcd']['image_store_prefix'], value_in_json=True
+ )
+ )
+ except KeyError:
+ image_stores = False
+
+ # Do not inject default values that might be very wrong
+ # fail when required, not before
+ #
+ # if not image_stores:
+ # data = {
+ # 'is_public': True,
+ # 'type': 'ceph',
+ # 'name': 'images',
+ # 'description': 'first ever public image-store',
+ # 'attributes': {'list': [], 'key': [], 'pool': 'images'},
+ # }
+
+ # shared.etcd_client.put(
+ # join_path(
+ # shared.settings['etcd']['image_store_prefix'], uuid4().hex
+ # ),
+ # json.dumps(data),
+ # )
+
+ try:
+ app.run(host='::', port=port, debug=debug)
+ except OSError as e:
+ raise UncloudException('Failed to start Flask: {}'.format(e))
diff --git a/ucloud/api/schemas.py b/uncloud/api/schemas.py
similarity index 58%
rename from ucloud/api/schemas.py
rename to uncloud/api/schemas.py
index c4f60ca..87f20c9 100755
--- a/ucloud/api/schemas.py
+++ b/uncloud/api/schemas.py
@@ -1,6 +1,6 @@
"""
This module contain classes thats validates and intercept/modify
-data coming from ucloud-cli (user)
+data coming from uncloud-cli (user)
It was primarily developed as an alternative to argument parser
of Flask_Restful which is going to be deprecated. I also tried
@@ -19,10 +19,10 @@ import os
import bitmath
-from ucloud.common.host import HostStatus
-from ucloud.common.vm import VMStatus
-from ucloud.config import etcd_client, env_vars, vm_pool, host_pool
-from . import helper
+from uncloud.common.host import HostStatus
+from uncloud.common.vm import VMStatus
+from uncloud.common.shared import shared
+from . import helper, logger
from .common_fields import Field, VmUUIDField
from .helper import check_otp, resolve_vm_name
@@ -79,7 +79,12 @@ class OTPSchema(BaseSchema):
super().__init__(data=data, fields=_fields)
def validation(self):
- if check_otp(self.name.value, self.realm.value, self.token.value) != 200:
+ if (
+ check_otp(
+ self.name.value, self.realm.value, self.token.value
+ )
+ != 200
+ ):
self.add_error("Wrong Credentials")
@@ -91,7 +96,9 @@ class CreateImageSchema(BaseSchema):
# Fields
self.uuid = Field("uuid", str, data.get("uuid", KeyError))
self.name = Field("name", str, data.get("name", KeyError))
- self.image_store = Field("image_store", str, data.get("image_store", KeyError))
+ self.image_store = Field(
+ "image_store", str, data.get("image_store", KeyError)
+ )
# Validations
self.uuid.validation = self.file_uuid_validation
@@ -102,34 +109,51 @@ class CreateImageSchema(BaseSchema):
super().__init__(data, fields)
def file_uuid_validation(self):
- file_entry = etcd_client.get(os.path.join(env_vars.get('FILE_PREFIX'), self.uuid.value))
+ file_entry = shared.etcd_client.get(
+ os.path.join(
+                shared.settings["etcd"]["file_prefix"], self.uuid.value
+ )
+ )
if file_entry is None:
self.add_error(
- "Image File with uuid '{}' Not Found".format(self.uuid.value)
+ "Image File with uuid '{}' Not Found".format(
+ self.uuid.value
+ )
)
def image_store_name_validation(self):
- image_stores = list(etcd_client.get_prefix(env_vars.get('IMAGE_STORE_PREFIX')))
+ image_stores = list(
+ shared.etcd_client.get_prefix(
+                shared.settings["etcd"]["image_store_prefix"]
+ )
+ )
image_store = next(
filter(
- lambda s: json.loads(s.value)["name"] == self.image_store.value,
+ lambda s: json.loads(s.value)["name"]
+ == self.image_store.value,
image_stores,
),
None,
)
if not image_store:
- self.add_error("Store '{}' does not exists".format(self.image_store.value))
+ self.add_error(
+                "Store '{}' does not exist".format(
+ self.image_store.value
+ )
+ )
# Host Operations
+
class CreateHostSchema(OTPSchema):
def __init__(self, data):
- self.parsed_specs = {}
# Fields
self.specs = Field("specs", dict, data.get("specs", KeyError))
- self.hostname = Field("hostname", str, data.get("hostname", KeyError))
+ self.hostname = Field(
+ "hostname", str, data.get("hostname", KeyError)
+ )
# Validation
self.specs.validation = self.specs_validation
@@ -141,22 +165,28 @@ class CreateHostSchema(OTPSchema):
def specs_validation(self):
ALLOWED_BASE = 10
- _cpu = self.specs.value.get('cpu', KeyError)
- _ram = self.specs.value.get('ram', KeyError)
- _os_ssd = self.specs.value.get('os-ssd', KeyError)
- _hdd = self.specs.value.get('hdd', KeyError)
+ _cpu = self.specs.value.get("cpu", KeyError)
+ _ram = self.specs.value.get("ram", KeyError)
+ _os_ssd = self.specs.value.get("os-ssd", KeyError)
+ _hdd = self.specs.value.get("hdd", KeyError)
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
- self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
+ self.add_error(
+ "You must specify CPU, RAM and OS-SSD in your specs"
+ )
return None
try:
parsed_ram = bitmath.parse_string_unsafe(_ram)
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
if parsed_ram.base != ALLOWED_BASE:
- self.add_error("Your specified RAM is not in correct units")
+ self.add_error(
+ "Your specified RAM is not in correct units"
+ )
if parsed_os_ssd.base != ALLOWED_BASE:
- self.add_error("Your specified OS-SSD is not in correct units")
+ self.add_error(
+ "Your specified OS-SSD is not in correct units"
+ )
if _cpu < 1:
self.add_error("CPU must be atleast 1")
@@ -171,7 +201,9 @@ class CreateHostSchema(OTPSchema):
for hdd in _hdd:
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
if _parsed_hdd.base != ALLOWED_BASE:
- self.add_error("Your specified HDD is not in correct units")
+ self.add_error(
+ "Your specified HDD is not in correct units"
+ )
break
else:
parsed_hdd.append(str(_parsed_hdd))
@@ -182,15 +214,17 @@ class CreateHostSchema(OTPSchema):
else:
if self.get_errors():
self.specs = {
- 'cpu': _cpu,
- 'ram': str(parsed_ram),
- 'os-ssd': str(parsed_os_ssd),
- 'hdd': parsed_hdd
+ "cpu": _cpu,
+ "ram": str(parsed_ram),
+ "os-ssd": str(parsed_os_ssd),
+ "hdd": parsed_hdd,
}
def validation(self):
if self.realm.value != "ungleich-admin":
- self.add_error("Invalid Credentials/Insufficient Permission")
+ self.add_error(
+ "Invalid Credentials/Insufficient Permission"
+ )
# VM Operations
@@ -198,13 +232,15 @@ class CreateHostSchema(OTPSchema):
class CreateVMSchema(OTPSchema):
def __init__(self, data):
- self.parsed_specs = {}
-
# Fields
self.specs = Field("specs", dict, data.get("specs", KeyError))
- self.vm_name = Field("vm_name", str, data.get("vm_name", KeyError))
+ self.vm_name = Field(
+ "vm_name", str, data.get("vm_name", KeyError)
+ )
self.image = Field("image", str, data.get("image", KeyError))
- self.network = Field("network", list, data.get("network", KeyError))
+ self.network = Field(
+ "network", list, data.get("network", KeyError)
+ )
# Validation
self.image.validation = self.image_validation
@@ -218,16 +254,25 @@ class CreateVMSchema(OTPSchema):
def image_validation(self):
try:
- image_uuid = helper.resolve_image_name(self.image.value, etcd_client)
+ image_uuid = helper.resolve_image_name(
+ self.image.value, shared.etcd_client
+ )
except Exception as e:
+ logger.exception(
+ "Cannot resolve image name = %s", self.image.value
+ )
self.add_error(str(e))
else:
self.image_uuid = image_uuid
def vm_name_validation(self):
- if resolve_vm_name(name=self.vm_name.value, owner=self.name.value):
+ if resolve_vm_name(
+ name=self.vm_name.value, owner=self.name.value
+ ):
self.add_error(
- 'VM with same name "{}" already exists'.format(self.vm_name.value)
+ 'VM with same name "{}" already exists'.format(
+ self.vm_name.value
+ )
)
def network_validation(self):
@@ -235,34 +280,48 @@ class CreateVMSchema(OTPSchema):
if _network:
for net in _network:
- network = etcd_client.get(os.path.join(env_vars.get('NETWORK_PREFIX'),
- self.name.value,
- net), value_in_json=True)
+ network = shared.etcd_client.get(
+ os.path.join(
+                        shared.settings["etcd"]["network_prefix"],
+ self.name.value,
+ net,
+ ),
+ value_in_json=True,
+ )
if not network:
- self.add_error("Network with name {} does not exists" \
- .format(net))
+ self.add_error(
+                        "Network with name {} does not exist".format(
+ net
+ )
+ )
def specs_validation(self):
ALLOWED_BASE = 10
- _cpu = self.specs.value.get('cpu', KeyError)
- _ram = self.specs.value.get('ram', KeyError)
- _os_ssd = self.specs.value.get('os-ssd', KeyError)
- _hdd = self.specs.value.get('hdd', KeyError)
+ _cpu = self.specs.value.get("cpu", KeyError)
+ _ram = self.specs.value.get("ram", KeyError)
+ _os_ssd = self.specs.value.get("os-ssd", KeyError)
+ _hdd = self.specs.value.get("hdd", KeyError)
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
- self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
+ self.add_error(
+ "You must specify CPU, RAM and OS-SSD in your specs"
+ )
return None
try:
parsed_ram = bitmath.parse_string_unsafe(_ram)
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
if parsed_ram.base != ALLOWED_BASE:
- self.add_error("Your specified RAM is not in correct units")
+ self.add_error(
+ "Your specified RAM is not in correct units"
+ )
if parsed_os_ssd.base != ALLOWED_BASE:
- self.add_error("Your specified OS-SSD is not in correct units")
+ self.add_error(
+ "Your specified OS-SSD is not in correct units"
+ )
- if _cpu < 1:
+ if int(_cpu) < 1:
self.add_error("CPU must be atleast 1")
if parsed_ram < bitmath.GB(1):
@@ -275,7 +334,9 @@ class CreateVMSchema(OTPSchema):
for hdd in _hdd:
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
if _parsed_hdd.base != ALLOWED_BASE:
- self.add_error("Your specified HDD is not in correct units")
+ self.add_error(
+ "Your specified HDD is not in correct units"
+ )
break
else:
parsed_hdd.append(str(_parsed_hdd))
@@ -286,21 +347,24 @@ class CreateVMSchema(OTPSchema):
else:
if self.get_errors():
self.specs = {
- 'cpu': _cpu,
- 'ram': str(parsed_ram),
- 'os-ssd': str(parsed_os_ssd),
- 'hdd': parsed_hdd
+ "cpu": _cpu,
+ "ram": str(parsed_ram),
+ "os-ssd": str(parsed_os_ssd),
+ "hdd": parsed_hdd,
}
class VMStatusSchema(OTPSchema):
def __init__(self, data):
data["uuid"] = (
- resolve_vm_name(
- name=data.get("vm_name", None),
- owner=(data.get("in_support_of", None) or data.get("name", None)),
- )
- or KeyError
+ resolve_vm_name(
+ name=data.get("vm_name", None),
+ owner=(
+ data.get("in_support_of", None)
+ or data.get("name", None)
+ ),
+ )
+ or KeyError
)
self.uuid = VmUUIDField(data)
@@ -309,9 +373,10 @@ class VMStatusSchema(OTPSchema):
super().__init__(data, fields)
def validation(self):
- vm = vm_pool.get(self.uuid.value)
+ vm = shared.vm_pool.get(self.uuid.value)
if not (
- vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
+ vm.value["owner"] == self.name.value
+ or self.realm.value == "ungleich-admin"
):
self.add_error("Invalid User")
@@ -319,11 +384,14 @@ class VMStatusSchema(OTPSchema):
class VmActionSchema(OTPSchema):
def __init__(self, data):
data["uuid"] = (
- resolve_vm_name(
- name=data.get("vm_name", None),
- owner=(data.get("in_support_of", None) or data.get("name", None)),
- )
- or KeyError
+ resolve_vm_name(
+ name=data.get("vm_name", None),
+ owner=(
+ data.get("in_support_of", None)
+ or data.get("name", None)
+ ),
+ )
+ or KeyError
)
self.uuid = VmUUIDField(data)
self.action = Field("action", str, data.get("action", KeyError))
@@ -338,20 +406,23 @@ class VmActionSchema(OTPSchema):
allowed_actions = ["start", "stop", "delete"]
if self.action.value not in allowed_actions:
self.add_error(
- "Invalid Action. Allowed Actions are {}".format(allowed_actions)
+ "Invalid Action. Allowed Actions are {}".format(
+ allowed_actions
+ )
)
def validation(self):
- vm = vm_pool.get(self.uuid.value)
+ vm = shared.vm_pool.get(self.uuid.value)
if not (
- vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
+ vm.value["owner"] == self.name.value
+ or self.realm.value == "ungleich-admin"
):
self.add_error("Invalid User")
if (
- self.action.value == "start"
- and vm.status == VMStatus.running
- and vm.hostname != ""
+ self.action.value == "start"
+ and vm.status == VMStatus.running
+ and vm.hostname != ""
):
self.add_error("VM Already Running")
@@ -365,15 +436,20 @@ class VmActionSchema(OTPSchema):
class VmMigrationSchema(OTPSchema):
def __init__(self, data):
data["uuid"] = (
- resolve_vm_name(
- name=data.get("vm_name", None),
- owner=(data.get("in_support_of", None) or data.get("name", None)),
- )
- or KeyError
+ resolve_vm_name(
+ name=data.get("vm_name", None),
+ owner=(
+ data.get("in_support_of", None)
+ or data.get("name", None)
+ ),
+ )
+ or KeyError
)
self.uuid = VmUUIDField(data)
- self.destination = Field("destination", str, data.get("destination", KeyError))
+ self.destination = Field(
+ "destination", str, data.get("destination", KeyError)
+ )
self.destination.validation = self.destination_validation
@@ -382,31 +458,47 @@ class VmMigrationSchema(OTPSchema):
def destination_validation(self):
hostname = self.destination.value
- host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
+ host = next(
+ filter(
+ lambda h: h.hostname == hostname, shared.host_pool.hosts
+ ),
+ None,
+ )
if not host:
- self.add_error("No Such Host ({}) exists".format(self.destination.value))
+ self.add_error(
+ "No Such Host ({}) exists".format(
+ self.destination.value
+ )
+ )
elif host.status != HostStatus.alive:
self.add_error("Destination Host is dead")
else:
self.destination.value = host.key
def validation(self):
- vm = vm_pool.get(self.uuid.value)
+ vm = shared.vm_pool.get(self.uuid.value)
if not (
- vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
+ vm.value["owner"] == self.name.value
+ or self.realm.value == "ungleich-admin"
):
self.add_error("Invalid User")
if vm.status != VMStatus.running:
self.add_error("Can't migrate non-running VM")
- if vm.hostname == os.path.join(env_vars.get('HOST_PREFIX'), self.destination.value):
- self.add_error("Destination host couldn't be same as Source Host")
+ if vm.hostname == os.path.join(
+            shared.settings["etcd"]["host_prefix"], self.destination.value
+ ):
+ self.add_error(
+ "Destination host couldn't be same as Source Host"
+ )
class AddSSHSchema(OTPSchema):
def __init__(self, data):
- self.key_name = Field("key_name", str, data.get("key_name", KeyError))
+ self.key_name = Field(
+ "key_name", str, data.get("key_name", KeyError)
+ )
self.key = Field("key", str, data.get("key_name", KeyError))
fields = [self.key_name, self.key]
@@ -415,7 +507,9 @@ class AddSSHSchema(OTPSchema):
class RemoveSSHSchema(OTPSchema):
def __init__(self, data):
- self.key_name = Field("key_name", str, data.get("key_name", KeyError))
+ self.key_name = Field(
+ "key_name", str, data.get("key_name", KeyError)
+ )
fields = [self.key_name]
super().__init__(data=data, fields=fields)
@@ -423,7 +517,9 @@ class RemoveSSHSchema(OTPSchema):
class GetSSHSchema(OTPSchema):
def __init__(self, data):
- self.key_name = Field("key_name", str, data.get("key_name", None))
+ self.key_name = Field(
+ "key_name", str, data.get("key_name", None)
+ )
fields = [self.key_name]
super().__init__(data=data, fields=fields)
@@ -442,15 +538,20 @@ class CreateNetwork(OTPSchema):
super().__init__(data, fields=fields)
def network_name_validation(self):
- network = etcd_client.get(os.path.join(env_vars.get('NETWORK_PREFIX'),
- self.name.value,
- self.network_name.value),
- value_in_json=True)
+        key = os.path.join(shared.settings["etcd"]["network_prefix"], self.name.value, self.network_name.value)
+ network = shared.etcd_client.get(key, value_in_json=True)
if network:
- self.add_error("Network with name {} already exists" \
- .format(self.network_name.value))
+ self.add_error(
+ "Network with name {} already exists".format(
+ self.network_name.value
+ )
+ )
def network_type_validation(self):
supported_network_types = ["vxlan"]
if self.type.value not in supported_network_types:
- self.add_error("Unsupported Network Type. Supported network types are {}".format(supported_network_types))
+ self.add_error(
+ "Unsupported Network Type. Supported network types are {}".format(
+ supported_network_types
+ )
+ )
diff --git a/ucloud/docs/source/__init__.py b/uncloud/cli/__init__.py
similarity index 100%
rename from ucloud/docs/source/__init__.py
rename to uncloud/cli/__init__.py
diff --git a/uncloud/cli/helper.py b/uncloud/cli/helper.py
new file mode 100644
index 0000000..51a4355
--- /dev/null
+++ b/uncloud/cli/helper.py
@@ -0,0 +1,46 @@
+import requests
+import json
+import argparse
+import binascii
+
+from pyotp import TOTP
+from os.path import join as join_path
+from uncloud.common.shared import shared
+
+
+def get_otp_parser():
+ otp_parser = argparse.ArgumentParser('otp')
+ otp_parser.add_argument('--name')
+ otp_parser.add_argument('--realm')
+ otp_parser.add_argument('--seed', type=get_token, dest='token', metavar='SEED')
+
+ return otp_parser
+
+
+def load_dump_pretty(content):
+ if isinstance(content, bytes):
+ content = content.decode('utf-8')
+ parsed = json.loads(content)
+ return json.dumps(parsed, indent=4, sort_keys=True)
+
+
+def make_request(*args, data=None, request_method=requests.post):
+ try:
+ r = request_method(join_path(shared.settings['client']['api_server'], *args), json=data)
+ except requests.exceptions.RequestException:
+ print('Error occurred while connecting to API server.')
+ else:
+ try:
+ print(load_dump_pretty(r.content))
+ except Exception:
+ print('Error occurred while getting output from api server.')
+
+
+def get_token(seed):
+ if seed is not None:
+ try:
+ token = TOTP(seed).now()
+ except binascii.Error:
+ raise argparse.ArgumentTypeError('Invalid seed')
+ else:
+ return token
diff --git a/uncloud/cli/host.py b/uncloud/cli/host.py
new file mode 100644
index 0000000..e912567
--- /dev/null
+++ b/uncloud/cli/host.py
@@ -0,0 +1,45 @@
+import requests
+
+from uncloud.cli.helper import make_request, get_otp_parser
+from uncloud.common.parser import BaseParser
+
+
+class HostParser(BaseParser):
+ def __init__(self):
+ super().__init__('host')
+
+ def create(self, **kwargs):
+ p = self.subparser.add_parser('create', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--hostname', required=True)
+ p.add_argument('--cpu', required=True, type=int)
+ p.add_argument('--ram', required=True)
+ p.add_argument('--os-ssd', required=True)
+ p.add_argument('--hdd', default=list())
+
+ def list(self, **kwargs):
+ self.subparser.add_parser('list', **kwargs)
+
+
+parser = HostParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('host_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ request_method = requests.post
+ data = None
+ if subcommand == 'create':
+ kwargs['specs'] = {
+ 'cpu': kwargs.pop('cpu'),
+ 'ram': kwargs.pop('ram'),
+ 'os-ssd': kwargs.pop('os_ssd'),
+ 'hdd': kwargs.pop('hdd')
+ }
+ data = kwargs
+ elif subcommand == 'list':
+ request_method = requests.get
+
+ make_request('host', subcommand, data=data, request_method=request_method)
diff --git a/uncloud/cli/image.py b/uncloud/cli/image.py
new file mode 100644
index 0000000..2f59c32
--- /dev/null
+++ b/uncloud/cli/image.py
@@ -0,0 +1,38 @@
+import requests
+
+from uncloud.cli.helper import make_request
+from uncloud.common.parser import BaseParser
+
+
+class ImageParser(BaseParser):
+ def __init__(self):
+ super().__init__('image')
+
+ def create(self, **kwargs):
+ p = self.subparser.add_parser('create', **kwargs)
+ p.add_argument('--name', required=True)
+ p.add_argument('--uuid', required=True)
+ p.add_argument('--image-store', required=True, dest='image_store')
+
+ def list(self, **kwargs):
+ self.subparser.add_parser('list', **kwargs)
+
+
+parser = ImageParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('image_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ data = None
+ request_method = requests.post
+ if subcommand == 'list':
+ subcommand = 'list-public'
+ request_method = requests.get
+ elif subcommand == 'create':
+ data = kwargs
+
+ make_request('image', subcommand, data=data, request_method=request_method)
diff --git a/uncloud/cli/main.py b/uncloud/cli/main.py
new file mode 100644
index 0000000..9a42497
--- /dev/null
+++ b/uncloud/cli/main.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+
+import argparse
+import importlib
+
+arg_parser = argparse.ArgumentParser('cli', add_help=False)
+subparser = arg_parser.add_subparsers(dest='subcommand')
+
+for component in ['user', 'host', 'image', 'network', 'vm']:
+ module = importlib.import_module('uncloud.cli.{}'.format(component))
+ parser = getattr(module, 'arg_parser')
+ subparser.add_parser(name=parser.prog, parents=[parser])
+
+
+def main(arguments):
+ if not arguments['subcommand']:
+ arg_parser.print_help()
+ else:
+ name = arguments.pop('subcommand')
+ arguments.pop('debug')
+ mod = importlib.import_module('uncloud.cli.{}'.format(name))
+ _main = getattr(mod, 'main')
+ _main(**arguments)
diff --git a/uncloud/cli/network.py b/uncloud/cli/network.py
new file mode 100644
index 0000000..55798bf
--- /dev/null
+++ b/uncloud/cli/network.py
@@ -0,0 +1,32 @@
+import requests
+
+from uncloud.cli.helper import make_request, get_otp_parser
+from uncloud.common.parser import BaseParser
+
+
+class NetworkParser(BaseParser):
+ def __init__(self):
+ super().__init__('network')
+
+ def create(self, **kwargs):
+ p = self.subparser.add_parser('create', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--network-name', required=True)
+ p.add_argument('--network-type', required=True, dest='type')
+ p.add_argument('--user', action='store_true')
+
+
+parser = NetworkParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('network_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ data = None
+ request_method = requests.post
+ if subcommand == 'create':
+ data = kwargs
+
+ make_request('network', subcommand, data=data, request_method=request_method)
diff --git a/uncloud/cli/user.py b/uncloud/cli/user.py
new file mode 100755
index 0000000..3a4cc4e
--- /dev/null
+++ b/uncloud/cli/user.py
@@ -0,0 +1,41 @@
+from uncloud.cli.helper import make_request, get_otp_parser
+from uncloud.common.parser import BaseParser
+
+
+class UserParser(BaseParser):
+ def __init__(self):
+ super().__init__('user')
+
+ def files(self, **kwargs):
+ self.subparser.add_parser('files', parents=[get_otp_parser()], **kwargs)
+
+ def vms(self, **kwargs):
+ self.subparser.add_parser('vms', parents=[get_otp_parser()], **kwargs)
+
+ def networks(self, **kwargs):
+ self.subparser.add_parser('networks', parents=[get_otp_parser()], **kwargs)
+
+ def add_ssh(self, **kwargs):
+ p = self.subparser.add_parser('add-ssh', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--key-name', required=True)
+ p.add_argument('--key', required=True)
+
+ def get_ssh(self, **kwargs):
+ p = self.subparser.add_parser('get-ssh', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--key-name', default='')
+
+ def remove_ssh(self, **kwargs):
+ p = self.subparser.add_parser('remove-ssh', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--key-name', required=True)
+
+
+parser = UserParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('user_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ make_request('user', subcommand, data=kwargs)
diff --git a/uncloud/cli/vm.py b/uncloud/cli/vm.py
new file mode 100644
index 0000000..396530e
--- /dev/null
+++ b/uncloud/cli/vm.py
@@ -0,0 +1,62 @@
+from uncloud.common.parser import BaseParser
+from uncloud.cli.helper import make_request, get_otp_parser
+
+
+class VMParser(BaseParser):
+ def __init__(self):
+ super().__init__('vm')
+
+ def start(self, **args):
+ p = self.subparser.add_parser('start', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+
+ def stop(self, **args):
+ p = self.subparser.add_parser('stop', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+
+ def status(self, **args):
+ p = self.subparser.add_parser('status', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+
+ def delete(self, **args):
+ p = self.subparser.add_parser('delete', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+
+ def migrate(self, **args):
+ p = self.subparser.add_parser('migrate', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+ p.add_argument('--destination', required=True)
+
+ def create(self, **args):
+ p = self.subparser.add_parser('create', parents=[get_otp_parser()], **args)
+ p.add_argument('--cpu', required=True)
+ p.add_argument('--ram', required=True)
+ p.add_argument('--os-ssd', required=True)
+ p.add_argument('--hdd', action='append', default=list())
+ p.add_argument('--image', required=True)
+ p.add_argument('--network', action='append', default=[])
+ p.add_argument('--vm-name', required=True)
+
+
+parser = VMParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('vm_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ data = kwargs
+ endpoint = subcommand
+ if subcommand in ['start', 'stop', 'delete']:
+ endpoint = 'action'
+ data['action'] = subcommand
+ elif subcommand == 'create':
+ kwargs['specs'] = {
+ 'cpu': kwargs.pop('cpu'),
+ 'ram': kwargs.pop('ram'),
+ 'os-ssd': kwargs.pop('os_ssd'),
+ 'hdd': kwargs.pop('hdd')
+ }
+ make_request('vm', endpoint, data=data)
diff --git a/ucloud/common/__init__.py b/uncloud/common/__init__.py
similarity index 100%
rename from ucloud/common/__init__.py
rename to uncloud/common/__init__.py
diff --git a/ucloud/common/classes.py b/uncloud/common/classes.py
similarity index 93%
rename from ucloud/common/classes.py
rename to uncloud/common/classes.py
index 2eae809..29dffd4 100644
--- a/ucloud/common/classes.py
+++ b/uncloud/common/classes.py
@@ -1,4 +1,4 @@
-from etcd3_wrapper import EtcdEntry
+from .etcd_wrapper import EtcdEntry
class SpecificEtcdEntryBase:
diff --git a/uncloud/common/cli.py b/uncloud/common/cli.py
new file mode 100644
index 0000000..3d3c248
--- /dev/null
+++ b/uncloud/common/cli.py
@@ -0,0 +1,26 @@
+from uncloud.common.shared import shared
+from pyotp import TOTP
+
+
+def get_token(seed):
+ if seed is not None:
+ try:
+ token = TOTP(seed).now()
+ except Exception:
+ raise Exception('Invalid seed')
+ else:
+ return token
+
+
+def resolve_otp_credentials(kwargs):
+ d = {
+ 'name': shared.settings['client']['name'],
+ 'realm': shared.settings['client']['realm'],
+ 'token': get_token(shared.settings['client']['seed'])
+ }
+
+ for k, v in d.items():
+ if k in kwargs and kwargs[k] is None:
+ kwargs.update({k: v})
+
+ return d
diff --git a/ucloud/common/counters.py b/uncloud/common/counters.py
similarity index 91%
rename from ucloud/common/counters.py
rename to uncloud/common/counters.py
index 066a870..2d4a8e9 100644
--- a/ucloud/common/counters.py
+++ b/uncloud/common/counters.py
@@ -1,4 +1,4 @@
-from etcd3_wrapper import Etcd3Wrapper
+from .etcd_wrapper import Etcd3Wrapper
def increment_etcd_counter(etcd_client: Etcd3Wrapper, key):
diff --git a/uncloud/common/etcd_wrapper.py b/uncloud/common/etcd_wrapper.py
new file mode 100644
index 0000000..38471ab
--- /dev/null
+++ b/uncloud/common/etcd_wrapper.py
@@ -0,0 +1,75 @@
+import etcd3
+import json
+
+from functools import wraps
+
+from uncloud import UncloudException
+from uncloud.common import logger
+
+
+class EtcdEntry:
+ def __init__(self, meta_or_key, value, value_in_json=False):
+ if hasattr(meta_or_key, 'key'):
+ # if meta has attr 'key' then get it
+ self.key = meta_or_key.key.decode('utf-8')
+ else:
+ # otherwise meta is the 'key'
+ self.key = meta_or_key
+ self.value = value.decode('utf-8')
+
+ if value_in_json:
+ self.value = json.loads(self.value)
+
+
+def readable_errors(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except etcd3.exceptions.ConnectionFailedError:
+ raise UncloudException('Cannot connect to etcd: is etcd running as configured in uncloud.conf?')
+ except etcd3.exceptions.ConnectionTimeoutError as err:
+ raise etcd3.exceptions.ConnectionTimeoutError('etcd connection timeout.') from err
+ except Exception:
+ logger.exception('Some etcd error occured. See syslog for details.')
+
+ return wrapper
+
+
+class Etcd3Wrapper:
+ @readable_errors
+ def __init__(self, *args, **kwargs):
+ self.client = etcd3.client(*args, **kwargs)
+
+ @readable_errors
+ def get(self, *args, value_in_json=False, **kwargs):
+ _value, _key = self.client.get(*args, **kwargs)
+ if _key is None or _value is None:
+ return None
+ return EtcdEntry(_key, _value, value_in_json=value_in_json)
+
+ @readable_errors
+ def put(self, *args, value_in_json=False, **kwargs):
+ _key, _value = args
+ if value_in_json:
+ _value = json.dumps(_value)
+
+ if not isinstance(_key, str):
+ _key = _key.decode('utf-8')
+
+ return self.client.put(_key, _value, **kwargs)
+
+ @readable_errors
+ def get_prefix(self, *args, value_in_json=False, raise_exception=True, **kwargs):
+ event_iterator = self.client.get_prefix(*args, **kwargs)
+ for e in event_iterator:
+ yield EtcdEntry(*e[::-1], value_in_json=value_in_json)
+
+ @readable_errors
+ def watch_prefix(self, key, raise_exception=True, value_in_json=False):
+ event_iterator, cancel = self.client.watch_prefix(key)
+ for e in event_iterator:
+ if hasattr(e, '_event'):
+ e = e._event
+ if e.type == e.PUT:
+ yield EtcdEntry(e.kv.key, e.kv.value, value_in_json=value_in_json)
diff --git a/ucloud/common/host.py b/uncloud/common/host.py
similarity index 85%
rename from ucloud/common/host.py
rename to uncloud/common/host.py
index ccbf7a8..f7bb7d5 100644
--- a/ucloud/common/host.py
+++ b/uncloud/common/host.py
@@ -7,7 +7,7 @@ from .classes import SpecificEtcdEntryBase
class HostStatus:
- """Possible Statuses of ucloud host."""
+ """Possible Statuses of uncloud host."""
alive = "ALIVE"
dead = "DEAD"
@@ -26,11 +26,13 @@ class HostEntry(SpecificEtcdEntryBase):
def update_heartbeat(self):
self.status = HostStatus.alive
- self.last_heartbeat = time.strftime("%Y-%m-%d %H:%M:%S")
+ self.last_heartbeat = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
def is_alive(self):
- last_heartbeat = datetime.strptime(self.last_heartbeat, "%Y-%m-%d %H:%M:%S")
- delta = datetime.now() - last_heartbeat
+ last_heartbeat = datetime.strptime(
+ self.last_heartbeat, "%Y-%m-%d %H:%M:%S"
+ )
+ delta = datetime.utcnow() - last_heartbeat
if delta.total_seconds() > 60:
return False
return True
diff --git a/uncloud/common/network.py b/uncloud/common/network.py
new file mode 100644
index 0000000..32f6951
--- /dev/null
+++ b/uncloud/common/network.py
@@ -0,0 +1,70 @@
+import subprocess as sp
+import random
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def random_bytes(num=6):
+ return [random.randrange(256) for _ in range(num)]
+
+
+def generate_mac(
+ uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
+):
+ mac = random_bytes()
+ if oui:
+ if type(oui) == str:
+ oui = [int(chunk) for chunk in oui.split(separator)]
+ mac = oui + random_bytes(num=6 - len(oui))
+ else:
+ if multicast:
+ mac[0] |= 1 # set bit 0
+ else:
+ mac[0] &= ~1 # clear bit 0
+ if uaa:
+ mac[0] &= ~(1 << 1) # clear bit 1
+ else:
+ mac[0] |= 1 << 1 # set bit 1
+ return separator.join(byte_fmt % b for b in mac)
+
+
+def create_dev(script, _id, dev, ip=None):
+ command = [
+ "sudo",
+ "-p",
+ "Enter password to create network devices for vm: ",
+ script,
+ str(_id),
+ dev,
+ ]
+ if ip:
+ command.append(ip)
+ try:
+ output = sp.check_output(command, stderr=sp.PIPE)
+ except Exception:
+ logger.exception("Creation of interface %s failed.", dev)
+ return None
+ else:
+ return output.decode("utf-8").strip()
+
+
+def delete_network_interface(iface):
+ try:
+ sp.check_output(
+ [
+ "sudo",
+ "-p",
+ "Enter password to remove {} network device: ".format(
+ iface
+ ),
+ "ip",
+ "link",
+ "del",
+ iface,
+ ],
+ stderr=sp.PIPE,
+ )
+ except Exception:
+ logger.exception("Interface %s Deletion failed", iface)
+
diff --git a/uncloud/common/parser.py b/uncloud/common/parser.py
new file mode 100644
index 0000000..576f0e7
--- /dev/null
+++ b/uncloud/common/parser.py
@@ -0,0 +1,13 @@
+import argparse
+
+
+class BaseParser:
+ def __init__(self, command):
+ self.arg_parser = argparse.ArgumentParser(command, add_help=False)
+ self.subparser = self.arg_parser.add_subparsers(dest='{}_subcommand'.format(command))
+ self.common_args = {'add_help': False}
+
+ methods = [attr for attr in dir(self) if not attr.startswith('__')
+ and type(getattr(self, attr)).__name__ == 'method']
+ for method in methods:
+ getattr(self, method)(**self.common_args)
diff --git a/ucloud/common/request.py b/uncloud/common/request.py
similarity index 74%
rename from ucloud/common/request.py
rename to uncloud/common/request.py
index cadac80..cb0add5 100644
--- a/ucloud/common/request.py
+++ b/uncloud/common/request.py
@@ -2,9 +2,8 @@ import json
from os.path import join
from uuid import uuid4
-from etcd3_wrapper.etcd3_wrapper import PsuedoEtcdEntry
-
-from .classes import SpecificEtcdEntryBase
+from uncloud.common.etcd_wrapper import EtcdEntry
+from uncloud.common.classes import SpecificEtcdEntryBase
class RequestType:
@@ -18,8 +17,9 @@ class RequestType:
class RequestEntry(SpecificEtcdEntryBase):
-
def __init__(self, e):
+ self.destination_sock_path = None
+ self.destination_host_key = None
self.type = None # type: str
self.migration = None # type: bool
self.destination = None # type: str
@@ -29,8 +29,8 @@ class RequestEntry(SpecificEtcdEntryBase):
@classmethod
def from_scratch(cls, request_prefix, **kwargs):
- e = PsuedoEtcdEntry(join(request_prefix, uuid4().hex),
- value=json.dumps(kwargs).encode("utf-8"), value_in_json=True)
+ e = EtcdEntry(meta_or_key=join(request_prefix, uuid4().hex),
+ value=json.dumps(kwargs).encode('utf-8'), value_in_json=True)
return cls(e)
diff --git a/uncloud/common/schemas.py b/uncloud/common/schemas.py
new file mode 100644
index 0000000..04978a5
--- /dev/null
+++ b/uncloud/common/schemas.py
@@ -0,0 +1,41 @@
+import bitmath
+
+from marshmallow import fields, Schema
+
+
+class StorageUnit(fields.Field):
+ def _serialize(self, value, attr, obj, **kwargs):
+ return str(value)
+
+ def _deserialize(self, value, attr, data, **kwargs):
+ return bitmath.parse_string_unsafe(value)
+
+
+class SpecsSchema(Schema):
+ cpu = fields.Int()
+ ram = StorageUnit()
+ os_ssd = StorageUnit(data_key="os-ssd", attribute="os-ssd")
+ hdd = fields.List(StorageUnit())
+
+
+class VMSchema(Schema):
+ name = fields.Str()
+ owner = fields.Str()
+ owner_realm = fields.Str()
+ specs = fields.Nested(SpecsSchema)
+ status = fields.Str()
+ log = fields.List(fields.Str())
+ vnc_socket = fields.Str()
+ image_uuid = fields.Str()
+ hostname = fields.Str()
+ metadata = fields.Dict()
+ network = fields.List(
+ fields.Tuple((fields.Str(), fields.Str(), fields.Int()))
+ )
+ in_migration = fields.Bool()
+
+
+class NetworkSchema(Schema):
+ _id = fields.Int(data_key="id", attribute="id")
+ _type = fields.Str(data_key="type", attribute="type")
+ ipv6 = fields.Str()
diff --git a/uncloud/common/settings.py b/uncloud/common/settings.py
new file mode 100644
index 0000000..8503f42
--- /dev/null
+++ b/uncloud/common/settings.py
@@ -0,0 +1,136 @@
+import configparser
+import logging
+import sys
+import os
+
+from datetime import datetime
+from uncloud.common.etcd_wrapper import Etcd3Wrapper
+from os.path import join as join_path
+
+logger = logging.getLogger(__name__)
+settings = None
+
+
+class CustomConfigParser(configparser.RawConfigParser):
+ def __getitem__(self, key):
+ try:
+ result = super().__getitem__(key)
+ except KeyError as err:
+ raise KeyError(
+ 'Key \'{}\' not found in configuration. Make sure you configure uncloud.'.format(
+ key
+ )
+ ) from err
+ else:
+ return result
+
+
+class Settings(object):
+ def __init__(self, conf_dir, seed_value=None):
+ conf_name = 'uncloud.conf'
+ self.config_file = join_path(conf_dir, conf_name)
+
+        # this is used to cache config from etcd for one minute. Without this we
+        # would make a lot of requests to etcd which slows down everything.
+ self.last_config_update = datetime.fromtimestamp(0)
+
+ self.config_parser = CustomConfigParser(allow_no_value=True)
+ self.config_parser.add_section('etcd')
+ self.config_parser.set('etcd', 'base_prefix', '/')
+
+ if os.access(self.config_file, os.R_OK):
+ self.config_parser.read(self.config_file)
+ else:
+ raise FileNotFoundError('Config file %s not found!', self.config_file)
+ self.config_key = join_path(self['etcd']['base_prefix'] + 'uncloud/config/')
+
+ self.read_internal_values()
+
+ if seed_value is None:
+ seed_value = dict()
+
+ self.config_parser.read_dict(seed_value)
+
+ def get_etcd_client(self):
+ args = tuple()
+ try:
+ kwargs = {
+ 'host': self.config_parser.get('etcd', 'url'),
+ 'port': self.config_parser.get('etcd', 'port'),
+ 'ca_cert': self.config_parser.get('etcd', 'ca_cert'),
+ 'cert_cert': self.config_parser.get('etcd', 'cert_cert'),
+ 'cert_key': self.config_parser.get('etcd', 'cert_key'),
+ }
+ except configparser.Error as err:
+ raise configparser.Error(
+ '{} in config file {}'.format(
+ err.message, self.config_file
+ )
+ ) from err
+ else:
+ try:
+ wrapper = Etcd3Wrapper(*args, **kwargs)
+ except Exception as err:
+ logger.error(
+ 'etcd connection not successfull. Please check your config file.'
+ '\nDetails: %s\netcd connection parameters: %s',
+ err,
+ kwargs,
+ )
+ sys.exit(1)
+ else:
+ return wrapper
+
+ def read_internal_values(self):
+ base_prefix = self['etcd']['base_prefix']
+ self.config_parser.read_dict(
+ {
+ 'etcd': {
+ 'file_prefix': join_path(base_prefix, 'files/'),
+ 'host_prefix': join_path(base_prefix, 'hosts/'),
+ 'image_prefix': join_path(base_prefix, 'images/'),
+ 'image_store_prefix': join_path(base_prefix, 'imagestore/'),
+ 'network_prefix': join_path(base_prefix, 'networks/'),
+ 'request_prefix': join_path(base_prefix, 'requests/'),
+ 'user_prefix': join_path(base_prefix, 'users/'),
+ 'vm_prefix': join_path(base_prefix, 'vms/'),
+ 'vxlan_counter': join_path(base_prefix, 'counters/vxlan'),
+ 'tap_counter': join_path(base_prefix, 'counters/tap')
+ }
+ }
+ )
+
+ def read_config_file_values(self, config_file):
+ try:
+ # Trying to read configuration file
+ with open(config_file) as config_file_handle:
+ self.config_parser.read_file(config_file_handle)
+ except FileNotFoundError:
+ sys.exit('Configuration file {} not found!'.format(config_file))
+ except Exception as err:
+ logger.exception(err)
+ sys.exit('Error occurred while reading configuration file')
+
+ def read_values_from_etcd(self):
+ etcd_client = self.get_etcd_client()
+ if (datetime.utcnow() - self.last_config_update).total_seconds() > 60:
+ config_from_etcd = etcd_client.get(self.config_key, value_in_json=True)
+ if config_from_etcd:
+ self.config_parser.read_dict(config_from_etcd.value)
+ self.last_config_update = datetime.utcnow()
+ else:
+ raise KeyError('Key \'{}\' not found in etcd. Please configure uncloud.'.format(self.config_key))
+
+ def __getitem__(self, key):
+ # Allow failing to read from etcd if we have
+ # it locally
+ if key not in self.config_parser.sections():
+ try:
+ self.read_values_from_etcd()
+ except KeyError:
+ pass
+ return self.config_parser[key]
+
+
+def get_settings():
+ return settings
diff --git a/uncloud/common/shared.py b/uncloud/common/shared.py
new file mode 100644
index 0000000..aea7cbc
--- /dev/null
+++ b/uncloud/common/shared.py
@@ -0,0 +1,34 @@
+from uncloud.common.settings import get_settings
+from uncloud.common.vm import VmPool
+from uncloud.common.host import HostPool
+from uncloud.common.request import RequestPool
+import uncloud.common.storage_handlers as storage_handlers
+
+
+class Shared:
+ @property
+ def settings(self):
+ return get_settings()
+
+ @property
+ def etcd_client(self):
+ return self.settings.get_etcd_client()
+
+ @property
+ def host_pool(self):
+ return HostPool(self.etcd_client, self.settings["etcd"]["host_prefix"])
+
+ @property
+ def vm_pool(self):
+ return VmPool(self.etcd_client, self.settings["etcd"]["vm_prefix"])
+
+ @property
+ def request_pool(self):
+ return RequestPool(self.etcd_client, self.settings["etcd"]["request_prefix"])
+
+ @property
+ def storage_handler(self):
+ return storage_handlers.get_storage_handler()
+
+
+shared = Shared()
diff --git a/ucloud/common/storage_handlers.py b/uncloud/common/storage_handlers.py
similarity index 63%
rename from ucloud/common/storage_handlers.py
rename to uncloud/common/storage_handlers.py
index 8b1097a..58c2dc2 100644
--- a/ucloud/common/storage_handlers.py
+++ b/uncloud/common/storage_handlers.py
@@ -6,17 +6,20 @@ import stat
from abc import ABC
from . import logger
from os.path import join as join_path
+import uncloud.common.shared as shared
class ImageStorageHandler(ABC):
+ handler_name = "base"
+
def __init__(self, image_base, vm_base):
self.image_base = image_base
self.vm_base = vm_base
def import_image(self, image_src, image_dest, protect=False):
"""Put an image at the destination
- :param src: An Image file
- :param dest: A path where :param src: is to be put.
+ :param image_src: An Image file
+        :param image_dest: A path where :param image_src: is to be put.
:param protect: If protect is true then the dest is protect (readonly etc)
The obj must exist on filesystem.
"""
@@ -26,8 +29,8 @@ class ImageStorageHandler(ABC):
def make_vm_image(self, image_path, path):
"""Copy image from src to dest
- :param src: A path
- :param dest: A path
+ :param image_path: A path
+ :param path: A path
src and destination must be on same storage system i.e both on file system or both on CEPH etc.
"""
@@ -43,14 +46,17 @@ class ImageStorageHandler(ABC):
def delete_vm_image(self, path):
raise NotImplementedError()
- def execute_command(self, command, report=True):
+ def execute_command(self, command, report=True, error_origin=None):
+ if not error_origin:
+ error_origin = self.handler_name
+
command = list(map(str, command))
try:
- output = sp.check_output(command, stderr=sp.PIPE)
- except Exception as e:
+ sp.check_output(command, stderr=sp.PIPE)
+ except sp.CalledProcessError as e:
+ _stderr = e.stderr.decode("utf-8").strip()
if report:
- print(e)
- logger.exception(e)
+ logger.exception("%s:- %s", error_origin, _stderr)
return False
return True
@@ -65,12 +71,16 @@ class ImageStorageHandler(ABC):
class FileSystemBasedImageStorageHandler(ImageStorageHandler):
+ handler_name = "Filesystem"
+
def import_image(self, src, dest, protect=False):
dest = join_path(self.image_base, dest)
try:
shutil.copy(src, dest)
if protect:
- os.chmod(dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
+ os.chmod(
+ dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
+ )
except Exception as e:
logger.exception(e)
return False
@@ -80,7 +90,7 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):
src = join_path(self.image_base, src)
dest = join_path(self.vm_base, dest)
try:
- shutil.copy(src, dest)
+ shutil.copyfile(src, dest)
except Exception as e:
logger.exception(e)
return False
@@ -88,7 +98,14 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):
def resize_vm_image(self, path, size):
path = join_path(self.vm_base, path)
- command = ["qemu-img", "resize", "-f", "raw", path, "{}M".format(size)]
+ command = [
+ "qemu-img",
+ "resize",
+ "-f",
+ "raw",
+ path,
+ "{}M".format(size),
+ ]
if self.execute_command(command):
return True
else:
@@ -117,17 +134,33 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):
class CEPHBasedImageStorageHandler(ImageStorageHandler):
+ handler_name = "Ceph"
+
def import_image(self, src, dest, protect=False):
dest = join_path(self.image_base, dest)
- command = ["rbd", "import", src, dest]
+ import_command = ["rbd", "import", src, dest]
+ commands = [import_command]
if protect:
- snap_create_command = ["rbd", "snap", "create", "{}@protected".format(dest)]
- snap_protect_command = ["rbd", "snap", "protect", "{}@protected".format(dest)]
+ snap_create_command = [
+ "rbd",
+ "snap",
+ "create",
+ "{}@protected".format(dest),
+ ]
+ snap_protect_command = [
+ "rbd",
+ "snap",
+ "protect",
+ "{}@protected".format(dest),
+ ]
+ commands.append(snap_create_command)
+ commands.append(snap_protect_command)
- return self.execute_command(command) and self.execute_command(snap_create_command) and\
- self.execute_command(snap_protect_command)
+ result = True
+ for command in commands:
+ result = result and self.execute_command(command)
- return self.execute_command(command)
+ return result
def make_vm_image(self, src, dest):
src = join_path(self.image_base, src)
@@ -156,3 +189,19 @@ class CEPHBasedImageStorageHandler(ImageStorageHandler):
path = join_path(self.vm_base, path)
command = ["rbd", "info", path]
return self.execute_command(command, report=False)
+
+
+def get_storage_handler():
+ __storage_backend = shared.shared.settings["storage"]["storage_backend"]
+ if __storage_backend == "filesystem":
+ return FileSystemBasedImageStorageHandler(
+ vm_base=shared.shared.settings["storage"]["vm_dir"],
+ image_base=shared.shared.settings["storage"]["image_dir"],
+ )
+ elif __storage_backend == "ceph":
+ return CEPHBasedImageStorageHandler(
+ vm_base=shared.shared.settings["storage"]["ceph_vm_pool"],
+ image_base=shared.shared.settings["storage"]["ceph_image_pool"],
+ )
+ else:
+ raise Exception("Unknown Image Storage Handler")
\ No newline at end of file
diff --git a/ucloud/common/vm.py b/uncloud/common/vm.py
similarity index 92%
rename from ucloud/common/vm.py
rename to uncloud/common/vm.py
index 0fb5cea..d11046d 100644
--- a/ucloud/common/vm.py
+++ b/uncloud/common/vm.py
@@ -12,8 +12,13 @@ class VMStatus:
error = "ERROR" # An error occurred that cannot be resolved automatically
-class VMEntry(SpecificEtcdEntryBase):
+def declare_stopped(vm):
+ vm["hostname"] = ""
+ vm["in_migration"] = False
+ vm["status"] = VMStatus.stopped
+
+class VMEntry(SpecificEtcdEntryBase):
def __init__(self, e):
self.owner = None # type: str
self.specs = None # type: dict
@@ -42,7 +47,9 @@ class VMEntry(SpecificEtcdEntryBase):
def add_log(self, msg):
self.log = self.log[:5]
- self.log.append("{} - {}".format(datetime.now().isoformat(), msg))
+ self.log.append(
+ "{} - {}".format(datetime.now().isoformat(), msg)
+ )
class VmPool:
diff --git a/ucloud/metadata/__init__.py b/uncloud/configure/__init__.py
similarity index 100%
rename from ucloud/metadata/__init__.py
rename to uncloud/configure/__init__.py
diff --git a/uncloud/configure/main.py b/uncloud/configure/main.py
new file mode 100644
index 0000000..87f5752
--- /dev/null
+++ b/uncloud/configure/main.py
@@ -0,0 +1,57 @@
+import os
+import argparse
+
+from uncloud.common.shared import shared
+
+arg_parser = argparse.ArgumentParser('configure', add_help=False)
+configure_subparsers = arg_parser.add_subparsers(dest='subcommand')
+
+otp_parser = configure_subparsers.add_parser('otp')
+otp_parser.add_argument('--verification-controller-url', required=True, metavar='URL')
+otp_parser.add_argument('--auth-name', required=True, metavar='OTP-NAME')
+otp_parser.add_argument('--auth-realm', required=True, metavar='OTP-REALM')
+otp_parser.add_argument('--auth-seed', required=True, metavar='OTP-SEED')
+
+network_parser = configure_subparsers.add_parser('network')
+network_parser.add_argument('--prefix-length', required=True, type=int)
+network_parser.add_argument('--prefix', required=True)
+network_parser.add_argument('--vxlan-phy-dev', required=True)
+
+netbox_parser = configure_subparsers.add_parser('netbox')
+netbox_parser.add_argument('--url', required=True)
+netbox_parser.add_argument('--token', required=True)
+
+ssh_parser = configure_subparsers.add_parser('ssh')
+ssh_parser.add_argument('--username', default='root')
+ssh_parser.add_argument('--private-key-path', default=os.path.expanduser('~/.ssh/id_rsa'),)
+
+storage_parser = configure_subparsers.add_parser('storage')
+storage_parser.add_argument('--file-dir', required=True)
+storage_parser_subparsers = storage_parser.add_subparsers(dest='storage_backend')
+
+filesystem_storage_parser = storage_parser_subparsers.add_parser('filesystem')
+filesystem_storage_parser.add_argument('--vm-dir', required=True)
+filesystem_storage_parser.add_argument('--image-dir', required=True)
+
+ceph_storage_parser = storage_parser_subparsers.add_parser('ceph')
+ceph_storage_parser.add_argument('--ceph-vm-pool', required=True)
+ceph_storage_parser.add_argument('--ceph-image-pool', required=True)
+
+
+def update_config(section, kwargs):
+ uncloud_config = shared.etcd_client.get(shared.settings.config_key, value_in_json=True)
+ if not uncloud_config:
+ uncloud_config = {}
+ else:
+ uncloud_config = uncloud_config.value
+
+ uncloud_config[section] = kwargs
+ shared.etcd_client.put(shared.settings.config_key, uncloud_config, value_in_json=True)
+
+
+def main(arguments):
+ subcommand = arguments['subcommand']
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ update_config(subcommand, arguments)
diff --git a/ucloud/filescanner/__init__.py b/uncloud/filescanner/__init__.py
similarity index 100%
rename from ucloud/filescanner/__init__.py
rename to uncloud/filescanner/__init__.py
diff --git a/uncloud/filescanner/main.py b/uncloud/filescanner/main.py
new file mode 100755
index 0000000..046f915
--- /dev/null
+++ b/uncloud/filescanner/main.py
@@ -0,0 +1,85 @@
+import glob
+import os
+import pathlib
+import subprocess as sp
+import time
+import argparse
+import bitmath
+
+from uuid import uuid4
+
+from . import logger
+from uncloud.common.shared import shared
+
+arg_parser = argparse.ArgumentParser('filescanner', add_help=False)
+arg_parser.add_argument('--hostname', required=True)
+
+
+def sha512sum(file: str):
+ """Use sha512sum utility to compute sha512 sum of arg:file
+
+ IF arg:file does not exists:
+ raise FileNotFoundError exception
+    ELSE IF sum successfully computed:
+ return computed sha512 sum
+ ELSE:
+ return None
+ """
+ if not isinstance(file, str):
+ raise TypeError
+ try:
+ output = sp.check_output(['sha512sum', file], stderr=sp.PIPE)
+ except sp.CalledProcessError as e:
+ error = e.stderr.decode('utf-8')
+ if 'No such file or directory' in error:
+ raise FileNotFoundError from None
+ else:
+ output = output.decode('utf-8').strip()
+ output = output.split(' ')
+ return output[0]
+ return None
+
+
+def track_file(file, base_dir, host):
+ file_path = file.relative_to(base_dir)
+ file_str = str(file)
+ # Get Username
+ try:
+ owner = file_path.parts[0]
+ except IndexError:
+ pass
+ else:
+ file_path = file_path.relative_to(owner)
+ creation_date = time.ctime(os.stat(file_str).st_ctime)
+
+ entry_key = os.path.join(shared.settings['etcd']['file_prefix'], str(uuid4()))
+ entry_value = {
+ 'filename': str(file_path),
+ 'owner': owner,
+ 'sha512sum': sha512sum(file_str),
+ 'creation_date': creation_date,
+ 'size': str(bitmath.Byte(os.path.getsize(file_str)).to_MB()),
+ 'host': host
+ }
+
+ logger.info('Tracking %s', file_str)
+
+ shared.etcd_client.put(entry_key, entry_value, value_in_json=True)
+
+
+def main(arguments):
+ hostname = arguments['hostname']
+ base_dir = shared.settings['storage']['file_dir']
+ # Recursively Get All Files and Folder below BASE_DIR
+ files = glob.glob('{}/**'.format(base_dir), recursive=True)
+ files = [pathlib.Path(f) for f in files if pathlib.Path(f).is_file()]
+
+ # Files that are already tracked
+ tracked_files = [
+ pathlib.Path(os.path.join(base_dir, f.value['owner'], f.value['filename']))
+ for f in shared.etcd_client.get_prefix(shared.settings['etcd']['file_prefix'], value_in_json=True)
+ if f.value['host'] == hostname
+ ]
+ untracked_files = set(files) - set(tracked_files)
+ for file in untracked_files:
+ track_file(file, base_dir, hostname)
diff --git a/ucloud/hack/README.org b/uncloud/hack/README.org
similarity index 100%
rename from ucloud/hack/README.org
rename to uncloud/hack/README.org
diff --git a/ucloud/network/__init__.py b/uncloud/hack/__init__.py
similarity index 100%
rename from ucloud/network/__init__.py
rename to uncloud/hack/__init__.py
diff --git a/ucloud/hack/conf.d/ucloud-host b/uncloud/hack/conf.d/ucloud-host
similarity index 100%
rename from ucloud/hack/conf.d/ucloud-host
rename to uncloud/hack/conf.d/ucloud-host
diff --git a/uncloud/hack/config.py b/uncloud/hack/config.py
new file mode 100644
index 0000000..7e2655d
--- /dev/null
+++ b/uncloud/hack/config.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+class Config(object):
+ def __init__(self, arguments):
+ """ read arguments dicts as a base """
+
+ self.arguments = arguments
+
+ # Split them so *etcd_args can be used and we can
+ # iterate over etcd_hosts
+ self.etcd_hosts = [ arguments['etcd_host'] ]
+ self.etcd_args = {
+ 'ca_cert': arguments['etcd_ca_cert'],
+ 'cert_cert': arguments['etcd_cert_cert'],
+ 'cert_key': arguments['etcd_cert_key'],
+# 'user': None,
+# 'password': None
+ }
+ self.etcd_prefix = '/nicohack/'
diff --git a/uncloud/hack/db.py b/uncloud/hack/db.py
new file mode 100644
index 0000000..cb5e490
--- /dev/null
+++ b/uncloud/hack/db.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+import etcd3
+import json
+import logging
+
+from functools import wraps
+from uncloud import UncloudException
+
+log = logging.getLogger(__name__)
+
+
+def readable_errors(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except etcd3.exceptions.ConnectionFailedError as e:
+ raise UncloudException('Cannot connect to etcd: is etcd running and reachable? {}'.format(e))
+ except etcd3.exceptions.ConnectionTimeoutError as e:
+ raise UncloudException('etcd connection timeout. {}'.format(e))
+
+ return wrapper
+
+
+class DB(object):
+ def __init__(self, config, prefix="/"):
+ self.config = config
+
+ # Root for everything
+ self.base_prefix= '/nicohack'
+
+ # Can be set from outside
+ self.prefix = prefix
+
+ self.connect()
+
+ @readable_errors
+ def connect(self):
+ self._db_clients = []
+ for endpoint in self.config.etcd_hosts:
+ client = etcd3.client(host=endpoint, **self.config.etcd_args)
+ self._db_clients.append(client)
+
+ def realkey(self, key):
+ return "{}{}/{}".format(self.base_prefix,
+ self.prefix,
+ key)
+
+ @readable_errors
+ def get(self, key, as_json=False, **kwargs):
+ value, _ = self._db_clients[0].get(self.realkey(key), **kwargs)
+
+ if as_json:
+ value = json.loads(value)
+
+ return value
+
+
+ @readable_errors
+ def set(self, key, value, as_json=False, **kwargs):
+ if as_json:
+ value = json.dumps(value)
+
+ # FIXME: iterate over clients in case of failure ?
+ return self._db_clients[0].put(self.realkey(key), value, **kwargs)
+
+ @readable_errors
+ def increment(self, key, **kwargs):
+ print(self.realkey(key))
+
+
+ print("prelock")
+ lock = self._db_clients[0].lock('/nicohack/foo')
+ print("prelockacq")
+ lock.acquire()
+ print("prelockrelease")
+ lock.release()
+
+ with self._db_clients[0].lock("/nicohack/mac/last_used_index") as lock:
+ print("in lock")
+ pass
+
+# with self._db_clients[0].lock(self.realkey(key)) as lock:# value = int(self.get(self.realkey(key), **kwargs))
+# self.set(self.realkey(key), str(value + 1), **kwargs)
+
+
+if __name__ == '__main__':
+ endpoints = [ "https://etcd1.ungleich.ch:2379",
+ "https://etcd2.ungleich.ch:2379",
+ "https://etcd3.ungleich.ch:2379" ]
+
+ db = DB(url=endpoints)  # FIXME: DB.__init__ takes (config, prefix=...), not url= — this call crashes
diff --git a/uncloud/hack/hackcloud/.gitignore b/uncloud/hack/hackcloud/.gitignore
new file mode 100644
index 0000000..0ad647b
--- /dev/null
+++ b/uncloud/hack/hackcloud/.gitignore
@@ -0,0 +1,3 @@
+*.iso
+radvdpid
+foo
diff --git a/uncloud/hack/hackcloud/__init__.py b/uncloud/hack/hackcloud/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/uncloud/hack/hackcloud/__init__.py
@@ -0,0 +1 @@
+
diff --git a/uncloud/hack/hackcloud/etcd-client.sh b/uncloud/hack/hackcloud/etcd-client.sh
new file mode 100644
index 0000000..ab102a5
--- /dev/null
+++ b/uncloud/hack/hackcloud/etcd-client.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+etcdctl --cert=$HOME/vcs/ungleich-dot-cdist/files/etcd/nico.pem \
+ --key=$HOME/vcs/ungleich-dot-cdist/files/etcd/nico-key.pem \
+ --cacert=$HOME/vcs/ungleich-dot-cdist/files/etcd/ca.pem \
+ --endpoints https://etcd1.ungleich.ch:2379,https://etcd2.ungleich.ch:2379,https://etcd3.ungleich.ch:2379 "$@"
diff --git a/uncloud/hack/hackcloud/ifdown.sh b/uncloud/hack/hackcloud/ifdown.sh
new file mode 100755
index 0000000..5753099
--- /dev/null
+++ b/uncloud/hack/hackcloud/ifdown.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+echo $@
diff --git a/uncloud/hack/hackcloud/ifup.sh b/uncloud/hack/hackcloud/ifup.sh
new file mode 100755
index 0000000..e0a3ca0
--- /dev/null
+++ b/uncloud/hack/hackcloud/ifup.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+dev=$1; shift
+
+# bridge is setup from outside
+ip link set dev "$dev" master ${bridge}
+ip link set dev "$dev" up
diff --git a/uncloud/hack/hackcloud/mac-last b/uncloud/hack/hackcloud/mac-last
new file mode 100644
index 0000000..8c5f254
--- /dev/null
+++ b/uncloud/hack/hackcloud/mac-last
@@ -0,0 +1 @@
+000000000252
diff --git a/uncloud/hack/hackcloud/mac-prefix b/uncloud/hack/hackcloud/mac-prefix
new file mode 100644
index 0000000..5084a2f
--- /dev/null
+++ b/uncloud/hack/hackcloud/mac-prefix
@@ -0,0 +1 @@
+02:00
diff --git a/uncloud/hack/hackcloud/net.sh b/uncloud/hack/hackcloud/net.sh
new file mode 100755
index 0000000..4e2bfa1
--- /dev/null
+++ b/uncloud/hack/hackcloud/net.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+set -x
+
+netid=100
+dev=wlp2s0
+dev=wlp0s20f3
+#dev=wlan0
+
+ip=2a0a:e5c1:111:888::48/64
+vxlandev=vxlan${netid}
+bridgedev=br${netid}
+
+ip -6 link add ${vxlandev} type vxlan \
+ id ${netid} \
+ dstport 4789 \
+ group ff05::${netid} \
+ dev ${dev} \
+ ttl 5
+
+ip link set ${vxlandev} up
+
+
+ip link add ${bridgedev} type bridge
+ip link set ${bridgedev} up
+
+ip link set ${vxlandev} master ${bridgedev} up
+
+ip addr add ${ip} dev ${bridgedev}
diff --git a/uncloud/hack/hackcloud/nftrules b/uncloud/hack/hackcloud/nftrules
new file mode 100644
index 0000000..636c63d
--- /dev/null
+++ b/uncloud/hack/hackcloud/nftrules
@@ -0,0 +1,31 @@
+flush ruleset
+
+table bridge filter {
+ chain prerouting {
+ type filter hook prerouting priority 0;
+ policy accept;
+
+ ibrname br100 jump br100
+ }
+
+ chain br100 {
+ # Allow all incoming traffic from outside
+ iifname vxlan100 accept
+
+ # Default blocks: router advertisements, dhcpv6, dhcpv4
+ icmpv6 type nd-router-advert drop
+ ip6 version 6 udp sport 547 drop
+ ip version 4 udp sport 67 drop
+
+ jump br100_vmlist
+ drop
+ }
+ chain br100_vmlist {
+ # VM1
+ iifname tap1 ether saddr 02:00:f0:a9:c4:4e ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44e accept
+
+ # VM2
+ iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44f accept
+ iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:1234::/64 accept
+ }
+}
diff --git a/uncloud/hack/hackcloud/radvd.conf b/uncloud/hack/hackcloud/radvd.conf
new file mode 100644
index 0000000..3d8ce4d
--- /dev/null
+++ b/uncloud/hack/hackcloud/radvd.conf
@@ -0,0 +1,13 @@
+interface br100
+{
+ AdvSendAdvert on;
+ MinRtrAdvInterval 3;
+ MaxRtrAdvInterval 5;
+ AdvDefaultLifetime 3600;
+
+ prefix 2a0a:e5c1:111:888::/64 {
+ };
+
+ RDNSS 2a0a:e5c0::3 2a0a:e5c0::4 { AdvRDNSSLifetime 6000; };
+ DNSSL place7.ungleich.ch { AdvDNSSLLifetime 6000; } ;
+};
diff --git a/uncloud/hack/hackcloud/radvd.sh b/uncloud/hack/hackcloud/radvd.sh
new file mode 100644
index 0000000..9d0e7d1
--- /dev/null
+++ b/uncloud/hack/hackcloud/radvd.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+radvd -C ./radvd.conf -n -p ./radvdpid
diff --git a/uncloud/hack/hackcloud/vm.sh b/uncloud/hack/hackcloud/vm.sh
new file mode 100755
index 0000000..dd9be84
--- /dev/null
+++ b/uncloud/hack/hackcloud/vm.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+# if [ $# -ne 1 ]; then
+# echo "$0: owner"
+# exit 1
+# fi
+
+qemu=/usr/bin/qemu-system-x86_64
+
+accel=kvm
+#accel=tcg
+
+memory=1024
+cores=2
+uuid=$(uuidgen)
+mac=$(./mac-gen.py)
+owner=nico
+
+export bridge=br100
+
+set -x
+$qemu -name "uncloud-${uuid}" \
+ -machine pc,accel=${accel} \
+ -m ${memory} \
+ -smp ${cores} \
+ -uuid ${uuid} \
+ -drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
+ -netdev tap,id=netmain,script=./ifup.sh,downscript=./ifdown.sh \
+ -device virtio-net-pci,netdev=netmain,id=net0,mac=${mac}
diff --git a/uncloud/hack/mac.py b/uncloud/hack/mac.py
new file mode 100755
index 0000000..66286dd
--- /dev/null
+++ b/uncloud/hack/mac.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2012 Nico Schottelius (nico-cinv at schottelius.org)
+#
+# This file is part of cinv.
+#
+# cinv is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# cinv is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with cinv. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+import argparse
+import logging
+import os.path
+import os
+import re
+import json
+
+from uncloud import UncloudException
+from uncloud.hack.db import DB
+
+log = logging.getLogger(__name__)
+
+
+class MAC(object):
+ def __init__(self, config):
+ self.config = config
+ self.no_db = self.config.arguments['no_db']
+ if not self.no_db:
+ self.db = DB(config, prefix="/mac")
+
+ self.prefix = 0x420000000000
+ self._number = 0 # Not set by default
+
+ @staticmethod
+ def validate_mac(mac):
+ if not re.match(r'([0-9A-F]{2}[-:]){5}[0-9A-F]{2}$', mac, re.I):
+ raise UncloudException("Not a valid mac address: %s" % mac)
+
+ def last_used_index(self):
+ if not self.no_db:
+ value = self.db.get("last_used_index")
+ if not value:
+ self.db.set("last_used_index", "0")
+ value = self.db.get("last_used_index")
+
+ else:
+ value = "0"
+
+ return int(value)
+
+ def last_used_mac(self):
+ return self.int_to_mac(self.prefix + self.last_used_index())
+
+ def to_colon_format(self):
+ b = self._number.to_bytes(6, byteorder="big")
+ return ':'.join(format(s, '02x') for s in b)
+
+ def to_str_format(self):
+ b = self._number.to_bytes(6, byteorder="big")
+ return ''.join(format(s, '02x') for s in b)
+
+ def create(self):
+ last_number = self.last_used_index()
+
+ if last_number == int('0xffffffff', 16):
+ raise UncloudException("Exhausted all possible mac addresses - try to free some")
+
+ next_number = last_number + 1
+ self._number = self.prefix + next_number
+
+ #next_number_string = "{:012x}".format(next_number)
+ #next_mac = self.int_to_mac(next_mac_number)
+ # db_entry = {}
+ # db_entry['vm_uuid'] = vmuuid
+ # db_entry['index'] = next_number
+ # db_entry['mac_address'] = next_mac
+
+ # should be one transaction
+ # self.db.increment("last_used_index")
+ # self.db.set("used/{}".format(next_mac),
+ # db_entry, as_json=True)
+
+ def __int__(self):
+ return self._number
+
+ def __repr__(self):
+ return self.to_str_format()
+
+ def __str__(self):
+ return self.to_colon_format()
diff --git a/uncloud/hack/main.py b/uncloud/hack/main.py
new file mode 100644
index 0000000..9607ec2
--- /dev/null
+++ b/uncloud/hack/main.py
@@ -0,0 +1,92 @@
+import argparse
+import logging
+
+from uncloud.hack.vm import VM
+from uncloud.hack.config import Config
+from uncloud.hack.mac import MAC
+from uncloud.hack.net import VXLANBridge, DNSRA
+
+from uncloud import UncloudException
+
+arg_parser = argparse.ArgumentParser('hack', add_help=False)
+ #description="Commands that are unfinished - use at own risk")
+arg_parser.add_argument('--last-used-mac', action='store_true')
+arg_parser.add_argument('--get-new-mac', action='store_true')
+
+arg_parser.add_argument('--init-network', help="Initialise networking", action='store_true')
+arg_parser.add_argument('--create-vxlan', help="Initialise networking", action='store_true')
+arg_parser.add_argument('--network', help="/64 IPv6 network")
+arg_parser.add_argument('--vxlan-uplink-device', help="The VXLAN underlay device, i.e. eth0")
+arg_parser.add_argument('--vni', help="VXLAN ID (decimal)", type=int)
+arg_parser.add_argument('--run-dns-ra', action='store_true',
+ help="Provide router advertisements and DNS resolution via dnsmasq")
+arg_parser.add_argument('--use-sudo', help="Use sudo for command requiring root!", action='store_true')
+
+arg_parser.add_argument('--create-vm', action='store_true')
+arg_parser.add_argument('--destroy-vm', action='store_true')
+arg_parser.add_argument('--get-vm-status', action='store_true')
+arg_parser.add_argument('--get-vm-vnc', action='store_true')
+arg_parser.add_argument('--list-vms', action='store_true')
+arg_parser.add_argument('--memory', help="Size of memory (GB)", type=int)
+arg_parser.add_argument('--cores', help="Amount of CPU cores", type=int)
+arg_parser.add_argument('--image', help="Path (under hackprefix) to OS image")
+arg_parser.add_argument('--uuid', help="VM UUID")
+
+arg_parser.add_argument('--no-db', help="Disable connection to etcd. For local testing only!", action='store_true')
+arg_parser.add_argument('--hackprefix', help="hackprefix, if you need it you know it (it's where the iso is located and ifup/down.sh)")
+
+
+log = logging.getLogger(__name__)
+
+
+def main(arguments):
+ config = Config(arguments)
+
+ if arguments['create_vm']:
+ vm = VM(config)
+ vm.create()
+
+ if arguments['destroy_vm']:
+ vm = VM(config)
+ vm.stop()
+
+ if arguments['get_vm_status']:
+ vm = VM(config)
+ vm.status()
+
+ if arguments['get_vm_vnc']:
+ vm = VM(config)
+ vm.vnc_addr()
+
+ if arguments['list_vms']:
+ vm = VM(config)
+ vm.list()
+
+ if arguments['last_used_mac']:
+ m = MAC(config)
+ print(m.last_used_mac())
+
+ if arguments['get_new_mac']:
+ m = MAC(config); m.create(); print(m)  # MAC has no get_next(); create() allocates, __str__ formats
+
+ #if arguments['init_network']:
+ if arguments['create_vxlan']:
+ if not arguments['network'] or not arguments['vni'] or not arguments['vxlan_uplink_device']:
+ raise UncloudException("Initialising the network requires an IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
+ vb = VXLANBridge(vni=arguments['vni'],
+ route=arguments['network'],
+ uplinkdev=arguments['vxlan_uplink_device'],
+ use_sudo=arguments['use_sudo'])
+ vb._setup_vxlan()
+ vb._setup_bridge()
+ vb._add_vxlan_to_bridge()
+ vb._route_network()
+
+ if arguments['run_dns_ra']:
+ if not arguments['network'] or not arguments['vni']:
+ raise UncloudException("Providing DNS/RAs requires a /64 IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
+
+ dnsra = DNSRA(route=arguments['network'],
+ vni=arguments['vni'],
+ use_sudo=arguments['use_sudo'])
+ dnsra._setup_dnsmasq()
diff --git a/uncloud/hack/net.py b/uncloud/hack/net.py
new file mode 100644
index 0000000..f28ab7f
--- /dev/null
+++ b/uncloud/hack/net.py
@@ -0,0 +1,116 @@
+import subprocess
+import ipaddress
+import logging
+
+
+from uncloud import UncloudException
+
+log = logging.getLogger(__name__)
+
+
+class VXLANBridge(object):
+ cmd_create_vxlan = "{sudo}ip -6 link add {vxlandev} type vxlan id {vni_dec} dstport 4789 group {multicast_address} dev {uplinkdev} ttl 5"
+ cmd_up_dev = "{sudo}ip link set {dev} up"
+ cmd_create_bridge="{sudo}ip link add {bridgedev} type bridge"
+ cmd_add_to_bridge="{sudo}ip link set {vxlandev} master {bridgedev} up"
+ cmd_add_addr="{sudo}ip addr add {ip} dev {bridgedev}"
+ cmd_add_route_dev="{sudo}ip route add {route} dev {bridgedev}"
+
+ # VXLAN ids are at maximum 24 bit - use a /104
+ multicast_network = ipaddress.IPv6Network("ff05::/104")
+ max_vni = (2**24)-1
+
+ def __init__(self,
+ vni,
+ uplinkdev,
+ route=None,
+ use_sudo=False):
+ self.config = {}
+
+ if vni > self.max_vni:
+ raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))
+
+ if use_sudo:
+ self.config['sudo'] = 'sudo '
+ else:
+ self.config['sudo'] = ''
+
+ self.config['vni_dec'] = vni
+ self.config['vni_hex'] = "{:x}".format(vni)
+ self.config['multicast_address'] = self.multicast_network[vni]
+
+ self.config['route_network'] = ipaddress.IPv6Network(route)
+ self.config['route'] = route
+
+ self.config['uplinkdev'] = uplinkdev
+ self.config['vxlandev'] = "vx{}".format(self.config['vni_hex'])
+ self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])
+
+
+ def setup_networking(self):
+ pass
+
+ def _setup_vxlan(self):
+ self._execute_cmd(self.cmd_create_vxlan)
+ self._execute_cmd(self.cmd_up_dev, dev=self.config['vxlandev'])
+
+ def _setup_bridge(self):
+ self._execute_cmd(self.cmd_create_bridge)
+ self._execute_cmd(self.cmd_up_dev, dev=self.config['bridgedev'])
+
+ def _route_network(self):
+ self._execute_cmd(self.cmd_add_route_dev)
+
+ def _add_vxlan_to_bridge(self):
+ self._execute_cmd(self.cmd_add_to_bridge)
+
+ def _execute_cmd(self, cmd_string, **kwargs):
+ cmd = cmd_string.format(**self.config, **kwargs)
+ log.info("Executing: {}".format(cmd))
+ subprocess.run(cmd.split())
+
+class ManagementBridge(VXLANBridge):
+ pass
+
+
+class DNSRA(object):
+ # VXLAN ids are at maximum 24 bit
+ max_vni = (2**24)-1
+
+
+ # Command to start dnsmasq
+ cmd_start_dnsmasq="{sudo}dnsmasq --interface={bridgedev} --bind-interfaces --dhcp-range={route},ra-only,infinite --enable-ra"
+
+ def __init__(self,
+ vni,
+ route=None,
+ use_sudo=False):
+ self.config = {}
+
+ if vni > self.max_vni:
+ raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))
+
+ if use_sudo:
+ self.config['sudo'] = 'sudo '
+ else:
+ self.config['sudo'] = ''
+
+ #TODO: remove if not needed
+ #self.config['vni_dec'] = vni
+ self.config['vni_hex'] = "{:x}".format(vni)
+
+ # dnsmasq only wants the network without the prefix, therefore, cut it off
+ self.config['route'] = ipaddress.IPv6Network(route).network_address
+ self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])
+
+ def _setup_dnsmasq(self):
+ self._execute_cmd(self.cmd_start_dnsmasq)
+
+ def _execute_cmd(self, cmd_string, **kwargs):
+ cmd = cmd_string.format(**self.config, **kwargs)
+ log.info("Executing: {}".format(cmd))
+ print("Executing: {}".format(cmd))
+ subprocess.run(cmd.split())
+
+class Firewall(object):
+ pass
diff --git a/ucloud/hack/nftables.conf b/uncloud/hack/nftables.conf
similarity index 100%
rename from ucloud/hack/nftables.conf
rename to uncloud/hack/nftables.conf
diff --git a/ucloud/hack/rc-scripts/ucloud-api b/uncloud/hack/rc-scripts/ucloud-api
similarity index 100%
rename from ucloud/hack/rc-scripts/ucloud-api
rename to uncloud/hack/rc-scripts/ucloud-api
diff --git a/ucloud/hack/rc-scripts/ucloud-host b/uncloud/hack/rc-scripts/ucloud-host
similarity index 100%
rename from ucloud/hack/rc-scripts/ucloud-host
rename to uncloud/hack/rc-scripts/ucloud-host
diff --git a/ucloud/hack/rc-scripts/ucloud-metadata b/uncloud/hack/rc-scripts/ucloud-metadata
similarity index 100%
rename from ucloud/hack/rc-scripts/ucloud-metadata
rename to uncloud/hack/rc-scripts/ucloud-metadata
diff --git a/ucloud/hack/rc-scripts/ucloud-scheduler b/uncloud/hack/rc-scripts/ucloud-scheduler
similarity index 100%
rename from ucloud/hack/rc-scripts/ucloud-scheduler
rename to uncloud/hack/rc-scripts/ucloud-scheduler
diff --git a/uncloud/hack/uncloud-hack-init-host b/uncloud/hack/uncloud-hack-init-host
new file mode 100644
index 0000000..787ff80
--- /dev/null
+++ b/uncloud/hack/uncloud-hack-init-host
@@ -0,0 +1,26 @@
+id=100
+rawdev=eth0
+
+# create vxlan
+ip -6 link add vxlan${id} type vxlan \
+ id ${id} \
+ dstport 4789 \
+ group ff05::${id} \
+ dev ${rawdev} \
+ ttl 5
+
+ip link set vxlan${id} up
+
+# create bridge
+ip link add br${id} type bridge
+ip link set br${id} up
+
+# Add vxlan into bridge
+ip link set vxlan${id} master br${id}
+
+
+# useradd -m uncloud
+# [18:05] tablett.place10:~# id uncloud
+# uid=1000(uncloud) gid=1000(uncloud) groups=1000(uncloud),34(kvm),36(qemu)
+# apk add qemu-system-x86_64
+# also needs group netdev
diff --git a/uncloud/hack/uncloud-run-vm b/uncloud/hack/uncloud-run-vm
new file mode 100644
index 0000000..33e5860
--- /dev/null
+++ b/uncloud/hack/uncloud-run-vm
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+if [ $# -ne 1 ]; then
+ echo $0 vmid
+ exit 1
+fi
+
+id=$1; shift
+
+memory=512
+macaddress=02:00:b9:cb:70:${id}
+netname=net${id}-1
+
+qemu-system-x86_64 \
+ -name uncloud-${id} \
+ -accel kvm \
+ -m ${memory} \
+ -smp 2,sockets=2,cores=1,threads=1 \
+ -device virtio-net-pci,netdev=net0,mac=$macaddress \
+ -netdev tap,id=net0,ifname=${netname},script=no,downscript=no \
+ -vnc [::]:0
+
+# To be changed:
+# -vnc to unix path
+# or -spice
diff --git a/uncloud/hack/vm.py b/uncloud/hack/vm.py
new file mode 100755
index 0000000..ac403d8
--- /dev/null
+++ b/uncloud/hack/vm.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+
+# This module is directly called from the hack module, and can be used as follow:
+#
+# Create a new VM with default CPU/Memory. The path of the image file is relative to $hackprefix.
+# `uncloud hack --hackprefix /tmp/hackcloud --create-vm --image mysuperimage.qcow2`
+#
+# List running VMs (returns a list of UUIDs).
+# `uncloud hack --hackprefix /tmp/hackcloud --list-vms`
+#
+# Get VM status:
+# `uncloud hack --hackprefix /tmp/hackcloud --get-vm-status --uuid my-vm-uuid`
+#
+# Stop a VM:
+# `uncloud hack --hackprefix /tmp/hackcloud --destroy-vm --uuid my-vm-uuid`
+# ``
+
+import subprocess
+import uuid
+import os
+import logging
+
+from uncloud.hack.db import DB
+from uncloud.hack.mac import MAC
+from uncloud.vmm import VMM
+
+log = logging.getLogger(__name__)
+log.setLevel(logging.DEBUG)
+
+class VM(object):
+ def __init__(self, config):
+ self.config = config
+
+ #TODO: Enable etcd lookup
+ self.no_db = self.config.arguments['no_db']
+ if not self.no_db:
+ self.db = DB(self.config, prefix="/vm")
+
+ # General CLI arguments.
+ self.hackprefix = self.config.arguments['hackprefix']
+ self.uuid = self.config.arguments['uuid']
+ self.memory = self.config.arguments['memory'] or '1024M'
+ self.cores = self.config.arguments['cores'] or 1
+ if self.config.arguments['image']:
+ self.image = os.path.join(self.hackprefix, self.config.arguments['image'])
+ else:
+ self.image = None
+
+ # External components.
+ self.vmm = VMM(vmm_backend=self.hackprefix)
+ self.mac = MAC(self.config)
+
+ # Harcoded & generated values.
+ self.owner = 'uncloud'
+ self.image_format='qcow2'
+ self.accel = 'kvm'
+ self.threads = 1
+ self.ifup = os.path.join(self.hackprefix, "ifup.sh")
+ self.ifdown = os.path.join(self.hackprefix, "ifdown.sh")
+ self.ifname = "uc{}".format(self.mac.to_str_format())
+
+ def get_qemu_args(self):
+ command = (
+ "-name {owner}-{name}"
+ " -machine pc,accel={accel}"
+ " -drive file={image},format={image_format},if=virtio"
+ " -device virtio-rng-pci"
+ " -m {memory} -smp cores={cores},threads={threads}"
+ " -netdev tap,id=netmain,script={ifup},downscript={ifdown},ifname={ifname}"
+ " -device virtio-net-pci,netdev=netmain,id=net0,mac={mac}"
+ ).format(
+ owner=self.owner, name=self.uuid,
+ accel=self.accel,
+ image=self.image, image_format=self.image_format,
+ memory=self.memory, cores=self.cores, threads=self.threads,
+ ifup=self.ifup, ifdown=self.ifdown, ifname=self.ifname,
+ mac=self.mac
+ )
+
+ return command.split(" ")
+
+ def create(self):
+ # New VM: new UUID, new MAC.
+ self.uuid = str(uuid.uuid4())
+ self.mac.create()
+
+ qemu_args = self.get_qemu_args()
+ log.debug("QEMU args passed to VMM: {}".format(qemu_args))
+ self.vmm.start(
+ uuid=self.uuid,
+ migration=False,
+ *qemu_args
+ )
+
+ def stop(self):
+ if not self.uuid:
+ print("Please specific an UUID with the --uuid flag.")
+ exit(1)
+
+ self.vmm.stop(self.uuid)
+
+ def status(self):
+ if not self.uuid:
+ print("Please specific an UUID with the --uuid flag.")
+ exit(1)
+
+ print(self.vmm.get_status(self.uuid))
+
+ def vnc_addr(self):
+ if not self.uuid:
+ print("Please specific an UUID with the --uuid flag.")
+ exit(1)
+
+ print(self.vmm.get_vnc(self.uuid))
+
+ def list(self):
+ print(self.vmm.discover())
+
diff --git a/ucloud/host/__init__.py b/uncloud/host/__init__.py
similarity index 100%
rename from ucloud/host/__init__.py
rename to uncloud/host/__init__.py
diff --git a/uncloud/host/main.py b/uncloud/host/main.py
new file mode 100755
index 0000000..f680991
--- /dev/null
+++ b/uncloud/host/main.py
@@ -0,0 +1,123 @@
+import argparse
+import multiprocessing as mp
+import time
+
+from uuid import uuid4
+
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.common.shared import shared
+from uncloud.common.vm import VMStatus
+from uncloud.vmm import VMM
+from os.path import join as join_path
+
+from . import virtualmachine, logger
+
+arg_parser = argparse.ArgumentParser('host', add_help=False)
+arg_parser.add_argument('--hostname', required=True)
+
+
+def update_heartbeat(hostname):
+ """Update Last HeartBeat Time for :param hostname: in etcd"""
+ host_pool = shared.host_pool
+ this_host = next(
+ filter(lambda h: h.hostname == hostname, host_pool.hosts), None
+ )
+ while True:
+ this_host.update_heartbeat()
+ host_pool.put(this_host)
+ time.sleep(10)
+
+
+def maintenance(host):
+ vmm = VMM()
+ running_vms = vmm.discover()
+ for vm_uuid in running_vms:
+ if vmm.is_running(vm_uuid) and vmm.get_status(vm_uuid) == 'running':
+ logger.debug('VM {} is running on {}'.format(vm_uuid, host))
+ vm = shared.vm_pool.get(
+ join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
+ )
+ vm.status = VMStatus.running
+ vm.vnc_socket = vmm.get_vnc(vm_uuid)
+ vm.hostname = host
+ shared.vm_pool.put(vm)
+
+
+def main(arguments):
+ hostname = arguments['hostname']
+ host_pool = shared.host_pool
+ host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
+
+ # Does not yet exist, create it
+ if not host:
+ host_key = join_path(
+ shared.settings['etcd']['host_prefix'], uuid4().hex
+ )
+ host_entry = {
+ 'specs': '',
+ 'hostname': hostname,
+ 'status': 'DEAD',
+ 'last_heartbeat': '',
+ }
+ shared.etcd_client.put(
+ host_key, host_entry, value_in_json=True
+ )
+
+ # update, get ourselves now for sure
+ host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
+
+ try:
+ heartbeat_updating_process = mp.Process(target=update_heartbeat, args=(hostname,))
+ heartbeat_updating_process.start()
+ except Exception as e:
+ raise Exception('uncloud-host heartbeat updating mechanism is not working') from e
+
+ # The below while True is neccessary for gracefully handling leadership transfer and temporary
+ # unavailability in etcd. Why does it work? It works because the get_prefix,watch_prefix return
+ # iter([]) that is iterator of empty list on exception (that occur due to above mentioned reasons)
+ # which ends the loop immediately. So, having it inside infinite loop we try again and again to
+ # get prefix until either success or deamon death comes.
+ while True:
+ for events_iterator in [
+ shared.etcd_client.get_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
+ raise_exception=False),
+ shared.etcd_client.watch_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
+ raise_exception=False)
+ ]:
+ for request_event in events_iterator:
+ request_event = RequestEntry(request_event)
+
+ maintenance(host.key)
+
+ if request_event.hostname == host.key:
+ logger.debug('VM Request: %s on Host %s', request_event, host.hostname)
+
+ shared.request_pool.client.client.delete(request_event.key)
+ vm_entry = shared.etcd_client.get(
+ join_path(shared.settings['etcd']['vm_prefix'], request_event.uuid)
+ )
+
+ logger.debug('VM hostname: {}'.format(vm_entry.value))
+
+ vm = virtualmachine.VM(vm_entry)
+ if request_event.type == RequestType.StartVM:
+ vm.start()
+
+ elif request_event.type == RequestType.StopVM:
+ vm.stop()
+
+ elif request_event.type == RequestType.DeleteVM:
+ vm.delete()
+
+ elif request_event.type == RequestType.InitVMMigration:
+ vm.start(destination_host_key=host.key)
+
+ elif request_event.type == RequestType.TransferVM:
+ destination_host = host_pool.get(request_event.destination_host_key)
+ if destination_host:
+ vm.migrate(
+ destination_host=destination_host.hostname,
+ destination_sock_path=request_event.destination_sock_path,
+ )
+ else:
+ logger.error('Host %s not found!', request_event.destination_host_key)
diff --git a/uncloud/host/virtualmachine.py b/uncloud/host/virtualmachine.py
new file mode 100755
index 0000000..a592efc
--- /dev/null
+++ b/uncloud/host/virtualmachine.py
@@ -0,0 +1,303 @@
+# QEMU Manual
+# https://qemu.weilnetz.de/doc/qemu-doc.html
+
+# For QEMU Monitor Protocol Commands Information, See
+# https://qemu.weilnetz.de/doc/qemu-doc.html#pcsys_005fmonitor
+
+import os
+import subprocess as sp
+import ipaddress
+
+from string import Template
+from os.path import join as join_path
+
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.common.vm import VMStatus, declare_stopped
+from uncloud.common.network import create_dev, delete_network_interface
+from uncloud.common.schemas import VMSchema, NetworkSchema
+from uncloud.host import logger
+from uncloud.common.shared import shared
+from uncloud.vmm import VMM
+
+from marshmallow import ValidationError
+
+
class VM:
    """Lifecycle manager for a single uncloud VM backed by QEMU.

    Wraps one etcd VM entry: validates it against VMSchema and exposes
    start/stop/migrate/delete operations that drive the VMM and write
    the (possibly modified) entry back to etcd via sync().
    """

    def __init__(self, vm_entry):
        """Parse *vm_entry* (an etcd entry). On schema validation
        failure self.vm is set to None and the error is logged."""
        self.schema = VMSchema()
        self.vmm = VMM()
        self.key = vm_entry.key
        try:
            self.vm = self.schema.loads(vm_entry.value)
        except ValidationError:
            # NOTE(review): the extra positional argument has no matching
            # %-placeholder in the message, so logging will report a
            # formatting error — confirm it was meant to be interpolated.
            logger.exception(
                "Couldn't validate VM Entry", vm_entry.value
            )
            self.vm = None
        else:
            # etcd key layout is .../<vm_prefix>/<uuid>.
            self.uuid = vm_entry.key.split("/")[-1]
            self.host_key = self.vm["hostname"]
            logger.debug('VM Hostname {}'.format(self.host_key))

    def get_qemu_args(self):
        """Build the base QEMU CLI (drive, RNG device, memory, SMP
        topology, process name) as a flat token list."""
        command = (
            "-drive file={file},format=raw,if=virtio"
            " -device virtio-rng-pci"
            " -m {memory} -smp cores={cores},threads={threads}"
            " -name {owner}_{name}"
        ).format(
            owner=self.vm["owner"],
            name=self.vm["name"],
            memory=int(self.vm["specs"]["ram"].to_MB()),
            cores=self.vm["specs"]["cpu"],
            threads=1,
            file=shared.storage_handler.qemu_path_string(self.uuid),
        )

        return command.split(" ")

    def start(self, destination_host_key=None):
        """Create the disk image (if needed), set up networking and
        launch the VM.

        If *destination_host_key* is given the VM is started in
        incoming-migration mode and, once QEMU reports "inmigrate", a
        TransferVM request is queued for the source host. The entry is
        always synced back to etcd at the end.
        """
        migration = False
        if destination_host_key:
            migration = True

        self.create()
        try:
            network_args = self.create_network_dev()
        except Exception as err:
            declare_stopped(self.vm)
            self.vm["log"].append("Cannot Setup Network Properly")
            logger.error("Cannot Setup Network Properly for vm %s", self.uuid, exc_info=err)
        else:
            self.vmm.start(
                uuid=self.uuid,
                migration=migration,
                *self.get_qemu_args(),
                *network_args
            )

        status = self.vmm.get_status(self.uuid)
        logger.debug('VM {} status is {}'.format(self.uuid, status))
        if status == "running":
            self.vm["status"] = VMStatus.running
            self.vm["vnc_socket"] = self.vmm.get_vnc(self.uuid)
        elif status == "inmigrate":
            # QEMU is waiting for an incoming migration stream: ask the
            # source host to transfer the VM to our migration socket.
            r = RequestEntry.from_scratch(
                type=RequestType.TransferVM,  # Transfer VM
                hostname=self.host_key,  # Which host should act on this request (the source host)
                uuid=self.uuid,  # uuid of VM
                destination_sock_path=join_path(
                    self.vmm.socket_dir, self.uuid
                ),
                destination_host_key=destination_host_key,  # Where the source host transfers the VM
                request_prefix=shared.settings["etcd"]["request_prefix"],
            )
            shared.request_pool.put(r)
        else:
            # Any other state is treated as a failed start.
            self.stop()
            declare_stopped(self.vm)
        logger.debug('VM {} has hostname {}'.format(self.uuid, self.vm['hostname']))
        self.sync()

    def stop(self):
        """Stop the QEMU process, tear down network devices and mark
        the VM stopped in etcd."""
        self.vmm.stop(self.uuid)
        self.delete_network_dev()
        declare_stopped(self.vm)
        self.sync()

    def migrate(self, destination_host, destination_sock_path):
        """Hand the running VM over to *destination_host* through the
        QEMU migration socket at *destination_sock_path*."""
        self.vmm.transfer(
            src_uuid=self.uuid,
            destination_sock_path=destination_sock_path,
            host=destination_host,
        )

    def create_network_dev(self):
        """Create tap/bridge/vxlan devices for each of the VM's
        networks and return the matching QEMU -netdev/-device tokens
        (the empty string when no network could be configured)."""
        command = ""
        for network_mac_and_tap in self.vm["network"]:
            network_name, mac, tap = network_mac_and_tap

            _key = os.path.join(
                shared.settings["etcd"]["network_prefix"],
                self.vm["owner"],
                network_name,
            )
            network = shared.etcd_client.get(_key, value_in_json=True)
            network_schema = NetworkSchema()
            try:
                network = network_schema.load(network.value)
            except ValidationError:
                # Skip networks whose etcd entry fails validation.
                continue

            if network["type"] == "vxlan":
                tap = create_vxlan_br_tap(
                    _id=network["id"],
                    _dev=shared.settings["network"]["vxlan_phy_dev"],
                    tap_id=tap,
                    ip=network["ipv6"],
                )

                all_networks = shared.etcd_client.get_prefix(
                    shared.settings["etcd"]["network_prefix"],
                    value_in_json=True,
                )

                # Router advertisements only make sense for globally
                # routable prefixes.
                if ipaddress.ip_network(network["ipv6"]).is_global:
                    update_radvd_conf(all_networks)

            # NOTE(review): consecutive iterations append without a
            # separating space, so "...mac=X" would merge with the next
            # "-netdev" token — verify behavior for multi-NIC VMs.
            command += (
                "-netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
                " -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}".format(
                    tap=tap, net_id=network["id"], mac=mac
                )
            )

        if command:
            command = command.split(' ')

        return command

    def delete_network_dev(self):
        """Best-effort teardown of the VM's tap devices; also removes
        the bridge/vxlan pair when no other running VM of the same
        owner still uses the network. Exceptions are logged, never
        raised."""
        try:
            for network in self.vm["network"]:
                network_name = network[0]
                _ = network[1]  # tap_mac
                tap_id = network[2]

                delete_network_interface("tap{}".format(tap_id))

                owners_vms = shared.vm_pool.by_owner(self.vm["owner"])
                owners_running_vms = shared.vm_pool.by_status(
                    VMStatus.running, _vms=owners_vms
                )

                # NOTE(review): this inspects only the FIRST network of
                # each running VM — confirm whether multi-NIC VMs should
                # also keep a shared bridge alive.
                networks = map(
                    lambda n: n[0],
                    map(lambda vm: vm.network, owners_running_vms),
                )
                networks_in_use_by_user_vms = [vm[0] for vm in networks]
                if network_name not in networks_in_use_by_user_vms:
                    network_entry = resolve_network(
                        network[0], self.vm["owner"]
                    )
                    if network_entry:
                        network_type = network_entry.value["type"]
                        network_id = network_entry.value["id"]
                        if network_type == "vxlan":
                            delete_network_interface(
                                "br{}".format(network_id)
                            )
                            delete_network_interface(
                                "vxlan{}".format(network_id)
                            )
        except Exception:
            logger.exception("Exception in network interface deletion")

    def create(self):
        """Ensure the VM's disk image exists: clone it from the source
        image and resize it to the requested os-ssd size."""
        if shared.storage_handler.is_vm_image_exists(self.uuid):
            # File Already exists. No Problem Continue
            logger.debug("Image for vm %s exists", self.uuid)
        else:
            if shared.storage_handler.make_vm_image(
                src=self.vm["image_uuid"], dest=self.uuid
            ):
                if not shared.storage_handler.resize_vm_image(
                    path=self.uuid,
                    size=int(self.vm["specs"]["os-ssd"].to_MB()),
                ):
                    self.vm["status"] = VMStatus.error
                else:
                    logger.info("New VM Created")

    def sync(self):
        """Write the current in-memory VM entry back to etcd."""
        shared.etcd_client.put(
            self.key, self.schema.dump(self.vm), value_in_json=True
        )

    def delete(self):
        """Stop the VM, delete its disk image (when present and
        deletable) and remove the etcd entry."""
        self.stop()

        if shared.storage_handler.is_vm_image_exists(self.uuid):
            r_status = shared.storage_handler.delete_vm_image(self.uuid)
            if r_status:
                shared.etcd_client.client.delete(self.key)
        else:
            shared.etcd_client.client.delete(self.key)
+
+
def resolve_network(network_name, network_owner):
    """Fetch the etcd entry of an owner's network by name.

    Looks up ``<network_prefix>/<owner>/<name>`` with JSON decoding and
    returns whatever the etcd client yields for that key.
    """
    etcd_key = join_path(
        shared.settings["etcd"]["network_prefix"],
        network_owner,
        network_name,
    )
    return shared.etcd_client.get(etcd_key, value_in_json=True)
+
+
def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
    """Create the vxlan -> bridge -> tap device chain for a network.

    Each step shells out to the corresponding script in the sibling
    ``network`` directory. Returns the tap device name on success, or
    None as soon as any step fails.
    """
    script_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), "network"
    )

    vxlan = create_dev(
        script=os.path.join(script_dir, "create-vxlan.sh"),
        _id=_id,
        dev=_dev,
    )
    if not vxlan:
        return None

    bridge = create_dev(
        script=os.path.join(script_dir, "create-bridge.sh"),
        _id=_id,
        dev=vxlan,
        ip=ip,
    )
    if not bridge:
        return None

    tap = create_dev(
        script=os.path.join(script_dir, "create-tap.sh"),
        _id=str(tap_id),
        dev=bridge,
    )
    if tap:
        return tap
    return None
+
+
def update_radvd_conf(all_networks):
    """Regenerate /etc/radvd.conf for every globally-routable IPv6
    network and restart the radvd daemon.

    all_networks: iterable of etcd entries whose ``.value`` carries at
    least the keys 'ipv6' and 'id'.

    Raises subprocess.CalledProcessError when radvd can be restarted
    neither via systemctl nor via the service command.
    """
    network_script_base = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), "network"
    )

    # Map global IPv6 prefix -> network id; link-local/ULA prefixes are
    # never announced via router advertisements, so they are skipped.
    networks = {
        net.value["ipv6"]: net.value["id"]
        for net in all_networks
        if net.value.get("ipv6")
        and ipaddress.ip_network(net.value.get("ipv6")).is_global
    }

    template_path = os.path.join(
        network_script_base, "radvd-template.conf"
    )
    # Context manager so the template file handle is closed (the
    # previous bare open().read() leaked it).
    with open(template_path, "r") as template_file:
        radvd_template = Template(template_file.read())

    content = [
        radvd_template.safe_substitute(
            bridge="br{}".format(net_id), prefix=prefix
        )
        # Explicit None check instead of truthiness: a network id of 0
        # is valid and must not be silently dropped.
        for prefix, net_id in networks.items()
        if net_id is not None
    ]
    with open("/etc/radvd.conf", "w") as radvd_conf:
        radvd_conf.writelines(content)

    try:
        sp.check_output(["systemctl", "restart", "radvd"])
    except sp.CalledProcessError:
        # Fall back to SysV-style service management (e.g. on Alpine).
        try:
            sp.check_output(["service", "radvd", "restart"])
        except sp.CalledProcessError as err:
            raise err.__class__(
                "Cannot start/restart radvd service", err.cmd
            ) from err
diff --git a/ucloud/imagescanner/__init__.py b/uncloud/imagescanner/__init__.py
similarity index 100%
rename from ucloud/imagescanner/__init__.py
rename to uncloud/imagescanner/__init__.py
diff --git a/uncloud/imagescanner/main.py b/uncloud/imagescanner/main.py
new file mode 100755
index 0000000..ee9da2e
--- /dev/null
+++ b/uncloud/imagescanner/main.py
@@ -0,0 +1,121 @@
+import json
+import os
+import argparse
+import subprocess as sp
+
+from os.path import join as join_path
+from uncloud.common.shared import shared
+from uncloud.imagescanner import logger
+
+
# Sub-command argument parser; registered by the top-level uncloud CLI.
arg_parser = argparse.ArgumentParser('imagescanner', add_help=False)
+
+
def qemu_img_type(path):
    """Return the on-disk image format (e.g. 'qcow2', 'raw') that
    ``qemu-img info`` reports for *path*, or None if probing fails."""
    probe_command = ["qemu-img", "info", "--output", "json", path]
    try:
        raw_output = sp.check_output(probe_command)
    except Exception as e:
        logger.exception(e)
        return None
    image_info = json.loads(raw_output.decode("utf-8"))
    return image_info["format"]
+
+
def main(arguments):
    """Scan etcd for image entries in status TO_BE_CREATED, convert any
    qcow2 source to raw and import it into the configured image store.

    *arguments* is accepted for CLI-dispatcher compatibility but is not
    used by this function.
    """
    # We want to get images entries that requests images to be created
    images = shared.etcd_client.get_prefix(
        shared.settings["etcd"]["image_prefix"], value_in_json=True
    )
    images_to_be_created = list(
        filter(lambda im: im.value["status"] == "TO_BE_CREATED", images)
    )

    for image in images_to_be_created:
        try:
            image_uuid = image.key.split("/")[-1]
            image_owner = image.value["owner"]
            image_filename = image.value["filename"]
            image_store_name = image.value["store_name"]
            image_full_path = join_path(
                shared.settings["storage"]["file_dir"],
                image_owner,
                image_filename,
            )

            image_stores = shared.etcd_client.get_prefix(
                shared.settings["etcd"]["image_store_prefix"],
                value_in_json=True,
            )
            # First store whose name matches this image's store_name;
            # raises StopIteration (caught below) when none matches.
            user_image_store = next(
                filter(
                    lambda s, store_name=image_store_name: s.value[
                        "name"
                    ]
                    == store_name,
                    image_stores,
                )
            )

            # NOTE(review): image_store_pool is computed but never used
            # below — confirm whether the import should target it.
            image_store_pool = user_image_store.value["attributes"][
                "pool"
            ]

        except Exception as e:
            logger.exception(e)
        else:
            # At least our basic data is available
            qemu_img_convert_command = [
                "qemu-img",
                "convert",
                "-f",
                "qcow2",
                "-O",
                "raw",
                image_full_path,
                "image.raw",
            ]

            if qemu_img_type(image_full_path) == "qcow2":
                try:
                    # Convert .qcow2 to .raw
                    sp.check_output(qemu_img_convert_command,)

                except sp.CalledProcessError:
                    logger.exception(
                        "Image convertion from .qcow2 to .raw failed."
                    )
                else:
                    # Import and Protect
                    r_status = shared.storage_handler.import_image(
                        src="image.raw", dest=image_uuid, protect=True
                    )
                    if r_status:
                        # Everything is successfully done
                        image.value["status"] = "CREATED"
                        shared.etcd_client.put(
                            image.key, json.dumps(image.value)
                        )
                finally:
                    # Always remove the temporary raw file, even when
                    # conversion or import failed.
                    try:
                        os.remove("image.raw")
                    except Exception:
                        pass

            else:
                # The user provided image is either not found or of invalid format
                image.value["status"] = "INVALID_IMAGE"
                shared.etcd_client.put(
                    image.key, json.dumps(image.value)
                )
+
+
if __name__ == "__main__":
    # main() requires the parsed CLI arguments; the previous bare call
    # main() raised TypeError. Parse with this module's own parser and
    # hand the arguments over as a dict (matching the other daemons).
    main(vars(arg_parser.parse_args()))
diff --git a/uncloud/metadata/__init__.py b/uncloud/metadata/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/uncloud/metadata/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/uncloud/metadata/main.py b/uncloud/metadata/main.py
new file mode 100644
index 0000000..374260e
--- /dev/null
+++ b/uncloud/metadata/main.py
@@ -0,0 +1,95 @@
+import os
+import argparse
+
+from flask import Flask, request
+from flask_restful import Resource, Api
+from werkzeug.exceptions import HTTPException
+
+from uncloud.common.shared import shared
+
# Flask application serving the VM metadata API (single resource at /).
app = Flask(__name__)
api = Api(app)

# Drop Flask's default log handlers so output is governed by the
# uncloud-wide logging configuration instead.
app.logger.handlers.clear()

DEFAULT_PORT=1234

# Sub-command argument parser; registered by the top-level uncloud CLI.
arg_parser = argparse.ArgumentParser('metadata', add_help=False)
arg_parser.add_argument('--port', '-p', default=DEFAULT_PORT, help='By default bind to port {}'.format(DEFAULT_PORT))
+
+
@app.errorhandler(Exception)
def handle_exception(error):
    """Top-level Flask error handler.

    HTTP errors (werkzeug HTTPException) already carry the correct
    status code and body, so they pass through untouched; anything else
    is logged and mapped to a generic 500 payload.
    """
    app.logger.error(error)

    if isinstance(error, HTTPException):
        return error

    return {"message": "Server Error"}, 500
+
+
def get_vm_entry(mac_addr):
    """Return the first VM in the shared pool that owns *mac_addr* on
    any of its network interfaces, or None when no VM matches."""
    for vm in shared.vm_pool.vms:
        # vm.network is a list of (name, mac, tap) triples; zip(*...)
        # column 1 collects all MACs of the VM.
        if mac_addr in list(zip(*vm.network))[1]:
            return vm
    return None
+
+
# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
def ipv62mac(ipv6):
    """Recover the EUI-64-derived MAC address embedded in an IPv6
    link-local address (an optional /prefixlen suffix is ignored)."""
    # Strip any trailing subnet information ("/64" etc.).
    address = ipv6.split("/")[0]

    # The MAC lives in the last four 16-bit groups; zero-pad each group
    # to four hex digits and split it into its two octets.
    octets = []
    for group in address.split(":")[-4:]:
        group = group.zfill(4)
        octets.append(group[:2])
        octets.append(group[-2:])

    # Flip the universal/local bit of the first octet and drop the
    # inserted ff:fe filler (octets 3 and 4) to get the 48-bit MAC back.
    octets[0] = "%02x" % (int(octets[0], 16) ^ 2)
    return ":".join(octets[:3] + octets[5:])
+
+
class Root(Resource):
    """Metadata endpoint: identifies the calling VM by deriving its MAC
    address from the client's IPv6 source address (EUI-64) and returns
    that VM's metadata, with the owner's SSH keys merged in."""

    @staticmethod
    def get():
        # Map the caller's IPv6 back to a MAC and look the VM up by it.
        data = get_vm_entry(ipv62mac(request.remote_addr))

        if not data:
            return (
                {"message": "Metadata for such VM does not exists."},
                404,
            )
        else:
            # Owner's SSH keys live under <user_prefix>/<realm>/<owner>/key.
            etcd_key = os.path.join(
                shared.settings["etcd"]["user_prefix"],
                data.value["owner_realm"],
                data.value["owner"],
                "key",
            )
            etcd_entry = shared.etcd_client.get_prefix(
                etcd_key, value_in_json=True
            )
            # Merge the owner's personal keys into the VM's metadata.
            user_personal_ssh_keys = [key.value for key in etcd_entry]
            data.value["metadata"]["ssh-keys"] += user_personal_ssh_keys
            return data.value["metadata"], 200
+
+
api.add_resource(Root, "/")


def main(arguments):
    """Run the metadata HTTP API on the configured port, bound to all
    addresses ("::" covers IPv6 and, on dual-stack hosts, IPv4)."""
    app.run(
        debug=arguments['debug'],
        host="::",
        port=arguments['port'],
    )
diff --git a/ucloud/network/README b/uncloud/network/README
similarity index 100%
rename from ucloud/network/README
rename to uncloud/network/README
diff --git a/ucloud/scheduler/tests/__init__.py b/uncloud/network/__init__.py
similarity index 100%
rename from ucloud/scheduler/tests/__init__.py
rename to uncloud/network/__init__.py
diff --git a/ucloud/network/create-bridge.sh b/uncloud/network/create-bridge.sh
similarity index 100%
rename from ucloud/network/create-bridge.sh
rename to uncloud/network/create-bridge.sh
diff --git a/ucloud/network/create-tap.sh b/uncloud/network/create-tap.sh
similarity index 100%
rename from ucloud/network/create-tap.sh
rename to uncloud/network/create-tap.sh
diff --git a/ucloud/network/create-vxlan.sh b/uncloud/network/create-vxlan.sh
similarity index 100%
rename from ucloud/network/create-vxlan.sh
rename to uncloud/network/create-vxlan.sh
diff --git a/ucloud/network/radvd-template.conf b/uncloud/network/radvd-template.conf
similarity index 100%
rename from ucloud/network/radvd-template.conf
rename to uncloud/network/radvd-template.conf
diff --git a/uncloud/oneshot/__init__.py b/uncloud/oneshot/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/uncloud/oneshot/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/uncloud/oneshot/main.py b/uncloud/oneshot/main.py
new file mode 100644
index 0000000..5b9b61c
--- /dev/null
+++ b/uncloud/oneshot/main.py
@@ -0,0 +1,123 @@
+import argparse
+import os
+
+
+from pathlib import Path
+from uncloud.vmm import VMM
+from uncloud.host.virtualmachine import update_radvd_conf, create_vxlan_br_tap
+
+from . import virtualmachine, logger
+
+###
+# Argument parser loaded by scripts/uncloud.
# Sub-command CLI: one action flag per invocation plus per-VM tuning
# arguments; registered by the top-level uncloud CLI dispatcher.
arg_parser = argparse.ArgumentParser('oneshot', add_help=False)

# Actions.
arg_parser.add_argument('--list', action='store_true',
                        help='list UUID and name of running VMs')
# NOTE(review): metavar order is (IMAGE, UPSTREAM_INTERFACE, NETWORK)
# but oneshot.main unpacks [1] as NETWORK and [2] as UPSTREAM_INTERFACE
# — confirm which order is intended.
arg_parser.add_argument('--start', nargs=3,
                        metavar=('IMAGE', 'UPSTREAM_INTERFACE', 'NETWORK'),
                        help='start a VM using the OS IMAGE (full path), configuring networking on NETWORK IPv6 prefix')
arg_parser.add_argument('--stop', metavar='UUID',
                        help='stop a VM')
arg_parser.add_argument('--get-status', metavar='UUID',
                        help='return the status of the VM')
arg_parser.add_argument('--get-vnc', metavar='UUID',
                        help='return the path of the VNC socket of the VM')
arg_parser.add_argument('--reconfigure-radvd', metavar='NETWORK',
                        help='regenerate and reload RADVD configuration for NETWORK IPv6 prefix')

# Arguments.
arg_parser.add_argument('--workdir', default=Path.home(),
                        help='Working directory, defaulting to $HOME')
arg_parser.add_argument('--mac',
                        help='MAC address of the VM to create (--start)')
arg_parser.add_argument('--memory', type=int,
                        help='Memory (MB) to allocate (--start)')
arg_parser.add_argument('--cores', type=int,
                        help='Number of cores to allocate (--start)')
arg_parser.add_argument('--threads', type=int,
                        help='Number of threads to allocate (--start)')
arg_parser.add_argument('--image-format', choices=['raw', 'qcow2'],
                        help='Format of OS image (--start)')
arg_parser.add_argument('--accel', choices=['kvm', 'tcg'], default='kvm',
                        help='QEMU acceleration to use (--start)')
arg_parser.add_argument('--upstream-interface', default='eth0',
                        help='Name of upstream interface (--start)')
+
+###
+# Helpers.
+
# XXX: check if it is possible to use the type returned by ETCD queries.
class UncloudEntryWrapper:
    """Minimal stand-in for an etcd query result: exposes the wrapped
    payload through the ``.value`` attribute, mirroring the entry
    objects that uncloud.host.virtualmachine helpers consume.

    The former ``value()`` method was removed: it was shadowed by the
    instance attribute of the same name assigned in __init__, so it was
    unreachable and calling ``wrapper.value()`` raised TypeError.
    """

    def __init__(self, value):
        self.value = value
+
def status_line(vm):
    """One-line human-readable VM summary: uuid, name and status."""
    fields = (vm.get_uuid(), vm.get_name(), vm.get_status())
    return "VM: {} {} {}".format(*fields)
+
+###
+# Entrypoint.
+
def main(arguments):
    """Entry point of the oneshot sub-command.

    Dispatches on exactly one action flag (--list/--start/--stop/
    --get-status/--get-vnc/--reconfigure-radvd); *arguments* is the
    parsed CLI arguments as a dict.
    """
    # Initialize VMM.
    workdir = arguments['workdir']
    vmm = VMM(vmm_backend=workdir)

    # Harcoded debug values.
    net_id = 0

    # Build VM configuration from whichever tuning flags were supplied.
    # (The previous option list also contained the stray key
    # '--upstream_interface', which can never match an argparse dest.)
    vm_config = {}
    vm_options = [
        'mac', 'memory', 'cores', 'threads', 'image', 'image_format',
        'upstream_interface', 'network', 'accel'
    ]
    for option in vm_options:
        if arguments.get(option):
            vm_config[option] = arguments[option]

    vm_config['net_id'] = net_id

    # Execute requested VM action.
    if arguments['reconfigure_radvd']:
        # TODO: check that RADVD is available.
        prefix = arguments['reconfigure_radvd']
        network = UncloudEntryWrapper({
            'id': net_id,
            'ipv6': prefix
        })

        # Make use of uncloud.host.virtualmachine for network configuration.
        update_radvd_conf([network])
    elif arguments['start']:
        # Extract from --start positional arguments. Quite fragile.
        # NOTE(review): argparse metavar order is (IMAGE,
        # UPSTREAM_INTERFACE, NETWORK) while this unpacking treats [1]
        # as NETWORK and [2] as UPSTREAM_INTERFACE — confirm.
        vm_config['image'] = arguments['start'][0]
        vm_config['network'] = arguments['start'][1]
        vm_config['upstream_interface'] = arguments['start'][2]

        vm_config['tap_interface'] = "uc{}".format(len(vmm.discover()))
        vm = virtualmachine.VM(vmm, vm_config)
        vm.start()
    elif arguments['stop']:
        # Stopping only needs the UUID. Bug fix: a second construction
        # from vm_config used to overwrite this object, dropping the
        # UUID before stop() was called.
        vm = virtualmachine.VM(vmm, {'uuid': arguments['stop']})
        vm.stop()
    elif arguments['get_status']:
        vm = virtualmachine.VM(vmm, {'uuid': arguments['get_status']})
        print(status_line(vm))
    elif arguments['get_vnc']:
        vm = virtualmachine.VM(vmm, {'uuid': arguments['get_vnc']})
        print(vm.get_vnc_addr())
    elif arguments['list']:
        vms = vmm.discover()
        print("Found {} VMs.".format(len(vms)))
        for uuid in vms:
            vm = virtualmachine.VM(vmm, {'uuid': uuid})
            print(status_line(vm))
    else:
        print('Please specify an action: --start, --stop, --list,\
--get-status, --get-vnc, --reconfigure-radvd')
diff --git a/uncloud/oneshot/virtualmachine.py b/uncloud/oneshot/virtualmachine.py
new file mode 100644
index 0000000..c8c2909
--- /dev/null
+++ b/uncloud/oneshot/virtualmachine.py
@@ -0,0 +1,81 @@
+import uuid
+import os
+
+from uncloud.host.virtualmachine import create_vxlan_br_tap
+from uncloud.oneshot import logger
+
class VM(object):
    """Stand-alone (etcd-less) QEMU virtual machine used by the oneshot
    sub-command. All specs come from a plain *config* dict; sensible
    defaults are substituted for anything missing."""

    def __init__(self, vmm, config):
        self.config = config
        self.vmm = vmm

        # Extract VM specs/metadata from configuration.
        self.name = config.get('name', 'no-name')
        self.memory = config.get('memory', 1024)
        self.cores = config.get('cores', 1)
        self.threads = config.get('threads', 1)
        self.image_format = config.get('image_format', 'qcow2')
        self.image = config.get('image')
        self.uuid = config.get('uuid', str(uuid.uuid4()))
        self.mac = config.get('mac')
        self.accel = config.get('accel', 'kvm')

        self.net_id = config.get('net_id', 0)
        self.upstream_interface = config.get('upstream_interface', 'eth0')
        self.tap_interface = config.get('tap_interface', 'uc0')
        self.network = config.get('network')

    def get_qemu_args(self):
        """Render this VM's QEMU command line as a flat token list."""
        command = (
            "-uuid {uuid} -name {name} -machine pc,accel={accel}"
            " -drive file={image},format={image_format},if=virtio"
            " -device virtio-rng-pci"
            " -m {memory} -smp cores={cores},threads={threads}"
            " -netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
            " -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}"
        ).format(
            uuid=self.uuid, name=self.name, accel=self.accel,
            image=self.image, image_format=self.image_format,
            memory=self.memory, cores=self.cores, threads=self.threads,
            net_id=self.net_id, tap=self.tap_interface, mac=self.mac
        )

        return command.split(" ")

    def start(self):
        """Set up networking and boot the VM via QEMU. Aborts early when
        the backing image does not exist."""
        # Check that VM image is available.
        if not os.path.isfile(self.image):
            logger.error("Image {} does not exist. Aborting.".format(self.image))
            # Bug fix: previously execution fell through and QEMU was
            # started against the missing image anyway.
            return

        # Create Bridge, VXLAN and tap interface for VM.
        create_vxlan_br_tap(
            self.net_id, self.upstream_interface, self.tap_interface, self.network
        )

        # Generate config for and run QEMU.
        qemu_args = self.get_qemu_args()
        logger.debug("QEMU args for VM {}: {}".format(self.uuid, qemu_args))
        self.vmm.start(
            uuid=self.uuid,
            migration=False,
            *qemu_args
        )

    def stop(self):
        """Terminate the QEMU process of this VM."""
        self.vmm.stop(self.uuid)

    def get_status(self):
        """Return the QEMU run state (e.g. 'running') of this VM."""
        return self.vmm.get_status(self.uuid)

    def get_vnc_addr(self):
        """Return the filesystem path of this VM's VNC socket."""
        return self.vmm.get_vnc(self.uuid)

    def get_uuid(self):
        return self.uuid

    def get_name(self):
        """Ask QEMU (QMP 'query-name') for the VM's name; None on
        failure."""
        # Bug fix: 'uuid' here used to reference the stdlib uuid module
        # instead of this instance's UUID string.
        success, json = self.vmm.execute_command(self.uuid, 'query-name')
        if success:
            return json['return']['name']

        return None
diff --git a/uncloud/scheduler/__init__.py b/uncloud/scheduler/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/uncloud/scheduler/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/ucloud/scheduler/helper.py b/uncloud/scheduler/helper.py
similarity index 50%
rename from ucloud/scheduler/helper.py
rename to uncloud/scheduler/helper.py
index ba577d6..79db322 100755
--- a/ucloud/scheduler/helper.py
+++ b/uncloud/scheduler/helper.py
@@ -3,10 +3,10 @@ from functools import reduce
import bitmath
-from ucloud.common.host import HostStatus
-from ucloud.common.request import RequestEntry, RequestType
-from ucloud.common.vm import VMStatus
-from ucloud.config import vm_pool, host_pool, request_pool, env_vars
+from uncloud.common.host import HostStatus
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.common.vm import VMStatus
+from uncloud.common.shared import shared
def accumulated_specs(vms_specs):
@@ -23,17 +23,35 @@ def remaining_resources(host_specs, vms_specs):
for component in _vms_specs:
if isinstance(_vms_specs[component], str):
- _vms_specs[component] = int(bitmath.parse_string_unsafe(_vms_specs[component]).to_MB())
+ _vms_specs[component] = int(
+ bitmath.parse_string_unsafe(
+ _vms_specs[component]
+ ).to_MB()
+ )
elif isinstance(_vms_specs[component], list):
- _vms_specs[component] = map(lambda x: int(bitmath.parse_string_unsafe(x).to_MB()), _vms_specs[component])
- _vms_specs[component] = reduce(lambda x, y: x + y, _vms_specs[component], 0)
+ _vms_specs[component] = map(
+ lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
+ _vms_specs[component],
+ )
+ _vms_specs[component] = reduce(
+ lambda x, y: x + y, _vms_specs[component], 0
+ )
for component in _remaining:
if isinstance(_remaining[component], str):
- _remaining[component] = int(bitmath.parse_string_unsafe(_remaining[component]).to_MB())
+ _remaining[component] = int(
+ bitmath.parse_string_unsafe(
+ _remaining[component]
+ ).to_MB()
+ )
elif isinstance(_remaining[component], list):
- _remaining[component] = map(lambda x: int(bitmath.parse_string_unsafe(x).to_MB()), _remaining[component])
- _remaining[component] = reduce(lambda x, y: x + y, _remaining[component], 0)
+ _remaining[component] = map(
+ lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
+ _remaining[component],
+ )
+ _remaining[component] = reduce(
+ lambda x, y: x + y, _remaining[component], 0
+ )
_remaining.subtract(_vms_specs)
@@ -46,23 +64,27 @@ class NoSuitableHostFound(Exception):
def get_suitable_host(vm_specs, hosts=None):
if hosts is None:
- hosts = host_pool.by_status(HostStatus.alive)
+ hosts = shared.host_pool.by_status(HostStatus.alive)
for host in hosts:
# Filter them by host_name
- vms = vm_pool.by_host(host.key)
+ vms = shared.vm_pool.by_host(host.key)
# Filter them by status
- vms = vm_pool.by_status(VMStatus.running, vms)
+ vms = shared.vm_pool.by_status(VMStatus.running, vms)
running_vms_specs = [vm.specs for vm in vms]
# Accumulate all of their combined specs
- running_vms_accumulated_specs = accumulated_specs(running_vms_specs)
+ running_vms_accumulated_specs = accumulated_specs(
+ running_vms_specs
+ )
# Find out remaining resources after
# host_specs - already running vm_specs
- remaining = remaining_resources(host.specs, running_vms_accumulated_specs)
+ remaining = remaining_resources(
+ host.specs, running_vms_accumulated_specs
+ )
# Find out remaining - new_vm_specs
remaining = remaining_resources(remaining, vm_specs)
@@ -75,7 +97,7 @@ def get_suitable_host(vm_specs, hosts=None):
def dead_host_detection():
# Bring out your dead! - Monty Python and the Holy Grail
- hosts = host_pool.by_status(HostStatus.alive)
+ hosts = shared.host_pool.by_status(HostStatus.alive)
dead_hosts_keys = []
for host in hosts:
@@ -89,25 +111,27 @@ def dead_host_detection():
def dead_host_mitigation(dead_hosts_keys):
for host_key in dead_hosts_keys:
- host = host_pool.get(host_key)
+ host = shared.host_pool.get(host_key)
host.declare_dead()
- vms_hosted_on_dead_host = vm_pool.by_host(host_key)
+ vms_hosted_on_dead_host = shared.vm_pool.by_host(host_key)
for vm in vms_hosted_on_dead_host:
- vm.declare_killed()
- vm_pool.put(vm)
- host_pool.put(host)
+ vm.status = "UNKNOWN"
+ shared.vm_pool.put(vm)
+ shared.host_pool.put(host)
def assign_host(vm):
vm.hostname = get_suitable_host(vm.specs)
- vm_pool.put(vm)
+ shared.vm_pool.put(vm)
- r = RequestEntry.from_scratch(type=RequestType.StartVM,
- uuid=vm.uuid,
- hostname=vm.hostname,
- request_prefix=env_vars.get("REQUEST_PREFIX"))
- request_pool.put(r)
+ r = RequestEntry.from_scratch(
+ type=RequestType.StartVM,
+ uuid=vm.uuid,
+ hostname=vm.hostname,
+ request_prefix=shared.settings["etcd"]["request_prefix"],
+ )
+ shared.request_pool.put(r)
vm.log.append("VM scheduled for starting")
return vm.hostname
diff --git a/uncloud/scheduler/main.py b/uncloud/scheduler/main.py
new file mode 100755
index 0000000..38c07bf
--- /dev/null
+++ b/uncloud/scheduler/main.py
@@ -0,0 +1,51 @@
+# TODO
+# 1. send an email to an email address defined by env['admin-email']
+# if resources are finished
+# 2. Introduce a status endpoint of the scheduler -
+# maybe expose a prometheus compatible output
+
+import argparse
+
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.common.shared import shared
+from uncloud.scheduler import logger
+from uncloud.scheduler.helper import (dead_host_mitigation, dead_host_detection,
+ assign_host, NoSuitableHostFound)
+
+arg_parser = argparse.ArgumentParser('scheduler', add_help=False)
+
+
def main(arguments):
    """Scheduler daemon loop: consume ScheduleVM requests from etcd and
    assign each VM to a suitable alive host; dead-host detection and
    mitigation run on every processed event."""
    # The while True below is necessary to gracefully handle leadership
    # transfer and temporary unavailability in etcd. It works because
    # get_prefix/watch_prefix return iter([]) — an iterator over an
    # empty list — on such exceptions, which ends the inner loops
    # immediately; the infinite loop then retries until either success
    # or daemon death.
    while True:
        # First drain the already-present requests, then block watching
        # for new ones.
        for request_iterator in [
            shared.etcd_client.get_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
                                          raise_exception=False),
            shared.etcd_client.watch_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
                                            raise_exception=False),
        ]:
            for request_event in request_iterator:
                dead_host_mitigation(dead_host_detection())
                request_entry = RequestEntry(request_event)

                if request_entry.type == RequestType.ScheduleVM:
                    logger.debug('%s, %s', request_entry.key, request_entry.value)

                    vm_entry = shared.vm_pool.get(request_entry.uuid)
                    if vm_entry is None:
                        logger.info('Trying to act on {} but it is deleted'.format(request_entry.uuid))
                        continue

                    shared.etcd_client.client.delete(request_entry.key)  # consume Request

                    try:
                        assign_host(vm_entry)
                    except NoSuitableHostFound:
                        vm_entry.add_log('Can\'t schedule VM. No Resource Left.')
                        shared.vm_pool.put(vm_entry)

                        logger.info('No Resource Left. Emailing admin....')
diff --git a/uncloud/scheduler/tests/__init__.py b/uncloud/scheduler/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ucloud/scheduler/tests/test_basics.py b/uncloud/scheduler/tests/test_basics.py
similarity index 83%
rename from ucloud/scheduler/tests/test_basics.py
rename to uncloud/scheduler/tests/test_basics.py
index 92b3a83..defeb23 100755
--- a/ucloud/scheduler/tests/test_basics.py
+++ b/uncloud/scheduler/tests/test_basics.py
@@ -15,7 +15,7 @@ from main import (
main,
)
-from ucloud.config import etcd_client
+from uncloud.config import etcd_client
class TestFunctions(unittest.TestCase):
@@ -70,9 +70,15 @@ class TestFunctions(unittest.TestCase):
"last_heartbeat": datetime.utcnow().isoformat(),
}
with self.client.client.lock("lock"):
- self.client.put(f"{self.host_prefix}/1", host1, value_in_json=True)
- self.client.put(f"{self.host_prefix}/2", host2, value_in_json=True)
- self.client.put(f"{self.host_prefix}/3", host3, value_in_json=True)
+ self.client.put(
+ f"{self.host_prefix}/1", host1, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/2", host2, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/3", host3, value_in_json=True
+ )
def create_vms(self):
vm1 = json.dumps(
@@ -146,15 +152,17 @@ class TestFunctions(unittest.TestCase):
{"cpu": 8, "ram": 32},
]
self.assertEqual(
- accumulated_specs(vms), {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
+ accumulated_specs(vms),
+ {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10},
)
def test_remaining_resources(self):
host_specs = {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
vms_specs = {"ssd": 10, "cpu": 32, "ram": 12, "hdd": 0}
resultant_specs = {"ssd": 0, "cpu": -16, "ram": 36, "hdd": 10}
- self.assertEqual(remaining_resources(host_specs, vms_specs),
- resultant_specs)
+ self.assertEqual(
+ remaining_resources(host_specs, vms_specs), resultant_specs
+ )
def test_vmpool(self):
self.p.join(1)
@@ -167,7 +175,12 @@ class TestFunctions(unittest.TestCase):
f"{self.vm_prefix}/1",
{
"owner": "meow",
- "specs": {"cpu": 4, "ram": 8, "hdd": 100, "sdd": 256},
+ "specs": {
+ "cpu": 4,
+ "ram": 8,
+ "hdd": 100,
+ "sdd": 256,
+ },
"hostname": f"{self.host_prefix}/3",
"status": "SCHEDULED_DEPLOY",
},
@@ -182,7 +195,12 @@ class TestFunctions(unittest.TestCase):
f"{self.vm_prefix}/7",
{
"owner": "meow",
- "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
+ "specs": {
+ "cpu": 10,
+ "ram": 22,
+ "hdd": 146,
+ "sdd": 0,
+ },
"hostname": "",
"status": "REQUESTED_NEW",
},
@@ -197,7 +215,12 @@ class TestFunctions(unittest.TestCase):
f"{self.vm_prefix}/7",
{
"owner": "meow",
- "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
+ "specs": {
+ "cpu": 10,
+ "ram": 22,
+ "hdd": 146,
+ "sdd": 0,
+ },
"hostname": "",
"status": "REQUESTED_NEW",
},
diff --git a/ucloud/scheduler/tests/test_dead_host_mechanism.py b/uncloud/scheduler/tests/test_dead_host_mechanism.py
similarity index 70%
rename from ucloud/scheduler/tests/test_dead_host_mechanism.py
rename to uncloud/scheduler/tests/test_dead_host_mechanism.py
index 0b403ef..466b9ee 100755
--- a/ucloud/scheduler/tests/test_dead_host_mechanism.py
+++ b/uncloud/scheduler/tests/test_dead_host_mechanism.py
@@ -6,11 +6,7 @@ from os.path import dirname
BASE_DIR = dirname(dirname(__file__))
sys.path.insert(0, BASE_DIR)
-from main import (
- dead_host_detection,
- dead_host_mitigation,
- config
-)
+from main import dead_host_detection, dead_host_mitigation, config
class TestDeadHostMechanism(unittest.TestCase):
@@ -52,13 +48,23 @@ class TestDeadHostMechanism(unittest.TestCase):
"last_heartbeat": datetime(2011, 1, 1).isoformat(),
}
with self.client.client.lock("lock"):
- self.client.put(f"{self.host_prefix}/1", host1, value_in_json=True)
- self.client.put(f"{self.host_prefix}/2", host2, value_in_json=True)
- self.client.put(f"{self.host_prefix}/3", host3, value_in_json=True)
- self.client.put(f"{self.host_prefix}/4", host4, value_in_json=True)
+ self.client.put(
+ f"{self.host_prefix}/1", host1, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/2", host2, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/3", host3, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/4", host4, value_in_json=True
+ )
def test_dead_host_detection(self):
- hosts = self.client.get_prefix(self.host_prefix, value_in_json=True)
+ hosts = self.client.get_prefix(
+ self.host_prefix, value_in_json=True
+ )
deads = dead_host_detection(hosts)
self.assertEqual(deads, ["/test/host/2", "/test/host/3"])
return deads
@@ -66,7 +72,9 @@ class TestDeadHostMechanism(unittest.TestCase):
def test_dead_host_mitigation(self):
deads = self.test_dead_host_detection()
dead_host_mitigation(self.client, deads)
- hosts = self.client.get_prefix(self.host_prefix, value_in_json=True)
+ hosts = self.client.get_prefix(
+ self.host_prefix, value_in_json=True
+ )
deads = dead_host_detection(hosts)
self.assertEqual(deads, [])
diff --git a/uncloud/vmm/__init__.py b/uncloud/vmm/__init__.py
new file mode 100644
index 0000000..6db61eb
--- /dev/null
+++ b/uncloud/vmm/__init__.py
@@ -0,0 +1,336 @@
+import os
+import subprocess as sp
+import logging
+import socket
+import json
+import tempfile
+import time
+
+from contextlib import suppress
+from multiprocessing import Process
+from os.path import join as join_path
+from os.path import isdir
+
+logger = logging.getLogger(__name__)
+
+
+class VMQMPHandles:
+    """Context manager around a QEMU QMP unix socket.
+
+    On entry it connects, eats the QMP greeting, negotiates
+    capabilities and yields (socket, file-wrapper) handles for
+    sending commands and reading line-based replies.
+    """
+
+    def __init__(self, path):
+        self.path = path
+        self.sock = socket.socket(socket.AF_UNIX)
+        self.file = self.sock.makefile()
+
+    def __enter__(self):
+        self.sock.connect(self.path)
+
+        # eat qmp greetings
+        self.file.readline()
+
+        # init qmp
+        self.sock.sendall(b'{ "execute": "qmp_capabilities" }')
+        self.file.readline()
+
+        return self.sock, self.file
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.file.close()
+        self.sock.close()
+
+        if exc_type:
+            # Log with the full traceback, then let the original
+            # exception propagate (re-raising exc_type with a message
+            # can itself fail for exceptions with other signatures).
+            logger.error(
+                "Couldn't get handle for VM.",
+                exc_info=(exc_type, exc_val, exc_tb),
+            )
+        return False
+
+
+class TransferVM(Process):
+    """Background process that live-migrates a VM to another host.
+
+    It forwards the destination QMP socket to a local unix socket via
+    ssh, instructs qemu to migrate through it and polls progress.
+    """
+
+    def __init__(self, src_uuid, dest_sock_path, host, socket_dir):
+        self.src_uuid = src_uuid
+        self.host = host
+        self.src_sock_path = os.path.join(socket_dir, self.src_uuid)
+        self.dest_sock_path = dest_sock_path
+
+        super().__init__()
+
+    def run(self):
+        # Remove a stale forwarding socket left by a previous attempt.
+        with suppress(FileNotFoundError):
+            os.remove(self.src_sock_path)
+
+        command = [
+            "ssh",
+            "-nNT",
+            "-L",
+            "{}:{}".format(self.src_sock_path, self.dest_sock_path),
+            "root@{}".format(self.host),
+        ]
+
+        try:
+            p = sp.Popen(command)
+        except Exception as e:
+            logger.error(
+                "Couldn' forward unix socks over ssh.", exc_info=e
+            )
+            # Without the tunnel there is nothing to migrate through.
+            return
+
+        time.sleep(2)
+        vmm = VMM()
+        logger.debug("Executing: ssh forwarding command: %s", command)
+        vmm.execute_command(
+            self.src_uuid,
+            command="migrate",
+            arguments={"uri": "unix:{}".format(self.src_sock_path)},
+        )
+
+        # Poll migration progress for as long as the tunnel is alive.
+        while p.poll() is None:
+            success, output = vmm.execute_command(
+                self.src_uuid, command="query-migrate"
+            )
+            if not success:
+                logger.error(
+                    "Couldn't be able to query VM {} that was in migration".format(
+                        self.src_uuid
+                    )
+                )
+                return
+
+            status = output["return"]["status"]
+            logger.info("Migration Status: {}".format(status))
+            if status == "completed":
+                # Source finished streaming; shut the local copy down.
+                vmm.stop(self.src_uuid)
+                return
+            elif status in ["failed", "cancelled"]:
+                return
+
+            time.sleep(2)
+
+
+class VMM:
+    """Virtual Machine Manager: a thin qemu + QMP wrapper.
+
+    All state lives on disk: every VM gets a QMP unix socket named
+    after its uuid inside ``socket_dir``; a VM counts as running iff
+    that socket answers with a QMP greeting.
+    """
+
+    def __init__(
+        self,
+        qemu_path="/usr/bin/qemu-system-x86_64",
+        vmm_backend=os.path.expanduser("~/uncloud/vmm/"),
+    ):
+        self.qemu_path = qemu_path
+        self.vmm_backend = vmm_backend
+        self.socket_dir = os.path.join(self.vmm_backend, "sock")
+
+        # Create the backend and socket directories on first use.
+        for directory in (self.vmm_backend, self.socket_dir):
+            if not os.path.isdir(directory):
+                logger.info(
+                    "{} does not exists. Creating it...".format(
+                        directory
+                    )
+                )
+                os.makedirs(directory, exist_ok=True)
+
+    def is_running(self, uuid):
+        """Return True iff the VM's QMP socket answers a greeting."""
+        sock_path = os.path.join(self.socket_dir, uuid)
+        sock = socket.socket(socket.AF_UNIX)
+        try:
+            sock.connect(sock_path)
+            recv = sock.recv(4096)
+        except Exception as err:
+            # unix sock doesn't exists or it is closed
+            logger.debug(
+                "VM {} sock either don' exists or it is closed. It mean VM is stopped.".format(
+                    uuid
+                ),
+                exc_info=err,
+            )
+        else:
+            # if we receive greetings from qmp it mean VM is running
+            if len(recv) > 0:
+                return True
+        finally:
+            # Always release the probe socket instead of leaking it.
+            sock.close()
+
+        # VM is down: clean up its stale socket file, if any.
+        with suppress(FileNotFoundError):
+            os.remove(sock_path)
+
+        return False
+
+    def start(self, *args, uuid, migration=False):
+        """Start a VM (or receive one, when *migration* is True).
+
+        Extra qemu arguments are passed through via *args.  Returns
+        the VM status string on success, None otherwise.
+        """
+        if self.is_running(uuid):
+            logger.warning("Cannot start VM. It is already running.")
+            return
+
+        migration_args = ()
+        if migration:
+            # Make qemu wait for an incoming migration stream.
+            migration_args = (
+                "-incoming",
+                "unix:{}".format(os.path.join(self.socket_dir, uuid)),
+            )
+
+        qmp_arg = (
+            "-qmp",
+            "unix:{},server,nowait".format(
+                join_path(self.socket_dir, uuid)
+            ),
+        )
+        vnc_arg = (
+            "-vnc",
+            "unix:{}".format(tempfile.NamedTemporaryFile().name),
+        )
+
+        command = [
+            "sudo",
+            "-p",
+            "Enter password to start VM {}: ".format(uuid),
+            self.qemu_path,
+            *args,
+            *qmp_arg,
+            *migration_args,
+            *vnc_arg,
+            "-daemonize",
+        ]
+        try:
+            sp.check_output(command, stderr=sp.PIPE)
+        except sp.CalledProcessError as err:
+            logger.exception(
+                "Error occurred while starting VM.\nDetail %s",
+                err.stderr.decode("utf-8"),
+            )
+            return
+
+        # The qmp socket dir must stay accessible to non-root tooling.
+        sp.check_output(
+            [
+                "sudo",
+                "-p",
+                "Enter password to correct permission for uncloud-vmm's directory",
+                "chmod",
+                "-R",
+                "o=rwx,g=rwx",
+                self.vmm_backend,
+            ]
+        )
+
+        # TODO: Find some good way to check whether the virtual machine is up and
+        # running without relying on non-guarenteed ways.
+        for _ in range(10):
+            time.sleep(2)
+            status = self.get_status(uuid)
+            if status in ["running", "inmigrate"]:
+                return status
+        logger.warning(
+            "Timeout on VM's status. Shutting down VM %s", uuid
+        )
+        self.stop(uuid)
+        # TODO: What should we do more. VM can still continue to run in background.
+        # If we have pid of vm we can kill it using OS.
+
+    def execute_command(self, uuid, command, **kwargs):
+        """Run one QMP *command* against the VM.
+
+        Returns (success, parsed-json-reply); (False, None) on any
+        connection, send or parse failure.
+        """
+        try:
+            with VMQMPHandles(os.path.join(self.socket_dir, uuid)) as (
+                sock_handle,
+                file_handle,
+            ):
+                command_to_execute = {"execute": command, **kwargs}
+                sock_handle.sendall(
+                    json.dumps(command_to_execute).encode("utf-8")
+                )
+                output = file_handle.readline()
+        except Exception:
+            logger.exception(
+                "Error occurred while executing command and getting valid output from qmp"
+            )
+        else:
+            try:
+                output = json.loads(output)
+            except Exception:
+                logger.exception(
+                    "QMP Output isn't valid JSON. %s", output
+                )
+            else:
+                return "return" in output, output
+        return False, None
+
+    def stop(self, uuid):
+        """Ask the VM to quit via QMP; True when acknowledged."""
+        success, _ = self.execute_command(command="quit", uuid=uuid)
+        return success
+
+    def get_status(self, uuid):
+        """Return qemu's run-state string, or "STOPPED" if unreachable."""
+        success, output = self.execute_command(
+            command="query-status", uuid=uuid
+        )
+        if success:
+            return output["return"]["status"]
+        # TODO: Think about this for a little more
+        return "STOPPED"
+
+    def discover(self):
+        """List the uuids of VMs that own a socket in socket_dir."""
+        return [
+            uuid
+            for uuid in os.listdir(self.socket_dir)
+            if not isdir(join_path(self.socket_dir, uuid))
+        ]
+
+    def get_vnc(self, uuid):
+        """Return the VM's VNC unix-socket path, or None."""
+        success, output = self.execute_command(
+            uuid, command="query-vnc"
+        )
+        if success:
+            return output["return"]["service"]
+        return None
+
+    def transfer(self, src_uuid, destination_sock_path, host):
+        """Start a background TransferVM process for src_uuid."""
+        TransferVM(
+            src_uuid,
+            destination_sock_path,
+            socket_dir=self.socket_dir,
+            host=host,
+        ).start()
+
+    # TODO: the following method should clean things that went wrong
+    # e.g If VM migration fails or didn't start for long time
+    # i.e 15 minutes we should stop the waiting VM.
+    def maintenace(self):
+        pass