Effort is made to ensure a VM always has a status, and unused VM statuses are removed

ahmadbilalkhalid 2019-11-27 12:12:29 +05:00
parent befb22b9cb
commit f3f2f6127a
11 changed files with 86 additions and 77 deletions
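The heart of the change is visible in the CreateVM handler below: a new VM entry now starts out as VMStatus.stopped instead of carrying an empty status string, so every consumer can rely on the field holding one of the enumerated constants. A minimal sketch of the invariant (entry fields abbreviated; owner and values are illustrative, not taken from the diff):

    from common.vm import VMStatus

    # Before this commit a fresh entry had "status": "" and the field stayed
    # meaningless until some worker overwrote it. Now it is always one of the
    # VMStatus constants.
    vm_entry = {
        "owner": "nico",
        "hostname": "",
        "status": VMStatus.stopped,  # never the empty string
    }

    if vm_entry["status"] == VMStatus.stopped:
        print("VM is stopped and eligible for a start request")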


@@ -20,6 +20,7 @@ sshtunnel = "*"
helper = "*"
sphinx = "*"
pynetbox = "*"
sphinx-rtd-theme = "*"
[requires]
python_version = "3.5"

Pipfile.lock (generated)

@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "f43a93c020eb20212b437fcc62882db03bfa93f4678eb930e31343d687c805ed"
"sha256": "7f5bc76f02cef7e98fa631f1954b2b7afa46a7796650386b91c9a6c591945f75"
},
"pipfile-spec": 6,
"requires": {
@@ -379,10 +379,10 @@
},
"pynetbox": {
"hashes": [
"sha256:09525a29f1ac8c1a54772d6e2b94a55b1db6ba6a1c5b07f7af6a6ce232b1f7d5"
"sha256:7c2282891ab1d3a5f5b28cb3b83c30d33c7ac3da1ee928c7332a4d2fac32f283"
],
"index": "pypi",
"version": "==4.1.0"
"version": "==4.2.0"
},
"pyotp": {
"hashes": [
@@ -466,6 +466,14 @@
"index": "pypi",
"version": "==2.2.1"
},
"sphinx-rtd-theme": {
"hashes": [
"sha256:00cf895504a7895ee433807c62094cf1e95f065843bf3acd17037c3e9a2becd4",
"sha256:728607e34d60456d736cc7991fd236afb828b21b82f956c5ea75f94c8414040a"
],
"index": "pypi",
"version": "==0.4.3"
},
"sphinxcontrib-applehelp": {
"hashes": [
"sha256:edaa0ab2b2bc74403149cb0209d6775c96de797dfd5b5e2a71981309efab3897",


@@ -1,5 +1,4 @@
import json
import subprocess
import pynetbox
from uuid import uuid4
@@ -9,6 +8,7 @@ from flask import Flask, request
from flask_restful import Resource, Api
from common import counters
from common.vm import VMStatus
from common.request import RequestEntry, RequestType
from config import (etcd_client, request_pool, vm_pool, host_pool, env_vars, image_storage_handler)
from . import schemas
@@ -42,7 +42,7 @@ class CreateVM(Resource):
"owner_realm": data["realm"],
"specs": specs,
"hostname": "",
"status": "",
"status": VMStatus.stopped,
"image_uuid": validator.image_uuid,
"log": [],
"vnc_socket": "",


@@ -6,25 +6,9 @@ from .classes import SpecificEtcdEntryBase
class VMStatus:
# Must be only assigned to brand new VM
requested_new = "REQUESTED_NEW"
# Only Assigned to already created vm
requested_start = "REQUESTED_START"
# These all are for running vms
requested_shutdown = "REQUESTED_SHUTDOWN"
requested_migrate = "REQUESTED_MIGRATE"
requested_delete = "REQUESTED_DELETE"
# either its image is not found or user requested
# to delete it
deleted = "DELETED"
stopped = "STOPPED" # After requested_shutdown
killed = "KILLED" # either host died or vm died itself
running = "RUNNING"
error = "ERROR" # An error occurred that cannot be resolved automatically


@@ -27,7 +27,8 @@ author = 'ungleich'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc'
'sphinx.ext.autodoc',
'sphinx_rtd_theme',
]
# Add any paths that contain templates here, relative to this directory.
@@ -43,7 +44,8 @@ exclude_patterns = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,


@@ -11,8 +11,10 @@ Installation
Alpine
------
Python Wheel (Binary) Packages does not support Alpine Linux as it is using musl libc instead of glibc.
Therefore, expect longer installation times than other linux distributions.
.. note::
Python Wheel (Binary) Packages does not support Alpine Linux as it is using musl libc instead of glibc.
Therefore, expect longer installation times than other linux distributions.
Enable Edge Repos, Update and Upgrade
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -196,34 +198,3 @@ profile e.g *~/.profile*
and run :code:`source ~/.profile`
Arch
-----
.. code-block:: sh
# Update/Upgrade
pacman -Syuu
pacman -S python3 qemu chrony python-pip
pip3 install pipenv
cat > /etc/chrony.conf << EOF
server 0.arch.pool.ntp.org
server 1.arch.pool.ntp.org
server 2.arch.pool.ntp.org
EOF
systemctl start chronyd
systemctl enable chronyd
# Create non-root user and allow it sudo access
# without password
useradd -m ucloud
echo "ucloud ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
sudo -H -u ucloud bash -c 'cd /home/ucloud && git clone https://aur.archlinux.org/yay.git && cd yay && makepkg -si'
sudo -H -u ucloud bash -c 'yay -S etcd'
systemctl start etcd
systemctl enable etcd


@@ -1,19 +1,32 @@
TODO
====
Security
--------
* **Check Authentication:** Nico reported that some endpoints
even work without providing token. (ListUserVM)
even work without providing token. (e.g. ListUserVM)
Refactoring/Feature
-------------------
* Put overrides for **IMAGE_BASE**, **VM_BASE** in **ImageStorageHandler**.
* Put "Always use only one StorageHandler"
* Expose more details in ListUserFiles.
* Throw KeyError instead of returning None when some key is not found in etcd.
* Create Network Manager
* That would handle tasks like up/down an interface
* Create VXLANs, Bridges, TAPs.
* Remove them when they are no longer used.
* Check for :code:`etcd3.exceptions.ConnectionFailedError` when calling some etcd operation to
avoid crashing whole application.
* Throw KeyError instead of returning None when some key is not found in etcd.
* Expose more details in ListUserFiles.
Reliability
-----------
* What to do if some command hangs forever? e.g CEPH commands
:code:`rbd ls ssd` etc. hangs forever if CEPH isn't running
or not responding.
* What to do if etcd goes down?
Misc.
-----
* Put "Always use only one StorageHandler"


@@ -69,21 +69,49 @@ Then, launch your remote desktop client and connect to vnc://localhost:1234.
Create Network
--------------
Layer 2 Network with sample IPv6 range fd00::/64 (without IPAM and routing)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: sh
ucloud-cli network create --network-name mynet --network-type vxlan
.. code-block:: json
{
"message": "Network successfully added."
}
Layer 2 Network with /64 network with automatic IPAM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: sh
ucloud-cli network create --network-name mynet --network-type vxlan --user True
Create VM using this network
Attach Network to VM
--------------------
Currently, a user can only attach a network to his/her VM at
the time of creation. A sample command to create a VM with
a network is as follows
.. code-block:: sh
ucloud-cli vm create --vm-name meow2 --cpu 1 --ram '1gb' --os-ssd '4gb' --image images:alpine --network mynet
.. _get-list-of-hosts:
Get List of Hosts
-----------------
.. code-block:: sh
ucloud-cli host list
Migrate VM
----------
.. code-block:: sh
ucloud-cli vm migrate --vm-name meow --destination server1.place10
.. option:: --destination
The name of destination host. You can find a list of host
using :ref:`get-list-of-hosts`


@@ -108,7 +108,7 @@ def main(hostname):
# If the event is directed toward me OR I am destination of a InitVMMigration
if request_event.hostname == host.key or request_event.destination == host.key:
logger.debug("EVENT: %s", request_event)
logger.debug("VM Request: %s", request_event)
request_pool.client.client.delete(request_event.key)
vm_entry = vm_pool.get(request_event.uuid)


@@ -114,14 +114,15 @@ def get_start_command_args(vm_entry, vnc_sock_filename: str, migration=False, mi
vm_uuid = vm_entry.uuid
vm_networks = vm_entry.network
command = "-drive file={},format=raw,if=virtio,cache=none".format(
command = "-name {}_{}".format(vm_entry.owner, vm_entry.name)
command += " -drive file={},format=raw,if=virtio,cache=none".format(
image_storage_handler.qemu_path_string(vm_uuid)
)
command += " -device virtio-rng-pci -vnc unix:{}".format(vnc_sock_filename)
command += " -m {} -smp cores={},threads={}".format(
vm_memory, vm_cpus, threads_per_core
)
command += " -name {}".format(vm_uuid)
if migration:
command += " -incoming tcp:[::]:{}".format(migration_port)
@@ -198,7 +199,7 @@ def create(vm_entry: VMEntry):
vm_hdd = int(bitmath.parse_string_unsafe(vm_entry.specs["os-ssd"]).to_MB())
if image_storage_handler.make_vm_image(src=vm_entry.image_uuid, dest=vm_entry.uuid):
if not image_storage_handler.resize_vm_image(path=vm_entry.uuid, size=vm_hdd):
vm_entry.status = "ERROR"
vm_entry.status = VMStatus.error
else:
logger.info("New VM Created")
@@ -208,9 +209,10 @@ def start(vm_entry: VMEntry, destination_host_key=None, migration_port=None):
# VM already running. No need to proceed further.
if _vm:
logger.info("VM %s already running", vm_entry.uuid)
logger.info("VM %s already running" % vm_entry.uuid)
return
else:
logger.info("Trying to start %s" % vm_entry.uuid)
if destination_host_key:
launch_vm(vm_entry, migration=True, migration_port=migration_port,
destination_host_key=destination_host_key)
@@ -288,7 +290,7 @@ def transfer(request_event):
def launch_vm(vm_entry, migration=False, migration_port=None, destination_host_key=None):
logger.info("Starting %s", vm_entry.key)
logger.info("Starting %s" % vm_entry.key)
vm = create_vm_object(vm_entry, migration=migration, migration_port=migration_port)
try:

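One user-visible effect of the get_start_command_args change: the QEMU process is now named after the owner and VM name rather than the bare UUID, which is what shows up in process listings and the QEMU monitor. A toy reconstruction of the prefix it builds (owner, name, and image path are invented sample values; the real ones come from the etcd VM entry and image_storage_handler.qemu_path_string()):

    owner, vm_name, vm_uuid = "nico", "meow", "8f1a..."
    command = "-name {}_{}".format(owner, vm_name)
    command += " -drive file=/var/image/{},format=raw,if=virtio,cache=none".format(vm_uuid)
    print(command)
    # -name nico_meow -drive file=/var/image/8f1a...,format=raw,if=virtio,cache=none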

@@ -67,7 +67,7 @@ def main():
hosts=[host_pool.get(request_entry.destination)])
except NoSuitableHostFound:
logger.info("Requested destination host doesn't have enough capacity"
"to hold %s" % vm_entry.uuid)
"to hold %s" % vm_entry.uuid)
else:
r = RequestEntry.from_scratch(type=RequestType.InitVMMigration,
uuid=request_entry.uuid,
@@ -82,7 +82,7 @@ def main():
try:
assign_host(vm_entry)
except NoSuitableHostFound:
vm_entry.log.append("Can't schedule VM. No Resource Left.")
vm_entry.add_log("Can't schedule VM. No Resource Left.")
vm_pool.put(vm_entry)
pending_vms.append(vm_entry)
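The switch from vm_entry.log.append(...) to vm_entry.add_log(...) funnels log writes through one method; its body is not part of this diff. A plausible shape for it, assuming it timestamps entries and bounds the list (both details invented):

    from datetime import datetime

    class VMEntry:  # excerpt, illustrative only
        MAX_LOG_LINES = 100

        def add_log(self, message):
            # Drop the oldest entries and stamp the new one, instead of
            # letting every caller append raw strings directly.
            self.log = self.log[-(self.MAX_LOG_LINES - 1):]
            self.log.append("{} - {}".format(datetime.now().isoformat(), message))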