find ucloud -name \*.py -exec sed -i "s/ucloud/uncloud/g" {} \;
parent 70c8da544e
commit 7b6c02b3ab
68 changed files with 0 additions and 0 deletions
0
uncloud/__init__.py
Normal file
12
uncloud/api/README.md
Executable file
@@ -0,0 +1,12 @@
# ucloud-api
[](https://www.repostatus.org/#wip)

## Installation

**Make sure you have Python >= 3.5 and Pipenv installed.**

1. Clone the repository and `cd` into it.
2. Run the following commands
   - `pipenv install`
   - `pipenv shell`
   - `python main.py`
3
uncloud/api/__init__.py
Normal file
@@ -0,0 +1,3 @@
import logging

logger = logging.getLogger(__name__)
62
uncloud/api/common_fields.py
Executable file
@@ -0,0 +1,62 @@
import os

from ucloud.shared import shared
from ucloud.settings import settings


class Optional:
    pass


class Field:
    def __init__(self, _name, _type, _value=None):
        self.name = _name
        self.value = _value
        self.type = _type
        self.__errors = []

    def validation(self):
        return True

    def is_valid(self):
        if self.value == KeyError:
            self.add_error(
                "'{}' field is a required field".format(self.name)
            )
        else:
            if isinstance(self.value, Optional):
                pass
            elif not isinstance(self.value, self.type):
                self.add_error(
                    "Incorrect Type for '{}' field".format(self.name)
                )
            else:
                self.validation()

        if self.__errors:
            return False
        return True

    def get_errors(self):
        return self.__errors

    def add_error(self, error):
        self.__errors.append(error)


class VmUUIDField(Field):
    def __init__(self, data):
        self.uuid = data.get("uuid", KeyError)

        super().__init__("uuid", str, self.uuid)

        self.validation = self.vm_uuid_validation

    def vm_uuid_validation(self):
        r = shared.etcd_client.get(
            os.path.join(settings["etcd"]["vm_prefix"], self.uuid)
        )
        if not r:
            self.add_error(
                "VM with uuid {} does not exists".format(self.uuid)
            )
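A minimal sketch of the Field validation flow above, using the classes from common_fields.py; the request payload here is made up for illustration (a missing key maps to the KeyError sentinel used throughout the schemas):

# Hypothetical request payload.
data = {"specs": {"cpu": 2}, "uuid": "0a1b2c3d4e5f"}

specs = Field("specs", dict, data.get("specs", KeyError))
missing = Field("ram", str, data.get("ram", KeyError))

assert specs.is_valid() is True
assert missing.is_valid() is False
print(missing.get_errors())  # ["'ram' field is a required field"]

VmUUIDField works the same way but additionally looks the uuid up under the configured etcd VM prefix during validation.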
20
uncloud/api/create_image_store.py
Executable file
@@ -0,0 +1,20 @@
import json
import os

from uuid import uuid4

from ucloud.shared import shared
from ucloud.settings import settings

data = {
    "is_public": True,
    "type": "ceph",
    "name": "images",
    "description": "first ever public image-store",
    "attributes": {"list": [], "key": [], "pool": "images"},
}

shared.etcd_client.put(
    os.path.join(settings["etcd"]["image_store_prefix"], uuid4().hex),
    json.dumps(data),
)
151
uncloud/api/helper.py
Executable file
@@ -0,0 +1,151 @@
import binascii
import ipaddress
import random
import subprocess as sp
import logging
import requests

from pyotp import TOTP

from ucloud.shared import shared
from ucloud.settings import settings

logger = logging.getLogger(__name__)


def check_otp(name, realm, token):
    try:
        data = {
            "auth_name": settings["otp"]["auth_name"],
            "auth_token": TOTP(settings["otp"]["auth_seed"]).now(),
            "auth_realm": settings["otp"]["auth_realm"],
            "name": name,
            "realm": realm,
            "token": token,
        }
    except binascii.Error as err:
        logger.error(
            "Cannot compute OTP for seed: {}".format(
                settings["otp"]["auth_seed"]
            )
        )
        return 400

    response = requests.post(
        settings["otp"]["verification_controller_url"], json=data
    )
    return response.status_code


def resolve_vm_name(name, owner):
    """Return UUID of Virtual Machine of name == name and owner == owner

    Input: name of vm, owner of vm.
    Output: uuid of vm if found otherwise None
    """
    result = next(
        filter(
            lambda vm: vm.value["owner"] == owner
            and vm.value["name"] == name,
            shared.vm_pool.vms,
        ),
        None,
    )
    if result:
        return result.key.split("/")[-1]

    return None


def resolve_image_name(name, etcd_client):
    """Return image uuid given its name and its store

    * If the provided name is not in correct format
      i.e {store_name}:{image_name} return ValueError
    * If no such image found then return KeyError

    """

    seperator = ":"

    # Ensure, user/program passed valid name that is of type string
    try:
        store_name_and_image_name = name.split(seperator)

        """
        Examples, where it would work and where it would raise exception
        "images:alpine" --> ["images", "alpine"]

        "images" --> ["images"] it would raise Exception as non enough value to unpack

        "images:alpine:meow" --> ["images", "alpine", "meow"] it would raise Exception
                                 as too many values to unpack
        """
        store_name, image_name = store_name_and_image_name
    except Exception:
        raise ValueError(
            "Image name not in correct format i.e {store_name}:{image_name}"
        )

    images = etcd_client.get_prefix(
        settings["etcd"]["image_prefix"], value_in_json=True
    )

    # Try to find image with name == image_name and store_name == store_name
    try:
        image = next(
            filter(
                lambda im: im.value["name"] == image_name
                and im.value["store_name"] == store_name,
                images,
            )
        )
    except StopIteration:
        raise KeyError("No image with name {} found.".format(name))
    else:
        image_uuid = image.key.split("/")[-1]

    return image_uuid


def random_bytes(num=6):
    return [random.randrange(256) for _ in range(num)]


def generate_mac(
    uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
):
    mac = random_bytes()
    if oui:
        if type(oui) == str:
            oui = [int(chunk) for chunk in oui.split(separator)]
        mac = oui + random_bytes(num=6 - len(oui))
    else:
        if multicast:
            mac[0] |= 1  # set bit 0
        else:
            mac[0] &= ~1  # clear bit 0
        if uaa:
            mac[0] &= ~(1 << 1)  # clear bit 1
        else:
            mac[0] |= 1 << 1  # set bit 1
    return separator.join(byte_fmt % b for b in mac)


def mac2ipv6(mac, prefix):
    # only accept MACs separated by a colon
    parts = mac.split(":")

    # modify parts to match IPv6 value
    parts.insert(3, "ff")
    parts.insert(4, "fe")
    parts[0] = "%x" % (int(parts[0], 16) ^ 2)

    # format output
    ipv6_parts = [str(0)] * 4
    for i in range(0, len(parts), 2):
        ipv6_parts.append("".join(parts[i : i + 2]))

    lower_part = ipaddress.IPv6Address(":".join(ipv6_parts))
    prefix = ipaddress.IPv6Address(prefix)
    return str(prefix + int(lower_part))
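A small sketch of how generate_mac and mac2ipv6 above compose; the prefix value is an example, not taken from the repository:

# Default arguments produce a locally administered, unicast MAC.
mac = generate_mac()
# Derive an EUI-64 style address inside an example fd00::/64 prefix.
ip = mac2ipv6(mac, "fd00::")
print(mac, ip)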
590
uncloud/api/main.py
Normal file
|
|
@ -0,0 +1,590 @@
|
|||
import json
|
||||
import pynetbox
|
||||
import logging
|
||||
|
||||
from uuid import uuid4
|
||||
from os.path import join as join_path
|
||||
|
||||
from flask import Flask, request
|
||||
from flask_restful import Resource, Api
|
||||
from werkzeug.exceptions import HTTPException
|
||||
|
||||
from ucloud.common import counters
|
||||
from ucloud.common.vm import VMStatus
|
||||
from ucloud.common.request import RequestEntry, RequestType
|
||||
from ucloud.settings import settings
|
||||
from ucloud.shared import shared
|
||||
|
||||
from . import schemas
|
||||
from .helper import generate_mac, mac2ipv6
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
app = Flask(__name__)
|
||||
api = Api(app)
|
||||
app.logger.handlers.clear()
|
||||
|
||||
|
||||
@app.errorhandler(Exception)
|
||||
def handle_exception(e):
|
||||
app.logger.error(e)
|
||||
# pass through HTTP errors
|
||||
if isinstance(e, HTTPException):
|
||||
return e
|
||||
|
||||
# now you're handling non-HTTP exceptions only
|
||||
return {"message": "Server Error"}, 500
|
||||
|
||||
|
||||
class CreateVM(Resource):
|
||||
"""API Request to Handle Creation of VM"""
|
||||
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.CreateVMSchema(data)
|
||||
if validator.is_valid():
|
||||
vm_uuid = uuid4().hex
|
||||
vm_key = join_path(settings["etcd"]["vm_prefix"], vm_uuid)
|
||||
specs = {
|
||||
"cpu": validator.specs["cpu"],
|
||||
"ram": validator.specs["ram"],
|
||||
"os-ssd": validator.specs["os-ssd"],
|
||||
"hdd": validator.specs["hdd"],
|
||||
}
|
||||
macs = [generate_mac() for _ in range(len(data["network"]))]
|
||||
tap_ids = [
|
||||
counters.increment_etcd_counter(
|
||||
shared.etcd_client, "/v1/counter/tap"
|
||||
)
|
||||
for _ in range(len(data["network"]))
|
||||
]
|
||||
vm_entry = {
|
||||
"name": data["vm_name"],
|
||||
"owner": data["name"],
|
||||
"owner_realm": data["realm"],
|
||||
"specs": specs,
|
||||
"hostname": "",
|
||||
"status": VMStatus.stopped,
|
||||
"image_uuid": validator.image_uuid,
|
||||
"log": [],
|
||||
"vnc_socket": "",
|
||||
"network": list(zip(data["network"], macs, tap_ids)),
|
||||
"metadata": {"ssh-keys": []},
|
||||
"in_migration": False,
|
||||
}
|
||||
shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
|
||||
|
||||
# Create ScheduleVM Request
|
||||
r = RequestEntry.from_scratch(
|
||||
type=RequestType.ScheduleVM,
|
||||
uuid=vm_uuid,
|
||||
request_prefix=settings["etcd"]["request_prefix"],
|
||||
)
|
||||
shared.request_pool.put(r)
|
||||
|
||||
return {"message": "VM Creation Queued"}, 200
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class VmStatus(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
data = request.json
|
||||
validator = schemas.VMStatusSchema(data)
|
||||
if validator.is_valid():
|
||||
vm = shared.vm_pool.get(
|
||||
join_path(settings["etcd"]["vm_prefix"], data["uuid"])
|
||||
)
|
||||
vm_value = vm.value.copy()
|
||||
vm_value["ip"] = []
|
||||
for network_mac_and_tap in vm.network:
|
||||
network_name, mac, tap = network_mac_and_tap
|
||||
network = shared.etcd_client.get(
|
||||
join_path(
|
||||
settings["etcd"]["network_prefix"],
|
||||
data["name"],
|
||||
network_name,
|
||||
),
|
||||
value_in_json=True,
|
||||
)
|
||||
ipv6_addr = (
|
||||
network.value.get("ipv6").split("::")[0] + "::"
|
||||
)
|
||||
vm_value["ip"].append(mac2ipv6(mac, ipv6_addr))
|
||||
vm.value = vm_value
|
||||
return vm.value
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class CreateImage(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.CreateImageSchema(data)
|
||||
if validator.is_valid():
|
||||
file_entry = shared.etcd_client.get(
|
||||
join_path(settings["etcd"]["file_prefix"], data["uuid"])
|
||||
)
|
||||
file_entry_value = json.loads(file_entry.value)
|
||||
|
||||
image_entry_json = {
|
||||
"status": "TO_BE_CREATED",
|
||||
"owner": file_entry_value["owner"],
|
||||
"filename": file_entry_value["filename"],
|
||||
"name": data["name"],
|
||||
"store_name": data["image_store"],
|
||||
"visibility": "public",
|
||||
}
|
||||
shared.etcd_client.put(
|
||||
join_path(
|
||||
settings["etcd"]["image_prefix"], data["uuid"]
|
||||
),
|
||||
json.dumps(image_entry_json),
|
||||
)
|
||||
|
||||
return {"message": "Image queued for creation."}
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListPublicImages(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
images = shared.etcd_client.get_prefix(
|
||||
settings["etcd"]["image_prefix"], value_in_json=True
|
||||
)
|
||||
r = {"images": []}
|
||||
for image in images:
|
||||
image_key = "{}:{}".format(
|
||||
image.value["store_name"], image.value["name"]
|
||||
)
|
||||
r["images"].append(
|
||||
{"name": image_key, "status": image.value["status"]}
|
||||
)
|
||||
return r, 200
|
||||
|
||||
|
||||
class VMAction(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.VmActionSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
vm_entry = shared.vm_pool.get(
|
||||
join_path(settings["etcd"]["vm_prefix"], data["uuid"])
|
||||
)
|
||||
action = data["action"]
|
||||
|
||||
if action == "start":
|
||||
action = "schedule"
|
||||
|
||||
if action == "delete" and vm_entry.hostname == "":
|
||||
if shared.storage_handler.is_vm_image_exists(
|
||||
vm_entry.uuid
|
||||
):
|
||||
r_status = shared.storage_handler.delete_vm_image(
|
||||
vm_entry.uuid
|
||||
)
|
||||
if r_status:
|
||||
shared.etcd_client.client.delete(vm_entry.key)
|
||||
return {"message": "VM successfully deleted"}
|
||||
else:
|
||||
logger.error(
|
||||
"Some Error Occurred while deleting VM"
|
||||
)
|
||||
return {"message": "VM deletion unsuccessfull"}
|
||||
else:
|
||||
shared.etcd_client.client.delete(vm_entry.key)
|
||||
return {"message": "VM successfully deleted"}
|
||||
|
||||
r = RequestEntry.from_scratch(
|
||||
type="{}VM".format(action.title()),
|
||||
uuid=data["uuid"],
|
||||
hostname=vm_entry.hostname,
|
||||
request_prefix=settings["etcd"]["request_prefix"],
|
||||
)
|
||||
shared.request_pool.put(r)
|
||||
return (
|
||||
{"message": "VM {} Queued".format(action.title())},
|
||||
200,
|
||||
)
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class VMMigration(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.VmMigrationSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
vm = shared.vm_pool.get(data["uuid"])
|
||||
r = RequestEntry.from_scratch(
|
||||
type=RequestType.InitVMMigration,
|
||||
uuid=vm.uuid,
|
||||
hostname=join_path(
|
||||
settings["etcd"]["host_prefix"],
|
||||
validator.destination.value,
|
||||
),
|
||||
request_prefix=settings["etcd"]["request_prefix"],
|
||||
)
|
||||
|
||||
shared.request_pool.put(r)
|
||||
return (
|
||||
{"message": "VM Migration Initialization Queued"},
|
||||
200,
|
||||
)
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListUserVM(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
data = request.json
|
||||
validator = schemas.OTPSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
vms = shared.etcd_client.get_prefix(
|
||||
settings["etcd"]["vm_prefix"], value_in_json=True
|
||||
)
|
||||
return_vms = []
|
||||
user_vms = filter(
|
||||
lambda v: v.value["owner"] == data["name"], vms
|
||||
)
|
||||
for vm in user_vms:
|
||||
return_vms.append(
|
||||
{
|
||||
"name": vm.value["name"],
|
||||
"vm_uuid": vm.key.split("/")[-1],
|
||||
"specs": vm.value["specs"],
|
||||
"status": vm.value["status"],
|
||||
"hostname": vm.value["hostname"],
|
||||
"vnc_socket": vm.value.get("vnc_socket", None),
|
||||
}
|
||||
)
|
||||
if return_vms:
|
||||
return {"message": return_vms}, 200
|
||||
return {"message": "No VM found"}, 404
|
||||
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListUserFiles(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
data = request.json
|
||||
validator = schemas.OTPSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
files = shared.etcd_client.get_prefix(
|
||||
settings["etcd"]["file_prefix"], value_in_json=True
|
||||
)
|
||||
return_files = []
|
||||
user_files = list(
|
||||
filter(
|
||||
lambda f: f.value["owner"] == data["name"], files
|
||||
)
|
||||
)
|
||||
for file in user_files:
|
||||
return_files.append(
|
||||
{
|
||||
"filename": file.value["filename"],
|
||||
"uuid": file.key.split("/")[-1],
|
||||
}
|
||||
)
|
||||
return {"message": return_files}, 200
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class CreateHost(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.CreateHostSchema(data)
|
||||
if validator.is_valid():
|
||||
host_key = join_path(
|
||||
settings["etcd"]["host_prefix"], uuid4().hex
|
||||
)
|
||||
host_entry = {
|
||||
"specs": data["specs"],
|
||||
"hostname": data["hostname"],
|
||||
"status": "DEAD",
|
||||
"last_heartbeat": "",
|
||||
}
|
||||
shared.etcd_client.put(
|
||||
host_key, host_entry, value_in_json=True
|
||||
)
|
||||
|
||||
return {"message": "Host Created"}, 200
|
||||
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListHost(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
hosts = shared.host_pool.hosts
|
||||
r = {
|
||||
host.key: {
|
||||
"status": host.status,
|
||||
"specs": host.specs,
|
||||
"hostname": host.hostname,
|
||||
}
|
||||
for host in hosts
|
||||
}
|
||||
return r, 200
|
||||
|
||||
|
||||
class GetSSHKeys(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
data = request.json
|
||||
validator = schemas.GetSSHSchema(data)
|
||||
if validator.is_valid():
|
||||
if not validator.key_name.value:
|
||||
|
||||
# {user_prefix}/{realm}/{name}/key/
|
||||
etcd_key = join_path(
|
||||
settings["etcd"]["user_prefix"],
|
||||
data["realm"],
|
||||
data["name"],
|
||||
"key",
|
||||
)
|
||||
etcd_entry = shared.etcd_client.get_prefix(
|
||||
etcd_key, value_in_json=True
|
||||
)
|
||||
|
||||
keys = {
|
||||
key.key.split("/")[-1]: key.value
|
||||
for key in etcd_entry
|
||||
}
|
||||
return {"keys": keys}
|
||||
else:
|
||||
|
||||
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||
etcd_key = join_path(
|
||||
settings["etcd"]["user_prefix"],
|
||||
data["realm"],
|
||||
data["name"],
|
||||
"key",
|
||||
data["key_name"],
|
||||
)
|
||||
etcd_entry = shared.etcd_client.get(
|
||||
etcd_key, value_in_json=True
|
||||
)
|
||||
|
||||
if etcd_entry:
|
||||
return {
|
||||
"keys": {
|
||||
etcd_entry.key.split("/")[
|
||||
-1
|
||||
]: etcd_entry.value
|
||||
}
|
||||
}
|
||||
else:
|
||||
return {"keys": {}}
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class AddSSHKey(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.AddSSHSchema(data)
|
||||
if validator.is_valid():
|
||||
|
||||
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||
etcd_key = join_path(
|
||||
settings["etcd"]["user_prefix"],
|
||||
data["realm"],
|
||||
data["name"],
|
||||
"key",
|
||||
data["key_name"],
|
||||
)
|
||||
etcd_entry = shared.etcd_client.get(
|
||||
etcd_key, value_in_json=True
|
||||
)
|
||||
if etcd_entry:
|
||||
return {
|
||||
"message": "Key with name '{}' already exists".format(
|
||||
data["key_name"]
|
||||
)
|
||||
}
|
||||
else:
|
||||
# Key Not Found. It implies user' haven't added any key yet.
|
||||
shared.etcd_client.put(
|
||||
etcd_key, data["key"], value_in_json=True
|
||||
)
|
||||
return {"message": "Key added successfully"}
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class RemoveSSHKey(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
data = request.json
|
||||
validator = schemas.RemoveSSHSchema(data)
|
||||
if validator.is_valid():
|
||||
|
||||
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||
etcd_key = join_path(
|
||||
settings["etcd"]["user_prefix"],
|
||||
data["realm"],
|
||||
data["name"],
|
||||
"key",
|
||||
data["key_name"],
|
||||
)
|
||||
etcd_entry = shared.etcd_client.get(
|
||||
etcd_key, value_in_json=True
|
||||
)
|
||||
if etcd_entry:
|
||||
shared.etcd_client.client.delete(etcd_key)
|
||||
return {"message": "Key successfully removed."}
|
||||
else:
|
||||
return {
|
||||
"message": "No Key with name '{}' Exists at all.".format(
|
||||
data["key_name"]
|
||||
)
|
||||
}
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class CreateNetwork(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.CreateNetwork(data)
|
||||
|
||||
if validator.is_valid():
|
||||
|
||||
network_entry = {
|
||||
"id": counters.increment_etcd_counter(
|
||||
shared.etcd_client, "/v1/counter/vxlan"
|
||||
),
|
||||
"type": data["type"],
|
||||
}
|
||||
if validator.user.value:
|
||||
try:
|
||||
nb = pynetbox.api(
|
||||
url=settings["netbox"]["url"],
|
||||
token=settings["netbox"]["token"],
|
||||
)
|
||||
nb_prefix = nb.ipam.prefixes.get(
|
||||
prefix=settings["network"]["prefix"]
|
||||
)
|
||||
prefix = nb_prefix.available_prefixes.create(
|
||||
data={
|
||||
"prefix_length": int(
|
||||
settings["network"]["prefix_length"]
|
||||
),
|
||||
"description": '{}\'s network "{}"'.format(
|
||||
data["name"], data["network_name"]
|
||||
),
|
||||
"is_pool": True,
|
||||
}
|
||||
)
|
||||
except Exception as err:
|
||||
app.logger.error(err)
|
||||
return {
|
||||
"message": "Error occured while creating network."
|
||||
}
|
||||
else:
|
||||
network_entry["ipv6"] = prefix["prefix"]
|
||||
else:
|
||||
network_entry["ipv6"] = "fd00::/64"
|
||||
|
||||
network_key = join_path(
|
||||
settings["etcd"]["network_prefix"],
|
||||
data["name"],
|
||||
data["network_name"],
|
||||
)
|
||||
shared.etcd_client.put(
|
||||
network_key, network_entry, value_in_json=True
|
||||
)
|
||||
return {"message": "Network successfully added."}
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListUserNetwork(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
data = request.json
|
||||
validator = schemas.OTPSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
prefix = join_path(
|
||||
settings["etcd"]["network_prefix"], data["name"]
|
||||
)
|
||||
networks = shared.etcd_client.get_prefix(
|
||||
prefix, value_in_json=True
|
||||
)
|
||||
user_networks = []
|
||||
for net in networks:
|
||||
net.value["name"] = net.key.split("/")[-1]
|
||||
user_networks.append(net.value)
|
||||
return {"networks": user_networks}, 200
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
api.add_resource(CreateVM, "/vm/create")
|
||||
api.add_resource(VmStatus, "/vm/status")
|
||||
|
||||
api.add_resource(VMAction, "/vm/action")
|
||||
api.add_resource(VMMigration, "/vm/migrate")
|
||||
|
||||
api.add_resource(CreateImage, "/image/create")
|
||||
api.add_resource(ListPublicImages, "/image/list-public")
|
||||
|
||||
api.add_resource(ListUserVM, "/user/vms")
|
||||
api.add_resource(ListUserFiles, "/user/files")
|
||||
api.add_resource(ListUserNetwork, "/user/networks")
|
||||
|
||||
api.add_resource(AddSSHKey, "/user/add-ssh")
|
||||
api.add_resource(RemoveSSHKey, "/user/remove-ssh")
|
||||
api.add_resource(GetSSHKeys, "/user/get-ssh")
|
||||
|
||||
api.add_resource(CreateHost, "/host/create")
|
||||
api.add_resource(ListHost, "/host/list")
|
||||
|
||||
api.add_resource(CreateNetwork, "/network/create")
|
||||
|
||||
|
||||
def main():
|
||||
image_stores = list(
|
||||
shared.etcd_client.get_prefix(
|
||||
settings["etcd"]["image_store_prefix"], value_in_json=True
|
||||
)
|
||||
)
|
||||
if not image_stores:
|
||||
data = {
|
||||
"is_public": True,
|
||||
"type": "ceph",
|
||||
"name": "images",
|
||||
"description": "first ever public image-store",
|
||||
"attributes": {"list": [], "key": [], "pool": "images"},
|
||||
}
|
||||
|
||||
shared.etcd_client.put(
|
||||
join_path(
|
||||
settings["etcd"]["image_store_prefix"], uuid4().hex
|
||||
),
|
||||
json.dumps(data),
|
||||
)
|
||||
|
||||
app.run(host="::", debug=False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
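main.py above registers plain Flask-RESTful endpoints over HTTP. A hedged client-side sketch of creating a VM; the listen address/port and every credential and spec value here are placeholders, while the payload keys mirror OTPSchema and CreateVMSchema:

import requests

payload = {
    # OTP credentials expected by OTPSchema (placeholders)
    "name": "alice", "realm": "ungleich", "token": "123456",
    "vm_name": "myvm",
    "image": "images:alpine",
    "specs": {"cpu": 1, "ram": "1GB", "os-ssd": "10GB", "hdd": []},
    "network": ["mynet"],
}
r = requests.post("http://[::1]:5000/vm/create", json=payload)
print(r.status_code, r.json())  # 200 and "VM Creation Queued" on success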
566
uncloud/api/schemas.py
Executable file
|
|
@ -0,0 +1,566 @@
|
|||
"""
|
||||
This module contain classes thats validates and intercept/modify
|
||||
data coming from ucloud-cli (user)
|
||||
|
||||
It was primarily developed as an alternative to argument parser
|
||||
of Flask_Restful which is going to be deprecated. I also tried
|
||||
marshmallow for that purpose but it was an overkill (because it
|
||||
do validation + serialization + deserialization) and little
|
||||
inflexible for our purpose.
|
||||
"""
|
||||
|
||||
# TODO: Fix error message when user's mentioned VM (referred by name)
|
||||
# does not exists.
|
||||
#
|
||||
# Currently, it says uuid is a required field.
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
import bitmath
|
||||
|
||||
from ucloud.common.host import HostStatus
|
||||
from ucloud.common.vm import VMStatus
|
||||
from ucloud.shared import shared
|
||||
from ucloud.settings import settings
|
||||
from . import helper, logger
|
||||
from .common_fields import Field, VmUUIDField
|
||||
from .helper import check_otp, resolve_vm_name
|
||||
|
||||
|
||||
class BaseSchema:
|
||||
def __init__(self, data, fields=None):
|
||||
_ = data # suppress linter warning
|
||||
self.__errors = []
|
||||
if fields is None:
|
||||
self.fields = []
|
||||
else:
|
||||
self.fields = fields
|
||||
|
||||
def validation(self):
|
||||
# custom validation is optional
|
||||
return True
|
||||
|
||||
def is_valid(self):
|
||||
for field in self.fields:
|
||||
field.is_valid()
|
||||
self.add_field_errors(field)
|
||||
|
||||
for parent in self.__class__.__bases__:
|
||||
try:
|
||||
parent.validation(self)
|
||||
except AttributeError:
|
||||
pass
|
||||
if not self.__errors:
|
||||
self.validation()
|
||||
|
||||
if self.__errors:
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_errors(self):
|
||||
return {"message": self.__errors}
|
||||
|
||||
def add_field_errors(self, field: Field):
|
||||
self.__errors += field.get_errors()
|
||||
|
||||
def add_error(self, error):
|
||||
self.__errors.append(error)
|
||||
|
||||
|
||||
class OTPSchema(BaseSchema):
|
||||
def __init__(self, data: dict, fields=None):
|
||||
self.name = Field("name", str, data.get("name", KeyError))
|
||||
self.realm = Field("realm", str, data.get("realm", KeyError))
|
||||
self.token = Field("token", str, data.get("token", KeyError))
|
||||
|
||||
_fields = [self.name, self.realm, self.token]
|
||||
if fields:
|
||||
_fields += fields
|
||||
super().__init__(data=data, fields=_fields)
|
||||
|
||||
def validation(self):
|
||||
if (
|
||||
check_otp(
|
||||
self.name.value, self.realm.value, self.token.value
|
||||
)
|
||||
!= 200
|
||||
):
|
||||
self.add_error("Wrong Credentials")
|
||||
|
||||
|
||||
########################## Image Operations ###############################################
|
||||
|
||||
|
||||
class CreateImageSchema(BaseSchema):
|
||||
def __init__(self, data):
|
||||
# Fields
|
||||
self.uuid = Field("uuid", str, data.get("uuid", KeyError))
|
||||
self.name = Field("name", str, data.get("name", KeyError))
|
||||
self.image_store = Field(
|
||||
"image_store", str, data.get("image_store", KeyError)
|
||||
)
|
||||
|
||||
# Validations
|
||||
self.uuid.validation = self.file_uuid_validation
|
||||
self.image_store.validation = self.image_store_name_validation
|
||||
|
||||
# All Fields
|
||||
fields = [self.uuid, self.name, self.image_store]
|
||||
super().__init__(data, fields)
|
||||
|
||||
def file_uuid_validation(self):
|
||||
file_entry = shared.etcd_client.get(
|
||||
os.path.join(
|
||||
settings["etcd"]["file_prefix"], self.uuid.value
|
||||
)
|
||||
)
|
||||
if file_entry is None:
|
||||
self.add_error(
|
||||
"Image File with uuid '{}' Not Found".format(
|
||||
self.uuid.value
|
||||
)
|
||||
)
|
||||
|
||||
def image_store_name_validation(self):
|
||||
image_stores = list(
|
||||
shared.etcd_client.get_prefix(
|
||||
settings["etcd"]["image_store_prefix"]
|
||||
)
|
||||
)
|
||||
|
||||
image_store = next(
|
||||
filter(
|
||||
lambda s: json.loads(s.value)["name"]
|
||||
== self.image_store.value,
|
||||
image_stores,
|
||||
),
|
||||
None,
|
||||
)
|
||||
if not image_store:
|
||||
self.add_error(
|
||||
"Store '{}' does not exists".format(
|
||||
self.image_store.value
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# Host Operations
|
||||
|
||||
|
||||
class CreateHostSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
# Fields
|
||||
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
||||
self.hostname = Field(
|
||||
"hostname", str, data.get("hostname", KeyError)
|
||||
)
|
||||
|
||||
# Validation
|
||||
self.specs.validation = self.specs_validation
|
||||
|
||||
fields = [self.hostname, self.specs]
|
||||
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
def specs_validation(self):
|
||||
ALLOWED_BASE = 10
|
||||
|
||||
_cpu = self.specs.value.get("cpu", KeyError)
|
||||
_ram = self.specs.value.get("ram", KeyError)
|
||||
_os_ssd = self.specs.value.get("os-ssd", KeyError)
|
||||
_hdd = self.specs.value.get("hdd", KeyError)
|
||||
|
||||
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
||||
self.add_error(
|
||||
"You must specify CPU, RAM and OS-SSD in your specs"
|
||||
)
|
||||
return None
|
||||
try:
|
||||
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
||||
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
||||
|
||||
if parsed_ram.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified RAM is not in correct units"
|
||||
)
|
||||
if parsed_os_ssd.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified OS-SSD is not in correct units"
|
||||
)
|
||||
|
||||
if _cpu < 1:
|
||||
self.add_error("CPU must be atleast 1")
|
||||
|
||||
if parsed_ram < bitmath.GB(1):
|
||||
self.add_error("RAM must be atleast 1 GB")
|
||||
|
||||
if parsed_os_ssd < bitmath.GB(10):
|
||||
self.add_error("OS-SSD must be atleast 10 GB")
|
||||
|
||||
parsed_hdd = []
|
||||
for hdd in _hdd:
|
||||
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
||||
if _parsed_hdd.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified HDD is not in correct units"
|
||||
)
|
||||
break
|
||||
else:
|
||||
parsed_hdd.append(str(_parsed_hdd))
|
||||
|
||||
except ValueError:
|
||||
# TODO: Find some good error message
|
||||
self.add_error("Specs are not correct.")
|
||||
else:
|
||||
if self.get_errors():
|
||||
self.specs = {
|
||||
"cpu": _cpu,
|
||||
"ram": str(parsed_ram),
|
||||
"os-ssd": str(parsed_os_ssd),
|
||||
"hdd": parsed_hdd,
|
||||
}
|
||||
|
||||
def validation(self):
|
||||
if self.realm.value != "ungleich-admin":
|
||||
self.add_error(
|
||||
"Invalid Credentials/Insufficient Permission"
|
||||
)
|
||||
|
||||
|
||||
# VM Operations
|
||||
|
||||
|
||||
class CreateVMSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
# Fields
|
||||
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
||||
self.vm_name = Field(
|
||||
"vm_name", str, data.get("vm_name", KeyError)
|
||||
)
|
||||
self.image = Field("image", str, data.get("image", KeyError))
|
||||
self.network = Field(
|
||||
"network", list, data.get("network", KeyError)
|
||||
)
|
||||
|
||||
# Validation
|
||||
self.image.validation = self.image_validation
|
||||
self.vm_name.validation = self.vm_name_validation
|
||||
self.specs.validation = self.specs_validation
|
||||
self.network.validation = self.network_validation
|
||||
|
||||
fields = [self.vm_name, self.image, self.specs, self.network]
|
||||
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
def image_validation(self):
|
||||
try:
|
||||
image_uuid = helper.resolve_image_name(
|
||||
self.image.value, shared.etcd_client
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"Cannot resolve image name = %s", self.image.value
|
||||
)
|
||||
self.add_error(str(e))
|
||||
else:
|
||||
self.image_uuid = image_uuid
|
||||
|
||||
def vm_name_validation(self):
|
||||
if resolve_vm_name(
|
||||
name=self.vm_name.value, owner=self.name.value
|
||||
):
|
||||
self.add_error(
|
||||
'VM with same name "{}" already exists'.format(
|
||||
self.vm_name.value
|
||||
)
|
||||
)
|
||||
|
||||
def network_validation(self):
|
||||
_network = self.network.value
|
||||
|
||||
if _network:
|
||||
for net in _network:
|
||||
network = shared.etcd_client.get(
|
||||
os.path.join(
|
||||
settings["etcd"]["network_prefix"],
|
||||
self.name.value,
|
||||
net,
|
||||
),
|
||||
value_in_json=True,
|
||||
)
|
||||
if not network:
|
||||
self.add_error(
|
||||
"Network with name {} does not exists".format(
|
||||
net
|
||||
)
|
||||
)
|
||||
|
||||
def specs_validation(self):
|
||||
ALLOWED_BASE = 10
|
||||
|
||||
_cpu = self.specs.value.get("cpu", KeyError)
|
||||
_ram = self.specs.value.get("ram", KeyError)
|
||||
_os_ssd = self.specs.value.get("os-ssd", KeyError)
|
||||
_hdd = self.specs.value.get("hdd", KeyError)
|
||||
|
||||
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
||||
self.add_error(
|
||||
"You must specify CPU, RAM and OS-SSD in your specs"
|
||||
)
|
||||
return None
|
||||
try:
|
||||
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
||||
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
||||
|
||||
if parsed_ram.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified RAM is not in correct units"
|
||||
)
|
||||
if parsed_os_ssd.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified OS-SSD is not in correct units"
|
||||
)
|
||||
|
||||
if _cpu < 1:
|
||||
self.add_error("CPU must be atleast 1")
|
||||
|
||||
if parsed_ram < bitmath.GB(1):
|
||||
self.add_error("RAM must be atleast 1 GB")
|
||||
|
||||
if parsed_os_ssd < bitmath.GB(1):
|
||||
self.add_error("OS-SSD must be atleast 1 GB")
|
||||
|
||||
parsed_hdd = []
|
||||
for hdd in _hdd:
|
||||
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
||||
if _parsed_hdd.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified HDD is not in correct units"
|
||||
)
|
||||
break
|
||||
else:
|
||||
parsed_hdd.append(str(_parsed_hdd))
|
||||
|
||||
except ValueError:
|
||||
# TODO: Find some good error message
|
||||
self.add_error("Specs are not correct.")
|
||||
else:
|
||||
if self.get_errors():
|
||||
self.specs = {
|
||||
"cpu": _cpu,
|
||||
"ram": str(parsed_ram),
|
||||
"os-ssd": str(parsed_os_ssd),
|
||||
"hdd": parsed_hdd,
|
||||
}
|
||||
|
||||
|
||||
class VMStatusSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
data["uuid"] = (
|
||||
resolve_vm_name(
|
||||
name=data.get("vm_name", None),
|
||||
owner=(
|
||||
data.get("in_support_of", None)
|
||||
or data.get("name", None)
|
||||
),
|
||||
)
|
||||
or KeyError
|
||||
)
|
||||
self.uuid = VmUUIDField(data)
|
||||
|
||||
fields = [self.uuid]
|
||||
|
||||
super().__init__(data, fields)
|
||||
|
||||
def validation(self):
|
||||
vm = shared.vm_pool.get(self.uuid.value)
|
||||
if not (
|
||||
vm.value["owner"] == self.name.value
|
||||
or self.realm.value == "ungleich-admin"
|
||||
):
|
||||
self.add_error("Invalid User")
|
||||
|
||||
|
||||
class VmActionSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
data["uuid"] = (
|
||||
resolve_vm_name(
|
||||
name=data.get("vm_name", None),
|
||||
owner=(
|
||||
data.get("in_support_of", None)
|
||||
or data.get("name", None)
|
||||
),
|
||||
)
|
||||
or KeyError
|
||||
)
|
||||
self.uuid = VmUUIDField(data)
|
||||
self.action = Field("action", str, data.get("action", KeyError))
|
||||
|
||||
self.action.validation = self.action_validation
|
||||
|
||||
_fields = [self.uuid, self.action]
|
||||
|
||||
super().__init__(data=data, fields=_fields)
|
||||
|
||||
def action_validation(self):
|
||||
allowed_actions = ["start", "stop", "delete"]
|
||||
if self.action.value not in allowed_actions:
|
||||
self.add_error(
|
||||
"Invalid Action. Allowed Actions are {}".format(
|
||||
allowed_actions
|
||||
)
|
||||
)
|
||||
|
||||
def validation(self):
|
||||
vm = shared.vm_pool.get(self.uuid.value)
|
||||
if not (
|
||||
vm.value["owner"] == self.name.value
|
||||
or self.realm.value == "ungleich-admin"
|
||||
):
|
||||
self.add_error("Invalid User")
|
||||
|
||||
if (
|
||||
self.action.value == "start"
|
||||
and vm.status == VMStatus.running
|
||||
and vm.hostname != ""
|
||||
):
|
||||
self.add_error("VM Already Running")
|
||||
|
||||
if self.action.value == "stop":
|
||||
if vm.status == VMStatus.stopped:
|
||||
self.add_error("VM Already Stopped")
|
||||
elif vm.status != VMStatus.running:
|
||||
self.add_error("Cannot stop non-running VM")
|
||||
|
||||
|
||||
class VmMigrationSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
data["uuid"] = (
|
||||
resolve_vm_name(
|
||||
name=data.get("vm_name", None),
|
||||
owner=(
|
||||
data.get("in_support_of", None)
|
||||
or data.get("name", None)
|
||||
),
|
||||
)
|
||||
or KeyError
|
||||
)
|
||||
|
||||
self.uuid = VmUUIDField(data)
|
||||
self.destination = Field(
|
||||
"destination", str, data.get("destination", KeyError)
|
||||
)
|
||||
|
||||
self.destination.validation = self.destination_validation
|
||||
|
||||
fields = [self.destination]
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
def destination_validation(self):
|
||||
hostname = self.destination.value
|
||||
host = next(
|
||||
filter(
|
||||
lambda h: h.hostname == hostname, shared.host_pool.hosts
|
||||
),
|
||||
None,
|
||||
)
|
||||
if not host:
|
||||
self.add_error(
|
||||
"No Such Host ({}) exists".format(
|
||||
self.destination.value
|
||||
)
|
||||
)
|
||||
elif host.status != HostStatus.alive:
|
||||
self.add_error("Destination Host is dead")
|
||||
else:
|
||||
self.destination.value = host.key
|
||||
|
||||
def validation(self):
|
||||
vm = shared.vm_pool.get(self.uuid.value)
|
||||
if not (
|
||||
vm.value["owner"] == self.name.value
|
||||
or self.realm.value == "ungleich-admin"
|
||||
):
|
||||
self.add_error("Invalid User")
|
||||
|
||||
if vm.status != VMStatus.running:
|
||||
self.add_error("Can't migrate non-running VM")
|
||||
|
||||
if vm.hostname == os.path.join(
|
||||
settings["etcd"]["host_prefix"], self.destination.value
|
||||
):
|
||||
self.add_error(
|
||||
"Destination host couldn't be same as Source Host"
|
||||
)
|
||||
|
||||
|
||||
class AddSSHSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
self.key_name = Field(
|
||||
"key_name", str, data.get("key_name", KeyError)
|
||||
)
|
||||
self.key = Field("key", str, data.get("key_name", KeyError))
|
||||
|
||||
fields = [self.key_name, self.key]
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
|
||||
class RemoveSSHSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
self.key_name = Field(
|
||||
"key_name", str, data.get("key_name", KeyError)
|
||||
)
|
||||
|
||||
fields = [self.key_name]
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
|
||||
class GetSSHSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
self.key_name = Field(
|
||||
"key_name", str, data.get("key_name", None)
|
||||
)
|
||||
|
||||
fields = [self.key_name]
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
|
||||
class CreateNetwork(OTPSchema):
|
||||
def __init__(self, data):
|
||||
self.network_name = Field(
|
||||
"network_name", str, data.get("network_name", KeyError)
|
||||
)
|
||||
self.type = Field("type", str, data.get("type", KeyError))
|
||||
self.user = Field("user", bool, bool(data.get("user", False)))
|
||||
|
||||
self.network_name.validation = self.network_name_validation
|
||||
self.type.validation = self.network_type_validation
|
||||
|
||||
fields = [self.network_name, self.type, self.user]
|
||||
super().__init__(data, fields=fields)
|
||||
|
||||
def network_name_validation(self):
|
||||
network = shared.etcd_client.get(
|
||||
os.path.join(
|
||||
settings["etcd"]["network_prefix"],
|
||||
self.name.value,
|
||||
self.network_name.value,
|
||||
),
|
||||
value_in_json=True,
|
||||
)
|
||||
if network:
|
||||
self.add_error(
|
||||
"Network with name {} already exists".format(
|
||||
self.network_name.value
|
||||
)
|
||||
)
|
||||
|
||||
def network_type_validation(self):
|
||||
supported_network_types = ["vxlan"]
|
||||
if self.type.value not in supported_network_types:
|
||||
self.add_error(
|
||||
"Unsupported Network Type. Supported network types are {}".format(
|
||||
supported_network_types
|
||||
)
|
||||
)
|
||||
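The schema classes above are driven exactly as in main.py: construct one with the request payload, call is_valid(), then read get_errors() on failure. A condensed sketch with illustrative values only:

data = {
    "name": "alice", "realm": "ungleich", "token": "123456",
    "vm_name": "myvm", "image": "images:alpine",
    "specs": {"cpu": 1, "ram": "1GB", "os-ssd": "10GB", "hdd": []},
    "network": [],
}

validator = CreateVMSchema(data)
if validator.is_valid():
    pass  # proceed, e.g. write the VM entry to etcd and queue a ScheduleVM request
else:
    print(validator.get_errors())  # {"message": [...]} as returned by the API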
3
uncloud/common/__init__.py
Normal file
@@ -0,0 +1,3 @@
import logging

logger = logging.getLogger(__name__)
26
uncloud/common/classes.py
Normal file
@@ -0,0 +1,26 @@
from .etcd_wrapper import EtcdEntry


class SpecificEtcdEntryBase:
    def __init__(self, e: EtcdEntry):
        self.key = e.key

        for k in e.value.keys():
            self.__setattr__(k, e.value[k])

    def original_keys(self):
        r = dict(self.__dict__)
        if "key" in r:
            del r["key"]
        return r

    @property
    def value(self):
        return self.original_keys()

    @value.setter
    def value(self, v):
        self.__dict__ = v

    def __repr__(self):
        return str(dict(self.__dict__))
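To illustrate what SpecificEtcdEntryBase does with an etcd entry, a sketch using EtcdEntry from etcd_wrapper.py below; the namedtuple stands in for the real etcd key metadata and the key/value pair is made up:

from collections import namedtuple

FakeMeta = namedtuple("FakeMeta", ["key"])  # stand-in for etcd3 KV metadata

# EtcdEntry decodes bytes, so feed it byte strings as the real client would.
entry = EtcdEntry(
    FakeMeta(key=b"/v1/host/abc"),
    b'{"hostname": "server1", "status": "ALIVE"}',
    value_in_json=True,
)
obj = SpecificEtcdEntryBase(entry)
print(obj.key)       # /v1/host/abc
print(obj.hostname)  # server1
print(obj.value)     # {'hostname': 'server1', 'status': 'ALIVE'} -- "key" excluded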
21
uncloud/common/counters.py
Normal file
@@ -0,0 +1,21 @@
from .etcd_wrapper import Etcd3Wrapper


def increment_etcd_counter(etcd_client: Etcd3Wrapper, key):
    kv = etcd_client.get(key)

    if kv:
        counter = int(kv.value)
        counter = counter + 1
    else:
        counter = 1

    etcd_client.put(key, str(counter))
    return counter


def get_etcd_counter(etcd_client: Etcd3Wrapper, key):
    kv = etcd_client.get(key)
    if kv:
        return int(kv.value)
    return None
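Usage is a read-modify-write against a plain etcd key, which is how main.py allocates tap interface IDs; a sketch, with the etcd endpoint assumed:

etcd_client = Etcd3Wrapper(host="localhost", port=2379)  # connection details assumed

tap_id = increment_etcd_counter(etcd_client, "/v1/counter/tap")  # 1 on first call, then 2, 3, ...
current = get_etcd_counter(etcd_client, "/v1/counter/tap")       # same value, without incrementing

Note that this is a simple get-then-put rather than an atomic transaction, so concurrent callers can race on the same counter key.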
117
uncloud/common/etcd_wrapper.py
Normal file
|
|
@ -0,0 +1,117 @@
|
|||
import etcd3
|
||||
import json
|
||||
import queue
|
||||
import copy
|
||||
|
||||
from collections import namedtuple
|
||||
from functools import wraps
|
||||
|
||||
from . import logger
|
||||
|
||||
PseudoEtcdMeta = namedtuple("PseudoEtcdMeta", ["key"])
|
||||
|
||||
|
||||
class EtcdEntry:
|
||||
# key: str
|
||||
# value: str
|
||||
|
||||
def __init__(self, meta, value, value_in_json=False):
|
||||
self.key = meta.key.decode("utf-8")
|
||||
self.value = value.decode("utf-8")
|
||||
|
||||
if value_in_json:
|
||||
self.value = json.loads(self.value)
|
||||
|
||||
|
||||
def readable_errors(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except etcd3.exceptions.ConnectionFailedError as err:
|
||||
raise etcd3.exceptions.ConnectionFailedError(
|
||||
"etcd connection failed."
|
||||
) from err
|
||||
except etcd3.exceptions.ConnectionTimeoutError as err:
|
||||
raise etcd3.exceptions.ConnectionTimeoutError(
|
||||
"etcd connection timeout."
|
||||
) from err
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Some etcd error occured. See syslog for details."
|
||||
)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class Etcd3Wrapper:
|
||||
@readable_errors
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.client = etcd3.client(*args, **kwargs)
|
||||
|
||||
@readable_errors
|
||||
def get(self, *args, value_in_json=False, **kwargs):
|
||||
_value, _key = self.client.get(*args, **kwargs)
|
||||
if _key is None or _value is None:
|
||||
return None
|
||||
return EtcdEntry(_key, _value, value_in_json=value_in_json)
|
||||
|
||||
@readable_errors
|
||||
def put(self, *args, value_in_json=False, **kwargs):
|
||||
_key, _value = args
|
||||
if value_in_json:
|
||||
_value = json.dumps(_value)
|
||||
|
||||
if not isinstance(_key, str):
|
||||
_key = _key.decode("utf-8")
|
||||
|
||||
return self.client.put(_key, _value, **kwargs)
|
||||
|
||||
@readable_errors
|
||||
def get_prefix(self, *args, value_in_json=False, **kwargs):
|
||||
r = self.client.get_prefix(*args, **kwargs)
|
||||
for entry in r:
|
||||
e = EtcdEntry(*entry[::-1], value_in_json=value_in_json)
|
||||
if e.value:
|
||||
yield e
|
||||
|
||||
@readable_errors
|
||||
def watch_prefix(self, key, timeout=0, value_in_json=False):
|
||||
timeout_event = EtcdEntry(
|
||||
PseudoEtcdMeta(key=b"TIMEOUT"),
|
||||
value=str.encode(
|
||||
json.dumps({"status": "TIMEOUT", "type": "TIMEOUT"})
|
||||
),
|
||||
value_in_json=value_in_json,
|
||||
)
|
||||
|
||||
event_queue = queue.Queue()
|
||||
|
||||
def add_event_to_queue(event):
|
||||
if hasattr(event, "events"):
|
||||
for e in event.events:
|
||||
if e.value:
|
||||
event_queue.put(
|
||||
EtcdEntry(
|
||||
e, e.value, value_in_json=value_in_json
|
||||
)
|
||||
)
|
||||
|
||||
self.client.add_watch_prefix_callback(key, add_event_to_queue)
|
||||
|
||||
while True:
|
||||
try:
|
||||
while True:
|
||||
v = event_queue.get(timeout=timeout)
|
||||
yield v
|
||||
except queue.Empty:
|
||||
event_queue.put(copy.deepcopy(timeout_event))
|
||||
|
||||
|
||||
class PsuedoEtcdEntry(EtcdEntry):
|
||||
def __init__(self, key, value, value_in_json=False):
|
||||
super().__init__(
|
||||
PseudoEtcdMeta(key=key.encode("utf-8")),
|
||||
value,
|
||||
value_in_json=value_in_json,
|
||||
)
|
||||
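A short sketch of the wrapper's JSON round trip; the endpoint details and keys are assumptions:

client = Etcd3Wrapper(host="localhost", port=2379)  # endpoint assumed

# put() serializes dicts when value_in_json=True; get() returns an EtcdEntry or None.
client.put("/v1/vm/test", {"status": "STOPPED"}, value_in_json=True)
entry = client.get("/v1/vm/test", value_in_json=True)
print(entry.key, entry.value["status"])  # /v1/vm/test STOPPED

for e in client.get_prefix("/v1/vm/", value_in_json=True):
    print(e.key)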
69
uncloud/common/host.py
Normal file
@@ -0,0 +1,69 @@
import time
from datetime import datetime
from os.path import join
from typing import List

from .classes import SpecificEtcdEntryBase


class HostStatus:
    """Possible Statuses of ucloud host."""

    alive = "ALIVE"
    dead = "DEAD"


class HostEntry(SpecificEtcdEntryBase):
    """Represents Host Entry Structure and its supporting methods."""

    def __init__(self, e):
        self.specs = None  # type: dict
        self.hostname = None  # type: str
        self.status = None  # type: str
        self.last_heartbeat = None  # type: str

        super().__init__(e)

    def update_heartbeat(self):
        self.status = HostStatus.alive
        self.last_heartbeat = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")

    def is_alive(self):
        last_heartbeat = datetime.strptime(
            self.last_heartbeat, "%Y-%m-%d %H:%M:%S"
        )
        delta = datetime.utcnow() - last_heartbeat
        if delta.total_seconds() > 60:
            return False
        return True

    def declare_dead(self):
        self.status = HostStatus.dead
        self.last_heartbeat = time.strftime("%Y-%m-%d %H:%M:%S")


class HostPool:
    def __init__(self, etcd_client, host_prefix):
        self.client = etcd_client
        self.prefix = host_prefix

    @property
    def hosts(self) -> List[HostEntry]:
        _hosts = self.client.get_prefix(self.prefix, value_in_json=True)
        return [HostEntry(host) for host in _hosts]

    def get(self, key):
        if not key.startswith(self.prefix):
            key = join(self.prefix, key)
        v = self.client.get(key, value_in_json=True)
        if v:
            return HostEntry(v)
        return None

    def put(self, obj: HostEntry):
        self.client.put(obj.key, obj.value, value_in_json=True)

    def by_status(self, status, _hosts=None):
        if _hosts is None:
            _hosts = self.hosts
        return list(filter(lambda x: x.status == status, _hosts))
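A sketch of how scheduler-style code typically consumes this pool; the etcd client and the host prefix value are assumed:

host_pool = HostPool(etcd_client, "/v1/host")  # prefix value assumed

for host in host_pool.by_status(HostStatus.alive):
    if not host.is_alive():      # heartbeat older than 60 seconds
        host.declare_dead()
        host_pool.put(host)      # write the updated status back to etcd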
27
uncloud/common/logging.py
Normal file
@@ -0,0 +1,27 @@
import logging
import colorama


class NoTracebackStreamHandler(logging.StreamHandler):
    def handle(self, record):
        info, cache = record.exc_info, record.exc_text
        record.exc_info, record.exc_text = None, None

        if record.levelname in ["WARNING", "WARN"]:
            color = colorama.Fore.LIGHTYELLOW_EX
        elif record.levelname == "ERROR":
            color = colorama.Fore.LIGHTRED_EX
        elif record.levelname == "INFO":
            color = colorama.Fore.LIGHTGREEN_EX
        elif record.levelname == "CRITICAL":
            color = colorama.Fore.LIGHTCYAN_EX
        else:
            color = colorama.Fore.WHITE

        try:
            print(color, end="", flush=True)
            super().handle(record)
        finally:
            record.exc_info = info
            record.exc_text = cache
            print(colorama.Style.RESET_ALL, end="", flush=True)
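A minimal sketch of attaching this handler; the logger name is arbitrary:

import logging

logger = logging.getLogger("uncloud")
logger.addHandler(NoTracebackStreamHandler())
logger.warning("printed in yellow, with any attached traceback suppressed")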
72
uncloud/common/network.py
Normal file
@@ -0,0 +1,72 @@
import subprocess as sp
import random
import logging
import socket
from contextlib import closing

logger = logging.getLogger(__name__)


def random_bytes(num=6):
    return [random.randrange(256) for _ in range(num)]


def generate_mac(
    uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
):
    mac = random_bytes()
    if oui:
        if type(oui) == str:
            oui = [int(chunk) for chunk in oui.split(separator)]
        mac = oui + random_bytes(num=6 - len(oui))
    else:
        if multicast:
            mac[0] |= 1  # set bit 0
        else:
            mac[0] &= ~1  # clear bit 0
        if uaa:
            mac[0] &= ~(1 << 1)  # clear bit 1
        else:
            mac[0] |= 1 << 1  # set bit 1
    return separator.join(byte_fmt % b for b in mac)


def create_dev(script, _id, dev, ip=None):
    command = [
        "sudo",
        "-p",
        "Enter password to create network devices for vm: ",
        script,
        str(_id),
        dev,
    ]
    if ip:
        command.append(ip)
    try:
        output = sp.check_output(command, stderr=sp.PIPE)
    except Exception:
        logger.exception("Creation of interface %s failed.", dev)
        return None
    else:
        return output.decode("utf-8").strip()


def delete_network_interface(iface):
    try:
        sp.check_output(
            [
                "sudo",
                "-p",
                "Enter password to remove {} network device: ".format(
                    iface
                ),
                "ip",
                "link",
                "del",
                iface,
            ],
            stderr=sp.PIPE,
        )
    except Exception:
        logger.exception("Interface %s Deletion failed", iface)
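A hedged sketch of the create_dev call site; the script path, VXLAN id, device name and address below are all hypothetical and depend on the deployment:

tap_name = create_dev("/usr/local/bin/vxlan-up.sh", 42, "tap42", ip="fd00::1/64")
if tap_name is None:
    logger.error("network device creation failed")  # details were already logged with traceback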
49
uncloud/common/request.py
Normal file
@@ -0,0 +1,49 @@
import json
from os.path import join
from uuid import uuid4

from .etcd_wrapper import PsuedoEtcdEntry
from .classes import SpecificEtcdEntryBase


class RequestType:
    CreateVM = "CreateVM"
    ScheduleVM = "ScheduleVM"
    StartVM = "StartVM"
    StopVM = "StopVM"
    InitVMMigration = "InitVMMigration"
    TransferVM = "TransferVM"
    DeleteVM = "DeleteVM"


class RequestEntry(SpecificEtcdEntryBase):
    def __init__(self, e):
        self.destination_sock_path = None
        self.destination_host_key = None
        self.type = None  # type: str
        self.migration = None  # type: bool
        self.destination = None  # type: str
        self.uuid = None  # type: str
        self.hostname = None  # type: str
        super().__init__(e)

    @classmethod
    def from_scratch(cls, request_prefix, **kwargs):
        e = PsuedoEtcdEntry(
            join(request_prefix, uuid4().hex),
            value=json.dumps(kwargs).encode("utf-8"),
            value_in_json=True,
        )
        return cls(e)


class RequestPool:
    def __init__(self, etcd_client, request_prefix):
        self.client = etcd_client
        self.prefix = request_prefix

    def put(self, obj: RequestEntry):
        if not obj.key.startswith(self.prefix):
            obj.key = join(self.prefix, obj.key)

        self.client.put(obj.key, obj.value, value_in_json=True)
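This is the same construction main.py uses when queueing work; a condensed sketch, with the etcd client and the request prefix value assumed:

r = RequestEntry.from_scratch(
    type=RequestType.ScheduleVM,
    uuid="0a1b2c3d4e5f",           # VM uuid (placeholder)
    request_prefix="/v1/request",  # normally read from settings["etcd"]["request_prefix"]
)
request_pool = RequestPool(etcd_client, "/v1/request")
request_pool.put(r)  # workers watching the request prefix pick this entry up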
41
uncloud/common/schemas.py
Normal file
@@ -0,0 +1,41 @@
import bitmath

from marshmallow import fields, Schema


class StorageUnit(fields.Field):
    def _serialize(self, value, attr, obj, **kwargs):
        return str(value)

    def _deserialize(self, value, attr, data, **kwargs):
        return bitmath.parse_string_unsafe(value)


class SpecsSchema(Schema):
    cpu = fields.Int()
    ram = StorageUnit()
    os_ssd = StorageUnit(data_key="os-ssd", attribute="os-ssd")
    hdd = fields.List(StorageUnit())


class VMSchema(Schema):
    name = fields.Str()
    owner = fields.Str()
    owner_realm = fields.Str()
    specs = fields.Nested(SpecsSchema)
    status = fields.Str()
    log = fields.List(fields.Str())
    vnc_socket = fields.Str()
    image_uuid = fields.Str()
    hostname = fields.Str()
    metadata = fields.Dict()
    network = fields.List(
        fields.Tuple((fields.Str(), fields.Str(), fields.Int()))
    )
    in_migration = fields.Bool()


class NetworkSchema(Schema):
    _id = fields.Int(data_key="id", attribute="id")
    _type = fields.Str(data_key="type", attribute="type")
    ipv6 = fields.Str()
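These are standard marshmallow schemas; a quick round-trip sketch showing how the storage-unit fields behave, with sample values that are made up:

schema = SpecsSchema()

specs = schema.load({"cpu": 2, "ram": "4 GB", "os-ssd": "10 GB", "hdd": ["100 GB"]})
print(specs["ram"])        # bitmath GB(4.0), produced by StorageUnit._deserialize
print(schema.dump(specs))  # back to strings, e.g. {'cpu': 2, 'ram': '4.0 GB', 'os-ssd': '10.0 GB', 'hdd': ['100.0 GB']}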
208
uncloud/common/storage_handlers.py
Normal file
|
|
@ -0,0 +1,208 @@
|
|||
import shutil
|
||||
import subprocess as sp
|
||||
import os
|
||||
import stat
|
||||
|
||||
from abc import ABC
|
||||
from . import logger
|
||||
from os.path import join as join_path
|
||||
|
||||
from ucloud.settings import settings as config
|
||||
|
||||
|
||||
class ImageStorageHandler(ABC):
|
||||
handler_name = "base"
|
||||
|
||||
def __init__(self, image_base, vm_base):
|
||||
self.image_base = image_base
|
||||
self.vm_base = vm_base
|
||||
|
||||
def import_image(self, image_src, image_dest, protect=False):
|
||||
"""Put an image at the destination
|
||||
:param image_src: An Image file
|
||||
:param image_dest: A path where :param src: is to be put.
|
||||
:param protect: If protect is true then the dest is protect (readonly etc)
|
||||
The obj must exist on filesystem.
|
||||
"""
|
||||
|
||||
raise NotImplementedError()
|
||||
|
||||
def make_vm_image(self, image_path, path):
|
||||
"""Copy image from src to dest
|
||||
|
||||
:param image_path: A path
|
||||
:param path: A path
|
||||
|
||||
src and destination must be on same storage system i.e both on file system or both on CEPH etc.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def resize_vm_image(self, path, size):
|
||||
"""Resize image located at :param path:
|
||||
:param path: The file which is to be resized
|
||||
:param size: Size must be in Megabytes
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete_vm_image(self, path):
|
||||
raise NotImplementedError()
|
||||
|
||||
def execute_command(self, command, report=True, error_origin=None):
|
||||
if not error_origin:
|
||||
error_origin = self.handler_name
|
||||
|
||||
command = list(map(str, command))
|
||||
try:
|
||||
sp.check_output(command, stderr=sp.PIPE)
|
||||
except sp.CalledProcessError as e:
|
||||
_stderr = e.stderr.decode("utf-8").strip()
|
||||
if report:
|
||||
logger.exception("%s:- %s", error_origin, _stderr)
|
||||
return False
|
||||
return True
|
||||
|
||||
def vm_path_string(self, path):
|
||||
raise NotImplementedError()
|
||||
|
||||
def qemu_path_string(self, path):
|
||||
raise NotImplementedError()
|
||||
|
||||
def is_vm_image_exists(self, path):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class FileSystemBasedImageStorageHandler(ImageStorageHandler):
|
||||
handler_name = "Filesystem"
|
||||
|
||||
def import_image(self, src, dest, protect=False):
|
||||
dest = join_path(self.image_base, dest)
|
||||
try:
|
||||
shutil.copy(src, dest)
|
||||
if protect:
|
||||
os.chmod(
|
||||
dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
return False
|
||||
return True
|
||||
|
||||
def make_vm_image(self, src, dest):
|
||||
src = join_path(self.image_base, src)
|
||||
dest = join_path(self.vm_base, dest)
|
||||
try:
|
||||
shutil.copyfile(src, dest)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
return False
|
||||
return True
|
||||
|
||||
def resize_vm_image(self, path, size):
|
||||
path = join_path(self.vm_base, path)
|
||||
command = [
|
||||
"qemu-img",
|
||||
"resize",
|
||||
"-f",
|
||||
"raw",
|
||||
path,
|
||||
"{}M".format(size),
|
||||
]
|
||||
if self.execute_command(command):
|
||||
return True
|
||||
else:
|
||||
self.delete_vm_image(path)
|
||||
return False
|
||||
|
||||
def delete_vm_image(self, path):
|
||||
path = join_path(self.vm_base, path)
|
||||
try:
|
||||
os.remove(path)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
return False
|
||||
return True
|
||||
|
||||
def vm_path_string(self, path):
|
||||
return join_path(self.vm_base, path)
|
||||
|
||||
def qemu_path_string(self, path):
|
||||
return self.vm_path_string(path)
|
||||
|
||||
def is_vm_image_exists(self, path):
|
||||
path = join_path(self.vm_base, path)
|
||||
command = ["ls", path]
|
||||
return self.execute_command(command, report=False)
|
||||
|
||||
|
||||
class CEPHBasedImageStorageHandler(ImageStorageHandler):
|
||||
handler_name = "Ceph"
|
||||
|
||||
def import_image(self, src, dest, protect=False):
|
||||
dest = join_path(self.image_base, dest)
|
||||
import_command = ["rbd", "import", src, dest]
|
||||
commands = [import_command]
|
||||
if protect:
|
||||
snap_create_command = [
|
||||
"rbd",
|
||||
"snap",
|
||||
"create",
|
||||
"{}@protected".format(dest),
|
||||
]
|
||||
snap_protect_command = [
|
||||
"rbd",
|
||||
"snap",
|
||||
"protect",
|
||||
"{}@protected".format(dest),
|
||||
]
|
||||
commands.append(snap_create_command)
|
||||
commands.append(snap_protect_command)
|
||||
|
||||
result = True
|
||||
for command in commands:
|
||||
result = result and self.execute_command(command)
|
||||
|
||||
return result
|
||||
|
||||
def make_vm_image(self, src, dest):
|
||||
src = join_path(self.image_base, src)
|
||||
dest = join_path(self.vm_base, dest)
|
||||
|
||||
command = ["rbd", "clone", "{}@protected".format(src), dest]
|
||||
return self.execute_command(command)
|
||||
|
||||
def resize_vm_image(self, path, size):
|
||||
path = join_path(self.vm_base, path)
|
||||
command = ["rbd", "resize", path, "--size", size]
|
||||
return self.execute_command(command)
|
||||
|
||||
def delete_vm_image(self, path):
|
||||
path = join_path(self.vm_base, path)
|
||||
command = ["rbd", "rm", path]
|
||||
return self.execute_command(command)
|
||||
|
||||
def vm_path_string(self, path):
|
||||
return join_path(self.vm_base, path)
|
||||
|
||||
def qemu_path_string(self, path):
|
||||
return "rbd:{}".format(self.vm_path_string(path))
|
||||
|
||||
def is_vm_image_exists(self, path):
|
||||
path = join_path(self.vm_base, path)
|
||||
command = ["rbd", "info", path]
|
||||
return self.execute_command(command, report=False)
|
||||
|
||||
|
||||
def get_storage_handler():
|
||||
__storage_backend = config["storage"]["storage_backend"]
|
||||
if __storage_backend == "filesystem":
|
||||
return FileSystemBasedImageStorageHandler(
|
||||
vm_base=config["storage"]["vm_dir"],
|
||||
image_base=config["storage"]["image_dir"],
|
||||
)
|
||||
elif __storage_backend == "ceph":
|
||||
return CEPHBasedImageStorageHandler(
|
||||
vm_base=config["storage"]["ceph_vm_pool"],
|
||||
image_base=config["storage"]["ceph_image_pool"],
|
||||
)
|
||||
else:
|
||||
raise Exception("Unknown Image Storage Handler")
|
||||
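A minimal usage sketch of the handler selection above (a sketch only: it assumes the etcd config already contains the ``storage`` section, the module import path is taken from this diff, and the image/VM names are purely illustrative):

.. code-block:: python

    from ucloud.common.storage_handlers import get_storage_handler

    # Returns a FileSystemBasedImageStorageHandler or a CEPHBasedImageStorageHandler,
    # depending on config["storage"]["storage_backend"].
    handler = get_storage_handler()

    # Illustrative names: import a protected base image, clone it for a VM,
    # then grow the clone to 4096 MB (resize_vm_image expects megabytes).
    if handler.import_image("/var/www/admin/alpine.raw", "alpine", protect=True):
        handler.make_vm_image("alpine", "3f75bd20-45d6-4013-89c4-7fceaedc8dda")
        handler.resize_vm_image("3f75bd20-45d6-4013-89c4-7fceaedc8dda", 4096)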
102
uncloud/common/vm.py
Normal file
102
uncloud/common/vm.py
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
from contextlib import contextmanager
|
||||
from datetime import datetime
|
||||
from os.path import join
|
||||
|
||||
from .classes import SpecificEtcdEntryBase
|
||||
|
||||
|
||||
class VMStatus:
|
||||
stopped = "STOPPED" # After requested_shutdown
|
||||
killed = "KILLED" # either host died or vm died itself
|
||||
running = "RUNNING"
|
||||
error = "ERROR" # An error occurred that cannot be resolved automatically
|
||||
|
||||
|
||||
def declare_stopped(vm):
|
||||
vm["hostname"] = ""
|
||||
vm["in_migration"] = False
|
||||
vm["status"] = VMStatus.stopped
|
||||
|
||||
|
||||
class VMEntry(SpecificEtcdEntryBase):
|
||||
def __init__(self, e):
|
||||
self.owner = None # type: str
|
||||
self.specs = None # type: dict
|
||||
self.hostname = None # type: str
|
||||
self.status = None # type: str
|
||||
self.image_uuid = None # type: str
|
||||
self.log = None # type: list
|
||||
self.in_migration = None # type: bool
|
||||
|
||||
super().__init__(e)
|
||||
|
||||
@property
|
||||
def uuid(self):
|
||||
return self.key.split("/")[-1]
|
||||
|
||||
def declare_killed(self):
|
||||
self.hostname = ""
|
||||
self.in_migration = False
|
||||
if self.status == VMStatus.running:
|
||||
self.status = VMStatus.killed
|
||||
|
||||
def declare_stopped(self):
|
||||
self.hostname = ""
|
||||
self.in_migration = False
|
||||
self.status = VMStatus.stopped
|
||||
|
||||
def add_log(self, msg):
|
||||
self.log = self.log[:5]
|
||||
self.log.append(
|
||||
"{} - {}".format(datetime.now().isoformat(), msg)
|
||||
)
|
||||
|
||||
|
||||
class VmPool:
|
||||
def __init__(self, etcd_client, vm_prefix):
|
||||
self.client = etcd_client
|
||||
self.prefix = vm_prefix
|
||||
|
||||
@property
|
||||
def vms(self):
|
||||
_vms = self.client.get_prefix(self.prefix, value_in_json=True)
|
||||
return [VMEntry(vm) for vm in _vms]
|
||||
|
||||
def by_host(self, host, _vms=None):
|
||||
if _vms is None:
|
||||
_vms = self.vms
|
||||
return list(filter(lambda x: x.hostname == host, _vms))
|
||||
|
||||
def by_status(self, status, _vms=None):
|
||||
if _vms is None:
|
||||
_vms = self.vms
|
||||
return list(filter(lambda x: x.status == status, _vms))
|
||||
|
||||
def by_owner(self, owner, _vms=None):
|
||||
if _vms is None:
|
||||
_vms = self.vms
|
||||
return list(filter(lambda x: x.owner == owner, _vms))
|
||||
|
||||
def except_status(self, status, _vms=None):
|
||||
if _vms is None:
|
||||
_vms = self.vms
|
||||
return list(filter(lambda x: x.status != status, _vms))
|
||||
|
||||
def get(self, key):
|
||||
if not key.startswith(self.prefix):
|
||||
key = join(self.prefix, key)
|
||||
v = self.client.get(key, value_in_json=True)
|
||||
if v:
|
||||
return VMEntry(v)
|
||||
return None
|
||||
|
||||
def put(self, obj: VMEntry):
|
||||
self.client.put(obj.key, obj.value, value_in_json=True)
|
||||
|
||||
@contextmanager
|
||||
def get_put(self, key) -> VMEntry:
|
||||
# Updates object at key on exit
|
||||
obj = self.get(key)
|
||||
yield obj
|
||||
if obj:
|
||||
self.put(obj)
|
||||
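A short usage sketch for ``VmPool`` and its ``get_put`` context manager (the etcd prefix and VM UUID below are illustrative; ``shared.etcd_client`` is assumed to be available as elsewhere in this diff):

.. code-block:: python

    from ucloud.common.vm import VmPool, VMStatus
    from ucloud.shared import shared

    vm_pool = VmPool(shared.etcd_client, "/v1/vm/")  # illustrative prefix

    # The entry is written back to etcd automatically when the block exits.
    with vm_pool.get_put("3f75bd20-45d6-4013-89c4-7fceaedc8dda") as vm:
        if vm and vm.status == VMStatus.running:
            vm.add_log("heartbeat check passed")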
0
uncloud/configure/__init__.py
Normal file
0
uncloud/configure/__init__.py
Normal file
79
uncloud/configure/main.py
Normal file
79
uncloud/configure/main.py
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
import os
|
||||
|
||||
from ucloud.settings import settings
|
||||
from ucloud.shared import shared
|
||||
|
||||
|
||||
def update_config(section, kwargs):
|
||||
uncloud_config = shared.etcd_client.get(
|
||||
settings.config_key, value_in_json=True
|
||||
)
|
||||
if not uncloud_config:
|
||||
uncloud_config = {}
|
||||
else:
|
||||
uncloud_config = uncloud_config.value
|
||||
|
||||
uncloud_config[section] = kwargs
|
||||
shared.etcd_client.put(
|
||||
settings.config_key, uncloud_config, value_in_json=True
|
||||
)
|
||||
|
||||
|
||||
def configure_parser(parser):
|
||||
configure_subparsers = parser.add_subparsers(dest="subcommand")
|
||||
|
||||
otp_parser = configure_subparsers.add_parser("otp")
|
||||
otp_parser.add_argument(
|
||||
"--verification-controller-url", required=True, metavar="URL"
|
||||
)
|
||||
otp_parser.add_argument(
|
||||
"--auth-name", required=True, metavar="OTP-NAME"
|
||||
)
|
||||
otp_parser.add_argument(
|
||||
"--auth-realm", required=True, metavar="OTP-REALM"
|
||||
)
|
||||
otp_parser.add_argument(
|
||||
"--auth-seed", required=True, metavar="OTP-SEED"
|
||||
)
|
||||
|
||||
network_parser = configure_subparsers.add_parser("network")
|
||||
network_parser.add_argument(
|
||||
"--prefix-length", required=True, type=int
|
||||
)
|
||||
network_parser.add_argument("--prefix", required=True)
|
||||
network_parser.add_argument("--vxlan-phy-dev", required=True)
|
||||
|
||||
netbox_parser = configure_subparsers.add_parser("netbox")
|
||||
netbox_parser.add_argument("--url", required=True)
|
||||
netbox_parser.add_argument("--token", required=True)
|
||||
|
||||
ssh_parser = configure_subparsers.add_parser("ssh")
|
||||
ssh_parser.add_argument("--username", default="root")
|
||||
ssh_parser.add_argument(
|
||||
"--private-key-path",
|
||||
default=os.path.expanduser("~/.ssh/id_rsa"),
|
||||
)
|
||||
|
||||
storage_parser = configure_subparsers.add_parser("storage")
|
||||
storage_parser.add_argument("--file-dir", required=True)
|
||||
storage_parser_subparsers = storage_parser.add_subparsers(
|
||||
dest="storage_backend"
|
||||
)
|
||||
|
||||
filesystem_storage_parser = storage_parser_subparsers.add_parser(
|
||||
"filesystem"
|
||||
)
|
||||
filesystem_storage_parser.add_argument("--vm-dir", required=True)
|
||||
filesystem_storage_parser.add_argument("--image-dir", required=True)
|
||||
|
||||
ceph_storage_parser = storage_parser_subparsers.add_parser("ceph")
|
||||
ceph_storage_parser.add_argument("--ceph-vm-pool", required=True)
|
||||
ceph_storage_parser.add_argument("--ceph-image-pool", required=True)
|
||||
|
||||
|
||||
def main(**kwargs):
|
||||
subcommand = kwargs.pop("subcommand")
|
||||
if not subcommand:
|
||||
pass
|
||||
else:
|
||||
update_config(subcommand, kwargs)
|
||||
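For illustration, a hypothetical ``configure network`` invocation parsed by the subparser above boils down to a single ``update_config`` call (argparse turns ``--prefix-length`` into the ``prefix_length`` keyword, and so on); the values here are made up:

.. code-block:: python

    # Roughly what a hypothetical invocation like
    #   "configure network --prefix-length 64 --prefix 2001:db8::/64 --vxlan-phy-dev eth0"
    # ends up storing under the "network" section of the config key in etcd.
    update_config(
        "network",
        {
            "prefix_length": 64,
            "prefix": "2001:db8::/64",
            "vxlan_phy_dev": "eth0",
        },
    )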
25
uncloud/docs/Makefile
Normal file
25
uncloud/docs/Makefile
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line, and also
|
||||
# from the environment for the first two.
|
||||
SPHINXOPTS ?=
|
||||
SPHINXBUILD ?= sphinx-build
|
||||
SOURCEDIR = source/
|
||||
BUILDDIR = build/
|
||||
DESTINATION=root@staticweb.ungleich.ch:/home/services/www/ungleichstatic/staticcms.ungleich.ch/www/ucloud/
|
||||
|
||||
.PHONY: all build clean
|
||||
|
||||
publish: build permissions
|
||||
rsync -av $(BUILDDIR) $(DESTINATION)
|
||||
|
||||
permissions: build
|
||||
find $(BUILDDIR) -type f -exec chmod 0644 {} \;
|
||||
find $(BUILDDIR) -type d -exec chmod 0755 {} \;
|
||||
|
||||
build:
|
||||
$(SPHINXBUILD) "$(SOURCEDIR)" "$(BUILDDIR)"
|
||||
|
||||
clean:
|
||||
rm -rf $(BUILDDIR)
|
||||
0
uncloud/docs/__init__.py
Normal file
0
uncloud/docs/__init__.py
Normal file
0
uncloud/docs/source/__init__.py
Normal file
0
uncloud/docs/source/__init__.py
Normal file
158
uncloud/docs/source/admin-guide
Normal file
158
uncloud/docs/source/admin-guide
Normal file
|
|
@ -0,0 +1,158 @@
|
|||
.. _admin-guide:
|
||||
|
||||
|
||||
Usage Guide For Administrators
|
||||
==============================
|
||||
|
||||
Start API
|
||||
----------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud api
|
||||
|
||||
Host Creation
|
||||
-------------
|
||||
|
||||
Currently, we don't have any host (that runs virtual machines).
|
||||
So, we need to create it by executing the following command
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli host create --hostname ungleich.ch --cpu 32 --ram '32GB' --os-ssd '32GB'
|
||||
|
||||
You should see something like the following
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"message": "Host Created"
|
||||
}
|
||||
|
||||
Start Scheduler
|
||||
---------------
|
||||
The scheduler is responsible for scheduling VMs on an appropriate host.
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud scheduler
|
||||
|
||||
Start Host
|
||||
----------
|
||||
The host is responsible for handling the following actions
|
||||
|
||||
* Start VM.
|
||||
* Stop VM.
|
||||
* Create VM.
|
||||
* Delete VM.
|
||||
* Migrate VM.
|
||||
* Manage Network Resources needed by VMs.
|
||||
|
||||
It uses a hypervisor such as QEMU to perform these actions.
|
||||
|
||||
To start the host we created earlier, execute the following command
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud host ungleich.ch
|
||||
|
||||
Create OS Image
|
||||
---------------
|
||||
|
||||
Create ucloud-init ready OS image (Optional)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
This step is optional if you just want to test ucloud. However, sooner or later
|
||||
you will want to create OS images with ucloud-init to properly
|
||||
contextualize VMs.
|
||||
|
||||
1. Start a VM with OS image on which you want to install ucloud-init
|
||||
2. Execute the following command on the started VM
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
apk add git
|
||||
git clone https://code.ungleich.ch/ucloud/ucloud-init.git
|
||||
cd ucloud-init
|
||||
sh ./install.sh
|
||||
3. Congratulations. Your image is now ucloud-init ready.
|
||||
|
||||
|
||||
Upload Sample OS Image
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
Execute the following to get the sample OS image file.
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
mkdir /var/www/admin
|
||||
(cd /var/www/admin && wget https://cloud.ungleich.ch/s/qTb5dFYW5ii8KsD/download)
|
||||
|
||||
Run File Scanner and Image Scanner
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Currently, our uploaded file *alpine-untouched.qcow2* is not tracked by ucloud. We can only make
|
||||
images from tracked files. So, we need to track the file by running the File Scanner
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud filescanner
|
||||
|
||||
The File Scanner will run, scan your uploaded image and track it. You can check whether your image
|
||||
is successfully tracked by executing :code:`ucloud-cli user files`. It will return something like the following
|
||||
|
||||
.. _list-user-files:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"message": [
|
||||
{
|
||||
"filename": "alpine-untouched.qcow2",
|
||||
"uuid": "3f75bd20-45d6-4013-89c4-7fceaedc8dda"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Our file is now being tracked by ucloud. Let's create an OS image using the uploaded file.
|
||||
|
||||
An image belongs to an image store. There are two types of stores
|
||||
|
||||
* Public Image Store
|
||||
* Private Image Store (Not Implemented Yet)
|
||||
|
||||
.. note::
|
||||
**Quick Quiz** Have we created an image store yet?
|
||||
|
||||
The answer is **No, we haven't**. Creating a sample image store is very easy.
|
||||
Just execute the following command
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
(cd ~/ucloud && pipenv run python api/create_image_store.py)
|
||||
|
||||
An image store (with name = "images") will be created. Now, we are fully ready to create our
|
||||
very own image. Execute the following command to create an image using the file uploaded earlier
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli image create-from-file --name alpine --uuid 3f75bd20-45d6-4013-89c4-7fceaedc8dda --image-store-name images
|
||||
|
||||
Please note that your **uuid** would be different. See :ref:`List of user files <list-user-files>`.
|
||||
|
||||
Now, ucloud has received our request to create an image from the file. We have to run the Image Scanner to make the image.
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud imagescanner
|
||||
|
||||
To make sure that our image is created, run :code:`ucloud-cli image list --public`. You will get
|
||||
output something like the following
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"images": [
|
||||
{
|
||||
"name": "images:alpine",
|
||||
"status": "CREATED"
|
||||
}
|
||||
]
|
||||
}
|
||||
53
uncloud/docs/source/conf.py
Normal file
53
uncloud/docs/source/conf.py
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
# Configuration file for the Sphinx documentation builder.
|
||||
#
|
||||
# This file only contains a selection of the most common options. For a full
|
||||
# list see the documentation:
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
||||
|
||||
# -- Path setup --------------------------------------------------------------
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
|
||||
# -- Project information -----------------------------------------------------
|
||||
|
||||
project = "ucloud"
|
||||
copyright = "2019, ungleich"
|
||||
author = "ungleich"
|
||||
|
||||
# -- General configuration ---------------------------------------------------
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
"sphinx.ext.autodoc",
|
||||
"sphinx_rtd_theme",
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ["_templates"]
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This pattern also affects html_static_path and html_extra_path.
|
||||
exclude_patterns = []
|
||||
|
||||
# -- Options for HTML output -------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
|
||||
html_theme = "sphinx_rtd_theme"
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ["_static"]
|
||||
44
uncloud/docs/source/diagram-code/ucloud
Normal file
44
uncloud/docs/source/diagram-code/ucloud
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
graph LR
|
||||
style ucloud fill:#FFD2FC
|
||||
style cron fill:#FFF696
|
||||
style infrastructure fill:#BDF0FF
|
||||
subgraph ucloud[ucloud]
|
||||
ucloud-cli[CLI]-->ucloud-api[API]
|
||||
ucloud-api-->ucloud-scheduler[Scheduler]
|
||||
ucloud-api-->ucloud-imagescanner[Image Scanner]
|
||||
ucloud-api-->ucloud-host[Host]
|
||||
ucloud-scheduler-->ucloud-host
|
||||
|
||||
ucloud-host-->need-networking{VM need Networking}
|
||||
need-networking-->|Yes| networking-scripts
|
||||
need-networking-->|No| VM[Virtual Machine]
|
||||
need-networking-->|SLAAC?| radvd
|
||||
networking-scripts-->VM
|
||||
networking-scripts--Create Networks Devices-->networking-scripts
|
||||
subgraph cron[Cron Jobs]
|
||||
ucloud-imagescanner
|
||||
ucloud-filescanner[File Scanner]
|
||||
ucloud-filescanner--Track User files-->ucloud-filescanner
|
||||
end
|
||||
subgraph infrastructure[Infrastructure]
|
||||
radvd
|
||||
etcd
|
||||
networking-scripts[Networking Scripts]
|
||||
ucloud-imagescanner-->image-store
|
||||
image-store{Image Store}
|
||||
image-store-->|CEPH| ceph
|
||||
image-store-->|FILE| file-system
|
||||
ceph[CEPH]
|
||||
file-system[File System]
|
||||
end
|
||||
subgraph virtual-machine[Virtual Machine]
|
||||
VM
|
||||
VM-->ucloud-init
|
||||
|
||||
end
|
||||
|
||||
subgraph metadata-group[Metadata Server]
|
||||
metadata-->ucloud-init
|
||||
ucloud-init<-->metadata
|
||||
end
|
||||
end
|
||||
17
uncloud/docs/source/hacking.rst
Normal file
17
uncloud/docs/source/hacking.rst
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
Hacking
|
||||
=======
|
||||
How to hack on the code.
|
||||
|
||||
[ to be done by Balazs:
|
||||
|
||||
* make nice
|
||||
* indent with shell script mode
|
||||
|
||||
]
|
||||
|
||||
* git clone the repo
|
||||
* cd to the repo
|
||||
* Setup your venv: python -m venv venv
|
||||
* . ./venv/bin/activate # you need the leading dot for sourcing!
|
||||
* Run ./bin/ucloud-run-reinstall - it should print you an error
|
||||
message on how to use ucloud
|
||||
494
uncloud/docs/source/images/ucloud.svg
Normal file
494
uncloud/docs/source/images/ucloud.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 37 KiB |
27
uncloud/docs/source/index.rst
Normal file
27
uncloud/docs/source/index.rst
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
.. ucloud documentation master file, created by
|
||||
sphinx-quickstart on Mon Nov 11 19:08:16 2019.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to ucloud's documentation!
|
||||
==================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Contents:
|
||||
|
||||
introduction
|
||||
user-guide
|
||||
setup-install
|
||||
admin-guide
|
||||
user-guide/how-to-create-an-os-image-for-ucloud
|
||||
troubleshooting
|
||||
hacking
|
||||
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
58
uncloud/docs/source/introduction.rst
Normal file
58
uncloud/docs/source/introduction.rst
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
Introduction
|
||||
============
|
||||
|
||||
ucloud is a modern, IPv6 first virtual machine management system.
|
||||
It is an alternative to `OpenNebula <https://opennebula.org/>`_,
|
||||
`OpenStack <https://www.openstack.org/>`_ or
|
||||
`Cloudstack <https://cloudstack.apache.org/>`_.
|
||||
|
||||
ucloud is the first cloud management system that puts IPv6
|
||||
first. ucloud also has an integral ordering process that we missed in
|
||||
existing solutions.
|
||||
|
||||
The ucloud documentation is separated into various sections for the
|
||||
different use cases:
|
||||
|
||||
* :ref:`The user guide <user-guide>` describes how to use an existing
|
||||
ucloud installation
|
||||
* There are :ref:`setup instructions <setup-install>` which describe how to set up a new
|
||||
ucloud instance
|
||||
* :ref:`The admin guide <admin-guide>` describes how to
|
||||
administrate ucloud
|
||||
|
||||
|
||||
Architecture
|
||||
------------
|
||||
We try to reuse existing components for ucloud. Generally speaking,
|
||||
ucloud consists of a variety of daemons that handle specific tasks and
|
||||
connect to a shared database.
|
||||
|
||||
All interactions with the clients are done through an API.
|
||||
|
||||
ucloud consists of the following components:
|
||||
|
||||
* API
|
||||
* Scheduler
|
||||
* Host
|
||||
* File Scanner
|
||||
* Image Scanner
|
||||
* Metadata Server
|
||||
* VM Init Scripts (dubbed as ucloud-init)

How does ucloud work?
|
||||
|
||||
|
||||
Tech Stack
|
||||
----------
|
||||
The following technologies are utilised:
|
||||
|
||||
* Python 3
|
||||
* Flask
|
||||
* QEMU as hypervisor
|
||||
* etcd (key/value store)
|
||||
* radvd for Router Advertisement
|
||||
|
||||
|
||||
Optional components:
|
||||
|
||||
* CEPH for distributed image storage
|
||||
* uotp for user authentication
|
||||
* netbox for IPAM
|
||||
32
uncloud/docs/source/misc/todo.rst
Normal file
32
uncloud/docs/source/misc/todo.rst
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
TODO
|
||||
====
|
||||
|
||||
Security
|
||||
--------
|
||||
|
||||
* **Check Authentication:** Nico reported that some endpoints
|
||||
even work without providing a token (e.g. ListUserVM).
|
||||
|
||||
Refactoring/Feature
|
||||
-------------------
|
||||
|
||||
* Put overrides for **IMAGE_BASE**, **VM_BASE** in **ImageStorageHandler**.
|
||||
* Expose more details in ListUserFiles.
|
||||
* Throw KeyError instead of returning None when some key is not found in etcd.
|
||||
* Create Network Manager
|
||||
* That would handle tasks like up/down an interface
|
||||
* Create VXLANs, Bridges, TAPs.
|
||||
* Remove them when they are no longer used.
|
||||
|
||||
Reliability
|
||||
-----------
|
||||
|
||||
* What to do if some command hangs forever? e.g CEPH commands
|
||||
:code:`rbd ls ssd` etc. hangs forever if CEPH isn't running
|
||||
or not responding.
|
||||
* What to do if etcd goes down?
|
||||
|
||||
Misc.
|
||||
-----
|
||||
|
||||
* Put "Always use only one StorageHandler"
|
||||
323
uncloud/docs/source/setup-install.rst
Normal file
323
uncloud/docs/source/setup-install.rst
Normal file
|
|
@ -0,0 +1,323 @@
|
|||
.. _setup-install:
|
||||
|
||||
Installation of ucloud
|
||||
======================
|
||||
To install ucloud, you will first need to install the requirements and
|
||||
then ucloud itself.
|
||||
|
||||
We describe the installation in three sections:
|
||||
|
||||
* Installation overview
|
||||
* Requirements on Alpine
|
||||
* Installation on Arch Linux
|
||||
|
||||
|
||||
Installation overview
|
||||
---------------------
|
||||
|
||||
ucloud requires the following components to run:
|
||||
|
||||
* python3
|
||||
* an etcd cluster
|
||||
|
||||
|
||||
Installation on Arch Linux
|
||||
--------------------------
|
||||
|
||||
In Arch Linux, some packages can be installed from the regular
|
||||
repositories, some packages need to be installed from AUR.
|
||||
|
||||
|
||||
System packages
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
pacman -Syu qemu
|
||||
|
||||
|
||||
AUR packages
|
||||
~~~~~~~~~~~~
|
||||
Use your favorite AUR manager to install the following packages:
|
||||
|
||||
* etcd
|
||||
|
||||
|
||||
Alpine
|
||||
------
|
||||
|
||||
.. note::
|
||||
Python wheel (binary) packages do not support Alpine Linux, as it is
|
||||
using musl libc instead of glibc. Therefore, expect longer installation
|
||||
times than on other Linux distributions.
|
||||
|
||||
Enable Edge Repos, Update and Upgrade
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. warning::
|
||||
The commands below will overwrite your repository sources and
|
||||
upgrade all packages and their dependencies to match those available
|
||||
in edge repos. **So, be warned**
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
cat > /etc/apk/repositories << EOF
|
||||
http://dl-cdn.alpinelinux.org/alpine/edge/main
|
||||
http://dl-cdn.alpinelinux.org/alpine/edge/community
|
||||
http://dl-cdn.alpinelinux.org/alpine/edge/testing
|
||||
EOF
|
||||
|
||||
apk update
|
||||
apk upgrade
|
||||
|
||||
reboot
|
||||
|
||||
|
||||
Install Dependencies
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
.. note::
|
||||
The installation and configuration of a production grade etcd cluster
|
||||
is out of scope of this manual. So, we will install etcd with default
|
||||
configuration.
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
apk add git python3 alpine-sdk python3-dev etcd etcd-ctl openntpd \
|
||||
libffi-dev openssl-dev make py3-protobuf py3-tempita chrony
|
||||
|
||||
pip3 install pipenv
|
||||
|
||||
|
||||
**Install QEMU (For Filesystem based Installation)**
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
apk add qemu qemu-system-x86_64 qemu-img
|
||||
|
||||
**Install QEMU/CEPH/radvd (For CEPH based Installation)**
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
$(git clone https://code.ungleich.ch/ahmedbilal/qemu-with-rbd-alpine.git && cd qemu-with-rbd-alpine && apk add apks/*.apk --allow-untrusted)
|
||||
apk add ceph radvd
|
||||
|
||||
Synchronize Date/Time
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
service chronyd start
|
||||
rc-update add chronyd
|
||||
|
||||
|
||||
Start etcd and enable it
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. note::
|
||||
The following :command:`curl` statement shouldn't be run once
|
||||
etcd is fixed in alpine repos.
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
curl https://raw.githubusercontent.com/etcd-io/etcd/release-3.4/etcd.conf.yml.sample -o /etc/etcd/conf.yml
|
||||
service etcd start
|
||||
rc-update add etcd
|
||||
|
||||
|
||||
Install uotp
|
||||
~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
git clone https://code.ungleich.ch/ungleich-public/uotp.git
|
||||
cd uotp
|
||||
mv .env.sample .env
|
||||
|
||||
pipenv --three --site-packages
|
||||
pipenv install
|
||||
pipenv run python app.py
|
||||
|
||||
Run :code:`$(cd scripts && pipenv run python get-admin.py)` to get
|
||||
admin seed. A sample output
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"seed": "FYTVQ72A2CJJ4TB4",
|
||||
"realm": ["ungleich-admin"]
|
||||
}
|
||||
|
||||
Now, run :code:`pipenv run python scripts/create-auth.py FYTVQ72A2CJJ4TB4`
|
||||
(Replace **FYTVQ72A2CJJ4TB4** with your admin seed obtained in previous step).
|
||||
A sample output is shown below. It shows the seed of auth.
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"message": "Account Created",
|
||||
"name": "auth",
|
||||
"realm": ["ungleich-auth"],
|
||||
"seed": "XZLTUMX26TRAZOXC"
|
||||
}
|
||||
|
||||
.. note::
|
||||
Please note down both the **admin** and **auth** seeds, as we will need them when setting up ucloud.
|
||||
|
||||
|
||||
Install and configure ucloud
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
git clone https://code.ungleich.ch/ucloud/ucloud.git
|
||||
cd ucloud
|
||||
|
||||
pipenv --three --site-packages
|
||||
pipenv install
|
||||
|
||||
**Filesystem based Installation**
|
||||
|
||||
You just need to update **AUTH_SEED** in the below code to match your auth's seed.
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
mkdir /etc/ucloud
|
||||
|
||||
cat > /etc/ucloud/ucloud.conf << EOF
|
||||
AUTH_NAME=auth
|
||||
AUTH_SEED=XZLTUMX26TRAZOXC
|
||||
AUTH_REALM=ungleich-auth
|
||||
|
||||
REALM_ALLOWED = ["ungleich-admin", "ungleich-user"]
|
||||
|
||||
OTP_SERVER="http://127.0.0.1:8000/"
|
||||
|
||||
ETCD_URL=localhost
|
||||
|
||||
STORAGE_BACKEND=filesystem
|
||||
|
||||
BASE_DIR=/var/www
|
||||
IMAGE_DIR=/var/image
|
||||
VM_DIR=/var/vm
|
||||
|
||||
VM_PREFIX=/v1/vm/
|
||||
HOST_PREFIX=/v1/host/
|
||||
REQUEST_PREFIX=/v1/request/
|
||||
FILE_PREFIX=/v1/file/
|
||||
IMAGE_PREFIX=/v1/image/
|
||||
IMAGE_STORE_PREFIX=/v1/image_store/
|
||||
USER_PREFIX=/v1/user/
|
||||
NETWORK_PREFIX=/v1/network/
|
||||
|
||||
ssh_username=meow
|
||||
ssh_pkey="~/.ssh/id_rsa"
|
||||
|
||||
VXLAN_PHY_DEV="eth0"
|
||||
|
||||
EOF
|
||||
|
||||
|
||||
|
||||
**CEPH based Installation**
|
||||
You need to update the following
|
||||
|
||||
* **AUTH_SEED**
|
||||
* **NETBOX_URL**
|
||||
* **NETBOX_TOKEN**
|
||||
* **PREFIX**
|
||||
* **PREFIX_LENGTH**
|
||||
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
mkdir /etc/ucloud
|
||||
|
||||
cat > /etc/ucloud/ucloud.conf << EOF
|
||||
AUTH_NAME=auth
|
||||
AUTH_SEED=XZLTUMX26TRAZOXC
|
||||
AUTH_REALM=ungleich-auth
|
||||
|
||||
REALM_ALLOWED = ["ungleich-admin", "ungleich-user"]
|
||||
|
||||
OTP_SERVER="http://127.0.0.1:8000/"
|
||||
|
||||
ETCD_URL=localhost
|
||||
|
||||
STORAGE_BACKEND=ceph
|
||||
|
||||
BASE_DIR=/var/www
|
||||
IMAGE_DIR=/var/image
|
||||
VM_DIR=/var/vm
|
||||
|
||||
VM_PREFIX=/v1/vm/
|
||||
HOST_PREFIX=/v1/host/
|
||||
REQUEST_PREFIX=/v1/request/
|
||||
FILE_PREFIX=/v1/file/
|
||||
IMAGE_PREFIX=/v1/image/
|
||||
IMAGE_STORE_PREFIX=/v1/image_store/
|
||||
USER_PREFIX=/v1/user/
|
||||
NETWORK_PREFIX=/v1/network/
|
||||
|
||||
ssh_username=meow
|
||||
ssh_pkey="~/.ssh/id_rsa"
|
||||
|
||||
VXLAN_PHY_DEV="eth0"
|
||||
|
||||
NETBOX_URL="<url-for-your-netbox-installation>"
|
||||
NETBOX_TOKEN="netbox-token"
|
||||
PREFIX="your-prefix"
|
||||
PREFIX_LENGTH="64"
|
||||
EOF
|
||||
|
||||
|
||||
Install and configure ucloud-cli
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
git clone https://code.ungleich.ch/ucloud/ucloud-cli.git
|
||||
cd ucloud-cli
|
||||
pipenv --three --site-packages
|
||||
pipenv install
|
||||
|
||||
cat > ~/.ucloud.conf << EOF
|
||||
UCLOUD_API_SERVER=http://localhost:5000
|
||||
EOF
|
||||
|
||||
mkdir /var/www/
|
||||
|
||||
**Only for Filesystem Based Installation**
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
mkdir /var/image/
|
||||
mkdir /var/vm/
|
||||
|
||||
|
||||
Environment Variables and aliases
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To ease usage of ucloud and its various components put the following in
|
||||
your shell profile e.g *~/.profile*
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
export OTP_NAME=admin
|
||||
export OTP_REALM=ungleich-admin
|
||||
export OTP_SEED=FYTVQ72A2CJJ4TB4
|
||||
|
||||
alias ucloud='cd /root/ucloud/ && pipenv run python ucloud.py'
|
||||
alias ucloud-cli='cd /root/ucloud-cli/ && pipenv run python ucloud-cli.py'
|
||||
alias uotp='cd /root/uotp/ && pipenv run python app.py'
|
||||
|
||||
and run :code:`source ~/.profile`
|
||||
98
uncloud/docs/source/theory/summary.rst
Normal file
98
uncloud/docs/source/theory/summary.rst
Normal file
|
|
@ -0,0 +1,98 @@
|
|||
Summary
|
||||
=======
|
||||
|
||||
.. image:: /images/ucloud.svg
|
||||
|
||||
.. code-block::
|
||||
|
||||
<cli>
|
||||
|
|
||||
|
|
||||
|
|
||||
+-------------------------<api>
|
||||
| |
|
||||
| |```````````````|```````````````|
|
||||
| | | |
|
||||
| <file_scanner> <scheduler> <image_scanner>
|
||||
| |
|
||||
| |
|
||||
+-------------------------<host>
|
||||
|
|
||||
|
|
||||
|
|
||||
Virtual Machine------<init>------<metadata>
|
||||
|
||||
|
||||
|
||||
**ucloud-cli** interact with **ucloud-api** to do the following operations:
|
||||
|
||||
- Create/Delete/Start/Stop/Migrate/Probe (Status of) Virtual Machines
|
||||
- Create/Delete Networks
|
||||
- Add/Get/Delete SSH Keys
|
||||
- Create OS Image out of a file (tracked by file_scanner)
|
||||
- List User's files/networks/vms
|
||||
- Add Host
|
||||
|
||||
ucloud can currently store OS images on
|
||||
|
||||
* File System
|
||||
* `CEPH <https://ceph.io/>`_
|
||||
|
||||
|
||||
**ucloud-api** in turn creates appropriate requests, which are taken
|
||||
by suitable components of ucloud. For example, if a user uses ucloud-cli
|
||||
to create a VM, **ucloud-api** would create a **ScheduleVMRequest** containing
|
||||
things like a pointer to the VM's entry, which holds the specs and networking
|
||||
configuration of the VM.
|
||||
|
||||
**ucloud-scheduler** accepts requests for VM scheduling and
|
||||
migration. It finds a host from the list of available hosts on which
|
||||
the incoming VM can run and schedules it on that host.
|
||||
|
||||
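A simplified sketch of the kind of host selection the scheduler performs (this is not the actual ucloud-scheduler code; the field names are illustrative):

.. code-block:: python

    def pick_host(hosts, vm_specs):
        """Return the first alive host with enough spare CPU and RAM, else None."""
        for host in hosts:
            if not host["alive"]:
                continue
            if (host["free_cpu"] >= vm_specs["cpu"]
                    and host["free_ram_mb"] >= vm_specs["ram_mb"]):
                return host
        return None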
**ucloud-host** runs on host servers, i.e. servers that
|
||||
actually run virtual machines, and accepts requests
|
||||
intended only for them. It creates/deletes/starts/stops/migrates
|
||||
virtual machines. It also arranges the network resources needed for the
|
||||
incoming VM.
|
||||
|
||||
**ucloud-filescanner** keeps track of users' files, which are needed
|
||||
later for creating OS Images.
|
||||
|
||||
**ucloud-imagescanner** converts image files from qcow2 format to raw
|
||||
format, which is then imported into the image store (a minimal conversion sketch follows the list below).
|
||||
|
||||
* In case of **File System**, the converted image would be copied to
|
||||
:file:`/var/image/` or the path referred to by :envvar:`IMAGE_PATH`
|
||||
environment variable mentioned in :file:`/etc/ucloud/ucloud.conf`.
|
||||
|
||||
* In case of **CEPH**, the converted image would be imported into
|
||||
specific pool (it depends on the image store in which the image
|
||||
belongs) of CEPH Block Storage.
|
||||
|
||||
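The qcow2-to-raw step is essentially a ``qemu-img convert`` call; a minimal sketch (the paths are illustrative and ``qemu-img`` must be installed):

.. code-block:: python

    import subprocess as sp

    def qcow2_to_raw(src, dest):
        # qemu-img convert -f qcow2 -O raw <src> <dest>
        sp.check_call(["qemu-img", "convert", "-f", "qcow2", "-O", "raw", src, dest])

    qcow2_to_raw("/var/www/admin/alpine-untouched.qcow2", "/var/image/alpine.raw")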
**ucloud-metadata** provides metadata which is used to contextualize
|
||||
VMs. When a VM is created, it is just a clone (duplicate) of the OS
|
||||
image from which it is created. So, to differentiate between my
|
||||
VM and your VM, the VM needs to be contextualized. This works
|
||||
like the following
|
||||
|
||||
.. note::
|
||||
Actually, ucloud-init makes the GET request. You can also try it
|
||||
yourself using curl, but ucloud-init does that for you.
|
||||
|
||||
* The VM makes a GET request to http://metadata, which resolves to the actual
|
||||
address of the metadata server. The metadata server looks at the IPv6
|
||||
address of the requester and extracts the MAC address, which is possible
|
||||
because the IPv6 address is
|
||||
`IPv6 EUI-64 <https://community.cisco.com/t5/networking-documents/understanding-ipv6-eui-64-bit-address/ta-p/3116953>`_.
|
||||
Metadata uses this MAC address to find the actual VM to which it belongs
|
||||
and its owner, ssh keys and much more. Then, metadata returns these
|
||||
details back to the calling VM in JSON format. These details are
|
||||
then used by **ucloud-init**, which is explained next. A sketch of the MAC-address recovery is shown below.
|
||||
|
||||
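A sketch of the MAC-address recovery from an EUI-64 derived IPv6 address (illustrative only, not the exact ucloud-metadata implementation):

.. code-block:: python

    import ipaddress

    def mac_from_eui64(ipv6_str):
        """Recover the MAC address embedded in an EUI-64 based IPv6 address."""
        iid = bytearray(ipaddress.IPv6Address(ipv6_str).packed[8:])  # interface identifier
        iid[0] ^= 0x02                    # flip the universal/local bit back
        mac = iid[0:3] + iid[5:8]         # drop the ff:fe inserted in the middle
        return ":".join("{:02x}".format(b) for b in mac)

    # e.g. mac_from_eui64("2001:db8::20f:54ff:fe0c:1104") -> "00:0f:54:0c:11:04"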
**ucloud-init** gets the metadata from **ucloud-metadata** to contextualize
|
||||
the VM. Specifically, it gets the owner's ssh keys (or any other keys the
|
||||
owner of the VM added to the authorized keys for this VM) and puts them into the ssh
|
||||
server's (installed on the VM) authorized keys so that the owner can access
|
||||
the VM using ssh. It also installs software that is needed for the correct
|
||||
behavior of the VM, e.g. rdnssd (needed for `SLAAC <https://en.wikipedia.org/wiki/IPv6#Stateless_address_autoconfiguration_(SLAAC)>`_).
|
||||
|
||||
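A sketch of the kind of request ucloud-init makes (illustrative; the real ucloud-init lives in its own repository, and ``/ssh_keys`` is the endpoint also used via curl in the OS-image guide further below):

.. code-block:: python

    import requests

    # Inside the VM the metadata server is reachable via the special "metadata" hostname.
    keys = requests.get("http://metadata/ssh_keys").text
    with open("/root/.ssh/authorized_keys", "a") as authorized_keys:
        authorized_keys.write(keys)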
24
uncloud/docs/source/troubleshooting.rst
Normal file
24
uncloud/docs/source/troubleshooting.rst
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
Installation Troubleshooting
|
||||
============================
|
||||
|
||||
etcd doesn't start
|
||||
------------------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
[root@archlinux ~]# systemctl start etcd
|
||||
Job for etcd.service failed because the control process exited with error code.
|
||||
See "systemctl status etcd.service" and "journalctl -xe" for details
|
||||
|
||||
possible solution
|
||||
~~~~~~~~~~~~~~~~~
|
||||
Try :code:`cat /etc/hosts`; if its output contains the following
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
127.0.0.1 localhost.localdomain localhost
|
||||
::1 localhost localhost.localdomain
|
||||
|
||||
|
||||
then unfortunately, we can't help you. But, if it doesn't contain the
|
||||
above, you can put the lines above into :file:`/etc/hosts` to fix the issue.
|
||||
121
uncloud/docs/source/user-guide.rst
Normal file
121
uncloud/docs/source/user-guide.rst
Normal file
|
|
@ -0,0 +1,121 @@
|
|||
.. _user-guide:
|
||||
|
||||
User Guide
|
||||
==========
|
||||
|
||||
Create VM
|
||||
---------
|
||||
|
||||
The following command would create a Virtual Machine (name: meow)
|
||||
with the following specs
|
||||
|
||||
* CPU: 1
|
||||
* RAM: 1GB
|
||||
* OS-SSD: 4GB
|
||||
* OS: Alpine Linux
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli vm create --vm-name meow --cpu 1 --ram '1gb' --os-ssd '4gb' --image images:alpine
|
||||
|
||||
|
||||
.. _how-to-check-vm-status:
|
||||
|
||||
Check VM Status
|
||||
---------------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli vm status --vm-name meow
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"hostname": "/v1/host/74c21c332f664972bf5078e8de080eea",
|
||||
"image_uuid": "3f75bd20-45d6-4013-89c4-7fceaedc8dda",
|
||||
"in_migration": null,
|
||||
"log": [
|
||||
"2019-11-12T09:11:09.800798 - Started successfully"
|
||||
],
|
||||
"metadata": {
|
||||
"ssh-keys": []
|
||||
},
|
||||
"name": "meow",
|
||||
"network": [],
|
||||
"owner": "admin",
|
||||
"owner_realm": "ungleich-admin",
|
||||
"specs": {
|
||||
"cpu": 1,
|
||||
"hdd": [],
|
||||
"os-ssd": "4.0 GB",
|
||||
"ram": "1.0 GB"
|
||||
},
|
||||
"status": "RUNNING",
|
||||
"vnc_socket": "/tmp/tmpj1k6sdo_"
|
||||
}
|
||||
|
||||
|
||||
Connect to VM using VNC
|
||||
-----------------------
|
||||
|
||||
We need the **socat** utility and a remote desktop client,
|
||||
e.g. Remmina, KRDC etc. We can get the VNC socket path by getting
|
||||
its status, see :ref:`how-to-check-vm-status`.
|
||||
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
socat TCP-LISTEN:1234,reuseaddr,fork UNIX-CLIENT:/tmp/tmpj1k6sdo_
|
||||
|
||||
|
||||
Then, launch your remote desktop client and connect to vnc://localhost:1234.
|
||||
|
||||
Create Network
|
||||
--------------
|
||||
|
||||
Layer 2 Network with sample IPv6 range fd00::/64 (without IPAM and routing)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli network create --network-name mynet --network-type vxlan
|
||||
|
||||
|
||||
Layer 2 Network with /64 network with automatic IPAM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli network create --network-name mynet --network-type vxlan --user True
|
||||
|
||||
Attach Network to VM
|
||||
--------------------
|
||||
|
||||
Currently, a user can only attach a network to their VM at
|
||||
the time of creation. A sample command to create a VM with
|
||||
a network is as follows
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli vm create --vm-name meow2 --cpu 1 --ram '1gb' --os-ssd '4gb' --image images:alpine --network mynet
|
||||
|
||||
.. _get-list-of-hosts:
|
||||
|
||||
Get List of Hosts
|
||||
-----------------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli host list
|
||||
|
||||
|
||||
Migrate VM
|
||||
----------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli vm migrate --vm-name meow --destination server1.place10
|
||||
|
||||
|
||||
.. option:: --destination
|
||||
|
||||
The name of the destination host. You can find a list of hosts
|
||||
using :ref:`get-list-of-hosts`
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
How to create VM images for ucloud
|
||||
==================================
|
||||
|
||||
Overview
|
||||
---------
|
||||
|
||||
ucloud tries to be minimally invasive towards VMs and only requires
|
||||
strictly necessary changes for running in a virtualised
|
||||
environment. This includes configurations for:
|
||||
|
||||
* Configuring the network
|
||||
* Managing access via ssh keys
|
||||
* Resizing the attached disk(s)
|
||||
|
||||
|
||||
Network configuration
|
||||
---------------------
|
||||
All VMs in ucloud are required to support IPv6. The primary network
|
||||
configuration is always done using SLAAC. A VM thus needs only to be
|
||||
configured to
|
||||
|
||||
* accept router advertisements on all network interfaces
|
||||
* use the router advertisements to configure the network interfaces
|
||||
* accept the DNS entries from the router advertisements
|
||||
|
||||
|
||||
Configuring SSH keys
|
||||
--------------------
|
||||
|
||||
To be able to access the VM, ucloud supports provisioning SSH keys.
|
||||
|
||||
To accept ssh keys in your VM, request the URL
|
||||
*http://metadata/ssh_keys*. Add the content to the appropriate user's
|
||||
**authorized_keys** file. Below you find sample code to accomplish
|
||||
this task:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
tmp=$(mktemp)
|
||||
curl -s http://metadata/ssh_keys > "$tmp"
|
||||
touch ~/.ssh/authorized_keys # ensure it exists
|
||||
cat ~/.ssh/authorized_keys >> "$tmp"
|
||||
sort "$tmp" | uniq > ~/.ssh/authorized_keys
|
||||
|
||||
|
||||
Disk resize
|
||||
-----------
|
||||
In virtualised environments, the disk sizes might grow. The operating
|
||||
system should detect disks that are bigger than the existing partition
|
||||
table and resize accordingly. This task is OS specific.
|
||||
|
||||
ucloud does not support shrinking disks due to the complexity and
|
||||
intra OS dependencies.
|
||||
3
uncloud/filescanner/__init__.py
Normal file
3
uncloud/filescanner/__init__.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
89
uncloud/filescanner/main.py
Executable file
89
uncloud/filescanner/main.py
Executable file
|
|
@ -0,0 +1,89 @@
|
|||
import glob
|
||||
import os
|
||||
import pathlib
|
||||
import subprocess as sp
|
||||
import time
|
||||
|
||||
from uuid import uuid4
|
||||
|
||||
from . import logger
|
||||
from ucloud.settings import settings
|
||||
from ucloud.shared import shared
|
||||
|
||||
|
||||
def sha512sum(file: str):
|
||||
"""Use sha512sum utility to compute sha512 sum of arg:file
|
||||
|
||||
IF arg:file does not exist:
|
||||
raise FileNotFoundError exception
|
||||
ELSE IF sum successfully computed:
|
||||
return computed sha512 sum
|
||||
ELSE:
|
||||
return None
|
||||
"""
|
||||
if not isinstance(file, str):
|
||||
raise TypeError
|
||||
try:
|
||||
output = sp.check_output(["sha512sum", file], stderr=sp.PIPE)
|
||||
except sp.CalledProcessError as e:
|
||||
error = e.stderr.decode("utf-8")
|
||||
if "No such file or directory" in error:
|
||||
raise FileNotFoundError from None
|
||||
else:
|
||||
output = output.decode("utf-8").strip()
|
||||
output = output.split(" ")
|
||||
return output[0]
|
||||
return None
|
||||
|
||||
|
||||
def track_file(file, base_dir):
|
||||
file_id = uuid4()
|
||||
|
||||
# Get Username
|
||||
owner = pathlib.Path(file).parts[len(pathlib.Path(base_dir).parts)]
|
||||
|
||||
# Get Creation Date of File
|
||||
# Here, we are assuming that ctime is creation time
|
||||
# which is mostly not true.
|
||||
creation_date = time.ctime(os.stat(file).st_ctime)
|
||||
|
||||
file_path = pathlib.Path(file).parts[-1]
|
||||
|
||||
# Create Entry
|
||||
entry_key = os.path.join(
|
||||
settings["etcd"]["file_prefix"], str(file_id)
|
||||
)
|
||||
entry_value = {
|
||||
"filename": file_path,
|
||||
"owner": owner,
|
||||
"sha512sum": sha512sum(file),
|
||||
"creation_date": creation_date,
|
||||
"size": os.path.getsize(file),
|
||||
}
|
||||
|
||||
logger.info("Tracking %s", file)
|
||||
|
||||
shared.etcd_client.put(entry_key, entry_value, value_in_json=True)
|
||||
os.setxattr(file, "user.utracked", b"True")
|
||||
|
||||
|
||||
def main():
|
||||
base_dir = settings["storage"]["file_dir"]
|
||||
|
||||
# Recursively Get All Files and Folder below BASE_DIR
|
||||
files = glob.glob("{}/**".format(base_dir), recursive=True)
|
||||
|
||||
# Retain only Files
|
||||
files = [file for file in files if os.path.isfile(file)]
|
||||
|
||||
untracked_files = []
|
||||
for file in files:
|
||||
try:
|
||||
os.getxattr(file, "user.utracked")
|
||||
except OSError:
|
||||
track_file(file, base_dir)
|
||||
untracked_files.append(file)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
13
uncloud/hack/README.org
Normal file
13
uncloud/hack/README.org
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
This directory contains unfinished hacks / inspirations
|
||||
* firewalling / networking in ucloud
|
||||
** automatically route a network per VM - /64?
|
||||
** nft: one chain per VM on each vm host (?)
|
||||
*** might have scaling issues?
|
||||
** firewall rules on each VM host
|
||||
- mac filtering:
|
||||
* To add / block
|
||||
** TODO arp poisoning
|
||||
** TODO ndp "poisoning"
|
||||
** TODO ipv4 dhcp server
|
||||
*** drop dhcpv4 requests
|
||||
*** drop dhcpv4 answers
|
||||
1
uncloud/hack/conf.d/ucloud-host
Normal file
1
uncloud/hack/conf.d/ucloud-host
Normal file
|
|
@ -0,0 +1 @@
|
|||
HOSTNAME=server1.place10
|
||||
94
uncloud/hack/nftables.conf
Normal file
94
uncloud/hack/nftables.conf
Normal file
|
|
@ -0,0 +1,94 @@
|
|||
flush ruleset
|
||||
|
||||
table bridge filter {
|
||||
chain prerouting {
|
||||
type filter hook prerouting priority 0;
|
||||
policy accept;
|
||||
ibrname br100 jump netpublic
|
||||
}
|
||||
chain netpublic {
|
||||
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
|
||||
}
|
||||
}
|
||||
|
||||
table ip6 filter {
|
||||
chain forward {
|
||||
type filter hook forward priority 0;
|
||||
|
||||
# this would be nice...
|
||||
policy drop;
|
||||
|
||||
ct state established,related accept;
|
||||
|
||||
}
|
||||
|
||||
chain prerouting {
|
||||
type filter hook prerouting priority 0;
|
||||
policy accept;
|
||||
|
||||
# not supporting in here!
|
||||
|
||||
|
||||
iifname vmXXXX jump vmXXXX
|
||||
iifname vmYYYY jump vmYYYY
|
||||
|
||||
iifname brXX jump brXX
|
||||
|
||||
iifname vxlan100 jump vxlan100
|
||||
iifname br100 jump br100
|
||||
}
|
||||
|
||||
# 1. Rules per VM (names: vmXXXXX?
|
||||
# 2. Rules per network (names: vxlanXXXX, what about non vxlan?)
|
||||
# 3. Rules per bridge:
|
||||
# vxlanXX is inside brXX
|
||||
# This is effectively a network filter
|
||||
# 4. Kill all malicious traffic:
|
||||
# - router advertisements from VMs in which they should not announce RAs
|
||||
|
||||
|
||||
|
||||
chain vxlan100 {
|
||||
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
|
||||
}
|
||||
chain br100 {
|
||||
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
|
||||
}
|
||||
|
||||
chain netpublic {
|
||||
# drop router advertisements that don't come from us
|
||||
iifname != vxlanpublic icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
|
||||
# icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
|
||||
|
||||
}
|
||||
|
||||
# This vlan
|
||||
chain brXX {
|
||||
ip6 saddr != 2001:db8:1::/64 drop;
|
||||
}
|
||||
|
||||
chain vmXXXX {
|
||||
ether saddr != 00:0f:54:0c:11:04 drop;
|
||||
ip6 saddr != 2001:db8:1:000f::540c:11ff:fe04 drop;
|
||||
jump drop_from_vm_without_ipam
|
||||
}
|
||||
|
||||
chain net_2a0ae5c05something {
|
||||
|
||||
|
||||
}
|
||||
|
||||
chain drop_from_vm_without_ipam {
|
||||
|
||||
}
|
||||
|
||||
chain vmYYYY {
|
||||
ether saddr != 00:0f:54:0c:11:05 drop;
|
||||
jump drop_from_vm_with_ipam
|
||||
}
|
||||
|
||||
# Drop stuff from every VM
|
||||
chain drop_from_vm_with_ipam {
|
||||
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
|
||||
}
|
||||
}
|
||||
8
uncloud/hack/rc-scripts/ucloud-api
Normal file
8
uncloud/hack/rc-scripts/ucloud-api
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
#!/sbin/openrc-run
|
||||
|
||||
name="$RC_SVCNAME"
|
||||
pidfile="/var/run/${name}.pid"
|
||||
command="$(which pipenv)"
|
||||
command_args="run python ucloud.py api"
|
||||
command_background="true"
|
||||
directory="/root/ucloud"
|
||||
8
uncloud/hack/rc-scripts/ucloud-host
Normal file
8
uncloud/hack/rc-scripts/ucloud-host
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
#!/sbin/openrc-run
|
||||
|
||||
name="$RC_SVCNAME"
|
||||
pidfile="/var/run/${name}.pid"
|
||||
command="$(which pipenv)"
|
||||
command_args="run python ucloud.py host ${HOSTNAME}"
|
||||
command_background="true"
|
||||
directory="/root/ucloud"
|
||||
8
uncloud/hack/rc-scripts/ucloud-metadata
Normal file
8
uncloud/hack/rc-scripts/ucloud-metadata
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
#!/sbin/openrc-run
|
||||
|
||||
name="$RC_SVCNAME"
|
||||
pidfile="/var/run/${name}.pid"
|
||||
command="$(which pipenv)"
|
||||
command_args="run python ucloud.py metadata"
|
||||
command_background="true"
|
||||
directory="/root/ucloud"
|
||||
8
uncloud/hack/rc-scripts/ucloud-scheduler
Normal file
8
uncloud/hack/rc-scripts/ucloud-scheduler
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
#!/sbin/openrc-run
|
||||
|
||||
name="$RC_SVCNAME"
|
||||
pidfile="/var/run/${name}.pid"
|
||||
command="$(which pipenv)"
|
||||
command_args="run python ucloud.py scheduler"
|
||||
command_background="true"
|
||||
directory="/root/ucloud"
|
||||
3
uncloud/host/__init__.py
Normal file
3
uncloud/host/__init__.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
101
uncloud/host/main.py
Executable file
101
uncloud/host/main.py
Executable file
|
|
@ -0,0 +1,101 @@
|
|||
import argparse
|
||||
import multiprocessing as mp
|
||||
import time
|
||||
|
||||
from ucloud.common.request import RequestEntry, RequestType
|
||||
from ucloud.shared import shared
|
||||
from ucloud.settings import settings
|
||||
from ucloud.common.vm import VMStatus
|
||||
from ucloud.vmm import VMM
|
||||
from os.path import join as join_path
|
||||
|
||||
from . import virtualmachine, logger
|
||||
|
||||
|
||||
def update_heartbeat(hostname):
|
||||
"""Update Last HeartBeat Time for :param hostname: in etcd"""
|
||||
host_pool = shared.host_pool
|
||||
this_host = next(
|
||||
filter(lambda h: h.hostname == hostname, host_pool.hosts), None
|
||||
)
|
||||
while True:
|
||||
this_host.update_heartbeat()
|
||||
host_pool.put(this_host)
|
||||
time.sleep(10)
|
||||
|
||||
|
||||
def maintenance(host):
|
||||
vmm = VMM()
|
||||
running_vms = vmm.discover()
|
||||
for vm_uuid in running_vms:
|
||||
if vmm.is_running(vm_uuid) and vmm.get_status(vm_uuid) == "running":
|
||||
logger.debug('VM {} is running on {}'.format(vm_uuid, host))
|
||||
vm = shared.vm_pool.get(
|
||||
join_path(settings["etcd"]["vm_prefix"], vm_uuid)
|
||||
)
|
||||
vm.status = VMStatus.running
|
||||
vm.vnc_socket = vmm.get_vnc(vm_uuid)
|
||||
vm.hostname = host
|
||||
shared.vm_pool.put(vm)
|
||||
|
||||
|
||||
def main(hostname):
|
||||
host_pool = shared.host_pool
|
||||
host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
|
||||
assert host is not None, "No such host with name = {}".format(hostname)
|
||||
|
||||
try:
|
||||
heartbeat_updating_process = mp.Process(target=update_heartbeat, args=(hostname,))
|
||||
heartbeat_updating_process.start()
|
||||
except Exception as e:
|
||||
raise Exception("ucloud-host heartbeat updating mechanism is not working") from e
|
||||
|
||||
for events_iterator in [
|
||||
shared.etcd_client.get_prefix(settings["etcd"]["request_prefix"], value_in_json=True),
|
||||
shared.etcd_client.watch_prefix(settings["etcd"]["request_prefix"], timeout=10, value_in_json=True)
|
||||
]:
|
||||
for request_event in events_iterator:
|
||||
request_event = RequestEntry(request_event)
|
||||
|
||||
if request_event.type == "TIMEOUT":
|
||||
maintenance(host.key)
|
||||
|
||||
elif request_event.hostname == host.key:
|
||||
logger.debug("VM Request: %s on Host %s", request_event, host.hostname)
|
||||
shared.request_pool.client.client.delete(request_event.key)
|
||||
vm_entry = shared.etcd_client.get(
|
||||
join_path(settings["etcd"]["vm_prefix"], request_event.uuid)
|
||||
)
|
||||
logger.debug("VM hostname: {}".format(vm_entry.value))
|
||||
vm = virtualmachine.VM(vm_entry)
|
||||
if request_event.type == RequestType.StartVM:
|
||||
vm.start()
|
||||
|
||||
elif request_event.type == RequestType.StopVM:
|
||||
vm.stop()
|
||||
|
||||
elif request_event.type == RequestType.DeleteVM:
|
||||
vm.delete()
|
||||
|
||||
elif request_event.type == RequestType.InitVMMigration:
|
||||
vm.start(destination_host_key=host.key)
|
||||
|
||||
elif request_event.type == RequestType.TransferVM:
|
||||
destination_host = host_pool.get(request_event.destination_host_key)
|
||||
if destination_host:
|
||||
vm.migrate(
|
||||
destination_host=destination_host.hostname,
|
||||
destination_sock_path=request_event.destination_sock_path,
|
||||
)
|
||||
else:
|
||||
logger.error("Host %s not found!", request_event.destination_host_key)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
argparser = argparse.ArgumentParser()
|
||||
argparser.add_argument(
|
||||
"hostname", help="Name of this host. e.g uncloud1.ungleich.ch"
|
||||
)
|
||||
args = argparser.parse_args()
|
||||
mp.set_start_method("spawn")
|
||||
main(args.hostname)
|
||||
301
uncloud/host/virtualmachine.py
Executable file
301
uncloud/host/virtualmachine.py
Executable file
|
|
@ -0,0 +1,301 @@
|
|||
# QEMU Manual
|
||||
# https://qemu.weilnetz.de/doc/qemu-doc.html
|
||||
|
||||
# For QEMU Monitor Protocol Commands Information, See
|
||||
# https://qemu.weilnetz.de/doc/qemu-doc.html#pcsys_005fmonitor
|
||||
|
||||
import os
|
||||
import subprocess as sp
|
||||
import ipaddress
|
||||
|
||||
from string import Template
|
||||
from os.path import join as join_path
|
||||
|
||||
from ucloud.common.request import RequestEntry, RequestType
|
||||
from ucloud.common.vm import VMStatus, declare_stopped
|
||||
from ucloud.common.network import create_dev, delete_network_interface
|
||||
from ucloud.common.schemas import VMSchema, NetworkSchema
|
||||
from ucloud.host import logger
|
||||
from ucloud.shared import shared
|
||||
from ucloud.settings import settings
|
||||
from ucloud.vmm import VMM
|
||||
|
||||
from marshmallow import ValidationError
|
||||
|
||||
|
||||
class VM:
|
||||
def __init__(self, vm_entry):
|
||||
self.schema = VMSchema()
|
||||
self.vmm = VMM()
|
||||
self.key = vm_entry.key
|
||||
try:
|
||||
self.vm = self.schema.loads(vm_entry.value)
|
||||
except ValidationError:
|
||||
logger.exception(
|
||||
"Couldn't validate VM Entry", vm_entry.value
|
||||
)
|
||||
self.vm = None
|
||||
else:
|
||||
self.uuid = vm_entry.key.split("/")[-1]
|
||||
self.host_key = self.vm["hostname"]
|
||||
logger.debug('VM Hostname {}'.format(self.host_key))
|
||||
|
||||
def get_qemu_args(self):
|
||||
command = (
|
||||
"-drive file={file},format=raw,if=virtio,cache=none"
|
||||
" -device virtio-rng-pci"
|
||||
" -m {memory} -smp cores={cores},threads={threads}"
|
||||
" -name {owner}_{name}"
|
||||
).format(
|
||||
owner=self.vm["owner"],
|
||||
name=self.vm["name"],
|
||||
memory=int(self.vm["specs"]["ram"].to_MB()),
|
||||
cores=self.vm["specs"]["cpu"],
|
||||
threads=1,
|
||||
file=shared.storage_handler.qemu_path_string(self.uuid),
|
||||
)
|
||||
|
||||
return command.split(" ")
|
||||
|
||||
def start(self, destination_host_key=None):
|
||||
migration = False
|
||||
if destination_host_key:
|
||||
migration = True
|
||||
|
||||
self.create()
|
||||
try:
|
||||
network_args = self.create_network_dev()
|
||||
except Exception as err:
|
||||
declare_stopped(self.vm)
|
||||
self.vm["log"].append("Cannot Setup Network Properly")
|
||||
logger.error("Cannot Setup Network Properly for vm %s", self.uuid, exc_info=err)
|
||||
else:
|
||||
self.vmm.start(
|
||||
uuid=self.uuid,
|
||||
migration=migration,
|
||||
*self.get_qemu_args(),
|
||||
*network_args
|
||||
)
|
||||
|
||||
status = self.vmm.get_status(self.uuid)
|
||||
logger.debug('VM {} status is {}'.format(self.uuid, status))
|
||||
if status == "running":
|
||||
self.vm["status"] = VMStatus.running
|
||||
self.vm["vnc_socket"] = self.vmm.get_vnc(self.uuid)
|
||||
elif status == "inmigrate":
|
||||
r = RequestEntry.from_scratch(
|
||||
type=RequestType.TransferVM, # Transfer VM
|
||||
hostname=self.host_key, # Which host should handle this request (the source host)
|
||||
uuid=self.uuid, # uuid of VM
|
||||
destination_sock_path=join_path(
|
||||
self.vmm.socket_dir, self.uuid
|
||||
),
|
||||
destination_host_key=destination_host_key, # Where the source host should transfer the VM
|
||||
request_prefix=settings["etcd"]["request_prefix"],
|
||||
)
|
||||
shared.request_pool.put(r)
|
||||
else:
|
||||
self.stop()
|
||||
declare_stopped(self.vm)
|
||||
logger.debug('VM {} has hostname {}'.format(self.uuid, self.vm['hostname']))
|
||||
self.sync()
|
||||
|
||||
def stop(self):
|
||||
self.vmm.stop(self.uuid)
|
||||
self.delete_network_dev()
|
||||
declare_stopped(self.vm)
|
||||
self.sync()
|
||||
|
||||
def migrate(self, destination_host, destination_sock_path):
|
||||
self.vmm.transfer(
|
||||
src_uuid=self.uuid,
|
||||
destination_sock_path=destination_sock_path,
|
||||
host=destination_host,
|
||||
)
|
||||
|
||||
def create_network_dev(self):
|
||||
command = ""
|
||||
for network_mac_and_tap in self.vm["network"]:
|
||||
network_name, mac, tap = network_mac_and_tap
|
||||
|
||||
_key = os.path.join(
|
||||
settings["etcd"]["network_prefix"],
|
||||
self.vm["owner"],
|
||||
network_name,
|
||||
)
|
||||
network = shared.etcd_client.get(_key, value_in_json=True)
|
||||
network_schema = NetworkSchema()
|
||||
try:
|
||||
network = network_schema.load(network.value)
|
||||
except ValidationError:
|
||||
continue
|
||||
|
||||
if network["type"] == "vxlan":
|
||||
tap = create_vxlan_br_tap(
|
||||
_id=network["id"],
|
||||
_dev=settings["network"]["vxlan_phy_dev"],
|
||||
tap_id=tap,
|
||||
ip=network["ipv6"],
|
||||
)
|
||||
|
||||
all_networks = shared.etcd_client.get_prefix(
|
||||
settings["etcd"]["network_prefix"],
|
||||
value_in_json=True,
|
||||
)
|
||||
|
||||
if ipaddress.ip_network(network["ipv6"]).is_global:
|
||||
update_radvd_conf(all_networks)
|
||||
|
||||
command += (
|
||||
"-netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
|
||||
" -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}".format(
|
||||
tap=tap, net_id=network["id"], mac=mac
|
||||
)
|
||||
)
|
||||
|
||||
return command.split(" ")
|
||||
|
||||
def delete_network_dev(self):
|
||||
try:
|
||||
for network in self.vm["network"]:
|
||||
network_name = network[0]
|
||||
_ = network[1] # tap_mac
|
||||
tap_id = network[2]
|
||||
|
||||
delete_network_interface("tap{}".format(tap_id))
|
||||
|
||||
owners_vms = shared.vm_pool.by_owner(self.vm["owner"])
|
||||
owners_running_vms = shared.vm_pool.by_status(
|
||||
VMStatus.running, _vms=owners_vms
|
||||
)
|
||||
|
||||
networks = map(
|
||||
lambda n: n[0],
|
||||
map(lambda vm: vm.network, owners_running_vms),
|
||||
)
|
||||
networks_in_use_by_user_vms = [vm[0] for vm in networks]
|
||||
if network_name not in networks_in_use_by_user_vms:
|
||||
network_entry = resolve_network(
|
||||
network[0], self.vm["owner"]
|
||||
)
|
||||
if network_entry:
|
||||
network_type = network_entry.value["type"]
|
||||
network_id = network_entry.value["id"]
|
||||
if network_type == "vxlan":
|
||||
delete_network_interface(
|
||||
"br{}".format(network_id)
|
||||
)
|
||||
delete_network_interface(
|
||||
"vxlan{}".format(network_id)
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Exception in network interface deletion")
|
||||
|
||||
def create(self):
|
||||
if shared.storage_handler.is_vm_image_exists(self.uuid):
|
||||
# File already exists. No problem, continue.
|
||||
logger.debug("Image for vm %s exists", self.uuid)
|
||||
else:
|
||||
if shared.storage_handler.make_vm_image(
|
||||
src=self.vm["image_uuid"], dest=self.uuid
|
||||
):
|
||||
if not shared.storage_handler.resize_vm_image(
|
||||
path=self.uuid,
|
||||
size=int(self.vm["specs"]["os-ssd"].to_MB()),
|
||||
):
|
||||
self.vm["status"] = VMStatus.error
|
||||
else:
|
||||
logger.info("New VM Created")
|
||||
|
||||
def sync(self):
|
||||
shared.etcd_client.put(
|
||||
self.key, self.schema.dump(self.vm), value_in_json=True
|
||||
)
|
||||
|
||||
def delete(self):
|
||||
self.stop()
|
||||
|
||||
if shared.storage_handler.is_vm_image_exists(self.uuid):
|
||||
r_status = shared.storage_handler.delete_vm_image(self.uuid)
|
||||
if r_status:
|
||||
shared.etcd_client.client.delete(self.key)
|
||||
else:
|
||||
shared.etcd_client.client.delete(self.key)
|
||||
|
||||
|
||||
def resolve_network(network_name, network_owner):
|
||||
network = shared.etcd_client.get(
|
||||
join_path(
|
||||
settings["etcd"]["network_prefix"],
|
||||
network_owner,
|
||||
network_name,
|
||||
),
|
||||
value_in_json=True,
|
||||
)
|
||||
return network
|
||||
|
||||
|
||||
def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
|
||||
network_script_base = os.path.join(
|
||||
os.path.dirname(os.path.dirname(__file__)), "network"
|
||||
)
|
||||
vxlan = create_dev(
|
||||
script=os.path.join(network_script_base, "create-vxlan.sh"),
|
||||
_id=_id,
|
||||
dev=_dev,
|
||||
)
|
||||
if vxlan:
|
||||
bridge = create_dev(
|
||||
script=os.path.join(
|
||||
network_script_base, "create-bridge.sh"
|
||||
),
|
||||
_id=_id,
|
||||
dev=vxlan,
|
||||
ip=ip,
|
||||
)
|
||||
if bridge:
|
||||
tap = create_dev(
|
||||
script=os.path.join(
|
||||
network_script_base, "create-tap.sh"
|
||||
),
|
||||
_id=str(tap_id),
|
||||
dev=bridge,
|
||||
)
|
||||
if tap:
|
||||
return tap
|
||||
|
||||
|
||||
def update_radvd_conf(all_networks):
|
||||
network_script_base = os.path.join(
|
||||
os.path.dirname(os.path.dirname(__file__)), "network"
|
||||
)
|
||||
|
||||
networks = {
|
||||
net.value["ipv6"]: net.value["id"]
|
||||
for net in all_networks
|
||||
if net.value.get("ipv6")
|
||||
and ipaddress.ip_network(net.value.get("ipv6")).is_global
|
||||
}
|
||||
radvd_template = open(
|
||||
os.path.join(network_script_base, "radvd-template.conf"), "r"
|
||||
).read()
|
||||
radvd_template = Template(radvd_template)
|
||||
|
||||
content = [
|
||||
radvd_template.safe_substitute(
|
||||
bridge="br{}".format(networks[net]), prefix=net
|
||||
)
|
||||
for net in networks
|
||||
if networks.get(net)
|
||||
]
|
||||
with open("/etc/radvd.conf", "w") as radvd_conf:
|
||||
radvd_conf.writelines(content)
|
||||
try:
|
||||
sp.check_output(["systemctl", "restart", "radvd"])
|
||||
except sp.CalledProcessError:
|
||||
try:
|
||||
sp.check_output(["service", "radvd", "restart"])
|
||||
except sp.CalledProcessError as err:
|
||||
raise err.__class__(
|
||||
"Cannot start/restart radvd service", err.cmd
|
||||
) from err
|
||||
3
uncloud/imagescanner/__init__.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
118
uncloud/imagescanner/main.py
Executable file
|
|
@ -0,0 +1,118 @@
|
|||
import json
|
||||
import os
|
||||
import subprocess as sp
|
||||
|
||||
from os.path import join as join_path
|
||||
from ucloud.settings import settings
|
||||
from ucloud.shared import shared
|
||||
from ucloud.imagescanner import logger
|
||||
|
||||
|
||||
def qemu_img_type(path):
|
||||
qemu_img_info_command = [
|
||||
"qemu-img",
|
||||
"info",
|
||||
"--output",
|
||||
"json",
|
||||
path,
|
||||
]
|
||||
try:
|
||||
qemu_img_info = sp.check_output(qemu_img_info_command)
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
return None
|
||||
else:
|
||||
qemu_img_info = json.loads(qemu_img_info.decode("utf-8"))
|
||||
return qemu_img_info["format"]
|
||||
|
||||
|
||||
def main():
|
||||
# We want to get image entries that request an image to be created
|
||||
images = shared.etcd_client.get_prefix(
|
||||
settings["etcd"]["image_prefix"], value_in_json=True
|
||||
)
|
||||
images_to_be_created = list(
|
||||
filter(lambda im: im.value["status"] == "TO_BE_CREATED", images)
|
||||
)
|
||||
|
||||
for image in images_to_be_created:
|
||||
try:
|
||||
image_uuid = image.key.split("/")[-1]
|
||||
image_owner = image.value["owner"]
|
||||
image_filename = image.value["filename"]
|
||||
image_store_name = image.value["store_name"]
|
||||
image_full_path = join_path(
|
||||
settings["storage"]["file_dir"],
|
||||
image_owner,
|
||||
image_filename,
|
||||
)
|
||||
|
||||
image_stores = shared.etcd_client.get_prefix(
|
||||
settings["etcd"]["image_store_prefix"],
|
||||
value_in_json=True,
|
||||
)
|
||||
user_image_store = next(
|
||||
filter(
|
||||
lambda s, store_name=image_store_name: s.value[
|
||||
"name"
|
||||
]
|
||||
== store_name,
|
||||
image_stores,
|
||||
)
|
||||
)
|
||||
|
||||
image_store_pool = user_image_store.value["attributes"][
|
||||
"pool"
|
||||
]
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(e)
|
||||
else:
|
||||
# At least our basic data is available
|
||||
qemu_img_convert_command = [
|
||||
"qemu-img",
|
||||
"convert",
|
||||
"-f",
|
||||
"qcow2",
|
||||
"-O",
|
||||
"raw",
|
||||
image_full_path,
|
||||
"image.raw",
|
||||
]
|
||||
|
||||
if qemu_img_type(image_full_path) == "qcow2":
|
||||
try:
|
||||
# Convert .qcow2 to .raw
|
||||
sp.check_output(qemu_img_convert_command,)
|
||||
|
||||
except sp.CalledProcessError:
|
||||
logger.exception(
|
||||
"Image convertion from .qcow2 to .raw failed."
|
||||
)
|
||||
else:
|
||||
# Import and Protect
|
||||
r_status = shared.storage_handler.import_image(
|
||||
src="image.raw", dest=image_uuid, protect=True
|
||||
)
|
||||
if r_status:
|
||||
# Everything is successfully done
|
||||
image.value["status"] = "CREATED"
|
||||
shared.etcd_client.put(
|
||||
image.key, json.dumps(image.value)
|
||||
)
|
||||
finally:
|
||||
try:
|
||||
os.remove("image.raw")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
else:
|
||||
# The user-provided image is either not found or in an invalid format
|
||||
image.value["status"] = "INVALID_IMAGE"
|
||||
shared.etcd_client.put(
|
||||
image.key, json.dumps(image.value)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
3
uncloud/metadata/__init__.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
119
uncloud/metadata/main.py
Normal file
|
|
@ -0,0 +1,119 @@
|
|||
import os
|
||||
|
||||
from flask import Flask, request
|
||||
from flask_restful import Resource, Api
|
||||
from werkzeug.exceptions import HTTPException
|
||||
|
||||
from ucloud.settings import settings
|
||||
from ucloud.shared import shared
|
||||
|
||||
app = Flask(__name__)
|
||||
api = Api(app)
|
||||
|
||||
app.logger.handlers.clear()
|
||||
|
||||
|
||||
@app.errorhandler(Exception)
|
||||
def handle_exception(e):
|
||||
app.logger.error(e)
|
||||
# pass through HTTP errors
|
||||
if isinstance(e, HTTPException):
|
||||
return e
|
||||
|
||||
# now you're handling non-HTTP exceptions only
|
||||
return {"message": "Server Error"}, 500
|
||||
|
||||
|
||||
def get_vm_entry(mac_addr):
|
||||
return next(
|
||||
filter(
|
||||
lambda vm: mac_addr in list(zip(*vm.network))[1],
|
||||
shared.vm_pool.vms,
|
||||
),
|
||||
None,
|
||||
)
|
||||
|
||||
|
||||
# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
|
||||
def ipv62mac(ipv6):
|
||||
# remove subnet info if given
|
||||
subnet_index = ipv6.find("/")
|
||||
if subnet_index != -1:
|
||||
ipv6 = ipv6[:subnet_index]
|
||||
|
||||
ipv6_parts = ipv6.split(":")
|
||||
mac_parts = list()
|
||||
for ipv6_part in ipv6_parts[-4:]:
|
||||
while len(ipv6_part) < 4:
|
||||
ipv6_part = "0" + ipv6_part
|
||||
mac_parts.append(ipv6_part[:2])
|
||||
mac_parts.append(ipv6_part[-2:])
|
||||
|
||||
# modify parts to match MAC value
|
||||
mac_parts[0] = "%02x" % (int(mac_parts[0], 16) ^ 2)
|
||||
del mac_parts[4]
|
||||
del mac_parts[3]
|
||||
return ":".join(mac_parts)
|
||||
|
||||
|
||||
class Root(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
data = get_vm_entry(ipv62mac(request.remote_addr))
|
||||
|
||||
if not data:
|
||||
return (
|
||||
{"message": "Metadata for such VM does not exists."},
|
||||
404,
|
||||
)
|
||||
else:
|
||||
etcd_key = os.path.join(
|
||||
settings["etcd"]["user_prefix"],
|
||||
data.value["owner_realm"],
|
||||
data.value["owner"],
|
||||
"key",
|
||||
)
|
||||
etcd_entry = shared.etcd_client.get_prefix(
|
||||
etcd_key, value_in_json=True
|
||||
)
|
||||
user_personal_ssh_keys = [key.value for key in etcd_entry]
|
||||
data.value["metadata"]["ssh-keys"] += user_personal_ssh_keys
|
||||
return data.value["metadata"], 200
|
||||
|
||||
@staticmethod
|
||||
def post():
|
||||
return {"message": "Previous Implementation is deprecated."}
|
||||
# data = etcd_client.get("/v1/metadata/{}".format(request.remote_addr), value_in_json=True)
|
||||
# print(data)
|
||||
# if data:
|
||||
# for k in request.json:
|
||||
# if k not in data.value:
|
||||
# data.value[k] = request.json[k]
|
||||
# if k.endswith("-list"):
|
||||
# data.value[k] = [request.json[k]]
|
||||
# else:
|
||||
# if k.endswith("-list"):
|
||||
# data.value[k].append(request.json[k])
|
||||
# else:
|
||||
# data.value[k] = request.json[k]
|
||||
# etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
|
||||
# data.value, value_in_json=True)
|
||||
# else:
|
||||
# data = {}
|
||||
# for k in request.json:
|
||||
# data[k] = request.json[k]
|
||||
# if k.endswith("-list"):
|
||||
# data[k] = [request.json[k]]
|
||||
# etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
|
||||
# data, value_in_json=True)
|
||||
|
||||
|
||||
api.add_resource(Root, "/")
|
||||
|
||||
|
||||
def main():
|
||||
app.run(debug=True, host="::", port="80")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
195
uncloud/network/README
Normal file
|
|
@ -0,0 +1,195 @@
|
|||
The network base - experimental
|
||||
|
||||
|
||||
We want to have 1 "main" network for convience.
|
||||
|
||||
We want to be able to create networks automatically, once a new
|
||||
customer is created -> need hooks!
|
||||
|
||||
|
||||
Mapping:
|
||||
|
||||
- each network is a "virtual" network. We use vxlan by default, but
|
||||
could be any technology!
|
||||
- we need a counter for vxlan mappings / network IDs -> cannot use
|
||||
|
||||
Model in etcd:
|
||||
|
||||
/v1/networks/
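
A hypothetical example entry (the key layout and the id value are
assumptions; the fields mirror what uncloud/host/virtualmachine.py,
earlier in this diff, reads: type, id and ipv6):

/v1/networks/{owner}/{network-name} ->
    {"type": "vxlan", "id": 100, "ipv6": "2a0a:e5c0:5::/48"}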
|
||||
|
||||
|
||||
Tests
|
||||
see
|
||||
https://vincent.bernat.ch/en/blog/2017-vxlan-linux
|
||||
|
||||
|
||||
# local 2001:db8:1::1 \
|
||||
|
||||
|
||||
netid=100
|
||||
dev=wlp2s0
|
||||
dev=wlp0s20f3
|
||||
ip -6 link add vxlan${netid} type vxlan \
|
||||
id ${netid} \
|
||||
dstport 4789 \
|
||||
group ff05::${netid} \
|
||||
dev ${dev} \
|
||||
ttl 5
|
||||
|
||||
[root@diamond ~]# ip addr add 2a0a:e5c0:5::1/48 dev vxlan100
|
||||
root@manager:~/.ssh# ip addr add 2a0a:e5c0:5::2/48 dev vxlan100
|
||||
root@manager:~/.ssh# ping -c3 2a0a:e5c0:5::1
|
||||
PING 2a0a:e5c0:5::1(2a0a:e5c0:5::1) 56 data bytes
|
||||
64 bytes from 2a0a:e5c0:5::1: icmp_seq=1 ttl=64 time=15.6 ms
|
||||
64 bytes from 2a0a:e5c0:5::1: icmp_seq=2 ttl=64 time=30.3 ms
|
||||
64 bytes from 2a0a:e5c0:5::1: icmp_seq=3 ttl=64 time=84.4 ms
|
||||
|
||||
--- 2a0a:e5c0:5::1 ping statistics ---
|
||||
3 packets transmitted, 3 received, 0% packet loss, time 2003ms
|
||||
rtt min/avg/max/mdev = 15.580/43.437/84.417/29.594 ms
|
||||
|
||||
--> works even via wifi
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Creating a network:
|
||||
|
||||
1) part of the initialisation / demo data (?)
|
||||
|
||||
We should probably provide some demo sets that can easily be used.
|
||||
|
||||
2) manual/hook based request
|
||||
|
||||
- hosts might have different network interfaces (?)
|
||||
-> this will make things very tricky -> don't support it
|
||||
- endpoint needs only support
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
IPAM
|
||||
|
||||
IP address management (IPAM) is related to networks, but needs to be
|
||||
decoupled to allow pure L2 networks.
|
||||
|
||||
From a customer point of view, we probably want to do something like:
|
||||
|
||||
- ORDERING an IPv6 network can include creating a virtual network and
|
||||
an IPAM service
|
||||
|
||||
Maybe "orders" should always be the first class citizen and ucloud
|
||||
internally "hooks" or binds things together.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
testing / hacking:
|
||||
|
||||
- starting etcd as storage
|
||||
|
||||
|
||||
[18:07] diamond:~% etcdctl put /v1/network/200 "{ some_network }"
|
||||
OK
|
||||
[18:08] diamond:~% etcdctl watch -w=json --prefix /v1/network
|
||||
{"Header":{"cluster_id":14841639068965178418,"member_id":10276657743932975437,"revision":6,"raft_term":2},"Events":[{"kv":{"key":"L3YxL25ldHdvcmsvMjAw","create_revision":5,"mod_revision":6,"version":2,"value":"eyBzb21lX25ldHdvcmsgfQ=="}}],"CompactRevision":0,"Canceled":false,"Created":false}
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Flow for using and creating networks:
|
||||
|
||||
- a network is created -> entry in etcd is created
|
||||
-> we need to keep a counter/lock so that 2 processes don't create
|
||||
the same network [Ahmed]
|
||||
-> nothing to be done on the hosts
|
||||
- a VM using a network is created
|
||||
- a VM using a network is scheduled to some host
|
||||
- the local "spawn a VM" process needs to check whether there is a
|
||||
vxlan interface existing -> if no, create it before creating the VM.
|
||||
-> if no, also create the bridge
|
||||
-> possibly adjusting the MTU (??)
|
||||
-> both names should be in hexadecimal (i.e. brff01 or vxlanff01)
|
||||
--> this way they are consistent with the multicast ipv6 address
|
||||
--> attention, ip -6 link ... id XXX expects DECIMAL input (see the sketch below)
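
A minimal sketch of that conversion (hypothetical helper, not part of
uncloud): take the hexadecimal id as stored in etcd and derive the
interface names, the decimal id for ip(8) and the multicast group:

>>> import ipaddress
>>> hex_id = "ff01"
>>> int(hex_id, 16)                        # what ip(8) wants as "id"
65281
>>> "vxlan{}".format(hex_id), "br{}".format(hex_id)
('vxlanff01', 'brff01')
>>> ipaddress.IPv6Network("ff05::/16")[int(hex_id, 16)]
IPv6Address('ff05::ff01')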
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
If we also supply IPAM:
|
||||
|
||||
- ipam needs to be created *after* the network is created
|
||||
- ipam is likely to be coupled to netbox (?)
|
||||
--> we need a "get next /64 prefix" function
|
||||
- when an ipam service is created in etcd, we need to create a new
|
||||
radvd instance on all routers (this will be a different service on
|
||||
BSDs)
|
||||
- we will need to create a new vxlan device on the routers
|
||||
- we need to create a new / modify radvd.conf
|
||||
- only after all of the routers have reloaded radvd is the ipam service
|
||||
available!
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
If the user requests an IPv4 VM:
|
||||
|
||||
- we need to get the next free IPv4 address (again, netbox?)
|
||||
- we need to create a mapping entry on the routers for NAT64
|
||||
--> this requires the VM to be in a network with IPAM
|
||||
--> we always assume that the VM embeds itself using EUI64 (see the sketch below)
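
A hypothetical sketch (not uncloud code; it is the inverse of
ipv62mac() in uncloud/metadata/main.py earlier in this diff) of
deriving the VM's EUI-64 address from its MAC and an example /64
prefix:

>>> import ipaddress
>>> def mac2ipv6(mac, prefix):
...     parts = mac.split(":")
...     parts[0] = "%02x" % (int(parts[0], 16) ^ 2)  # flip the universal/local bit
...     parts[3:3] = ["ff", "fe"]                    # EUI-64: insert ff:fe in the middle
...     return ipaddress.IPv6Network(prefix)[int("".join(parts), 16)]
...
>>> mac2ipv6("02:00:00:00:00:01", "2a0a:e5c0:5::/64")
IPv6Address('2a0a:e5c0:5::ff:fe00:1')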
|
||||
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
mac address handling!
|
||||
|
||||
Example
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
TODOs
|
||||
|
||||
- create-vxlan-on-dev.sh -> the multicast group
|
||||
needs to be ff05:: +int(vxlan_id)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Python hints:
|
||||
|
||||
>>> vxlan_id = 3400
|
||||
>>> b = ipaddress.IPv6Network("ff05::/16")
|
||||
>>> b[vxlan_id]
|
||||
IPv6Address('ff05::d48')
|
||||
|
||||
we need / should assign hex values for vxlan ids in etcd!
|
||||
--> easier to read
|
||||
|
||||
>>> b[0x3400]
|
||||
IPv6Address('ff05::3400')
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Bridge names are limited to 15 characters
|
||||
|
||||
|
||||
Maximum/highest number of vxlan:
|
||||
|
||||
>>> 2**24
|
||||
16777216
|
||||
>>> (2**25)-1
|
||||
33554431
|
||||
|
||||
>>> b[33554431]
|
||||
IPv6Address('ff05::1ff:ffff')
|
||||
|
||||
Last interface:
|
||||
br1ffffff
|
||||
vxlan1ffffff
|
||||
|
||||
root@manager:~/ucloud/network# ip -6 link add vxlan1ffffff type vxlan id 33554431 dstport 4789 group ff05::1ff:ffff dev wlp2s0 ttl 5
|
||||
Error: argument "33554431" is wrong: invalid id
|
||||
|
||||
root@manager:~/ucloud/network# ip -6 link add vxlanffffff type vxlan id 16777215 dstport 4789 group ff05::ff:ffff dev wlp2s0 ttl 5
|
||||
|
||||
|
||||
# id needs to be decimal
|
||||
root@manager:~# ip -6 link add vxlanff01 type vxlan id ff01 dstport 4789 group ff05::ff01 dev ttl 5
|
||||
Error: argument "ff01" is wrong: invalid id
|
||||
root@manager:~# ip -6 link add vxlanff01 type vxlan id 65281 dstport 4789 group ff05::ff01 dev wlp2s0 ttl 5
|
||||
0
uncloud/network/__init__.py
Normal file
24
uncloud/network/create-bridge.sh
Executable file
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -ne 3 ]; then
|
||||
echo "$0 brid dev ip"
|
||||
echo "f.g. $0 100 vxlan100 fd00:/64"
|
||||
echo "Missing arguments" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
brid=$1; shift
|
||||
dev=$1; shift
|
||||
ip=$1; shift
|
||||
bridge=br${brid}
|
||||
|
||||
sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
|
||||
|
||||
if ! ip link show $bridge > /dev/null 2> /dev/null; then
|
||||
ip link add name $bridge type bridge
|
||||
ip link set $bridge up
|
||||
ip link set $dev master $bridge
|
||||
ip address add $ip dev $bridge
|
||||
fi
|
||||
|
||||
echo $bridge
|
||||
22
uncloud/network/create-tap.sh
Executable file
|
|
@ -0,0 +1,22 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "$0 tapid dev"
|
||||
echo "f.g. $0 100 br100"
|
||||
echo "Missing arguments" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tapid=$1; shift
|
||||
bridge=$1; shift
|
||||
vxlan=vxlan${tapid}
|
||||
tap=tap${tapid}
|
||||
|
||||
if ! ip link show $tap > /dev/null 2> /dev/null; then
|
||||
ip tuntap add $tap mode tap user `whoami`
|
||||
ip link set $tap up
|
||||
sleep 0.5s
|
||||
ip link set $tap master $bridge
|
||||
fi
|
||||
|
||||
echo $tap
|
||||
26
uncloud/network/create-vxlan.sh
Executable file
|
|
@ -0,0 +1,26 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -ne 2 ]; then
|
||||
echo "$0 vxlanid dev"
|
||||
echo "f.i. $0 100 eno1"
|
||||
echo "Missing arguments" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
netid=$1; shift
|
||||
dev=$1; shift
|
||||
vxlan=vxlan${netid}
|
||||
|
||||
if ! ip link show $vxlan > /dev/null 2> /dev/null; then
|
||||
ip -6 link add $vxlan type vxlan \
|
||||
id $netid \
|
||||
dstport 4789 \
|
||||
group ff05::$netid \
|
||||
dev $dev \
|
||||
ttl 5
|
||||
|
||||
ip link set $dev up
|
||||
ip link set $vxlan up
|
||||
fi
|
||||
|
||||
echo $vxlan
|
||||
13
uncloud/network/radvd-template.conf
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
interface $bridge
|
||||
{
|
||||
AdvSendAdvert on;
|
||||
MinRtrAdvInterval 3;
|
||||
MaxRtrAdvInterval 5;
|
||||
AdvDefaultLifetime 10;
|
||||
|
||||
prefix $prefix { };
|
||||
|
||||
RDNSS 2a0a:e5c0:2:1::5 2a0a:e5c0:2:1::6 { AdvRDNSSLifetime 6000; };
|
||||
DNSSL place6.ungleich.ch { AdvDNSSLLifetime 6000; } ;
|
||||
};
|
||||
|
||||
3
uncloud/scheduler/__init__.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
138
uncloud/scheduler/helper.py
Executable file
|
|
@ -0,0 +1,138 @@
|
|||
from collections import Counter
|
||||
from functools import reduce
|
||||
|
||||
import bitmath
|
||||
|
||||
from ucloud.common.host import HostStatus
|
||||
from ucloud.common.request import RequestEntry, RequestType
|
||||
from ucloud.common.vm import VMStatus
|
||||
from ucloud.shared import shared
|
||||
from ucloud.settings import settings
|
||||
|
||||
|
||||
def accumulated_specs(vms_specs):
|
||||
if not vms_specs:
|
||||
return {}
|
||||
return reduce((lambda x, y: Counter(x) + Counter(y)), vms_specs)
|
||||
|
||||
|
||||
def remaining_resources(host_specs, vms_specs):
|
||||
# Return remaining resources: host_specs - vms_specs
|
||||
|
||||
_vms_specs = Counter(vms_specs)
|
||||
_remaining = Counter(host_specs)
|
||||
|
||||
for component in _vms_specs:
|
||||
if isinstance(_vms_specs[component], str):
|
||||
_vms_specs[component] = int(
|
||||
bitmath.parse_string_unsafe(
|
||||
_vms_specs[component]
|
||||
).to_MB()
|
||||
)
|
||||
elif isinstance(_vms_specs[component], list):
|
||||
_vms_specs[component] = map(
|
||||
lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
|
||||
_vms_specs[component],
|
||||
)
|
||||
_vms_specs[component] = reduce(
|
||||
lambda x, y: x + y, _vms_specs[component], 0
|
||||
)
|
||||
|
||||
for component in _remaining:
|
||||
if isinstance(_remaining[component], str):
|
||||
_remaining[component] = int(
|
||||
bitmath.parse_string_unsafe(
|
||||
_remaining[component]
|
||||
).to_MB()
|
||||
)
|
||||
elif isinstance(_remaining[component], list):
|
||||
_remaining[component] = map(
|
||||
lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
|
||||
_remaining[component],
|
||||
)
|
||||
_remaining[component] = reduce(
|
||||
lambda x, y: x + y, _remaining[component], 0
|
||||
)
|
||||
|
||||
_remaining.subtract(_vms_specs)
|
||||
|
||||
return _remaining
|
||||
|
||||
|
||||
class NoSuitableHostFound(Exception):
|
||||
"""Exception when no host found that can host a VM."""
|
||||
|
||||
|
||||
def get_suitable_host(vm_specs, hosts=None):
|
||||
if hosts is None:
|
||||
hosts = shared.host_pool.by_status(HostStatus.alive)
|
||||
|
||||
for host in hosts:
|
||||
# Filter them by host_name
|
||||
vms = shared.vm_pool.by_host(host.key)
|
||||
|
||||
# Filter them by status
|
||||
vms = shared.vm_pool.by_status(VMStatus.running, vms)
|
||||
|
||||
running_vms_specs = [vm.specs for vm in vms]
|
||||
|
||||
# Accumulate all of their combined specs
|
||||
running_vms_accumulated_specs = accumulated_specs(
|
||||
running_vms_specs
|
||||
)
|
||||
|
||||
# Find out remaining resources after
|
||||
# host_specs - already running vm_specs
|
||||
remaining = remaining_resources(
|
||||
host.specs, running_vms_accumulated_specs
|
||||
)
|
||||
|
||||
# Find out remaining - new_vm_specs
|
||||
remaining = remaining_resources(remaining, vm_specs)
|
||||
|
||||
if all(map(lambda x: x >= 0, remaining.values())):
|
||||
return host.key
|
||||
|
||||
raise NoSuitableHostFound
|
||||
|
||||
|
||||
def dead_host_detection():
|
||||
# Bring out your dead! - Monty Python and the Holy Grail
|
||||
hosts = shared.host_pool.by_status(HostStatus.alive)
|
||||
dead_hosts_keys = []
|
||||
|
||||
for host in hosts:
|
||||
# Only check those that claim to be alive
|
||||
if host.status == HostStatus.alive:
|
||||
if not host.is_alive():
|
||||
dead_hosts_keys.append(host.key)
|
||||
|
||||
return dead_hosts_keys
|
||||
|
||||
|
||||
def dead_host_mitigation(dead_hosts_keys):
|
||||
for host_key in dead_hosts_keys:
|
||||
host = shared.host_pool.get(host_key)
|
||||
host.declare_dead()
|
||||
|
||||
vms_hosted_on_dead_host = shared.vm_pool.by_host(host_key)
|
||||
for vm in vms_hosted_on_dead_host:
|
||||
vm.status = "UNKNOWN"
|
||||
shared.vm_pool.put(vm)
|
||||
shared.host_pool.put(host)
|
||||
|
||||
|
||||
def assign_host(vm):
|
||||
vm.hostname = get_suitable_host(vm.specs)
|
||||
shared.vm_pool.put(vm)
|
||||
|
||||
r = RequestEntry.from_scratch(
|
||||
type=RequestType.StartVM,
|
||||
uuid=vm.uuid,
|
||||
hostname=vm.hostname,
|
||||
request_prefix=settings["etcd"]["request_prefix"],
|
||||
)
|
||||
shared.request_pool.put(r)
|
||||
|
||||
vm.log.append("VM scheduled for starting")
|
||||
return vm.hostname
|
||||
75
uncloud/scheduler/main.py
Executable file
|
|
@ -0,0 +1,75 @@
|
|||
# TODO
|
||||
# 1. send an email to an email address defined by env['admin-email']
|
||||
# if resources are finished
|
||||
# 2. Introduce a status endpoint of the scheduler -
|
||||
# maybe expose a prometheus compatible output
|
||||
|
||||
from ucloud.common.request import RequestEntry, RequestType
|
||||
from ucloud.shared import shared
|
||||
from ucloud.settings import settings
|
||||
from .helper import (
|
||||
dead_host_mitigation,
|
||||
dead_host_detection,
|
||||
assign_host,
|
||||
NoSuitableHostFound,
|
||||
)
|
||||
from . import logger
|
||||
|
||||
|
||||
def main():
|
||||
for request_iterator in [
|
||||
shared.etcd_client.get_prefix(
|
||||
settings["etcd"]["request_prefix"], value_in_json=True
|
||||
),
|
||||
shared.etcd_client.watch_prefix(
|
||||
settings["etcd"]["request_prefix"],
|
||||
timeout=5,
|
||||
value_in_json=True,
|
||||
),
|
||||
]:
|
||||
for request_event in request_iterator:
|
||||
request_entry = RequestEntry(request_event)
|
||||
# Never run a time-critical mechanism inside the timeout
|
||||
# mechanism, because the timeout event only fires when no
|
||||
# other event is happening. This means that under heavy
|
||||
# load there may never be a timeout event.
|
||||
if request_entry.type == "TIMEOUT":
|
||||
|
||||
# Detect hosts that are dead and set their status
|
||||
# to "DEAD", and their VMs' status to "KILLED"
|
||||
dead_hosts = dead_host_detection()
|
||||
if dead_hosts:
|
||||
logger.debug("Dead hosts: %s", dead_hosts)
|
||||
dead_host_mitigation(dead_hosts)
|
||||
|
||||
elif request_entry.type == RequestType.ScheduleVM:
|
||||
print(request_event.value)
|
||||
logger.debug(
|
||||
"%s, %s", request_entry.key, request_entry.value
|
||||
)
|
||||
|
||||
vm_entry = shared.vm_pool.get(request_entry.uuid)
|
||||
if vm_entry is None:
|
||||
logger.info(
|
||||
"Trying to act on {} but it is deleted".format(
|
||||
request_entry.uuid
|
||||
)
|
||||
)
|
||||
continue
|
||||
shared.etcd_client.client.delete(
|
||||
request_entry.key
|
||||
) # consume Request
|
||||
|
||||
try:
|
||||
assign_host(vm_entry)
|
||||
except NoSuitableHostFound:
|
||||
vm_entry.add_log(
|
||||
"Can't schedule VM. No Resource Left."
|
||||
)
|
||||
shared.vm_pool.put(vm_entry)
|
||||
|
||||
logger.info("No Resource Left. Emailing admin....")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
0
uncloud/scheduler/tests/__init__.py
Normal file
233
uncloud/scheduler/tests/test_basics.py
Executable file
|
|
@ -0,0 +1,233 @@
|
|||
import json
|
||||
import multiprocessing
|
||||
import sys
|
||||
import unittest
|
||||
from datetime import datetime
|
||||
from os.path import dirname
|
||||
|
||||
BASE_DIR = dirname(dirname(__file__))
|
||||
sys.path.insert(0, BASE_DIR)
|
||||
|
||||
from main import (
|
||||
accumulated_specs,
|
||||
remaining_resources,
|
||||
VmPool,
|
||||
main,
|
||||
)
|
||||
|
||||
from ucloud.config import etcd_client
|
||||
|
||||
|
||||
class TestFunctions(unittest.TestCase):
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.client = etcd_client
|
||||
cls.host_prefix = "/test/host"
|
||||
cls.vm_prefix = "/test/vm"
|
||||
|
||||
# These deletions could also be in
|
||||
# tearDown(), but it is more appropriate here
|
||||
# as it enables us to check the etcd store
|
||||
# even after the test has run
|
||||
cls.client.client.delete_prefix(cls.host_prefix)
|
||||
cls.client.client.delete_prefix(cls.vm_prefix)
|
||||
cls.create_hosts(cls)
|
||||
cls.create_vms(cls)
|
||||
|
||||
cls.p = multiprocessing.Process(
|
||||
target=main, args=[cls.vm_prefix, cls.host_prefix]
|
||||
)
|
||||
cls.p.start()
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
cls.p.terminate()
|
||||
|
||||
def create_hosts(self):
|
||||
host1 = {
|
||||
"cpu": 32,
|
||||
"ram": 128,
|
||||
"hdd": 1024,
|
||||
"sdd": 0,
|
||||
"status": "ALIVE",
|
||||
"last_heartbeat": datetime.utcnow().isoformat(),
|
||||
}
|
||||
host2 = {
|
||||
"cpu": 16,
|
||||
"ram": 64,
|
||||
"hdd": 512,
|
||||
"sdd": 0,
|
||||
"status": "ALIVE",
|
||||
"last_heartbeat": datetime.utcnow().isoformat(),
|
||||
}
|
||||
|
||||
host3 = {
|
||||
"cpu": 16,
|
||||
"ram": 32,
|
||||
"hdd": 256,
|
||||
"sdd": 256,
|
||||
"status": "ALIVE",
|
||||
"last_heartbeat": datetime.utcnow().isoformat(),
|
||||
}
|
||||
with self.client.client.lock("lock"):
|
||||
self.client.put(
|
||||
f"{self.host_prefix}/1", host1, value_in_json=True
|
||||
)
|
||||
self.client.put(
|
||||
f"{self.host_prefix}/2", host2, value_in_json=True
|
||||
)
|
||||
self.client.put(
|
||||
f"{self.host_prefix}/3", host3, value_in_json=True
|
||||
)
|
||||
|
||||
def create_vms(self):
|
||||
vm1 = json.dumps(
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {"cpu": 4, "ram": 8, "hdd": 100, "sdd": 256},
|
||||
"hostname": "",
|
||||
"status": "REQUESTED_NEW",
|
||||
}
|
||||
)
|
||||
vm2 = json.dumps(
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {"cpu": 16, "ram": 64, "hdd": 512, "sdd": 0},
|
||||
"hostname": "",
|
||||
"status": "REQUESTED_NEW",
|
||||
}
|
||||
)
|
||||
vm3 = json.dumps(
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {"cpu": 16, "ram": 32, "hdd": 128, "sdd": 0},
|
||||
"hostname": "",
|
||||
"status": "REQUESTED_NEW",
|
||||
}
|
||||
)
|
||||
vm4 = json.dumps(
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {"cpu": 16, "ram": 64, "hdd": 512, "sdd": 0},
|
||||
"hostname": "",
|
||||
"status": "REQUESTED_NEW",
|
||||
}
|
||||
)
|
||||
vm5 = json.dumps(
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {"cpu": 2, "ram": 2, "hdd": 10, "sdd": 0},
|
||||
"hostname": "",
|
||||
"status": "REQUESTED_NEW",
|
||||
}
|
||||
)
|
||||
vm6 = json.dumps(
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
|
||||
"hostname": "",
|
||||
"status": "REQUESTED_NEW",
|
||||
}
|
||||
)
|
||||
vm7 = json.dumps(
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
|
||||
"hostname": "",
|
||||
"status": "REQUESTED_NEW",
|
||||
}
|
||||
)
|
||||
self.client.put(f"{self.vm_prefix}/1", vm1)
|
||||
self.client.put(f"{self.vm_prefix}/2", vm2)
|
||||
self.client.put(f"{self.vm_prefix}/3", vm3)
|
||||
self.client.put(f"{self.vm_prefix}/4", vm4)
|
||||
self.client.put(f"{self.vm_prefix}/5", vm5)
|
||||
self.client.put(f"{self.vm_prefix}/6", vm6)
|
||||
self.client.put(f"{self.vm_prefix}/7", vm7)
|
||||
|
||||
def test_accumulated_specs(self):
|
||||
vms = [
|
||||
{"ssd": 10, "cpu": 4, "ram": 8},
|
||||
{"hdd": 10, "cpu": 4, "ram": 8},
|
||||
{"cpu": 8, "ram": 32},
|
||||
]
|
||||
self.assertEqual(
|
||||
accumulated_specs(vms),
|
||||
{"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10},
|
||||
)
|
||||
|
||||
def test_remaining_resources(self):
|
||||
host_specs = {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
|
||||
vms_specs = {"ssd": 10, "cpu": 32, "ram": 12, "hdd": 0}
|
||||
resultant_specs = {"ssd": 0, "cpu": -16, "ram": 36, "hdd": 10}
|
||||
self.assertEqual(
|
||||
remaining_resources(host_specs, vms_specs), resultant_specs
|
||||
)
|
||||
|
||||
def test_vmpool(self):
|
||||
self.p.join(1)
|
||||
vm_pool = VmPool(self.client, self.vm_prefix)
|
||||
|
||||
# vm_pool by host
|
||||
actual = vm_pool.by_host(vm_pool.vms, f"{self.host_prefix}/3")
|
||||
ground_truth = [
|
||||
(
|
||||
f"{self.vm_prefix}/1",
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {
|
||||
"cpu": 4,
|
||||
"ram": 8,
|
||||
"hdd": 100,
|
||||
"sdd": 256,
|
||||
},
|
||||
"hostname": f"{self.host_prefix}/3",
|
||||
"status": "SCHEDULED_DEPLOY",
|
||||
},
|
||||
)
|
||||
]
|
||||
self.assertEqual(actual[0], ground_truth[0])
|
||||
|
||||
# vm_pool by status
|
||||
actual = vm_pool.by_status(vm_pool.vms, "REQUESTED_NEW")
|
||||
ground_truth = [
|
||||
(
|
||||
f"{self.vm_prefix}/7",
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {
|
||||
"cpu": 10,
|
||||
"ram": 22,
|
||||
"hdd": 146,
|
||||
"sdd": 0,
|
||||
},
|
||||
"hostname": "",
|
||||
"status": "REQUESTED_NEW",
|
||||
},
|
||||
)
|
||||
]
|
||||
self.assertEqual(actual[0], ground_truth[0])
|
||||
|
||||
# vm_pool by except status
|
||||
actual = vm_pool.except_status(vm_pool.vms, "SCHEDULED_DEPLOY")
|
||||
ground_truth = [
|
||||
(
|
||||
f"{self.vm_prefix}/7",
|
||||
{
|
||||
"owner": "meow",
|
||||
"specs": {
|
||||
"cpu": 10,
|
||||
"ram": 22,
|
||||
"hdd": 146,
|
||||
"sdd": 0,
|
||||
},
|
||||
"hostname": "",
|
||||
"status": "REQUESTED_NEW",
|
||||
},
|
||||
)
|
||||
]
|
||||
self.assertEqual(actual[0], ground_truth[0])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
83
uncloud/scheduler/tests/test_dead_host_mechanism.py
Executable file
|
|
@ -0,0 +1,83 @@
|
|||
import sys
|
||||
import unittest
|
||||
from datetime import datetime
|
||||
from os.path import dirname
|
||||
|
||||
BASE_DIR = dirname(dirname(__file__))
|
||||
sys.path.insert(0, BASE_DIR)
|
||||
|
||||
from main import dead_host_detection, dead_host_mitigation, config
|
||||
|
||||
|
||||
class TestDeadHostMechanism(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.client = config.etcd_client
|
||||
self.host_prefix = "/test/host"
|
||||
self.vm_prefix = "/test/vm"
|
||||
|
||||
self.client.client.delete_prefix(self.host_prefix)
|
||||
self.client.client.delete_prefix(self.vm_prefix)
|
||||
|
||||
self.create_hosts()
|
||||
|
||||
def create_hosts(self):
|
||||
host1 = {
|
||||
"cpu": 32,
|
||||
"ram": 128,
|
||||
"hdd": 1024,
|
||||
"sdd": 0,
|
||||
"status": "ALIVE",
|
||||
"last_heartbeat": datetime.utcnow().isoformat(),
|
||||
}
|
||||
host2 = {
|
||||
"cpu": 16,
|
||||
"ram": 64,
|
||||
"hdd": 512,
|
||||
"sdd": 0,
|
||||
"status": "ALIVE",
|
||||
"last_heartbeat": datetime(2011, 1, 1).isoformat(),
|
||||
}
|
||||
|
||||
host3 = {"cpu": 16, "ram": 32, "hdd": 256, "sdd": 256}
|
||||
host4 = {
|
||||
"cpu": 16,
|
||||
"ram": 32,
|
||||
"hdd": 256,
|
||||
"sdd": 256,
|
||||
"status": "DEAD",
|
||||
"last_heartbeat": datetime(2011, 1, 1).isoformat(),
|
||||
}
|
||||
with self.client.client.lock("lock"):
|
||||
self.client.put(
|
||||
f"{self.host_prefix}/1", host1, value_in_json=True
|
||||
)
|
||||
self.client.put(
|
||||
f"{self.host_prefix}/2", host2, value_in_json=True
|
||||
)
|
||||
self.client.put(
|
||||
f"{self.host_prefix}/3", host3, value_in_json=True
|
||||
)
|
||||
self.client.put(
|
||||
f"{self.host_prefix}/4", host4, value_in_json=True
|
||||
)
|
||||
|
||||
def test_dead_host_detection(self):
|
||||
hosts = self.client.get_prefix(
|
||||
self.host_prefix, value_in_json=True
|
||||
)
|
||||
deads = dead_host_detection(hosts)
|
||||
self.assertEqual(deads, ["/test/host/2", "/test/host/3"])
|
||||
return deads
|
||||
|
||||
def test_dead_host_mitigation(self):
|
||||
deads = self.test_dead_host_detection()
|
||||
dead_host_mitigation(self.client, deads)
|
||||
hosts = self.client.get_prefix(
|
||||
self.host_prefix, value_in_json=True
|
||||
)
|
||||
deads = dead_host_detection(hosts)
|
||||
self.assertEqual(deads, [])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
122
uncloud/settings/__init__.py
Normal file
|
|
@ -0,0 +1,122 @@
|
|||
import configparser
|
||||
import logging
|
||||
import sys
|
||||
import os
|
||||
|
||||
from ucloud.common.etcd_wrapper import Etcd3Wrapper
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CustomConfigParser(configparser.RawConfigParser):
|
||||
def __getitem__(self, key):
|
||||
try:
|
||||
result = super().__getitem__(key)
|
||||
except KeyError as err:
|
||||
raise KeyError(
|
||||
"Key '{}' not found in configuration. Make sure you configure ucloud.".format(
|
||||
key
|
||||
)
|
||||
) from err
|
||||
else:
|
||||
return result
|
||||
|
||||
|
||||
class Settings(object):
|
||||
def __init__(self, config_key="/uncloud/config/"):
|
||||
conf_name = "ucloud.conf"
|
||||
conf_dir = os.environ.get(
|
||||
"UCLOUD_CONF_DIR", os.path.expanduser("~/ucloud/")
|
||||
)
|
||||
self.config_file = os.path.join(conf_dir, conf_name)
|
||||
|
||||
self.config_parser = CustomConfigParser(allow_no_value=True)
|
||||
self.config_key = config_key
|
||||
|
||||
self.read_internal_values()
|
||||
try:
|
||||
self.config_parser.read(self.config_file)
|
||||
except Exception as err:
|
||||
logger.error("%s", err)
|
||||
|
||||
def get_etcd_client(self):
|
||||
args = tuple()
|
||||
try:
|
||||
kwargs = {
|
||||
"host": self.config_parser.get("etcd", "url"),
|
||||
"port": self.config_parser.get("etcd", "port"),
|
||||
"ca_cert": self.config_parser.get("etcd", "ca_cert"),
|
||||
"cert_cert": self.config_parser.get(
|
||||
"etcd", "cert_cert"
|
||||
),
|
||||
"cert_key": self.config_parser.get("etcd", "cert_key"),
|
||||
}
|
||||
except configparser.Error as err:
|
||||
raise configparser.Error(
|
||||
"{} in config file {}".format(
|
||||
err.message, self.config_file
|
||||
)
|
||||
) from err
|
||||
else:
|
||||
try:
|
||||
wrapper = Etcd3Wrapper(*args, **kwargs)
|
||||
except Exception as err:
|
||||
logger.error(
|
||||
"etcd connection not successfull. Please check your config file."
|
||||
"\nDetails: %s\netcd connection parameters: %s",
|
||||
err,
|
||||
kwargs,
|
||||
)
|
||||
sys.exit(1)
|
||||
else:
|
||||
return wrapper
|
||||
|
||||
def read_internal_values(self):
|
||||
self.config_parser.read_dict(
|
||||
{
|
||||
"etcd": {
|
||||
"file_prefix": "/files/",
|
||||
"host_prefix": "/hosts/",
|
||||
"image_prefix": "/images/",
|
||||
"image_store_prefix": "/imagestore/",
|
||||
"network_prefix": "/networks/",
|
||||
"request_prefix": "/requests/",
|
||||
"user_prefix": "/users/",
|
||||
"vm_prefix": "/vms/",
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
def read_config_file_values(self, config_file):
|
||||
try:
|
||||
# Trying to read configuration file
|
||||
with open(config_file, "r") as config_file_handle:
|
||||
self.config_parser.read_file(config_file_handle)
|
||||
except FileNotFoundError:
|
||||
sys.exit(
|
||||
"Configuration file {} not found!".format(config_file)
|
||||
)
|
||||
except Exception as err:
|
||||
logger.exception(err)
|
||||
sys.exit("Error occurred while reading configuration file")
|
||||
|
||||
def read_values_from_etcd(self):
|
||||
etcd_client = self.get_etcd_client()
|
||||
config_from_etcd = etcd_client.get(
|
||||
self.config_key, value_in_json=True
|
||||
)
|
||||
if config_from_etcd:
|
||||
self.config_parser.read_dict(config_from_etcd.value)
|
||||
else:
|
||||
raise KeyError(
|
||||
"Key '{}' not found in etcd. Please configure ucloud.".format(
|
||||
self.config_key
|
||||
)
|
||||
)
|
||||
|
||||
def __getitem__(self, key):
|
||||
self.read_values_from_etcd()
|
||||
return self.config_parser[key]
|
||||
|
||||
|
||||
settings = Settings()
|
||||
34
uncloud/shared/__init__.py
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
from ucloud.settings import settings
|
||||
from ucloud.common.vm import VmPool
|
||||
from ucloud.common.host import HostPool
|
||||
from ucloud.common.request import RequestPool
|
||||
from ucloud.common.storage_handlers import get_storage_handler
|
||||
|
||||
|
||||
class Shared:
|
||||
@property
|
||||
def etcd_client(self):
|
||||
return settings.get_etcd_client()
|
||||
|
||||
@property
|
||||
def host_pool(self):
|
||||
return HostPool(
|
||||
self.etcd_client, settings["etcd"]["host_prefix"]
|
||||
)
|
||||
|
||||
@property
|
||||
def vm_pool(self):
|
||||
return VmPool(self.etcd_client, settings["etcd"]["vm_prefix"])
|
||||
|
||||
@property
|
||||
def request_pool(self):
|
||||
return RequestPool(
|
||||
self.etcd_client, settings["etcd"]["request_prefix"]
|
||||
)
|
||||
|
||||
@property
|
||||
def storage_handler(self):
|
||||
return get_storage_handler()
|
||||
|
||||
|
||||
shared = Shared()
|
||||
292
uncloud/vmm/__init__.py
Normal file
|
|
@ -0,0 +1,292 @@
|
|||
import os
|
||||
import subprocess as sp
|
||||
import logging
|
||||
import socket
|
||||
import json
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
from contextlib import suppress
|
||||
from multiprocessing import Process
|
||||
from os.path import join as join_path
|
||||
from os.path import isdir
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VMQMPHandles:
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
self.sock = socket.socket(socket.AF_UNIX)
|
||||
self.file = self.sock.makefile()
|
||||
|
||||
def __enter__(self):
|
||||
self.sock.connect(self.path)
|
||||
|
||||
# eat qmp greetings
|
||||
self.file.readline()
|
||||
|
||||
# init qmp
|
||||
self.sock.sendall(b'{ "execute": "qmp_capabilities" }')
|
||||
self.file.readline()
|
||||
|
||||
return self.sock, self.file
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.file.close()
|
||||
self.sock.close()
|
||||
|
||||
if exc_type:
|
||||
logger.error(
|
||||
"Couldn't get handle for VM.", exc_type, exc_val, exc_tb
|
||||
)
|
||||
raise exc_type("Couldn't get handle for VM.") from exc_val
|
||||
|
||||
|
||||
class TransferVM(Process):
|
||||
def __init__(self, src_uuid, dest_sock_path, host, socket_dir):
|
||||
self.src_uuid = src_uuid
|
||||
self.host = host
|
||||
self.src_sock_path = os.path.join(socket_dir, self.src_uuid)
|
||||
self.dest_sock_path = dest_sock_path
|
||||
|
||||
super().__init__()
|
||||
|
||||
def run(self):
|
||||
with suppress(FileNotFoundError):
|
||||
os.remove(self.src_sock_path)
|
||||
|
||||
command = [
|
||||
"ssh",
|
||||
"-nNT",
|
||||
"-L",
|
||||
"{}:{}".format(self.src_sock_path, self.dest_sock_path),
|
||||
"root@{}".format(self.host),
|
||||
]
|
||||
|
||||
try:
|
||||
p = sp.Popen(command)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Couldn' forward unix socks over ssh.", exc_info=e
|
||||
)
|
||||
else:
|
||||
time.sleep(2)
|
||||
vmm = VMM()
|
||||
logger.debug("Executing: ssh forwarding command: %s", command)
|
||||
vmm.execute_command(
|
||||
self.src_uuid,
|
||||
command="migrate",
|
||||
arguments={"uri": "unix:{}".format(self.src_sock_path)},
|
||||
)
|
||||
|
||||
while p.poll() is None:
|
||||
success, output = vmm.execute_command(self.src_uuid, command="query-migrate")
|
||||
if success:
|
||||
status = output["return"]["status"]
|
||||
logger.info('Migration Status: {}'.format(status))
|
||||
if status == "completed":
|
||||
vmm.stop(self.src_uuid)
|
||||
return
|
||||
elif status in ['failed', 'cancelled']:
|
||||
return
|
||||
else:
|
||||
logger.error("Couldn't be able to query VM {} that was in migration".format(self.src_uuid))
|
||||
return
|
||||
|
||||
time.sleep(2)
|
||||
|
||||
|
||||
class VMM:
|
||||
# Virtual Machine Manager
|
||||
def __init__(
|
||||
self,
|
||||
qemu_path="/usr/bin/qemu-system-x86_64",
|
||||
vmm_backend=os.path.expanduser("~/ucloud/vmm/"),
|
||||
):
|
||||
self.qemu_path = qemu_path
|
||||
self.vmm_backend = vmm_backend
|
||||
self.socket_dir = os.path.join(self.vmm_backend, "sock")
|
||||
|
||||
if not os.path.isdir(self.vmm_backend):
|
||||
logger.info(
|
||||
"{} does not exists. Creating it...".format(
|
||||
self.vmm_backend
|
||||
)
|
||||
)
|
||||
os.makedirs(self.vmm_backend, exist_ok=True)
|
||||
|
||||
if not os.path.isdir(self.socket_dir):
|
||||
logger.info(
|
||||
"{} does not exists. Creating it...".format(
|
||||
self.socket_dir
|
||||
)
|
||||
)
|
||||
os.makedirs(self.socket_dir, exist_ok=True)
|
||||
|
||||
def is_running(self, uuid):
|
||||
sock_path = os.path.join(self.vmm_backend, uuid)
|
||||
try:
|
||||
sock = socket.socket(socket.AF_UNIX)
|
||||
sock.connect(sock_path)
|
||||
recv = sock.recv(4096)
|
||||
except Exception as err:
|
||||
# unix sock doesn't exist or is closed
|
||||
logger.debug(
|
||||
"VM {} sock either don' exists or it is closed. It mean VM is stopped.".format(
|
||||
uuid
|
||||
),
|
||||
exc_info=err,
|
||||
)
|
||||
else:
|
||||
# if we receive greetings from qmp, it means the VM is running
|
||||
if len(recv) > 0:
|
||||
return True
|
||||
|
||||
with suppress(FileNotFoundError):
|
||||
os.remove(sock_path)
|
||||
|
||||
return False
|
||||
|
||||
def start(self, *args, uuid, migration=False):
|
||||
# start --> success?
|
||||
migration_args = ()
|
||||
if migration:
|
||||
migration_args = (
|
||||
"-incoming",
|
||||
"unix:{}".format(os.path.join(self.socket_dir, uuid)),
|
||||
)
|
||||
|
||||
if self.is_running(uuid):
|
||||
logger.warning("Cannot start VM. It is already running.")
|
||||
else:
|
||||
qmp_arg = (
|
||||
"-qmp",
|
||||
"unix:{},server,nowait".format(
|
||||
join_path(self.vmm_backend, uuid)
|
||||
),
|
||||
)
|
||||
vnc_arg = (
|
||||
"-vnc",
|
||||
"unix:{}".format(tempfile.NamedTemporaryFile().name),
|
||||
)
|
||||
|
||||
command = [
|
||||
"sudo",
|
||||
"-p",
|
||||
"Enter password to start VM {}: ".format(uuid),
|
||||
self.qemu_path,
|
||||
*args,
|
||||
*qmp_arg,
|
||||
*migration_args,
|
||||
*vnc_arg,
|
||||
"-daemonize",
|
||||
]
|
||||
try:
|
||||
sp.check_output(command, stderr=sp.PIPE)
|
||||
except sp.CalledProcessError as err:
|
||||
logger.exception(
|
||||
"Error occurred while starting VM.\nDetail %s",
|
||||
err.stderr.decode("utf-8"),
|
||||
)
|
||||
else:
|
||||
with suppress(sp.CalledProcessError):
|
||||
sp.check_output(
|
||||
[
|
||||
"sudo",
|
||||
"-p",
|
||||
"Enter password to correct permission for uncloud-vmm's directory",
|
||||
"chmod",
|
||||
"-R",
|
||||
"o=rwx,g=rwx",
|
||||
self.vmm_backend,
|
||||
]
|
||||
)
|
||||
|
||||
# TODO: Find some good way to check whether the virtual machine is up and
|
||||
# running without relying on non-guaranteed ways.
|
||||
for _ in range(10):
|
||||
time.sleep(2)
|
||||
status = self.get_status(uuid)
|
||||
if status in ["running", "inmigrate"]:
|
||||
return status
|
||||
logger.warning(
|
||||
"Timeout on VM's status. Shutting down VM %s", uuid
|
||||
)
|
||||
self.stop(uuid)
|
||||
# TODO: What more should we do? The VM can still continue to run in the background.
|
||||
# If we have the pid of the VM we can kill it via the OS.
|
||||
|
||||
def execute_command(self, uuid, command, **kwargs):
|
||||
# execute_command -> success?, output
|
||||
try:
|
||||
with VMQMPHandles(os.path.join(self.vmm_backend, uuid)) as (
|
||||
sock_handle,
|
||||
file_handle,
|
||||
):
|
||||
command_to_execute = {"execute": command, **kwargs}
|
||||
sock_handle.sendall(
|
||||
json.dumps(command_to_execute).encode("utf-8")
|
||||
)
|
||||
output = file_handle.readline()
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Error occurred while executing command and getting valid output from qmp"
|
||||
)
|
||||
else:
|
||||
try:
|
||||
output = json.loads(output)
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"QMP Output isn't valid JSON. %s", output
|
||||
)
|
||||
else:
|
||||
return "return" in output, output
|
||||
return False, None
|
||||
|
||||
def stop(self, uuid):
|
||||
success, output = self.execute_command(
|
||||
command="quit", uuid=uuid
|
||||
)
|
||||
return success
|
||||
|
||||
def get_status(self, uuid):
|
||||
success, output = self.execute_command(
|
||||
command="query-status", uuid=uuid
|
||||
)
|
||||
if success:
|
||||
return output["return"]["status"]
|
||||
else:
|
||||
# TODO: Think about this for a little more
|
||||
return "STOPPED"
|
||||
|
||||
def discover(self):
|
||||
vms = [
|
||||
uuid
|
||||
for uuid in os.listdir(self.vmm_backend)
|
||||
if not isdir(join_path(self.vmm_backend, uuid))
|
||||
]
|
||||
return vms
|
||||
|
||||
def get_vnc(self, uuid):
|
||||
success, output = self.execute_command(
|
||||
uuid, command="query-vnc"
|
||||
)
|
||||
if success:
|
||||
return output["return"]["service"]
|
||||
return None
|
||||
|
||||
def transfer(self, src_uuid, destination_sock_path, host):
|
||||
p = TransferVM(
|
||||
src_uuid,
|
||||
destination_sock_path,
|
||||
socket_dir=self.socket_dir,
|
||||
host=host,
|
||||
)
|
||||
p.start()
|
||||
|
||||
# TODO: the following method should clean things that went wrong
|
||||
# e.g. if a VM migration fails or doesn't start for a long time
|
||||
# (i.e. 15 minutes), we should stop the waiting VM.
|
||||
def maintenace(self):
|
||||
pass
|
||||