Move all files to _etc_based

This commit is contained in:
Nico Schottelius 2020-04-02 19:29:08 +02:00
commit 3cf3439f1c
116 changed files with 1 additions and 0 deletions

View file

@@ -0,0 +1,2 @@
class UncloudException(Exception):
pass

View file

@@ -0,0 +1,12 @@
# ucloud-api
[![Project Status: WIP Initial development is in progress, but there has not yet been a stable, usable release suitable for the public.](https://www.repostatus.org/badges/latest/wip.svg)](https://www.repostatus.org/#wip)
## Installation
**Make sure you have Python >= 3.5 and Pipenv installed.**
1. Clone the repository and `cd` into it.
2. Run the following commands
- `pipenv install`
- `pipenv shell`
- `python main.py`
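
Once the API is running, the endpoints registered further down in this commit can be exercised over plain HTTP. A minimal sketch (assuming the API was started on port 5000 and is reachable on localhost; adjust host and port to your setup):

```python
# Hypothetical smoke test against a locally running uncloud API.
import requests

# /image/list-public is a GET endpoint and needs no OTP credentials.
r = requests.get('http://localhost:5000/image/list-public')
print(r.status_code, r.json())   # e.g. 200 {'images': [...]}
```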

View file

@@ -0,0 +1,3 @@
import logging
logger = logging.getLogger(__name__)

View file

@@ -0,0 +1,59 @@
import os
from uncloud.common.shared import shared
class Optional:
pass
class Field:
def __init__(self, _name, _type, _value=None):
self.name = _name
self.value = _value
self.type = _type
self.__errors = []
def validation(self):
return True
def is_valid(self):
if self.value == KeyError:
self.add_error(
"'{}' field is a required field".format(self.name)
)
else:
if isinstance(self.value, Optional):
pass
elif not isinstance(self.value, self.type):
self.add_error(
"Incorrect Type for '{}' field".format(self.name)
)
else:
self.validation()
if self.__errors:
return False
return True
def get_errors(self):
return self.__errors
def add_error(self, error):
self.__errors.append(error)
class VmUUIDField(Field):
def __init__(self, data):
self.uuid = data.get("uuid", KeyError)
super().__init__("uuid", str, self.uuid)
self.validation = self.vm_uuid_validation
def vm_uuid_validation(self):
r = shared.etcd_client.get(
os.path.join(shared.settings["etcd"]["vm_prefix"], self.uuid)
)
if not r:
self.add_error("VM with uuid {} does not exist".format(self.uuid))

View file

@@ -0,0 +1,19 @@
import json
import os
from uuid import uuid4
from uncloud.common.shared import shared
data = {
'is_public': True,
'type': 'ceph',
'name': 'images',
'description': 'first ever public image-store',
'attributes': {'list': [], 'key': [], 'pool': 'images'},
}
shared.etcd_client.put(
os.path.join(shared.settings['etcd']['image_store_prefix'], uuid4().hex),
json.dumps(data),
)

View file

@@ -0,0 +1,148 @@
import binascii
import ipaddress
import random
import logging
import requests
from pyotp import TOTP
from uncloud.common.shared import shared
logger = logging.getLogger(__name__)
def check_otp(name, realm, token):
try:
data = {
"auth_name": shared.settings["otp"]["auth_name"],
"auth_token": TOTP(shared.settings["otp"]["auth_seed"]).now(),
"auth_realm": shared.settings["otp"]["auth_realm"],
"name": name,
"realm": realm,
"token": token,
}
except binascii.Error as err:
logger.error(
"Cannot compute OTP for seed: {}".format(
shared.settings["otp"]["auth_seed"]
)
)
return 400
response = requests.post(
shared.settings["otp"]["verification_controller_url"], json=data
)
return response.status_code
def resolve_vm_name(name, owner):
"""Return the UUID of the virtual machine with name == name and owner == owner.
Input: name of the VM, owner of the VM.
Output: UUID of the VM if found, otherwise None
"""
result = next(
filter(
lambda vm: vm.value["owner"] == owner
and vm.value["name"] == name,
shared.vm_pool.vms,
),
None,
)
if result:
return result.key.split("/")[-1]
return None
def resolve_image_name(name, etcd_client):
"""Return the image UUID given its name and its store.
* If the provided name is not in the correct format,
i.e. {store_name}:{image_name}, raise ValueError
* If no such image is found, raise KeyError
"""
separator = ":"
# Ensure the user/program passed a valid name of type string
try:
store_name_and_image_name = name.split(separator)
"""
Examples of where unpacking works and where it raises an exception:
"images:alpine" --> ["images", "alpine"]
"images" --> ["images"]; raises an exception (not enough values to unpack)
"images:alpine:meow" --> ["images", "alpine", "meow"]; raises an exception
(too many values to unpack)
"""
store_name, image_name = store_name_and_image_name
except Exception:
raise ValueError(
"Image name is not in the correct format, i.e. {store_name}:{image_name}"
)
images = etcd_client.get_prefix(
shared.settings["etcd"]["image_prefix"], value_in_json=True
)
# Try to find image with name == image_name and store_name == store_name
try:
image = next(
filter(
lambda im: im.value["name"] == image_name
and im.value["store_name"] == store_name,
images,
)
)
except StopIteration:
raise KeyError("No image with name {} found.".format(name))
else:
image_uuid = image.key.split("/")[-1]
return image_uuid
def random_bytes(num=6):
return [random.randrange(256) for _ in range(num)]
def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"):
mac = random_bytes()
if oui:
if type(oui) == str:
oui = [int(chunk) for chunk in oui.split(separator)]
mac = oui + random_bytes(num=6 - len(oui))
else:
if multicast:
mac[0] |= 1 # set bit 0
else:
mac[0] &= ~1 # clear bit 0
if uaa:
mac[0] &= ~(1 << 1) # clear bit 1
else:
mac[0] |= 1 << 1 # set bit 1
return separator.join(byte_fmt % b for b in mac)
def mac2ipv6(mac, prefix):
# only accept MACs separated by a colon
parts = mac.split(":")
# modify parts to match IPv6 value
parts.insert(3, "ff")
parts.insert(4, "fe")
parts[0] = "%x" % (int(parts[0], 16) ^ 2)
# format output
ipv6_parts = [str(0)] * 4
for i in range(0, len(parts), 2):
ipv6_parts.append("".join(parts[i : i + 2]))
lower_part = ipaddress.IPv6Address(":".join(ipv6_parts))
prefix = ipaddress.IPv6Address(prefix)
return str(prefix + int(lower_part))
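
A short usage sketch (not part of this commit) of the two MAC helpers above, showing how a generated MAC maps to the EUI-64 style address used for VM interfaces; the prefix value is hypothetical:

```python
from uncloud.api.helper import generate_mac, mac2ipv6

mac = generate_mac()          # random, locally administered, unicast MAC
print(mac)                    # e.g. '52:xx:xx:xx:xx:xx'
# Build the interface address inside a /64: ff:fe is inserted in the middle
# of the MAC and the universal/local bit of the first octet is flipped.
print(mac2ipv6(mac, 'fd00::'))
```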

View file

@@ -0,0 +1,600 @@
import json
import pynetbox
import logging
import argparse
from uuid import uuid4
from os.path import join as join_path
from flask import Flask, request
from flask_restful import Resource, Api
from werkzeug.exceptions import HTTPException
from uncloud.common.shared import shared
from uncloud.common import counters
from uncloud.common.vm import VMStatus
from uncloud.common.request import RequestEntry, RequestType
from uncloud.api import schemas
from uncloud.api.helper import generate_mac, mac2ipv6
from uncloud import UncloudException
logger = logging.getLogger(__name__)
app = Flask(__name__)
api = Api(app)
app.logger.handlers.clear()
arg_parser = argparse.ArgumentParser('api', add_help=False)
arg_parser.add_argument('--port', '-p')
@app.errorhandler(Exception)
def handle_exception(e):
app.logger.error(e)
# pass through HTTP errors
if isinstance(e, HTTPException):
return e
# now you're handling non-HTTP exceptions only
return {'message': 'Server Error'}, 500
class CreateVM(Resource):
"""API Request to Handle Creation of VM"""
@staticmethod
def post():
data = request.json
validator = schemas.CreateVMSchema(data)
if validator.is_valid():
vm_uuid = uuid4().hex
vm_key = join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
specs = {
'cpu': validator.specs['cpu'],
'ram': validator.specs['ram'],
'os-ssd': validator.specs['os-ssd'],
'hdd': validator.specs['hdd'],
}
macs = [generate_mac() for _ in range(len(data['network']))]
tap_ids = [
counters.increment_etcd_counter(
shared.etcd_client, shared.settings['etcd']['tap_counter']
)
for _ in range(len(data['network']))
]
vm_entry = {
'name': data['vm_name'],
'owner': data['name'],
'owner_realm': data['realm'],
'specs': specs,
'hostname': '',
'status': VMStatus.stopped,
'image_uuid': validator.image_uuid,
'log': [],
'vnc_socket': '',
'network': list(zip(data['network'], macs, tap_ids)),
'metadata': {'ssh-keys': []},
'in_migration': False,
}
shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
# Create ScheduleVM Request
r = RequestEntry.from_scratch(
type=RequestType.ScheduleVM,
uuid=vm_uuid,
request_prefix=shared.settings['etcd']['request_prefix'],
)
shared.request_pool.put(r)
return {'message': 'VM Creation Queued'}, 200
return validator.get_errors(), 400
class VmStatus(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.VMStatusSchema(data)
if validator.is_valid():
vm = shared.vm_pool.get(
join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
)
vm_value = vm.value.copy()
vm_value['ip'] = []
for network_mac_and_tap in vm.network:
network_name, mac, tap = network_mac_and_tap
network = shared.etcd_client.get(
join_path(
shared.settings['etcd']['network_prefix'],
data['name'],
network_name,
),
value_in_json=True,
)
ipv6_addr = (
network.value.get('ipv6').split('::')[0] + '::'
)
vm_value['ip'].append(mac2ipv6(mac, ipv6_addr))
vm.value = vm_value
return vm.value
else:
return validator.get_errors(), 400
class CreateImage(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.CreateImageSchema(data)
if validator.is_valid():
file_entry = shared.etcd_client.get(
join_path(shared.settings['etcd']['file_prefix'], data['uuid'])
)
file_entry_value = json.loads(file_entry.value)
image_entry_json = {
'status': 'TO_BE_CREATED',
'owner': file_entry_value['owner'],
'filename': file_entry_value['filename'],
'name': data['name'],
'store_name': data['image_store'],
'visibility': 'public',
}
shared.etcd_client.put(
join_path(
shared.settings['etcd']['image_prefix'], data['uuid']
),
json.dumps(image_entry_json),
)
return {'message': 'Image queued for creation.'}
return validator.get_errors(), 400
class ListPublicImages(Resource):
@staticmethod
def get():
images = shared.etcd_client.get_prefix(
shared.settings['etcd']['image_prefix'], value_in_json=True
)
r = {'images': []}
for image in images:
image_key = '{}:{}'.format(
image.value['store_name'], image.value['name']
)
r['images'].append(
{'name': image_key, 'status': image.value['status']}
)
return r, 200
class VMAction(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.VmActionSchema(data)
if validator.is_valid():
vm_entry = shared.vm_pool.get(
join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
)
action = data['action']
if action == 'start':
action = 'schedule'
if action == 'delete' and vm_entry.hostname == '':
if shared.storage_handler.is_vm_image_exists(
vm_entry.uuid
):
r_status = shared.storage_handler.delete_vm_image(
vm_entry.uuid
)
if r_status:
shared.etcd_client.client.delete(vm_entry.key)
return {'message': 'VM successfully deleted'}
else:
logger.error(
'Error occurred while deleting VM'
)
return {'message': 'VM deletion unsuccessful'}
else:
shared.etcd_client.client.delete(vm_entry.key)
return {'message': 'VM successfully deleted'}
r = RequestEntry.from_scratch(
type='{}VM'.format(action.title()),
uuid=data['uuid'],
hostname=vm_entry.hostname,
request_prefix=shared.settings['etcd']['request_prefix'],
)
shared.request_pool.put(r)
return (
{'message': 'VM {} Queued'.format(action.title())},
200,
)
else:
return validator.get_errors(), 400
class VMMigration(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.VmMigrationSchema(data)
if validator.is_valid():
vm = shared.vm_pool.get(data['uuid'])
r = RequestEntry.from_scratch(
type=RequestType.InitVMMigration,
uuid=vm.uuid,
hostname=join_path(
shared.settings['etcd']['host_prefix'],
validator.destination.value,
),
request_prefix=shared.settings['etcd']['request_prefix'],
)
shared.request_pool.put(r)
return (
{'message': 'VM Migration Initialization Queued'},
200,
)
else:
return validator.get_errors(), 400
class ListUserVM(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.OTPSchema(data)
if validator.is_valid():
vms = shared.etcd_client.get_prefix(
shared.settings['etcd']['vm_prefix'], value_in_json=True
)
return_vms = []
user_vms = filter(
lambda v: v.value['owner'] == data['name'], vms
)
for vm in user_vms:
return_vms.append(
{
'name': vm.value['name'],
'vm_uuid': vm.key.split('/')[-1],
'specs': vm.value['specs'],
'status': vm.value['status'],
'hostname': vm.value['hostname'],
'vnc_socket': vm.value.get('vnc_socket', None),
}
)
if return_vms:
return {'message': return_vms}, 200
return {'message': 'No VM found'}, 404
else:
return validator.get_errors(), 400
class ListUserFiles(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.OTPSchema(data)
if validator.is_valid():
files = shared.etcd_client.get_prefix(
shared.settings['etcd']['file_prefix'], value_in_json=True
)
return_files = []
user_files = [f for f in files if f.value['owner'] == data['name']]
for file in user_files:
file_uuid = file.key.split('/')[-1]
file = file.value
file['uuid'] = file_uuid
file.pop('sha512sum', None)
file.pop('owner', None)
return_files.append(file)
return {'message': return_files}, 200
else:
return validator.get_errors(), 400
class CreateHost(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.CreateHostSchema(data)
if validator.is_valid():
host_key = join_path(
shared.settings['etcd']['host_prefix'], uuid4().hex
)
host_entry = {
'specs': data['specs'],
'hostname': data['hostname'],
'status': 'DEAD',
'last_heartbeat': '',
}
shared.etcd_client.put(
host_key, host_entry, value_in_json=True
)
return {'message': 'Host Created'}, 200
return validator.get_errors(), 400
class ListHost(Resource):
@staticmethod
def get():
hosts = shared.host_pool.hosts
r = {
host.key: {
'status': host.status,
'specs': host.specs,
'hostname': host.hostname,
}
for host in hosts
}
return r, 200
class GetSSHKeys(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.GetSSHSchema(data)
if validator.is_valid():
if not validator.key_name.value:
# {user_prefix}/{realm}/{name}/key/
etcd_key = join_path(
shared.settings['etcd']['user_prefix'],
data['realm'],
data['name'],
'key',
)
etcd_entry = shared.etcd_client.get_prefix(
etcd_key, value_in_json=True
)
keys = {
key.key.split('/')[-1]: key.value
for key in etcd_entry
}
return {'keys': keys}
else:
# {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = join_path(
shared.settings['etcd']['user_prefix'],
data['realm'],
data['name'],
'key',
data['key_name'],
)
etcd_entry = shared.etcd_client.get(
etcd_key, value_in_json=True
)
if etcd_entry:
return {
'keys': {
etcd_entry.key.split('/')[
-1
]: etcd_entry.value
}
}
else:
return {'keys': {}}
else:
return validator.get_errors(), 400
class AddSSHKey(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.AddSSHSchema(data)
if validator.is_valid():
# {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = join_path(
shared.settings['etcd']['user_prefix'],
data['realm'],
data['name'],
'key',
data['key_name'],
)
etcd_entry = shared.etcd_client.get(
etcd_key, value_in_json=True
)
if etcd_entry:
return {
'message': 'Key with name "{}" already exists'.format(
data['key_name']
)
}
else:
# Key not found. It implies the user hasn't added any key yet.
shared.etcd_client.put(
etcd_key, data['key'], value_in_json=True
)
return {'message': 'Key added successfully'}
else:
return validator.get_errors(), 400
class RemoveSSHKey(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.RemoveSSHSchema(data)
if validator.is_valid():
# {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = join_path(
shared.settings['etcd']['user_prefix'],
data['realm'],
data['name'],
'key',
data['key_name'],
)
etcd_entry = shared.etcd_client.get(
etcd_key, value_in_json=True
)
if etcd_entry:
shared.etcd_client.client.delete(etcd_key)
return {'message': 'Key successfully removed.'}
else:
return {
'message': 'No key with name "{}" exists.'.format(
data['key_name']
)
}
else:
return validator.get_errors(), 400
class CreateNetwork(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.CreateNetwork(data)
if validator.is_valid():
network_entry = {
'id': counters.increment_etcd_counter(
shared.etcd_client, shared.settings['etcd']['vxlan_counter']
),
'type': data['type'],
}
if validator.user.value:
try:
nb = pynetbox.api(
url=shared.settings['netbox']['url'],
token=shared.settings['netbox']['token'],
)
nb_prefix = nb.ipam.prefixes.get(
prefix=shared.settings['network']['prefix']
)
prefix = nb_prefix.available_prefixes.create(
data={
'prefix_length': int(
shared.settings['network']['prefix_length']
),
'description': '{}\'s network "{}"'.format(
data['name'], data['network_name']
),
'is_pool': True,
}
)
except Exception as err:
app.logger.error(err)
return {
'message': 'Error occurred while creating network.'
}
else:
network_entry['ipv6'] = prefix['prefix']
else:
network_entry['ipv6'] = 'fd00::/64'
network_key = join_path(
shared.settings['etcd']['network_prefix'],
data['name'],
data['network_name'],
)
shared.etcd_client.put(
network_key, network_entry, value_in_json=True
)
return {'message': 'Network successfully added.'}
else:
return validator.get_errors(), 400
class ListUserNetwork(Resource):
@staticmethod
def post():
data = request.json
validator = schemas.OTPSchema(data)
if validator.is_valid():
prefix = join_path(
shared.settings['etcd']['network_prefix'], data['name']
)
networks = shared.etcd_client.get_prefix(
prefix, value_in_json=True
)
user_networks = []
for net in networks:
net.value['name'] = net.key.split('/')[-1]
user_networks.append(net.value)
return {'networks': user_networks}, 200
else:
return validator.get_errors(), 400
api.add_resource(CreateVM, '/vm/create')
api.add_resource(VmStatus, '/vm/status')
api.add_resource(VMAction, '/vm/action')
api.add_resource(VMMigration, '/vm/migrate')
api.add_resource(CreateImage, '/image/create')
api.add_resource(ListPublicImages, '/image/list-public')
api.add_resource(ListUserVM, '/user/vms')
api.add_resource(ListUserFiles, '/user/files')
api.add_resource(ListUserNetwork, '/user/networks')
api.add_resource(AddSSHKey, '/user/add-ssh')
api.add_resource(RemoveSSHKey, '/user/remove-ssh')
api.add_resource(GetSSHKeys, '/user/get-ssh')
api.add_resource(CreateHost, '/host/create')
api.add_resource(ListHost, '/host/list')
api.add_resource(CreateNetwork, '/network/create')
def main(arguments):
debug = arguments['debug']
port = arguments['port']
try:
image_stores = list(
shared.etcd_client.get_prefix(
shared.settings['etcd']['image_store_prefix'], value_in_json=True
)
)
except KeyError:
image_stores = False
# Do not inject default values that might be very wrong
# fail when required, not before
#
# if not image_stores:
# data = {
# 'is_public': True,
# 'type': 'ceph',
# 'name': 'images',
# 'description': 'first ever public image-store',
# 'attributes': {'list': [], 'key': [], 'pool': 'images'},
# }
# shared.etcd_client.put(
# join_path(
# shared.settings['etcd']['image_store_prefix'], uuid4().hex
# ),
# json.dumps(data),
# )
try:
app.run(host='::', port=port, debug=debug)
except OSError as e:
raise UncloudException('Failed to start Flask: {}'.format(e))
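
For reference, a sketch of the JSON body that `/vm/create` (CreateVM plus CreateVMSchema) expects; all values below are hypothetical:

```python
# Hypothetical /vm/create payload: OTP credentials plus the VM description.
payload = {
    'name': 'alice',                 # username (OTP)
    'realm': 'example-realm',        # realm (OTP)
    'token': '123456',               # current TOTP token
    'vm_name': 'test-vm',
    'image': 'images:alpine',        # {store_name}:{image_name}
    'network': ['mynet'],            # networks owned by 'alice'
    'specs': {'cpu': 1, 'ram': '1 GB', 'os-ssd': '10 GB', 'hdd': []},
}
# requests.post('http://localhost:5000/vm/create', json=payload)
```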

View file

@@ -0,0 +1,557 @@
"""
This module contains classes that validate and intercept/modify
data coming from uncloud-cli (the user).
It was primarily developed as an alternative to the argument parser
of Flask-RESTful, which is going to be deprecated. I also tried
marshmallow for this purpose, but it was overkill (it does
validation + serialization + deserialization) and a little
inflexible for our purpose.
"""
# TODO: Fix the error message shown when the VM mentioned by the user
# (referred to by name) does not exist.
#
# Currently, it says uuid is a required field.
import json
import os
import bitmath
from uncloud.common.host import HostStatus
from uncloud.common.vm import VMStatus
from uncloud.common.shared import shared
from . import helper, logger
from .common_fields import Field, VmUUIDField
from .helper import check_otp, resolve_vm_name
class BaseSchema:
def __init__(self, data, fields=None):
_ = data # suppress linter warning
self.__errors = []
if fields is None:
self.fields = []
else:
self.fields = fields
def validation(self):
# custom validation is optional
return True
def is_valid(self):
for field in self.fields:
field.is_valid()
self.add_field_errors(field)
for parent in self.__class__.__bases__:
try:
parent.validation(self)
except AttributeError:
pass
if not self.__errors:
self.validation()
if self.__errors:
return False
return True
def get_errors(self):
return {"message": self.__errors}
def add_field_errors(self, field: Field):
self.__errors += field.get_errors()
def add_error(self, error):
self.__errors.append(error)
class OTPSchema(BaseSchema):
def __init__(self, data: dict, fields=None):
self.name = Field("name", str, data.get("name", KeyError))
self.realm = Field("realm", str, data.get("realm", KeyError))
self.token = Field("token", str, data.get("token", KeyError))
_fields = [self.name, self.realm, self.token]
if fields:
_fields += fields
super().__init__(data=data, fields=_fields)
def validation(self):
if (
check_otp(
self.name.value, self.realm.value, self.token.value
)
!= 200
):
self.add_error("Wrong Credentials")
########################## Image Operations ###############################################
class CreateImageSchema(BaseSchema):
def __init__(self, data):
# Fields
self.uuid = Field("uuid", str, data.get("uuid", KeyError))
self.name = Field("name", str, data.get("name", KeyError))
self.image_store = Field(
"image_store", str, data.get("image_store", KeyError)
)
# Validations
self.uuid.validation = self.file_uuid_validation
self.image_store.validation = self.image_store_name_validation
# All Fields
fields = [self.uuid, self.name, self.image_store]
super().__init__(data, fields)
def file_uuid_validation(self):
file_entry = shared.etcd_client.get(
os.path.join(
shared.settings["etcd"]["file_prefix"], self.uuid.value
)
)
if file_entry is None:
self.add_error(
"Image File with uuid '{}' Not Found".format(
self.uuid.value
)
)
def image_store_name_validation(self):
image_stores = list(
shared.etcd_client.get_prefix(
shared.settings["etcd"]["image_store_prefix"]
)
)
image_store = next(
filter(
lambda s: json.loads(s.value)["name"]
== self.image_store.value,
image_stores,
),
None,
)
if not image_store:
self.add_error(
"Store '{}' does not exist".format(
self.image_store.value
)
)
# Host Operations
class CreateHostSchema(OTPSchema):
def __init__(self, data):
# Fields
self.specs = Field("specs", dict, data.get("specs", KeyError))
self.hostname = Field(
"hostname", str, data.get("hostname", KeyError)
)
# Validation
self.specs.validation = self.specs_validation
fields = [self.hostname, self.specs]
super().__init__(data=data, fields=fields)
def specs_validation(self):
ALLOWED_BASE = 10
_cpu = self.specs.value.get("cpu", KeyError)
_ram = self.specs.value.get("ram", KeyError)
_os_ssd = self.specs.value.get("os-ssd", KeyError)
_hdd = self.specs.value.get("hdd", KeyError)
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
self.add_error(
"You must specify CPU, RAM, OS-SSD and HDD in your specs"
)
return None
try:
parsed_ram = bitmath.parse_string_unsafe(_ram)
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
if parsed_ram.base != ALLOWED_BASE:
self.add_error(
"Your specified RAM is not in correct units"
)
if parsed_os_ssd.base != ALLOWED_BASE:
self.add_error(
"Your specified OS-SSD is not in correct units"
)
if _cpu < 1:
self.add_error("CPU must be at least 1")
if parsed_ram < bitmath.GB(1):
self.add_error("RAM must be at least 1 GB")
if parsed_os_ssd < bitmath.GB(10):
self.add_error("OS-SSD must be at least 10 GB")
parsed_hdd = []
for hdd in _hdd:
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
if _parsed_hdd.base != ALLOWED_BASE:
self.add_error(
"Your specified HDD is not in correct units"
)
break
else:
parsed_hdd.append(str(_parsed_hdd))
except ValueError:
# TODO: Find some good error message
self.add_error("Specs are not correct.")
else:
# store the normalized specs only when validation produced no errors
if not self.get_errors():
self.specs = {
"cpu": _cpu,
"ram": str(parsed_ram),
"os-ssd": str(parsed_os_ssd),
"hdd": parsed_hdd,
}
def validation(self):
if self.realm.value != "ungleich-admin":
self.add_error(
"Invalid Credentials/Insufficient Permission"
)
# VM Operations
class CreateVMSchema(OTPSchema):
def __init__(self, data):
# Fields
self.specs = Field("specs", dict, data.get("specs", KeyError))
self.vm_name = Field(
"vm_name", str, data.get("vm_name", KeyError)
)
self.image = Field("image", str, data.get("image", KeyError))
self.network = Field(
"network", list, data.get("network", KeyError)
)
# Validation
self.image.validation = self.image_validation
self.vm_name.validation = self.vm_name_validation
self.specs.validation = self.specs_validation
self.network.validation = self.network_validation
fields = [self.vm_name, self.image, self.specs, self.network]
super().__init__(data=data, fields=fields)
def image_validation(self):
try:
image_uuid = helper.resolve_image_name(
self.image.value, shared.etcd_client
)
except Exception as e:
logger.exception(
"Cannot resolve image name = %s", self.image.value
)
self.add_error(str(e))
else:
self.image_uuid = image_uuid
def vm_name_validation(self):
if resolve_vm_name(
name=self.vm_name.value, owner=self.name.value
):
self.add_error(
'VM with same name "{}" already exists'.format(
self.vm_name.value
)
)
def network_validation(self):
_network = self.network.value
if _network:
for net in _network:
network = shared.etcd_client.get(
os.path.join(
shared.settings["etcd"]["network_prefix"],
self.name.value,
net,
),
value_in_json=True,
)
if not network:
self.add_error(
"Network with name {} does not exist".format(
net
)
)
def specs_validation(self):
ALLOWED_BASE = 10
_cpu = self.specs.value.get("cpu", KeyError)
_ram = self.specs.value.get("ram", KeyError)
_os_ssd = self.specs.value.get("os-ssd", KeyError)
_hdd = self.specs.value.get("hdd", KeyError)
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
self.add_error(
"You must specify CPU, RAM, OS-SSD and HDD in your specs"
)
return None
try:
parsed_ram = bitmath.parse_string_unsafe(_ram)
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
if parsed_ram.base != ALLOWED_BASE:
self.add_error(
"Your specified RAM is not in correct units"
)
if parsed_os_ssd.base != ALLOWED_BASE:
self.add_error(
"Your specified OS-SSD is not in correct units"
)
if int(_cpu) < 1:
self.add_error("CPU must be at least 1")
if parsed_ram < bitmath.GB(1):
self.add_error("RAM must be at least 1 GB")
if parsed_os_ssd < bitmath.GB(1):
self.add_error("OS-SSD must be at least 1 GB")
parsed_hdd = []
for hdd in _hdd:
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
if _parsed_hdd.base != ALLOWED_BASE:
self.add_error(
"Your specified HDD is not in correct units"
)
break
else:
parsed_hdd.append(str(_parsed_hdd))
except ValueError:
# TODO: Find some good error message
self.add_error("Specs are not correct.")
else:
# store the normalized specs only when validation produced no errors
if not self.get_errors():
self.specs = {
"cpu": _cpu,
"ram": str(parsed_ram),
"os-ssd": str(parsed_os_ssd),
"hdd": parsed_hdd,
}
class VMStatusSchema(OTPSchema):
def __init__(self, data):
data["uuid"] = (
resolve_vm_name(
name=data.get("vm_name", None),
owner=(
data.get("in_support_of", None)
or data.get("name", None)
),
)
or KeyError
)
self.uuid = VmUUIDField(data)
fields = [self.uuid]
super().__init__(data, fields)
def validation(self):
vm = shared.vm_pool.get(self.uuid.value)
if not (
vm.value["owner"] == self.name.value
or self.realm.value == "ungleich-admin"
):
self.add_error("Invalid User")
class VmActionSchema(OTPSchema):
def __init__(self, data):
data["uuid"] = (
resolve_vm_name(
name=data.get("vm_name", None),
owner=(
data.get("in_support_of", None)
or data.get("name", None)
),
)
or KeyError
)
self.uuid = VmUUIDField(data)
self.action = Field("action", str, data.get("action", KeyError))
self.action.validation = self.action_validation
_fields = [self.uuid, self.action]
super().__init__(data=data, fields=_fields)
def action_validation(self):
allowed_actions = ["start", "stop", "delete"]
if self.action.value not in allowed_actions:
self.add_error(
"Invalid Action. Allowed Actions are {}".format(
allowed_actions
)
)
def validation(self):
vm = shared.vm_pool.get(self.uuid.value)
if not (
vm.value["owner"] == self.name.value
or self.realm.value == "ungleich-admin"
):
self.add_error("Invalid User")
if (
self.action.value == "start"
and vm.status == VMStatus.running
and vm.hostname != ""
):
self.add_error("VM Already Running")
if self.action.value == "stop":
if vm.status == VMStatus.stopped:
self.add_error("VM Already Stopped")
elif vm.status != VMStatus.running:
self.add_error("Cannot stop non-running VM")
class VmMigrationSchema(OTPSchema):
def __init__(self, data):
data["uuid"] = (
resolve_vm_name(
name=data.get("vm_name", None),
owner=(
data.get("in_support_of", None)
or data.get("name", None)
),
)
or KeyError
)
self.uuid = VmUUIDField(data)
self.destination = Field(
"destination", str, data.get("destination", KeyError)
)
self.destination.validation = self.destination_validation
fields = [self.destination]
super().__init__(data=data, fields=fields)
def destination_validation(self):
hostname = self.destination.value
host = next(
filter(
lambda h: h.hostname == hostname, shared.host_pool.hosts
),
None,
)
if not host:
self.add_error(
"No Such Host ({}) exists".format(
self.destination.value
)
)
elif host.status != HostStatus.alive:
self.add_error("Destination Host is dead")
else:
self.destination.value = host.key
def validation(self):
vm = shared.vm_pool.get(self.uuid.value)
if not (
vm.value["owner"] == self.name.value
or self.realm.value == "ungleich-admin"
):
self.add_error("Invalid User")
if vm.status != VMStatus.running:
self.add_error("Can't migrate non-running VM")
if vm.hostname == os.path.join(
shared.settings["etcd"]["host_prefix"], self.destination.value
):
self.add_error(
"Destination host cannot be the same as the source host"
)
class AddSSHSchema(OTPSchema):
def __init__(self, data):
self.key_name = Field(
"key_name", str, data.get("key_name", KeyError)
)
self.key = Field("key", str, data.get("key", KeyError))
fields = [self.key_name, self.key]
super().__init__(data=data, fields=fields)
class RemoveSSHSchema(OTPSchema):
def __init__(self, data):
self.key_name = Field(
"key_name", str, data.get("key_name", KeyError)
)
fields = [self.key_name]
super().__init__(data=data, fields=fields)
class GetSSHSchema(OTPSchema):
def __init__(self, data):
self.key_name = Field(
"key_name", str, data.get("key_name", None)
)
fields = [self.key_name]
super().__init__(data=data, fields=fields)
class CreateNetwork(OTPSchema):
def __init__(self, data):
self.network_name = Field("network_name", str, data.get("network_name", KeyError))
self.type = Field("type", str, data.get("type", KeyError))
self.user = Field("user", bool, bool(data.get("user", False)))
self.network_name.validation = self.network_name_validation
self.type.validation = self.network_type_validation
fields = [self.network_name, self.type, self.user]
super().__init__(data, fields=fields)
def network_name_validation(self):
key = os.path.join(shared.settings["etcd"]["network_prefix"], self.name.value, self.network_name.value)
network = shared.etcd_client.get(key, value_in_json=True)
if network:
self.add_error(
"Network with name {} already exists".format(
self.network_name.value
)
)
def network_type_validation(self):
supported_network_types = ["vxlan"]
if self.type.value not in supported_network_types:
self.add_error(
"Unsupported Network Type. Supported network types are {}".format(
supported_network_types
)
)
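
All of the request schemas above follow the same Field/BaseSchema pattern described in the module docstring. A minimal sketch (hypothetical PingSchema, not part of this commit, assuming the uncloud package and its dependencies are importable) of how a request dict flows through it:

```python
from uncloud.api.common_fields import Field
from uncloud.api.schemas import BaseSchema

class PingSchema(BaseSchema):
    def __init__(self, data):
        # required string field; a missing key defaults to KeyError, which
        # Field.is_valid() reports as "required field"
        self.message = Field("message", str, data.get("message", KeyError))
        super().__init__(data, fields=[self.message])

validator = PingSchema({"message": 42})
print(validator.is_valid())    # False: 42 is not a str
print(validator.get_errors())  # {'message': ["Incorrect Type for 'message' field"]}
```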

View file

@@ -0,0 +1,46 @@
import requests
import json
import argparse
import binascii
from pyotp import TOTP
from os.path import join as join_path
from uncloud.common.shared import shared
def get_otp_parser():
otp_parser = argparse.ArgumentParser('otp')
otp_parser.add_argument('--name')
otp_parser.add_argument('--realm')
otp_parser.add_argument('--seed', type=get_token, dest='token', metavar='SEED')
return otp_parser
def load_dump_pretty(content):
if isinstance(content, bytes):
content = content.decode('utf-8')
parsed = json.loads(content)
return json.dumps(parsed, indent=4, sort_keys=True)
def make_request(*args, data=None, request_method=requests.post):
try:
r = request_method(join_path(shared.settings['client']['api_server'], *args), json=data)
except requests.exceptions.RequestException:
print('Error occurred while connecting to API server.')
else:
try:
print(load_dump_pretty(r.content))
except Exception:
print('Error occurred while getting output from api server.')
def get_token(seed):
if seed is not None:
try:
token = TOTP(seed).now()
except binascii.Error:
raise argparse.ArgumentTypeError('Invalid seed')
else:
return token

View file

@@ -0,0 +1,45 @@
import requests
from uncloud.cli.helper import make_request, get_otp_parser
from uncloud.common.parser import BaseParser
class HostParser(BaseParser):
def __init__(self):
super().__init__('host')
def create(self, **kwargs):
p = self.subparser.add_parser('create', parents=[get_otp_parser()], **kwargs)
p.add_argument('--hostname', required=True)
p.add_argument('--cpu', required=True, type=int)
p.add_argument('--ram', required=True)
p.add_argument('--os-ssd', required=True)
p.add_argument('--hdd', default=list())
def list(self, **kwargs):
self.subparser.add_parser('list', **kwargs)
parser = HostParser()
arg_parser = parser.arg_parser
def main(**kwargs):
subcommand = kwargs.pop('host_subcommand')
if not subcommand:
arg_parser.print_help()
else:
request_method = requests.post
data = None
if subcommand == 'create':
kwargs['specs'] = {
'cpu': kwargs.pop('cpu'),
'ram': kwargs.pop('ram'),
'os-ssd': kwargs.pop('os_ssd'),
'hdd': kwargs.pop('hdd')
}
data = kwargs
elif subcommand == 'list':
request_method = requests.get
make_request('host', subcommand, data=data, request_method=request_method)

View file

@@ -0,0 +1,38 @@
import requests
from uncloud.cli.helper import make_request
from uncloud.common.parser import BaseParser
class ImageParser(BaseParser):
def __init__(self):
super().__init__('image')
def create(self, **kwargs):
p = self.subparser.add_parser('create', **kwargs)
p.add_argument('--name', required=True)
p.add_argument('--uuid', required=True)
p.add_argument('--image-store', required=True, dest='image_store')
def list(self, **kwargs):
self.subparser.add_parser('list', **kwargs)
parser = ImageParser()
arg_parser = parser.arg_parser
def main(**kwargs):
subcommand = kwargs.pop('image_subcommand')
if not subcommand:
arg_parser.print_help()
else:
data = None
request_method = requests.post
if subcommand == 'list':
subcommand = 'list-public'
request_method = requests.get
elif subcommand == 'create':
data = kwargs
make_request('image', subcommand, data=data, request_method=request_method)

View file

@@ -0,0 +1,23 @@
#!/usr/bin/env python3
import argparse
import importlib
arg_parser = argparse.ArgumentParser('cli', add_help=False)
subparser = arg_parser.add_subparsers(dest='subcommand')
for component in ['user', 'host', 'image', 'network', 'vm']:
module = importlib.import_module('uncloud.cli.{}'.format(component))
parser = getattr(module, 'arg_parser')
subparser.add_parser(name=parser.prog, parents=[parser])
def main(arguments):
if not arguments['subcommand']:
arg_parser.print_help()
else:
name = arguments.pop('subcommand')
arguments.pop('debug')
mod = importlib.import_module('uncloud.cli.{}'.format(name))
_main = getattr(mod, 'main')
_main(**arguments)

View file

@@ -0,0 +1,32 @@
import requests
from uncloud.cli.helper import make_request, get_otp_parser
from uncloud.common.parser import BaseParser
class NetworkParser(BaseParser):
def __init__(self):
super().__init__('network')
def create(self, **kwargs):
p = self.subparser.add_parser('create', parents=[get_otp_parser()], **kwargs)
p.add_argument('--network-name', required=True)
p.add_argument('--network-type', required=True, dest='type')
p.add_argument('--user', action='store_true')
parser = NetworkParser()
arg_parser = parser.arg_parser
def main(**kwargs):
subcommand = kwargs.pop('network_subcommand')
if not subcommand:
arg_parser.print_help()
else:
data = None
request_method = requests.post
if subcommand == 'create':
data = kwargs
make_request('network', subcommand, data=data, request_method=request_method)

View file

@@ -0,0 +1,41 @@
from uncloud.cli.helper import make_request, get_otp_parser
from uncloud.common.parser import BaseParser
class UserParser(BaseParser):
def __init__(self):
super().__init__('user')
def files(self, **kwargs):
self.subparser.add_parser('files', parents=[get_otp_parser()], **kwargs)
def vms(self, **kwargs):
self.subparser.add_parser('vms', parents=[get_otp_parser()], **kwargs)
def networks(self, **kwargs):
self.subparser.add_parser('networks', parents=[get_otp_parser()], **kwargs)
def add_ssh(self, **kwargs):
p = self.subparser.add_parser('add-ssh', parents=[get_otp_parser()], **kwargs)
p.add_argument('--key-name', required=True)
p.add_argument('--key', required=True)
def get_ssh(self, **kwargs):
p = self.subparser.add_parser('get-ssh', parents=[get_otp_parser()], **kwargs)
p.add_argument('--key-name', default='')
def remove_ssh(self, **kwargs):
p = self.subparser.add_parser('remove-ssh', parents=[get_otp_parser()], **kwargs)
p.add_argument('--key-name', required=True)
parser = UserParser()
arg_parser = parser.arg_parser
def main(**kwargs):
subcommand = kwargs.pop('user_subcommand')
if not subcommand:
arg_parser.print_help()
else:
make_request('user', subcommand, data=kwargs)

View file

@@ -0,0 +1,62 @@
from uncloud.common.parser import BaseParser
from uncloud.cli.helper import make_request, get_otp_parser
class VMParser(BaseParser):
def __init__(self):
super().__init__('vm')
def start(self, **args):
p = self.subparser.add_parser('start', parents=[get_otp_parser()], **args)
p.add_argument('--vm-name', required=True)
def stop(self, **args):
p = self.subparser.add_parser('stop', parents=[get_otp_parser()], **args)
p.add_argument('--vm-name', required=True)
def status(self, **args):
p = self.subparser.add_parser('status', parents=[get_otp_parser()], **args)
p.add_argument('--vm-name', required=True)
def delete(self, **args):
p = self.subparser.add_parser('delete', parents=[get_otp_parser()], **args)
p.add_argument('--vm-name', required=True)
def migrate(self, **args):
p = self.subparser.add_parser('migrate', parents=[get_otp_parser()], **args)
p.add_argument('--vm-name', required=True)
p.add_argument('--destination', required=True)
def create(self, **args):
p = self.subparser.add_parser('create', parents=[get_otp_parser()], **args)
p.add_argument('--cpu', required=True)
p.add_argument('--ram', required=True)
p.add_argument('--os-ssd', required=True)
p.add_argument('--hdd', action='append', default=list())
p.add_argument('--image', required=True)
p.add_argument('--network', action='append', default=[])
p.add_argument('--vm-name', required=True)
parser = VMParser()
arg_parser = parser.arg_parser
def main(**kwargs):
subcommand = kwargs.pop('vm_subcommand')
if not subcommand:
arg_parser.print_help()
else:
data = kwargs
endpoint = subcommand
if subcommand in ['start', 'stop', 'delete']:
endpoint = 'action'
data['action'] = subcommand
elif subcommand == 'create':
kwargs['specs'] = {
'cpu': kwargs.pop('cpu'),
'ram': kwargs.pop('ram'),
'os-ssd': kwargs.pop('os_ssd'),
'hdd': kwargs.pop('hdd')
}
make_request('vm', endpoint, data=data)

View file

@@ -0,0 +1,23 @@
import argparse
import etcd3
from uncloud.common.etcd_wrapper import Etcd3Wrapper
arg_parser = argparse.ArgumentParser('client', add_help=False)
arg_parser.add_argument('--dump-etcd-contents-prefix', help="Dump contents below the given prefix")
def dump_etcd_contents(prefix):
etcd = Etcd3Wrapper()
for k,v in etcd.get_prefix_raw(prefix):
k = k.decode('utf-8')
v = v.decode('utf-8')
print("{} = {}".format(k,v))
# print("{} = {}".format(k,v))
# for k,v in etcd.get_prefix(prefix):
#
print("done")
def main(arguments):
if 'dump_etcd_contents_prefix' in arguments:
dump_etcd_contents(prefix=arguments['dump_etcd_contents_prefix'])

View file

@@ -0,0 +1,3 @@
import logging
logger = logging.getLogger(__name__)

View file

@@ -0,0 +1,26 @@
from .etcd_wrapper import EtcdEntry
class SpecificEtcdEntryBase:
def __init__(self, e: EtcdEntry):
self.key = e.key
for k in e.value.keys():
self.__setattr__(k, e.value[k])
def original_keys(self):
r = dict(self.__dict__)
if "key" in r:
del r["key"]
return r
@property
def value(self):
return self.original_keys()
@value.setter
def value(self, v):
self.__dict__ = v
def __repr__(self):
return str(dict(self.__dict__))

View file

@@ -0,0 +1,26 @@
from uncloud.common.shared import shared
from pyotp import TOTP
def get_token(seed):
if seed is not None:
try:
token = TOTP(seed).now()
except Exception:
raise Exception('Invalid seed')
else:
return token
def resolve_otp_credentials(kwargs):
d = {
'name': shared.settings['client']['name'],
'realm': shared.settings['client']['realm'],
'token': get_token(shared.settings['client']['seed'])
}
for k, v in d.items():
if k in kwargs and kwargs[k] is None:
kwargs.update({k: v})
return d

View file

@@ -0,0 +1,21 @@
from .etcd_wrapper import Etcd3Wrapper
def increment_etcd_counter(etcd_client: Etcd3Wrapper, key):
kv = etcd_client.get(key)
if kv:
counter = int(kv.value)
counter = counter + 1
else:
counter = 1
etcd_client.put(key, str(counter))
return counter
def get_etcd_counter(etcd_client: Etcd3Wrapper, key):
kv = etcd_client.get(key)
if kv:
return int(kv.value)
return None

View file

@@ -0,0 +1,75 @@
import etcd3
import json
from functools import wraps
from uncloud import UncloudException
from uncloud.common import logger
class EtcdEntry:
def __init__(self, meta_or_key, value, value_in_json=False):
if hasattr(meta_or_key, 'key'):
# if meta has attr 'key' then get it
self.key = meta_or_key.key.decode('utf-8')
else:
# otherwise meta is the 'key'
self.key = meta_or_key
self.value = value.decode('utf-8')
if value_in_json:
self.value = json.loads(self.value)
def readable_errors(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except etcd3.exceptions.ConnectionFailedError:
raise UncloudException('Cannot connect to etcd: is etcd running as configured in uncloud.conf?')
except etcd3.exceptions.ConnectionTimeoutError as err:
raise etcd3.exceptions.ConnectionTimeoutError('etcd connection timeout.') from err
except Exception:
logger.exception('Some etcd error occurred. See syslog for details.')
return wrapper
class Etcd3Wrapper:
@readable_errors
def __init__(self, *args, **kwargs):
self.client = etcd3.client(*args, **kwargs)
@readable_errors
def get(self, *args, value_in_json=False, **kwargs):
_value, _key = self.client.get(*args, **kwargs)
if _key is None or _value is None:
return None
return EtcdEntry(_key, _value, value_in_json=value_in_json)
@readable_errors
def put(self, *args, value_in_json=False, **kwargs):
_key, _value = args
if value_in_json:
_value = json.dumps(_value)
if not isinstance(_key, str):
_key = _key.decode('utf-8')
return self.client.put(_key, _value, **kwargs)
@readable_errors
def get_prefix(self, *args, value_in_json=False, raise_exception=True, **kwargs):
event_iterator = self.client.get_prefix(*args, **kwargs)
for e in event_iterator:
yield EtcdEntry(*e[::-1], value_in_json=value_in_json)
@readable_errors
def watch_prefix(self, key, raise_exception=True, value_in_json=False):
event_iterator, cancel = self.client.watch_prefix(key)
for e in event_iterator:
if hasattr(e, '_event'):
e = e._event
if e.type == e.PUT:
yield EtcdEntry(e.kv.key, e.kv.value, value_in_json=value_in_json)
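
A brief sketch (not part of this commit) of the wrapper's `value_in_json` convenience, assuming an etcd instance is reachable with the default `etcd3.client()` settings:

```python
from uncloud.common.etcd_wrapper import Etcd3Wrapper

etcd = Etcd3Wrapper()                                        # wraps etcd3.client()
etcd.put('/test/hello', {'answer': 42}, value_in_json=True)  # value is json.dumps()'d
entry = etcd.get('/test/hello', value_in_json=True)          # value is json.loads()'d
print(entry.key, entry.value['answer'])                      # /test/hello 42
```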

View file

@@ -0,0 +1,69 @@
import time
from datetime import datetime
from os.path import join
from typing import List
from .classes import SpecificEtcdEntryBase
class HostStatus:
"""Possible Statuses of uncloud host."""
alive = "ALIVE"
dead = "DEAD"
class HostEntry(SpecificEtcdEntryBase):
"""Represents Host Entry Structure and its supporting methods."""
def __init__(self, e):
self.specs = None # type: dict
self.hostname = None # type: str
self.status = None # type: str
self.last_heartbeat = None # type: str
super().__init__(e)
def update_heartbeat(self):
self.status = HostStatus.alive
self.last_heartbeat = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
def is_alive(self):
last_heartbeat = datetime.strptime(
self.last_heartbeat, "%Y-%m-%d %H:%M:%S"
)
delta = datetime.utcnow() - last_heartbeat
if delta.total_seconds() > 60:
return False
return True
def declare_dead(self):
self.status = HostStatus.dead
self.last_heartbeat = time.strftime("%Y-%m-%d %H:%M:%S")
class HostPool:
def __init__(self, etcd_client, host_prefix):
self.client = etcd_client
self.prefix = host_prefix
@property
def hosts(self) -> List[HostEntry]:
_hosts = self.client.get_prefix(self.prefix, value_in_json=True)
return [HostEntry(host) for host in _hosts]
def get(self, key):
if not key.startswith(self.prefix):
key = join(self.prefix, key)
v = self.client.get(key, value_in_json=True)
if v:
return HostEntry(v)
return None
def put(self, obj: HostEntry):
self.client.put(obj.key, obj.value, value_in_json=True)
def by_status(self, status, _hosts=None):
if _hosts is None:
_hosts = self.hosts
return list(filter(lambda x: x.status == status, _hosts))

View file

@@ -0,0 +1,70 @@
import subprocess as sp
import random
import logging
logger = logging.getLogger(__name__)
def random_bytes(num=6):
return [random.randrange(256) for _ in range(num)]
def generate_mac(
uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
):
mac = random_bytes()
if oui:
if type(oui) == str:
oui = [int(chunk) for chunk in oui.split(separator)]
mac = oui + random_bytes(num=6 - len(oui))
else:
if multicast:
mac[0] |= 1 # set bit 0
else:
mac[0] &= ~1 # clear bit 0
if uaa:
mac[0] &= ~(1 << 1) # clear bit 1
else:
mac[0] |= 1 << 1 # set bit 1
return separator.join(byte_fmt % b for b in mac)
def create_dev(script, _id, dev, ip=None):
command = [
"sudo",
"-p",
"Enter password to create network devices for vm: ",
script,
str(_id),
dev,
]
if ip:
command.append(ip)
try:
output = sp.check_output(command, stderr=sp.PIPE)
except Exception:
logger.exception("Creation of interface %s failed.", dev)
return None
else:
return output.decode("utf-8").strip()
def delete_network_interface(iface):
try:
sp.check_output(
[
"sudo",
"-p",
"Enter password to remove {} network device: ".format(
iface
),
"ip",
"link",
"del",
iface,
],
stderr=sp.PIPE,
)
except Exception:
logger.exception("Deletion of interface %s failed", iface)

View file

@@ -0,0 +1,13 @@
import argparse
class BaseParser:
def __init__(self, command):
self.arg_parser = argparse.ArgumentParser(command, add_help=False)
self.subparser = self.arg_parser.add_subparsers(dest='{}_subcommand'.format(command))
self.common_args = {'add_help': False}
methods = [attr for attr in dir(self) if not attr.startswith('__')
and type(getattr(self, attr)).__name__ == 'method']
for method in methods:
getattr(self, method)(**self.common_args)

View file

@@ -0,0 +1,46 @@
import json
from os.path import join
from uuid import uuid4
from uncloud.common.etcd_wrapper import EtcdEntry
from uncloud.common.classes import SpecificEtcdEntryBase
class RequestType:
CreateVM = "CreateVM"
ScheduleVM = "ScheduleVM"
StartVM = "StartVM"
StopVM = "StopVM"
InitVMMigration = "InitVMMigration"
TransferVM = "TransferVM"
DeleteVM = "DeleteVM"
class RequestEntry(SpecificEtcdEntryBase):
def __init__(self, e):
self.destination_sock_path = None
self.destination_host_key = None
self.type = None # type: str
self.migration = None # type: bool
self.destination = None # type: str
self.uuid = None # type: str
self.hostname = None # type: str
super().__init__(e)
@classmethod
def from_scratch(cls, request_prefix, **kwargs):
e = EtcdEntry(meta_or_key=join(request_prefix, uuid4().hex),
value=json.dumps(kwargs).encode('utf-8'), value_in_json=True)
return cls(e)
class RequestPool:
def __init__(self, etcd_client, request_prefix):
self.client = etcd_client
self.prefix = request_prefix
def put(self, obj: RequestEntry):
if not obj.key.startswith(self.prefix):
obj.key = join(self.prefix, obj.key)
self.client.put(obj.key, obj.value, value_in_json=True)

View file

@@ -0,0 +1,41 @@
import bitmath
from marshmallow import fields, Schema
class StorageUnit(fields.Field):
def _serialize(self, value, attr, obj, **kwargs):
return str(value)
def _deserialize(self, value, attr, data, **kwargs):
return bitmath.parse_string_unsafe(value)
class SpecsSchema(Schema):
cpu = fields.Int()
ram = StorageUnit()
os_ssd = StorageUnit(data_key="os-ssd", attribute="os-ssd")
hdd = fields.List(StorageUnit())
class VMSchema(Schema):
name = fields.Str()
owner = fields.Str()
owner_realm = fields.Str()
specs = fields.Nested(SpecsSchema)
status = fields.Str()
log = fields.List(fields.Str())
vnc_socket = fields.Str()
image_uuid = fields.Str()
hostname = fields.Str()
metadata = fields.Dict()
network = fields.List(
fields.Tuple((fields.Str(), fields.Str(), fields.Int()))
)
in_migration = fields.Bool()
class NetworkSchema(Schema):
_id = fields.Int(data_key="id", attribute="id")
_type = fields.Str(data_key="type", attribute="type")
ipv6 = fields.Str()

View file

@@ -0,0 +1,136 @@
import configparser
import logging
import sys
import os
from datetime import datetime
from uncloud.common.etcd_wrapper import Etcd3Wrapper
from os.path import join as join_path
logger = logging.getLogger(__name__)
settings = None
class CustomConfigParser(configparser.RawConfigParser):
def __getitem__(self, key):
try:
result = super().__getitem__(key)
except KeyError as err:
raise KeyError(
'Key \'{}\' not found in configuration. Make sure you configure uncloud.'.format(
key
)
) from err
else:
return result
class Settings(object):
def __init__(self, conf_dir, seed_value=None):
conf_name = 'uncloud.conf'
self.config_file = join_path(conf_dir, conf_name)
# this is used to cache config from etcd for 1 minute. Without this we
# would make a lot of requests to etcd which slows down everything.
self.last_config_update = datetime.fromtimestamp(0)
self.config_parser = CustomConfigParser(allow_no_value=True)
self.config_parser.add_section('etcd')
self.config_parser.set('etcd', 'base_prefix', '/')
if os.access(self.config_file, os.R_OK):
self.config_parser.read(self.config_file)
else:
raise FileNotFoundError('Config file {} not found!'.format(self.config_file))
self.config_key = join_path(self['etcd']['base_prefix'] + 'uncloud/config/')
self.read_internal_values()
if seed_value is None:
seed_value = dict()
self.config_parser.read_dict(seed_value)
def get_etcd_client(self):
args = tuple()
try:
kwargs = {
'host': self.config_parser.get('etcd', 'url'),
'port': self.config_parser.get('etcd', 'port'),
'ca_cert': self.config_parser.get('etcd', 'ca_cert'),
'cert_cert': self.config_parser.get('etcd', 'cert_cert'),
'cert_key': self.config_parser.get('etcd', 'cert_key'),
}
except configparser.Error as err:
raise configparser.Error(
'{} in config file {}'.format(
err.message, self.config_file
)
) from err
else:
try:
wrapper = Etcd3Wrapper(*args, **kwargs)
except Exception as err:
logger.error(
'etcd connection not successful. Please check your config file.'
'\nDetails: %s\netcd connection parameters: %s',
err,
kwargs,
)
sys.exit(1)
else:
return wrapper
def read_internal_values(self):
base_prefix = self['etcd']['base_prefix']
self.config_parser.read_dict(
{
'etcd': {
'file_prefix': join_path(base_prefix, 'files/'),
'host_prefix': join_path(base_prefix, 'hosts/'),
'image_prefix': join_path(base_prefix, 'images/'),
'image_store_prefix': join_path(base_prefix, 'imagestore/'),
'network_prefix': join_path(base_prefix, 'networks/'),
'request_prefix': join_path(base_prefix, 'requests/'),
'user_prefix': join_path(base_prefix, 'users/'),
'vm_prefix': join_path(base_prefix, 'vms/'),
'vxlan_counter': join_path(base_prefix, 'counters/vxlan'),
'tap_counter': join_path(base_prefix, 'counters/tap')
}
}
)
def read_config_file_values(self, config_file):
try:
# Trying to read configuration file
with open(config_file) as config_file_handle:
self.config_parser.read_file(config_file_handle)
except FileNotFoundError:
sys.exit('Configuration file {} not found!'.format(config_file))
except Exception as err:
logger.exception(err)
sys.exit('Error occurred while reading configuration file')
def read_values_from_etcd(self):
etcd_client = self.get_etcd_client()
if (datetime.utcnow() - self.last_config_update).total_seconds() > 60:
config_from_etcd = etcd_client.get(self.config_key, value_in_json=True)
if config_from_etcd:
self.config_parser.read_dict(config_from_etcd.value)
self.last_config_update = datetime.utcnow()
else:
raise KeyError('Key \'{}\' not found in etcd. Please configure uncloud.'.format(self.config_key))
def __getitem__(self, key):
# Allow failing to read from etcd if we have
# it locally
if key not in self.config_parser.sections():
try:
self.read_values_from_etcd()
except KeyError:
pass
return self.config_parser[key]
def get_settings():
return settings
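
A sketch of how the module-level `settings` object is presumably wired up and queried; the entry point doing this is not part of this commit view, and the /etc/uncloud path is an assumption:

```python
import uncloud.common.settings as settings_module
from uncloud.common.settings import Settings

# assumes /etc/uncloud/uncloud.conf exists and contains at least an [etcd] section
settings_module.settings = Settings('/etc/uncloud')
etcd_conf = settings_module.settings['etcd']   # falls back to etcd only for unknown sections
print(etcd_conf['vm_prefix'])                  # '/vms/' with the default base_prefix
```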

View file

@@ -0,0 +1,34 @@
from uncloud.common.settings import get_settings
from uncloud.common.vm import VmPool
from uncloud.common.host import HostPool
from uncloud.common.request import RequestPool
import uncloud.common.storage_handlers as storage_handlers
class Shared:
@property
def settings(self):
return get_settings()
@property
def etcd_client(self):
return self.settings.get_etcd_client()
@property
def host_pool(self):
return HostPool(self.etcd_client, self.settings["etcd"]["host_prefix"])
@property
def vm_pool(self):
return VmPool(self.etcd_client, self.settings["etcd"]["vm_prefix"])
@property
def request_pool(self):
return RequestPool(self.etcd_client, self.settings["etcd"]["request_prefix"])
@property
def storage_handler(self):
return storage_handlers.get_storage_handler()
shared = Shared()

View file

@@ -0,0 +1,207 @@
import shutil
import subprocess as sp
import os
import stat
from abc import ABC
from . import logger
from os.path import join as join_path
import uncloud.common.shared as shared
class ImageStorageHandler(ABC):
handler_name = "base"
def __init__(self, image_base, vm_base):
self.image_base = image_base
self.vm_base = vm_base
def import_image(self, image_src, image_dest, protect=False):
"""Put an image at the destination.
:param image_src: An image file
:param image_dest: A path where :param image_src: is to be put.
:param protect: If protect is true then the destination is protected (read-only etc).
The source image must exist on the filesystem.
"""
raise NotImplementedError()
def make_vm_image(self, image_path, path):
"""Copy image from src to dest
:param image_path: A path
:param path: A path
src and dest must be on the same storage system, i.e. both on the filesystem or both on Ceph.
"""
raise NotImplementedError()
def resize_vm_image(self, path, size):
"""Resize image located at :param path:
:param path: The file which is to be resized
:param size: Size must be in Megabytes
"""
raise NotImplementedError()
def delete_vm_image(self, path):
raise NotImplementedError()
def execute_command(self, command, report=True, error_origin=None):
if not error_origin:
error_origin = self.handler_name
command = list(map(str, command))
try:
sp.check_output(command, stderr=sp.PIPE)
except sp.CalledProcessError as e:
_stderr = e.stderr.decode("utf-8").strip()
if report:
logger.exception("%s:- %s", error_origin, _stderr)
return False
return True
def vm_path_string(self, path):
raise NotImplementedError()
def qemu_path_string(self, path):
raise NotImplementedError()
def is_vm_image_exists(self, path):
raise NotImplementedError()
class FileSystemBasedImageStorageHandler(ImageStorageHandler):
handler_name = "Filesystem"
def import_image(self, src, dest, protect=False):
dest = join_path(self.image_base, dest)
try:
shutil.copy(src, dest)
if protect:
os.chmod(
dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
)
except Exception as e:
logger.exception(e)
return False
return True
def make_vm_image(self, src, dest):
src = join_path(self.image_base, src)
dest = join_path(self.vm_base, dest)
try:
shutil.copyfile(src, dest)
except Exception as e:
logger.exception(e)
return False
return True
def resize_vm_image(self, path, size):
path = join_path(self.vm_base, path)
command = [
"qemu-img",
"resize",
"-f",
"raw",
path,
"{}M".format(size),
]
if self.execute_command(command):
return True
else:
self.delete_vm_image(path)
return False
def delete_vm_image(self, path):
path = join_path(self.vm_base, path)
try:
os.remove(path)
except Exception as e:
logger.exception(e)
return False
return True
def vm_path_string(self, path):
return join_path(self.vm_base, path)
def qemu_path_string(self, path):
return self.vm_path_string(path)
def is_vm_image_exists(self, path):
path = join_path(self.vm_base, path)
command = ["ls", path]
return self.execute_command(command, report=False)
class CEPHBasedImageStorageHandler(ImageStorageHandler):
handler_name = "Ceph"
def import_image(self, src, dest, protect=False):
dest = join_path(self.image_base, dest)
import_command = ["rbd", "import", src, dest]
commands = [import_command]
if protect:
snap_create_command = [
"rbd",
"snap",
"create",
"{}@protected".format(dest),
]
snap_protect_command = [
"rbd",
"snap",
"protect",
"{}@protected".format(dest),
]
commands.append(snap_create_command)
commands.append(snap_protect_command)
result = True
for command in commands:
result = result and self.execute_command(command)
return result
def make_vm_image(self, src, dest):
src = join_path(self.image_base, src)
dest = join_path(self.vm_base, dest)
command = ["rbd", "clone", "{}@protected".format(src), dest]
return self.execute_command(command)
def resize_vm_image(self, path, size):
path = join_path(self.vm_base, path)
command = ["rbd", "resize", path, "--size", size]
return self.execute_command(command)
def delete_vm_image(self, path):
path = join_path(self.vm_base, path)
command = ["rbd", "rm", path]
return self.execute_command(command)
def vm_path_string(self, path):
return join_path(self.vm_base, path)
def qemu_path_string(self, path):
return "rbd:{}".format(self.vm_path_string(path))
def is_vm_image_exists(self, path):
path = join_path(self.vm_base, path)
command = ["rbd", "info", path]
return self.execute_command(command, report=False)
def get_storage_handler():
__storage_backend = shared.shared.settings["storage"]["storage_backend"]
if __storage_backend == "filesystem":
return FileSystemBasedImageStorageHandler(
vm_base=shared.shared.settings["storage"]["vm_dir"],
image_base=shared.shared.settings["storage"]["image_dir"],
)
elif __storage_backend == "ceph":
return CEPHBasedImageStorageHandler(
vm_base=shared.shared.settings["storage"]["ceph_vm_pool"],
image_base=shared.shared.settings["storage"]["ceph_image_pool"],
)
else:
raise Exception("Unknown Image Storage Handler")

View file

@ -0,0 +1,102 @@
from contextlib import contextmanager
from datetime import datetime
from os.path import join
from .classes import SpecificEtcdEntryBase
class VMStatus:
stopped = "STOPPED" # After requested_shutdown
killed = "KILLED" # either host died or vm died itself
running = "RUNNING"
error = "ERROR" # An error occurred that cannot be resolved automatically
def declare_stopped(vm):
vm["hostname"] = ""
vm["in_migration"] = False
vm["status"] = VMStatus.stopped
class VMEntry(SpecificEtcdEntryBase):
def __init__(self, e):
self.owner = None # type: str
self.specs = None # type: dict
self.hostname = None # type: str
self.status = None # type: str
self.image_uuid = None # type: str
self.log = None # type: list
self.in_migration = None # type: bool
super().__init__(e)
@property
def uuid(self):
return self.key.split("/")[-1]
def declare_killed(self):
self.hostname = ""
self.in_migration = False
if self.status == VMStatus.running:
self.status = VMStatus.killed
def declare_stopped(self):
self.hostname = ""
self.in_migration = False
self.status = VMStatus.stopped
def add_log(self, msg):
self.log = self.log[:5]
self.log.append(
"{} - {}".format(datetime.now().isoformat(), msg)
)
class VmPool:
def __init__(self, etcd_client, vm_prefix):
self.client = etcd_client
self.prefix = vm_prefix
@property
def vms(self):
_vms = self.client.get_prefix(self.prefix, value_in_json=True)
return [VMEntry(vm) for vm in _vms]
def by_host(self, host, _vms=None):
if _vms is None:
_vms = self.vms
return list(filter(lambda x: x.hostname == host, _vms))
def by_status(self, status, _vms=None):
if _vms is None:
_vms = self.vms
return list(filter(lambda x: x.status == status, _vms))
def by_owner(self, owner, _vms=None):
if _vms is None:
_vms = self.vms
return list(filter(lambda x: x.owner == owner, _vms))
def except_status(self, status, _vms=None):
if _vms is None:
_vms = self.vms
return list(filter(lambda x: x.status != status, _vms))
def get(self, key):
if not key.startswith(self.prefix):
key = join(self.prefix, key)
v = self.client.get(key, value_in_json=True)
if v:
return VMEntry(v)
return None
def put(self, obj: VMEntry):
self.client.put(obj.key, obj.value, value_in_json=True)
@contextmanager
def get_put(self, key) -> VMEntry:
# Updates object at key on exit
obj = self.get(key)
yield obj
if obj:
self.put(obj)
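# Hedged usage sketch (not part of the original file): get_put() yields the entry
# and writes it back to etcd when the with-block exits, so callers can mutate a
# VM record without an explicit put(). The UUID below is a placeholder.
if __name__ == "__main__":
    from uncloud.common.shared import shared

    pool = VmPool(shared.etcd_client, shared.settings["etcd"]["vm_prefix"])
    with pool.get_put("example-vm-uuid") as vm_entry:
        if vm_entry:
            vm_entry.add_log("example: noting an operator action")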

View file

@ -0,0 +1,57 @@
import os
import argparse
from uncloud.common.shared import shared
arg_parser = argparse.ArgumentParser('configure', add_help=False)
configure_subparsers = arg_parser.add_subparsers(dest='subcommand')
otp_parser = configure_subparsers.add_parser('otp')
otp_parser.add_argument('--verification-controller-url', required=True, metavar='URL')
otp_parser.add_argument('--auth-name', required=True, metavar='OTP-NAME')
otp_parser.add_argument('--auth-realm', required=True, metavar='OTP-REALM')
otp_parser.add_argument('--auth-seed', required=True, metavar='OTP-SEED')
network_parser = configure_subparsers.add_parser('network')
network_parser.add_argument('--prefix-length', required=True, type=int)
network_parser.add_argument('--prefix', required=True)
network_parser.add_argument('--vxlan-phy-dev', required=True)
netbox_parser = configure_subparsers.add_parser('netbox')
netbox_parser.add_argument('--url', required=True)
netbox_parser.add_argument('--token', required=True)
ssh_parser = configure_subparsers.add_parser('ssh')
ssh_parser.add_argument('--username', default='root')
ssh_parser.add_argument('--private-key-path', default=os.path.expanduser('~/.ssh/id_rsa'),)
storage_parser = configure_subparsers.add_parser('storage')
storage_parser.add_argument('--file-dir', required=True)
storage_parser_subparsers = storage_parser.add_subparsers(dest='storage_backend')
filesystem_storage_parser = storage_parser_subparsers.add_parser('filesystem')
filesystem_storage_parser.add_argument('--vm-dir', required=True)
filesystem_storage_parser.add_argument('--image-dir', required=True)
ceph_storage_parser = storage_parser_subparsers.add_parser('ceph')
ceph_storage_parser.add_argument('--ceph-vm-pool', required=True)
ceph_storage_parser.add_argument('--ceph-image-pool', required=True)
def update_config(section, kwargs):
uncloud_config = shared.etcd_client.get(shared.settings.config_key, value_in_json=True)
if not uncloud_config:
uncloud_config = {}
else:
uncloud_config = uncloud_config.value
uncloud_config[section] = kwargs
shared.etcd_client.put(shared.settings.config_key, uncloud_config, value_in_json=True)
def main(arguments):
subcommand = arguments['subcommand']
if not subcommand:
arg_parser.print_help()
else:
update_config(subcommand, arguments)
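# Hedged example (not part of the original file): what a 'configure network' run
# would persist via update_config(). The prefix uses the IPv6 documentation range
# and the device name is a placeholder.
if __name__ == '__main__':
    main({
        'subcommand': 'network',
        'prefix': '2001:db8::',
        'prefix_length': 64,
        'vxlan_phy_dev': 'eth0',
    })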

View file

@ -0,0 +1,3 @@
import logging
logger = logging.getLogger(__name__)

View file

@ -0,0 +1,85 @@
import glob
import os
import pathlib
import subprocess as sp
import time
import argparse
import bitmath
from uuid import uuid4
from . import logger
from uncloud.common.shared import shared
arg_parser = argparse.ArgumentParser('filescanner', add_help=False)
arg_parser.add_argument('--hostname', required=True)
def sha512sum(file: str):
"""Use sha512sum utility to compute sha512 sum of arg:file
IF arg:file does not exist:
raise FileNotFoundError exception
ELSE IF sum successfully computed:
return computed sha512 sum
ELSE:
return None
"""
if not isinstance(file, str):
raise TypeError
try:
output = sp.check_output(['sha512sum', file], stderr=sp.PIPE)
except sp.CalledProcessError as e:
error = e.stderr.decode('utf-8')
if 'No such file or directory' in error:
raise FileNotFoundError from None
else:
output = output.decode('utf-8').strip()
output = output.split(' ')
return output[0]
return None
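# Illustrative note: this is the Python equivalent of
# `sha512sum <file> | cut -d' ' -f1`, returning the 128-character hex digest as a string.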
def track_file(file, base_dir, host):
file_path = file.relative_to(base_dir)
file_str = str(file)
# Get Username
try:
owner = file_path.parts[0]
except IndexError:
pass
else:
file_path = file_path.relative_to(owner)
creation_date = time.ctime(os.stat(file_str).st_ctime)
entry_key = os.path.join(shared.settings['etcd']['file_prefix'], str(uuid4()))
entry_value = {
'filename': str(file_path),
'owner': owner,
'sha512sum': sha512sum(file_str),
'creation_date': creation_date,
'size': str(bitmath.Byte(os.path.getsize(file_str)).to_MB()),
'host': host
}
logger.info('Tracking %s', file_str)
shared.etcd_client.put(entry_key, entry_value, value_in_json=True)
def main(arguments):
hostname = arguments['hostname']
base_dir = shared.settings['storage']['file_dir']
# Recursively get all files and folders below base_dir
files = glob.glob('{}/**'.format(base_dir), recursive=True)
files = [pathlib.Path(f) for f in files if pathlib.Path(f).is_file()]
# Files that are already tracked
tracked_files = [
pathlib.Path(os.path.join(base_dir, f.value['owner'], f.value['filename']))
for f in shared.etcd_client.get_prefix(shared.settings['etcd']['file_prefix'], value_in_json=True)
if f.value['host'] == hostname
]
untracked_files = set(files) - set(tracked_files)
for file in untracked_files:
track_file(file, base_dir, hostname)

View file

@ -0,0 +1,13 @@
This directory contains unfinished hacks / inspirations
* firewalling / networking in ucloud
** automatically route a network per VM - /64?
** nft: one chain per VM on each vm host (?)
*** might have scaling issues?
** firewall rules on each VM host
- mac filtering:
* To add / block
** TODO arp poisoning
** TODO ndp "poisoning"
** TODO ipv4 dhcp server
*** drop dhcpv4 requests
*** drop dhcpv4 answers

View file

@ -0,0 +1 @@

View file

@ -0,0 +1 @@
HOSTNAME=server1.place10

View file

@ -0,0 +1,39 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
#
# This file is part of uncloud.
#
# uncloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# uncloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
#
#
class Config(object):
def __init__(self, arguments):
""" read arguments dicts as a base """
self.arguments = arguments
# Split them so *etcd_args can be used and we can
# iterate over etcd_hosts
self.etcd_hosts = [ arguments['etcd_host'] ]
self.etcd_args = {
'ca_cert': arguments['etcd_ca_cert'],
'cert_cert': arguments['etcd_cert_cert'],
'cert_key': arguments['etcd_cert_key'],
# 'user': None,
# 'password': None
}
self.etcd_prefix = '/nicohack/'

View file

@ -0,0 +1,149 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
#
# This file is part of uncloud.
#
# uncloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# uncloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
#
#
import etcd3
import json
import logging
import datetime
import re
from functools import wraps
from uncloud import UncloudException
log = logging.getLogger(__name__)
def db_logentry(message):
timestamp = datetime.datetime.now()
return {
"timestamp": str(timestamp),
"message": message
}
def readable_errors(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except etcd3.exceptions.ConnectionFailedError as e:
raise UncloudException('Cannot connect to etcd: is etcd running and reachable? {}'.format(e))
except etcd3.exceptions.ConnectionTimeoutError as e:
raise UncloudException('etcd connection timeout. {}'.format(e))
return wrapper
class DB(object):
def __init__(self, config, prefix="/"):
self.config = config
# Root for everything
self.base_prefix= '/nicohack'
# Can be set from outside
self.prefix = prefix
try:
self.connect()
except FileNotFoundError as e:
raise UncloudException("Is the path to the etcd certs correct? {}".format(e))
@readable_errors
def connect(self):
self._db_clients = []
for endpoint in self.config.etcd_hosts:
client = etcd3.client(host=endpoint, **self.config.etcd_args)
self._db_clients.append(client)
def realkey(self, key):
return "{}{}/{}".format(self.base_prefix,
self.prefix,
key)
@readable_errors
def get(self, key, as_json=False, **kwargs):
value, _ = self._db_clients[0].get(self.realkey(key), **kwargs)
if as_json:
value = json.loads(value)
return value
@readable_errors
def get_prefix(self, key, as_json=False, **kwargs):
for value, meta in self._db_clients[0].get_prefix(self.realkey(key), **kwargs):
k = meta.key.decode("utf-8")
value = value.decode("utf-8")
if as_json:
value = json.loads(value)
yield (k, value)
@readable_errors
def set(self, key, value, as_json=False, **kwargs):
if as_json:
value = json.dumps(value)
log.debug("Setting {} = {}".format(self.realkey(key), value))
# FIXME: iterate over clients in case of failure ?
return self._db_clients[0].put(self.realkey(key), value, **kwargs)
@readable_errors
def list_and_filter(self, key, filter_key=None, filter_regexp=None):
for k,v in self.get_prefix(key, as_json=True):
if filter_key and filter_regexp:
if filter_key in v:
if re.match(filter_regexp, v[filter_key]):
yield v
else:
yield v
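    # Illustrative usage (placeholder key and field names): iterate over all
    # entries under this prefix whose 'status' field matches NEW:
    #   for entry in db.list_and_filter("", filter_key="status", filter_regexp="NEW"):
    #       print(entry)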
@readable_errors
def increment(self, key, **kwargs):
print(self.realkey(key))
print("prelock")
lock = self._db_clients[0].lock('/nicohack/foo')
print("prelockacq")
lock.acquire()
print("prelockrelease")
lock.release()
with self._db_clients[0].lock("/nicohack/mac/last_used_index") as lock:
print("in lock")
pass
# with self._db_clients[0].lock(self.realkey(key)) as lock:# value = int(self.get(self.realkey(key), **kwargs))
# self.set(self.realkey(key), str(value + 1), **kwargs)
if __name__ == '__main__':
    # DB() expects a Config-like object (see uncloud.hack.config.Config) that
    # provides etcd_hosts and etcd_args; it does not accept a url= keyword.
    from uncloud.hack.config import Config

    endpoints = [ "https://etcd1.ungleich.ch:2379",
                  "https://etcd2.ungleich.ch:2379",
                  "https://etcd3.ungleich.ch:2379" ]
    db = DB(Config({'etcd_host': endpoints[0], 'etcd_ca_cert': None,
                    'etcd_cert_cert': None, 'etcd_cert_key': None}))

View file

@ -0,0 +1,3 @@
*.iso
radvdpid
foo

View file

@ -0,0 +1 @@

View file

@ -0,0 +1,6 @@
#!/bin/sh
etcdctl --cert=$HOME/vcs/ungleich-dot-cdist/files/etcd/nico.pem \
--key=/home/nico/vcs/ungleich-dot-cdist/files/etcd/nico-key.pem \
--cacert=$HOME/vcs/ungleich-dot-cdist/files/etcd/ca.pem \
--endpoints https://etcd1.ungleich.ch:2379,https://etcd2.ungleich.ch:2379,https://etcd3.ungleich.ch:2379 "$@"

View file

@ -0,0 +1,3 @@
#!/bin/sh
echo $@

View file

@ -0,0 +1,7 @@
#!/bin/sh
dev=$1; shift
# bridge is setup from outside
ip link set dev "$dev" master ${bridge}
ip link set dev "$dev" up

View file

@ -0,0 +1 @@
000000000252

View file

@ -0,0 +1 @@
02:00

View file

@ -0,0 +1,29 @@
#!/bin/sh
set -x
netid=100
dev=wlp2s0
dev=wlp0s20f3
#dev=wlan0
ip=2a0a:e5c1:111:888::48/64
vxlandev=vxlan${netid}
bridgedev=br${netid}
ip -6 link add ${vxlandev} type vxlan \
id ${netid} \
dstport 4789 \
group ff05::${netid} \
dev ${dev} \
ttl 5
ip link set ${vxlandev} up
ip link add ${bridgedev} type bridge
ip link set ${bridgedev} up
ip link set ${vxlandev} master ${bridgedev} up
ip addr add ${ip} dev ${bridgedev}

View file

@ -0,0 +1,31 @@
flush ruleset
table bridge filter {
chain prerouting {
type filter hook prerouting priority 0;
policy accept;
ibrname br100 jump br100
}
chain br100 {
# Allow all incoming traffic from outside
iifname vxlan100 accept
# Default blocks: router advertisements, dhcpv6, dhcpv4
icmpv6 type nd-router-advert drop
ip6 version 6 udp sport 547 drop
ip version 4 udp sport 67 drop
jump br100_vmlist
drop
}
chain br100_vmlist {
# VM1
iifname tap1 ether saddr 02:00:f0:a9:c4:4e ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44e accept
# VM2
iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44f accept
iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:1234::/64 accept
}
}

View file

@ -0,0 +1,13 @@
interface br100
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 5;
AdvDefaultLifetime 3600;
prefix 2a0a:e5c1:111:888::/64 {
};
RDNSS 2a0a:e5c0::3 2a0a:e5c0::4 { AdvRDNSSLifetime 6000; };
DNSSL place7.ungleich.ch { AdvDNSSLLifetime 6000; } ;
};

View file

@ -0,0 +1,3 @@
#!/bin/sh
radvd -C ./radvd.conf -n -p ./radvdpid

View file

@ -0,0 +1,29 @@
#!/bin/sh
# if [ $# -ne 1 ]; then
# echo "$0: owner"
# exit 1
# fi
qemu=/usr/bin/qemu-system-x86_64
accel=kvm
#accel=tcg
memory=1024
cores=2
uuid=$(uuidgen)
mac=$(./mac-gen.py)
owner=nico
export bridge=br100
set -x
$qemu -name "uncloud-${uuid}" \
-machine pc,accel=${accel} \
-m ${memory} \
-smp ${cores} \
-uuid ${uuid} \
-drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
-netdev tap,id=netmain,script=./ifup.sh,downscript=./ifdown.sh \
-device virtio-net-pci,netdev=netmain,id=net0,mac=${mac}

View file

@ -0,0 +1,75 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
#
# This file is part of uncloud.
#
# uncloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# uncloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
import uuid
from uncloud.hack.db import DB
from uncloud import UncloudException
class Host(object):
def __init__(self, config, db_entry=None):
self.config = config
self.db = DB(self.config, prefix="/hosts")
if db_entry:
self.db_entry = db_entry
def list_hosts(self, filter_key=None, filter_regexp=None):
""" Return list of all hosts """
for entry in self.db.list_and_filter("", filter_key, filter_regexp):
yield self.__class__(self.config, db_entry=entry)
def cmdline_add_host(self):
""" FIXME: make this a bit smarter and less redundant """
for required_arg in [
'add_vm_host',
'max_cores_per_vm',
'max_cores_total',
'max_memory_in_gb' ]:
if not required_arg in self.config.arguments:
raise UncloudException("Missing argument: {}".format(required_arg))
return self.add_host(
self.config.arguments['add_vm_host'],
self.config.arguments['max_cores_per_vm'],
self.config.arguments['max_cores_total'],
self.config.arguments['max_memory_in_gb'])
def add_host(self,
hostname,
max_cores_per_vm,
max_cores_total,
max_memory_in_gb):
db_entry = {}
db_entry['uuid'] = str(uuid.uuid4())
db_entry['hostname'] = hostname
db_entry['max_cores_per_vm'] = max_cores_per_vm
db_entry['max_cores_total'] = max_cores_total
db_entry['max_memory_in_gb'] = max_memory_in_gb
db_entry["db_version"] = 1
db_entry["log"] = []
self.db.set(db_entry['uuid'], db_entry, as_json=True)
return self.__class__(self.config, db_entry)

View file

@ -0,0 +1,104 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2012 Nico Schottelius (nico-cinv at schottelius.org)
#
# This file is part of cinv.
#
# cinv is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cinv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cinv. If not, see <http://www.gnu.org/licenses/>.
#
#
import argparse
import logging
import os.path
import os
import re
import json
from uncloud import UncloudException
from uncloud.hack.db import DB
log = logging.getLogger(__name__)
class MAC(object):
def __init__(self, config):
self.config = config
self.no_db = self.config.arguments['no_db']
if not self.no_db:
self.db = DB(config, prefix="/mac")
self.prefix = 0x420000000000
self._number = 0 # Not set by default
@staticmethod
def validate_mac(mac):
if not re.match(r'([0-9A-F]{2}[-:]){5}[0-9A-F]{2}$', mac, re.I):
raise UncloudException("Not a valid mac address: %s" % mac)
else:
return True
def last_used_index(self):
if not self.no_db:
value = self.db.get("last_used_index")
if not value:
self.db.set("last_used_index", "0")
value = self.db.get("last_used_index")
else:
value = "0"
return int(value)
def last_used_mac(self):
return self.int_to_mac(self.prefix + self.last_used_index())
def to_colon_format(self):
b = self._number.to_bytes(6, byteorder="big")
return ':'.join(format(s, '02x') for s in b)
def to_str_format(self):
b = self._number.to_bytes(6, byteorder="big")
return ''.join(format(s, '02x') for s in b)
def create(self):
last_number = self.last_used_index()
if last_number == int('0xffffffff', 16):
raise UncloudException("Exhausted all possible mac addresses - try to free some")
next_number = last_number + 1
self._number = self.prefix + next_number
#next_number_string = "{:012x}".format(next_number)
#next_mac = self.int_to_mac(next_mac_number)
# db_entry = {}
# db_entry['vm_uuid'] = vmuuid
# db_entry['index'] = next_number
# db_entry['mac_address'] = next_mac
# should be one transaction
# self.db.increment("last_used_index")
# self.db.set("used/{}".format(next_mac),
# db_entry, as_json=True)
def __int__(self):
return self._number
def __repr__(self):
return self.to_str_format()
def __str__(self):
return self.to_colon_format()
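# Hedged usage sketch (not part of the original file): allocate the next MAC from
# the 0x42 prefix without touching etcd (--no-db path). The Config values below
# are placeholders.
if __name__ == '__main__':
    from uncloud.hack.config import Config

    config = Config({'no_db': True,
                     'etcd_host': None, 'etcd_ca_cert': None,
                     'etcd_cert_cert': None, 'etcd_cert_key': None})
    mac = MAC(config)
    mac.create()      # prefix + last_used_index() + 1
    print(str(mac))   # colon format, e.g. 42:00:00:00:00:01
    print(repr(mac))  # plain hex format, e.g. 420000000001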

View file

@ -0,0 +1,186 @@
import argparse
import logging
import re
import ldap3
from uncloud.hack.vm import VM
from uncloud.hack.host import Host
from uncloud.hack.config import Config
from uncloud.hack.mac import MAC
from uncloud.hack.net import VXLANBridge, DNSRA
from uncloud import UncloudException
from uncloud.hack.product import ProductOrder
arg_parser = argparse.ArgumentParser('hack', add_help=False)
#description="Commands that are unfinished - use at own risk")
arg_parser.add_argument('--last-used-mac', action='store_true')
arg_parser.add_argument('--get-new-mac', action='store_true')
arg_parser.add_argument('--init-network', help="Initialise networking", action='store_true')
arg_parser.add_argument('--create-vxlan', help="Initialise networking", action='store_true')
arg_parser.add_argument('--network', help="/64 IPv6 network")
arg_parser.add_argument('--vxlan-uplink-device', help="The VXLAN underlay device, i.e. eth0")
arg_parser.add_argument('--vni', help="VXLAN ID (decimal)", type=int)
arg_parser.add_argument('--run-dns-ra', action='store_true',
help="Provide router advertisements and DNS resolution via dnsmasq")
arg_parser.add_argument('--use-sudo', help="Use sudo for command requiring root!", action='store_true')
arg_parser.add_argument('--create-vm', action='store_true')
arg_parser.add_argument('--destroy-vm', action='store_true')
arg_parser.add_argument('--get-vm-status', action='store_true')
arg_parser.add_argument('--get-vm-vnc', action='store_true')
arg_parser.add_argument('--list-vms', action='store_true')
arg_parser.add_argument('--memory', help="Size of memory (GB)", type=int, default=2)
arg_parser.add_argument('--cores', help="Amount of CPU cores", type=int, default=1)
arg_parser.add_argument('--image', help="Path (under hackprefix) to OS image")
arg_parser.add_argument('--image-format', help="Image format: qcow2 or raw", choices=['raw', 'qcow2'])
arg_parser.add_argument('--uuid', help="VM UUID")
arg_parser.add_argument('--no-db', help="Disable connection to etcd. For local testing only!", action='store_true')
arg_parser.add_argument('--hackprefix', help="hackprefix, if you need it you know it (it's where the iso and ifup/down.sh scripts are located)")
# order based commands => later to be shifted below "order"
arg_parser.add_argument('--order', action='store_true')
arg_parser.add_argument('--list-orders', help="List all orders", action='store_true')
arg_parser.add_argument('--filter-order-key', help="Which key to filter on")
arg_parser.add_argument('--filter-order-regexp', help="Which regexp the value should match")
arg_parser.add_argument('--process-orders', help="Process all (pending) orders", action='store_true')
arg_parser.add_argument('--product', choices=["dualstack-vm"])
arg_parser.add_argument('--os-image-name', help="Name of OS image (successor to --image)")
arg_parser.add_argument('--os-image-size', help="Size of OS image in GB", type=int, default=10)
arg_parser.add_argument('--username')
arg_parser.add_argument('--password')
arg_parser.add_argument('--api', help="Run the API")
arg_parser.add_argument('--mode',
choices=["direct", "api", "client"],
default="client",
help="Directly manipulate etcd, spawn the API server or behave as a client")
arg_parser.add_argument('--add-vm-host', help="Add a host that can run VMs")
arg_parser.add_argument('--list-vm-hosts', action='store_true')
arg_parser.add_argument('--max-cores-per-vm')
arg_parser.add_argument('--max-cores-total')
arg_parser.add_argument('--max-memory-in-gb')
log = logging.getLogger(__name__)
def authenticate(username, password, totp_token=None):
server = ldap3.Server("ldaps://ldap1.ungleich.ch")
dn = "uid={},ou=customer,dc=ungleich,dc=ch".format(username)
log.debug("LDAP: connecting to {} as {}".format(server, dn))
try:
conn = ldap3.Connection(server, dn, password, auto_bind=True)
except ldap3.core.exceptions.LDAPBindError as e:
raise UncloudException("Credentials not verified by LDAP server: {}".format(e))
def order(config):
for required_arg in [ 'product', 'username', 'password' ]:
if not config.arguments[required_arg]:
raise UncloudException("Missing required argument: {}".format(required_arg))
if config.arguments['product'] == 'dualstack-vm':
for required_arg in [ 'cores', 'memory', 'os_image_name', 'os_image_size' ]:
if not config.arguments[required_arg]:
raise UncloudException("Missing required argument: {}".format(required_arg))
log.debug(config.arguments)
authenticate(config.arguments['username'], config.arguments['password'])
# create DB entry for VM
vm = VM(config)
return vm.product.place_order(owner=config.arguments['username'])
def main(arguments):
config = Config(arguments)
if arguments['add_vm_host']:
h = Host(config)
h.cmdline_add_host()
if arguments['list_vm_hosts']:
h = Host(config)
for host in h.list_hosts(filter_key=arguments['filter_order_key'],
filter_regexp=arguments['filter_order_regexp']):
print("Host {}: {}".format(host.db_entry['uuid'], host.db_entry))
if arguments['order']:
print("Created order: {}".format(order(config)))
if arguments['list_orders']:
p = ProductOrder(config)
for product_order in p.list_orders(filter_key=arguments['filter_order_key'],
filter_regexp=arguments['filter_order_regexp']):
print("Order {}: {}".format(product_order.db_entry['uuid'], product_order.db_entry))
if arguments['process_orders']:
p = ProductOrder(config)
p.process_orders()
if arguments['create_vm']:
vm = VM(config)
vm.create()
if arguments['destroy_vm']:
vm = VM(config)
vm.stop()
if arguments['get_vm_status']:
vm = VM(config)
vm.status()
if arguments['get_vm_vnc']:
vm = VM(config)
vm.vnc_addr()
if arguments['list_vms']:
vm = VM(config)
vm.list()
if arguments['last_used_mac']:
m = MAC(config)
print(m.last_used_mac())
if arguments['get_new_mac']:
    m = MAC(config)
    m.create()
    print(m)
#if arguments['init_network']:
if arguments['create_vxlan']:
if not arguments['network'] or not arguments['vni'] or not arguments['vxlan_uplink_device']:
raise UncloudException("Initialising the network requires an IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
vb = VXLANBridge(vni=arguments['vni'],
route=arguments['network'],
uplinkdev=arguments['vxlan_uplink_device'],
use_sudo=arguments['use_sudo'])
vb._setup_vxlan()
vb._setup_bridge()
vb._add_vxlan_to_bridge()
vb._route_network()
if arguments['run_dns_ra']:
if not arguments['network'] or not arguments['vni']:
raise UncloudException("Providing DNS/RAs requires a /64 IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
dnsra = DNSRA(route=arguments['network'],
vni=arguments['vni'],
use_sudo=arguments['use_sudo'])
dnsra._setup_dnsmasq()
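# Hedged example (values taken from the error messages above; eth0 is a placeholder):
# bring up a test network plus router advertisements/DNS on a non-production VNI:
#   uncloud hack --create-vxlan --network fd00::/64 --vni 1 --vxlan-uplink-device eth0 --use-sudo
#   uncloud hack --run-dns-ra --network fd00::/64 --vni 1 --use-sudo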

View file

@ -0,0 +1,116 @@
import subprocess
import ipaddress
import logging
from uncloud import UncloudException
log = logging.getLogger(__name__)
class VXLANBridge(object):
cmd_create_vxlan = "{sudo}ip -6 link add {vxlandev} type vxlan id {vni_dec} dstport 4789 group {multicast_address} dev {uplinkdev} ttl 5"
cmd_up_dev = "{sudo}ip link set {dev} up"
cmd_create_bridge="{sudo}ip link add {bridgedev} type bridge"
cmd_add_to_bridge="{sudo}ip link set {vxlandev} master {bridgedev} up"
cmd_add_addr="{sudo}ip addr add {ip} dev {bridgedev}"
cmd_add_route_dev="{sudo}ip route add {route} dev {bridgedev}"
# VXLAN ids are at maximum 24 bit - use a /104
multicast_network = ipaddress.IPv6Network("ff05::/104")
max_vni = (2**24)-1
def __init__(self,
vni,
uplinkdev,
route=None,
use_sudo=False):
self.config = {}
if vni > self.max_vni:
raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))
if use_sudo:
self.config['sudo'] = 'sudo '
else:
self.config['sudo'] = ''
self.config['vni_dec'] = vni
self.config['vni_hex'] = "{:x}".format(vni)
self.config['multicast_address'] = self.multicast_network[vni]
self.config['route_network'] = ipaddress.IPv6Network(route)
self.config['route'] = route
self.config['uplinkdev'] = uplinkdev
self.config['vxlandev'] = "vx{}".format(self.config['vni_hex'])
self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])
def setup_networking(self):
pass
def _setup_vxlan(self):
self._execute_cmd(self.cmd_create_vxlan)
self._execute_cmd(self.cmd_up_dev, dev=self.config['vxlandev'])
def _setup_bridge(self):
self._execute_cmd(self.cmd_create_bridge)
self._execute_cmd(self.cmd_up_dev, dev=self.config['bridgedev'])
def _route_network(self):
self._execute_cmd(self.cmd_add_route_dev)
def _add_vxlan_to_bridge(self):
self._execute_cmd(self.cmd_add_to_bridge)
def _execute_cmd(self, cmd_string, **kwargs):
cmd = cmd_string.format(**self.config, **kwargs)
log.info("Executing: {}".format(cmd))
subprocess.run(cmd.split())
class ManagementBridge(VXLANBridge):
pass
class DNSRA(object):
# VXLAN ids are at maximum 24 bit
max_vni = (2**24)-1
# Command to start dnsmasq
cmd_start_dnsmasq="{sudo}dnsmasq --interface={bridgedev} --bind-interfaces --dhcp-range={route},ra-only,infinite --enable-ra --no-daemon"
def __init__(self,
vni,
route=None,
use_sudo=False):
self.config = {}
if vni > self.max_vni:
raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))
if use_sudo:
self.config['sudo'] = 'sudo '
else:
self.config['sudo'] = ''
#TODO: remove if not needed
#self.config['vni_dec'] = vni
self.config['vni_hex'] = "{:x}".format(vni)
# dnsmasq only wants the network address without the prefix length, so cut it off
self.config['route'] = ipaddress.IPv6Network(route).network_address
self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])
def _setup_dnsmasq(self):
self._execute_cmd(self.cmd_start_dnsmasq)
def _execute_cmd(self, cmd_string, **kwargs):
cmd = cmd_string.format(**self.config, **kwargs)
log.info("Executing: {}".format(cmd))
print("Executing: {}".format(cmd))
subprocess.run(cmd.split())
class Firewall(object):
pass

View file

@ -0,0 +1,94 @@
flush ruleset
table bridge filter {
chain prerouting {
type filter hook prerouting priority 0;
policy accept;
ibrname br100 jump netpublic
}
chain netpublic {
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
}
}
table ip6 filter {
chain forward {
type filter hook forward priority 0;
# this would be nice...
policy drop;
ct state established,related accept;
}
chain prerouting {
type filter hook prerouting priority 0;
policy accept;
# not supporting in here!
iifname vmXXXX jump vmXXXX
iifname vmYYYY jump vmYYYY
iifname brXX jump brXX
iifname vxlan100 jump vxlan100
iifname br100 jump br100
}
# 1. Rules per VM (names: vmXXXXX?
# 2. Rules per network (names: vxlanXXXX, what about non vxlan?)
# 3. Rules per bridge:
# vxlanXX is inside brXX
# This is effectively a network filter
# 4. Kill all malicous traffic:
# - router advertisements from VMs that should not announce RAs
chain vxlan100 {
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
}
chain br100 {
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
}
chain netpublic {
# drop router advertisements that don't come from us
iifname != vxlanpublic icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
# icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
}
# This vlan
chain brXX {
ip6 saddr != 2001:db8:1::/64 drop;
}
chain vmXXXX {
ether saddr != 00:0f:54:0c:11:04 drop;
ip6 saddr != 2001:db8:1:000f::540c:11ff:fe04 drop;
jump drop_from_vm_without_ipam
}
chain net_2a0ae5c05something {
}
chain drop_from_vm_without_ipam {
}
chain vmYYYY {
ether saddr != 00:0f:54:0c:11:05 drop;
jump drop_from_vm_with_ipam
}
# Drop stuff from every VM
chain drop_from_vm_with_ipam {
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
}
}

View file

@ -0,0 +1,206 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
#
# This file is part of uncloud.
#
# uncloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# uncloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
import json
import uuid
import logging
import re
import importlib
from uncloud import UncloudException
from uncloud.hack.db import DB, db_logentry
log = logging.getLogger(__name__)
class ProductOrder(object):
def __init__(self, config, product_entry=None, db_entry=None):
self.config = config
self.db = DB(self.config, prefix="/orders")
self.db_entry = {}
self.db_entry["product"] = product_entry
# Overwrite if we are loading an existing product order
if db_entry:
self.db_entry = db_entry
# FIXME: this should return a list of our class!
def list_orders(self, filter_key=None, filter_regexp=None):
for entry in self.db.list_and_filter("", filter_key, filter_regexp):
yield self.__class__(self.config, db_entry=entry)
def set_required_values(self):
"""Set values that are required to make the db entry valid"""
if not "uuid" in self.db_entry:
self.db_entry["uuid"] = str(uuid.uuid4())
if not "status" in self.db_entry:
self.db_entry["status"] = "NEW"
if not "owner" in self.db_entry:
self.db_entry["owner"] = "UNKNOWN"
if not "log" in self.db_entry:
self.db_entry["log"] = []
if not "db_version" in self.db_entry:
self.db_entry["db_version"] = 1
def validate_status(self):
if "status" in self.db_entry:
if self.db_entry["status"] in [ "NEW",
"SCHEDULED",
"CREATED_ACTIVE",
"CANCELLED",
"REJECTED" ]:
return False
return True
def order(self):
self.set_required_values()
if not self.db_entry["status"] == "NEW":
raise UncloudException("Cannot re-order same order. Status: {}".format(self.db_entry["status"]))
self.db.set(self.db_entry["uuid"], self.db_entry, as_json=True)
return self.db_entry["uuid"]
def process_orders(self):
"""processing orders can be done stand alone on server side"""
for order in self.list_orders():
if order.db_entry["status"] == "NEW":
log.info("Handling new order: {}".format(order))
# FIXME: these should all be transactions! -> fix concurrent access!
if not "log" in order.db_entry:
order.db_entry['log'] = []
is_valid = True
# Verify the order entry
for must_attribute in [ "owner", "product" ]:
if not must_attribute in order.db_entry:
message = "Missing {} entry in order, rejecting order".format(must_attribute)
log.info("Rejecting order {}: {}".format(order.db_entry["uuid"], message))
order.db_entry['log'].append(db_logentry(message))
order.db_entry['status'] = "REJECTED"
self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)
is_valid = False
# Rejected the order
if not is_valid:
continue
# Verify the product entry
for must_attribute in [ "python_product_class", "python_product_module" ]:
if not must_attribute in order.db_entry['product']:
message = "Missing {} entry in product of order, rejecting order".format(must_attribute)
log.info("Rejecting order {}: {}".format(order.db_entry["uuid"], message))
order.db_entry['log'].append(db_logentry(message))
order.db_entry['status'] = "REJECTED"
self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)
is_valid = False
# Rejected the order
if not is_valid:
continue
print(order.db_entry["product"]["python_product_class"])
# Create the product
m = importlib.import_module(order.db_entry["product"]["python_product_module"])
c = getattr(m, order.db_entry["product"]["python_product_class"])
product = c(self.config, db_entry=order.db_entry["product"])
# STOPPED
product.create_product()
order.db_entry['status'] = "SCHEDULED"
self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)
def __str__(self):
return str(self.db_entry)
class Product(object):
def __init__(self,
config,
product_name,
product_class,
db_entry=None):
self.config = config
self.db = DB(self.config, prefix="/orders")
self.db_entry = {}
self.db_entry["product_name"] = product_name
self.db_entry["python_product_class"] = product_class.__qualname__
self.db_entry["python_product_module"] = product_class.__module__
self.db_entry["db_version"] = 1
self.db_entry["log"] = []
self.db_entry["features"] = {}
# Existing product? Read in db_entry
if db_entry:
self.db_entry = db_entry
self.valid_periods = [ "per_year", "per_month", "per_week",
"per_day", "per_hour",
"per_minute", "per_second" ]
def define_feature(self,
name,
one_time_price,
recurring_price,
recurring_period,
minimum_period):
self.db_entry['features'][name] = {}
self.db_entry['features'][name]['one_time_price'] = one_time_price
self.db_entry['features'][name]['recurring_price'] = recurring_price
if not recurring_period in self.valid_periods:
raise UncloudException("Invalid recurring period: {}".format(recurring_period))
self.db_entry['features'][name]['recurring_period'] = recurring_period
if not minimum_period in self.valid_periods:
raise UncloudException("Invalid recurring period: {}".format(recurring_period))
recurring_index = self.valid_periods.index(recurring_period)
minimum_index = self.valid_periods.index(minimum_period)
if minimum_index < recurring_index:
raise UncloudException("Minimum period for product '{}' feature '{}' must be shorter or equal than/as recurring period: {} > {}".format(self.db_entry['product_name'], name, minimum_period, recurring_period))
self.db_entry['features'][name]['minimum_period'] = minimum_period
def validate_product(self):
for feature in self.db_entry['features']:
pass
def place_order(self, owner):
""" Schedule creating the product in etcd """
order = ProductOrder(self.config, product_entry=self.db_entry)
order.db_entry["owner"] = owner
return order.order()
def __str__(self):
return json.dumps(self.db_entry)
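# Hedged usage sketch (not part of the original file): define a product with one
# recurring feature and place an order for it. Prices and the owner name are
# placeholders; 'config' is assumed to be an uncloud.hack.config.Config instance.
def _example_place_order(config):
    product = Product(config, product_name="example-vm", product_class=Product)
    product.define_feature(name="base",
                           one_time_price=0,
                           recurring_price=9,
                           recurring_period="per_month",
                           minimum_period="per_hour")
    # Returns the uuid of the newly created order entry in etcd.
    return product.place_order(owner="example-user")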

View file

@ -0,0 +1,8 @@
#!/sbin/openrc-run
name="$RC_SVCNAME"
pidfile="/var/run/${name}.pid"
command="$(which pipenv)"
command_args="run python ucloud.py api"
command_background="true"
directory="/root/ucloud"

View file

@ -0,0 +1,8 @@
#!/sbin/openrc-run
name="$RC_SVCNAME"
pidfile="/var/run/${name}.pid"
command="$(which pipenv)"
command_args="run python ucloud.py host ${HOSTNAME}"
command_background="true"
directory="/root/ucloud"

View file

@ -0,0 +1,8 @@
#!/sbin/openrc-run
name="$RC_SVCNAME"
pidfile="/var/run/${name}.pid"
command="$(which pipenv)"
command_args="run python ucloud.py metadata"
command_background="true"
directory="/root/ucloud"

View file

@ -0,0 +1,8 @@
#!/sbin/openrc-run
name="$RC_SVCNAME"
pidfile="/var/run/${name}.pid"
command="$(which pipenv)"
command_args="run python ucloud.py scheduler"
command_background="true"
directory="/root/ucloud"

View file

@ -0,0 +1,26 @@
id=100
rawdev=eth0
# create vxlan
ip -6 link add vxlan${id} type vxlan \
id ${id} \
dstport 4789 \
group ff05::${id} \
dev ${rawdev} \
ttl 5
ip link set vxlan${id} up
# create bridge
ip link add br${id} type bridge
ip link set br${id} up
# Add vxlan into bridge
ip link set vxlan${id} master br${id}
# useradd -m uncloud
# [18:05] tablett.place10:~# id uncloud
# uid=1000(uncloud) gid=1000(uncloud) groups=1000(uncloud),34(kvm),36(qemu)
# apk add qemu-system-x86_64
# also needs group netdev

View file

@ -0,0 +1,25 @@
#!/bin/sh
if [ $# -ne 1 ]; then
echo $0 vmid
exit 1
fi
id=$1; shift
memory=512
macaddress=02:00:b9:cb:70:${id}
netname=net${id}-1
qemu-system-x86_64 \
-name uncloud-${id} \
-accel kvm \
-m ${memory} \
-smp 2,sockets=2,cores=1,threads=1 \
-device virtio-net-pci,netdev=net0,mac=$macaddress \
-netdev tap,id=net0,ifname=${netname},script=no,downscript=no \
-vnc [::]:0
# To be changed:
# -vnc to unix path
# or -spice

View file

@ -0,0 +1,193 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
#
# This file is part of uncloud.
#
# uncloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# uncloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
# This module is directly called from the hack module, and can be used as follows:
#
# Create a new VM with default CPU/Memory. The path of the image file is relative to $hackprefix.
# `uncloud hack --hackprefix /tmp/hackcloud --create-vm --image mysuperimage.qcow2`
#
# List running VMs (returns a list of UUIDs).
# `uncloud hack --hackprefix /tmp/hackcloud --list-vms`
#
# Get VM status:
# `uncloud hack --hackprefix /tmp/hackcloud --get-vm-status --uuid my-vm-uuid`
#
# Stop a VM:
# `uncloud hack --hackprefix /tmp/hackcloud --destroy-vm --uuid my-vm-uuid`
import subprocess
import uuid
import os
import logging
from uncloud.hack.db import DB
from uncloud.hack.mac import MAC
from uncloud.vmm import VMM
from uncloud.hack.product import Product
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class VM(object):
def __init__(self, config, db_entry=None):
self.config = config
#TODO: Enable etcd lookup
self.no_db = self.config.arguments['no_db']
if not self.no_db:
self.db = DB(self.config, prefix="/vm")
if db_entry:
self.db_entry = db_entry
# General CLI arguments.
self.hackprefix = self.config.arguments['hackprefix']
self.uuid = self.config.arguments['uuid']
self.memory = self.config.arguments['memory'] or '1024M'
self.cores = self.config.arguments['cores'] or 1
if self.config.arguments['image']:
self.image = os.path.join(self.hackprefix, self.config.arguments['image'])
else:
self.image = None
if self.config.arguments['image_format']:
self.image_format=self.config.arguments['image_format']
else:
self.image_format='qcow2'
# External components.
# This one is broken:
# TypeError: expected str, bytes or os.PathLike object, not NoneType
# Fix before re-enabling
# self.vmm = VMM(vmm_backend=self.hackprefix)
self.mac = MAC(self.config)
# Harcoded & generated values.
self.owner = 'uncloud'
self.accel = 'kvm'
self.threads = 1
self.ifup = os.path.join(self.hackprefix, "ifup.sh")
self.ifdown = os.path.join(self.hackprefix, "ifdown.sh")
self.ifname = "uc{}".format(self.mac.to_str_format())
self.vm = {}
self.product = Product(config, product_name="dualstack-vm",
product_class=self.__class__)
self.product.define_feature(name="base",
one_time_price=0,
recurring_price=9,
recurring_period="per_month",
minimum_period="per_hour")
self.features = []
def get_qemu_args(self):
command = (
"-name {owner}-{name}"
" -machine pc,accel={accel}"
" -drive file={image},format={image_format},if=virtio"
" -device virtio-rng-pci"
" -m {memory} -smp cores={cores},threads={threads}"
" -netdev tap,id=netmain,script={ifup},downscript={ifdown},ifname={ifname}"
" -device virtio-net-pci,netdev=netmain,id=net0,mac={mac}"
).format(
owner=self.owner, name=self.uuid,
accel=self.accel,
image=self.image, image_format=self.image_format,
memory=self.memory, cores=self.cores, threads=self.threads,
ifup=self.ifup, ifdown=self.ifdown, ifname=self.ifname,
mac=self.mac
)
return command.split(" ")
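    # Illustrative expansion (placeholder values in angle brackets): with the
    # defaults above this returns roughly
    #   -name uncloud-<uuid> -machine pc,accel=kvm
    #   -drive file=<hackprefix>/<image>,format=qcow2,if=virtio
    #   -device virtio-rng-pci -m 1024M -smp cores=1,threads=1
    #   -netdev tap,id=netmain,script=<hackprefix>/ifup.sh,downscript=<hackprefix>/ifdown.sh,ifname=uc<mac>
    #   -device virtio-net-pci,netdev=netmain,id=net0,mac=<mac>
    # split on single spaces into the argument list handed to VMM.start().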
def create_product(self):
"""Find a VM host and schedule on it"""
pass
def create(self):
# New VM: new UUID, new MAC.
self.uuid = str(uuid.uuid4())
self.mac=MAC(self.config)
self.mac.create()
qemu_args = self.get_qemu_args()
log.debug("QEMU args passed to VMM: {}".format(qemu_args))
self.vmm.start(
uuid=self.uuid,
migration=False,
*qemu_args
)
self.mac.create()
self.vm['mac'] = self.mac
self.vm['ifname'] = "uc{}".format(self.mac.__repr__())
# FIXME: TODO: turn this into a string and THEN
# .split() it later -- easier for using .format()
#self.vm['commandline'] = [ "{}".format(self.sudo),
self.vm['commandline'] = "{sudo}{qemu} -name uncloud-{uuid} -machine pc,accel={accel} -m {memory} -smp {cores} -uuid {uuid} -drive file={os_image},media=cdrom -netdev tap,id=netmain,script={ifup},downscript={ifdown},ifname={ifname} -device virtio-net-pci,netdev=netmain,id=net0,mac={mac}"
# self.vm['commandline'] = [ "{}".format(self.sudo),
# "{}".format(self.qemu),
# "-name", "uncloud-{}".format(self.vm['uuid']),
# "-machine", "pc,accel={}".format(self.accel),
# "-m", "{}".format(self.vm['memory']),
# "-smp", "{}".format(self.vm['cores']),
# "-uuid", "{}".format(self.vm['uuid']),
# "-drive", "file={},media=cdrom".format(self.vm['os_image']),
# "-netdev", "tap,id=netmain,script={},downscript={},ifname={}".format(self.ifup, self.ifdown, self.vm['ifname']),
# "-device", "virtio-net-pci,netdev=netmain,id=net0,mac={}".format(self.vm['mac'])
# ]
def _execute_cmd(self, cmd_string, **kwargs):
cmd = cmd_string.format(**self.vm, **kwargs)
log.info("Executing: {}".format(cmd))
subprocess.run(cmd.split())
def stop(self):
if not self.uuid:
print("Please specific an UUID with the --uuid flag.")
exit(1)
self.vmm.stop(self.uuid)
def status(self):
if not self.uuid:
print("Please specific an UUID with the --uuid flag.")
exit(1)
print(self.vmm.get_status(self.uuid))
def vnc_addr(self):
if not self.uuid:
print("Please specific an UUID with the --uuid flag.")
exit(1)
print(self.vmm.get_vnc(self.uuid))
def list(self):
print(self.vmm.discover())

View file

@ -0,0 +1,3 @@
import logging
logger = logging.getLogger(__name__)

View file

@ -0,0 +1,123 @@
import argparse
import multiprocessing as mp
import time
from uuid import uuid4
from uncloud.common.request import RequestEntry, RequestType
from uncloud.common.shared import shared
from uncloud.common.vm import VMStatus
from uncloud.vmm import VMM
from os.path import join as join_path
from . import virtualmachine, logger
arg_parser = argparse.ArgumentParser('host', add_help=False)
arg_parser.add_argument('--hostname', required=True)
def update_heartbeat(hostname):
"""Update Last HeartBeat Time for :param hostname: in etcd"""
host_pool = shared.host_pool
this_host = next(
filter(lambda h: h.hostname == hostname, host_pool.hosts), None
)
while True:
this_host.update_heartbeat()
host_pool.put(this_host)
time.sleep(10)
def maintenance(host):
vmm = VMM()
running_vms = vmm.discover()
for vm_uuid in running_vms:
if vmm.is_running(vm_uuid) and vmm.get_status(vm_uuid) == 'running':
logger.debug('VM {} is running on {}'.format(vm_uuid, host))
vm = shared.vm_pool.get(
join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
)
vm.status = VMStatus.running
vm.vnc_socket = vmm.get_vnc(vm_uuid)
vm.hostname = host
shared.vm_pool.put(vm)
def main(arguments):
hostname = arguments['hostname']
host_pool = shared.host_pool
host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
# Does not yet exist, create it
if not host:
host_key = join_path(
shared.settings['etcd']['host_prefix'], uuid4().hex
)
host_entry = {
'specs': '',
'hostname': hostname,
'status': 'DEAD',
'last_heartbeat': '',
}
shared.etcd_client.put(
host_key, host_entry, value_in_json=True
)
# update, get ourselves now for sure
host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
try:
heartbeat_updating_process = mp.Process(target=update_heartbeat, args=(hostname,))
heartbeat_updating_process.start()
except Exception as e:
raise Exception('uncloud-host heartbeat updating mechanism is not working') from e
# The while True below is necessary to gracefully handle leadership transfers and
# temporary unavailability of etcd. It works because get_prefix/watch_prefix return
# iter([]) -- an iterator over an empty list -- when such an exception occurs, which
# ends the inner for-loops immediately. Since everything runs inside an infinite loop,
# we simply retry the get/watch until we either succeed or the daemon dies.
while True:
for events_iterator in [
shared.etcd_client.get_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
raise_exception=False),
shared.etcd_client.watch_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
raise_exception=False)
]:
for request_event in events_iterator:
request_event = RequestEntry(request_event)
maintenance(host.key)
if request_event.hostname == host.key:
logger.debug('VM Request: %s on Host %s', request_event, host.hostname)
shared.request_pool.client.client.delete(request_event.key)
vm_entry = shared.etcd_client.get(
join_path(shared.settings['etcd']['vm_prefix'], request_event.uuid)
)
logger.debug('VM hostname: {}'.format(vm_entry.value))
vm = virtualmachine.VM(vm_entry)
if request_event.type == RequestType.StartVM:
vm.start()
elif request_event.type == RequestType.StopVM:
vm.stop()
elif request_event.type == RequestType.DeleteVM:
vm.delete()
elif request_event.type == RequestType.InitVMMigration:
vm.start(destination_host_key=host.key)
elif request_event.type == RequestType.TransferVM:
destination_host = host_pool.get(request_event.destination_host_key)
if destination_host:
vm.migrate(
destination_host=destination_host.hostname,
destination_sock_path=request_event.destination_sock_path,
)
else:
logger.error('Host %s not found!', request_event.destination_host_key)

View file

@ -0,0 +1,303 @@
# QEMU Manual
# https://qemu.weilnetz.de/doc/qemu-doc.html
# For QEMU Monitor Protocol Commands Information, See
# https://qemu.weilnetz.de/doc/qemu-doc.html#pcsys_005fmonitor
import os
import subprocess as sp
import ipaddress
from string import Template
from os.path import join as join_path
from uncloud.common.request import RequestEntry, RequestType
from uncloud.common.vm import VMStatus, declare_stopped
from uncloud.common.network import create_dev, delete_network_interface
from uncloud.common.schemas import VMSchema, NetworkSchema
from uncloud.host import logger
from uncloud.common.shared import shared
from uncloud.vmm import VMM
from marshmallow import ValidationError
class VM:
def __init__(self, vm_entry):
self.schema = VMSchema()
self.vmm = VMM()
self.key = vm_entry.key
try:
self.vm = self.schema.loads(vm_entry.value)
except ValidationError:
logger.exception(
    "Couldn't validate VM Entry %s", vm_entry.value
)
self.vm = None
else:
self.uuid = vm_entry.key.split("/")[-1]
self.host_key = self.vm["hostname"]
logger.debug('VM Hostname {}'.format(self.host_key))
def get_qemu_args(self):
command = (
"-drive file={file},format=raw,if=virtio"
" -device virtio-rng-pci"
" -m {memory} -smp cores={cores},threads={threads}"
" -name {owner}_{name}"
).format(
owner=self.vm["owner"],
name=self.vm["name"],
memory=int(self.vm["specs"]["ram"].to_MB()),
cores=self.vm["specs"]["cpu"],
threads=1,
file=shared.storage_handler.qemu_path_string(self.uuid),
)
return command.split(" ")
def start(self, destination_host_key=None):
migration = False
if destination_host_key:
migration = True
self.create()
try:
network_args = self.create_network_dev()
except Exception as err:
declare_stopped(self.vm)
self.vm["log"].append("Cannot Setup Network Properly")
logger.error("Cannot Setup Network Properly for vm %s", self.uuid, exc_info=err)
else:
self.vmm.start(
uuid=self.uuid,
migration=migration,
*self.get_qemu_args(),
*network_args
)
status = self.vmm.get_status(self.uuid)
logger.debug('VM {} status is {}'.format(self.uuid, status))
if status == "running":
self.vm["status"] = VMStatus.running
self.vm["vnc_socket"] = self.vmm.get_vnc(self.uuid)
elif status == "inmigrate":
r = RequestEntry.from_scratch(
type=RequestType.TransferVM, # Transfer VM
hostname=self.host_key, # Which VM should get this request. It is source host
uuid=self.uuid, # uuid of VM
destination_sock_path=join_path(
self.vmm.socket_dir, self.uuid
),
destination_host_key=destination_host_key, # Where source host transfer VM
request_prefix=shared.settings["etcd"]["request_prefix"],
)
shared.request_pool.put(r)
else:
self.stop()
declare_stopped(self.vm)
logger.debug('VM {} has hostname {}'.format(self.uuid, self.vm['hostname']))
self.sync()
def stop(self):
self.vmm.stop(self.uuid)
self.delete_network_dev()
declare_stopped(self.vm)
self.sync()
def migrate(self, destination_host, destination_sock_path):
self.vmm.transfer(
src_uuid=self.uuid,
destination_sock_path=destination_sock_path,
host=destination_host,
)
def create_network_dev(self):
command = ""
for network_mac_and_tap in self.vm["network"]:
network_name, mac, tap = network_mac_and_tap
_key = os.path.join(
shared.settings["etcd"]["network_prefix"],
self.vm["owner"],
network_name,
)
network = shared.etcd_client.get(_key, value_in_json=True)
network_schema = NetworkSchema()
try:
network = network_schema.load(network.value)
except ValidationError:
continue
if network["type"] == "vxlan":
tap = create_vxlan_br_tap(
_id=network["id"],
_dev=shared.settings["network"]["vxlan_phy_dev"],
tap_id=tap,
ip=network["ipv6"],
)
all_networks = shared.etcd_client.get_prefix(
shared.settings["etcd"]["network_prefix"],
value_in_json=True,
)
if ipaddress.ip_network(network["ipv6"]).is_global:
update_radvd_conf(all_networks)
command += (
"-netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
" -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}".format(
tap=tap, net_id=network["id"], mac=mac
)
)
if command:
command = command.split(' ')
return command
def delete_network_dev(self):
try:
for network in self.vm["network"]:
network_name = network[0]
_ = network[1] # tap_mac
tap_id = network[2]
delete_network_interface("tap{}".format(tap_id))
owners_vms = shared.vm_pool.by_owner(self.vm["owner"])
owners_running_vms = shared.vm_pool.by_status(
VMStatus.running, _vms=owners_vms
)
networks = map(
lambda n: n[0],
map(lambda vm: vm.network, owners_running_vms),
)
networks_in_use_by_user_vms = [vm[0] for vm in networks]
if network_name not in networks_in_use_by_user_vms:
network_entry = resolve_network(
network[0], self.vm["owner"]
)
if network_entry:
network_type = network_entry.value["type"]
network_id = network_entry.value["id"]
if network_type == "vxlan":
delete_network_interface(
"br{}".format(network_id)
)
delete_network_interface(
"vxlan{}".format(network_id)
)
except Exception:
logger.exception("Exception in network interface deletion")
def create(self):
if shared.storage_handler.is_vm_image_exists(self.uuid):
# File already exists. No problem, continue.
logger.debug("Image for vm %s exists", self.uuid)
else:
if shared.storage_handler.make_vm_image(
src=self.vm["image_uuid"], dest=self.uuid
):
if not shared.storage_handler.resize_vm_image(
path=self.uuid,
size=int(self.vm["specs"]["os-ssd"].to_MB()),
):
self.vm["status"] = VMStatus.error
else:
logger.info("New VM Created")
def sync(self):
shared.etcd_client.put(
self.key, self.schema.dump(self.vm), value_in_json=True
)
def delete(self):
self.stop()
if shared.storage_handler.is_vm_image_exists(self.uuid):
r_status = shared.storage_handler.delete_vm_image(self.uuid)
if r_status:
shared.etcd_client.client.delete(self.key)
else:
shared.etcd_client.client.delete(self.key)
def resolve_network(network_name, network_owner):
network = shared.etcd_client.get(
join_path(
shared.settings["etcd"]["network_prefix"],
network_owner,
network_name,
),
value_in_json=True,
)
return network
def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
network_script_base = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "network"
)
vxlan = create_dev(
script=os.path.join(network_script_base, "create-vxlan.sh"),
_id=_id,
dev=_dev,
)
if vxlan:
bridge = create_dev(
script=os.path.join(
network_script_base, "create-bridge.sh"
),
_id=_id,
dev=vxlan,
ip=ip,
)
if bridge:
tap = create_dev(
script=os.path.join(
network_script_base, "create-tap.sh"
),
_id=str(tap_id),
dev=bridge,
)
if tap:
return tap
def update_radvd_conf(all_networks):
network_script_base = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "network"
)
networks = {
net.value["ipv6"]: net.value["id"]
for net in all_networks
if net.value.get("ipv6")
and ipaddress.ip_network(net.value.get("ipv6")).is_global
}
radvd_template = open(
os.path.join(network_script_base, "radvd-template.conf"), "r"
).read()
radvd_template = Template(radvd_template)
content = [
radvd_template.safe_substitute(
bridge="br{}".format(networks[net]), prefix=net
)
for net in networks
if networks.get(net)
]
with open("/etc/radvd.conf", "w") as radvd_conf:
radvd_conf.writelines(content)
try:
sp.check_output(["systemctl", "restart", "radvd"])
except sp.CalledProcessError:
try:
sp.check_output(["service", "radvd", "restart"])
except sp.CalledProcessError as err:
raise err.__class__(
"Cannot start/restart radvd service", err.cmd
) from err

View file

@ -0,0 +1,3 @@
import logging
logger = logging.getLogger(__name__)

View file

@ -0,0 +1,121 @@
import json
import os
import argparse
import subprocess as sp
from os.path import join as join_path
from uncloud.common.shared import shared
from uncloud.imagescanner import logger
arg_parser = argparse.ArgumentParser('imagescanner', add_help=False)
def qemu_img_type(path):
qemu_img_info_command = [
"qemu-img",
"info",
"--output",
"json",
path,
]
try:
qemu_img_info = sp.check_output(qemu_img_info_command)
except Exception as e:
logger.exception(e)
return None
else:
qemu_img_info = json.loads(qemu_img_info.decode("utf-8"))
return qemu_img_info["format"]
def main(arguments):
    # We want to get image entries that request an image to be created
images = shared.etcd_client.get_prefix(
shared.settings["etcd"]["image_prefix"], value_in_json=True
)
images_to_be_created = list(
filter(lambda im: im.value["status"] == "TO_BE_CREATED", images)
)
for image in images_to_be_created:
try:
image_uuid = image.key.split("/")[-1]
image_owner = image.value["owner"]
image_filename = image.value["filename"]
image_store_name = image.value["store_name"]
image_full_path = join_path(
shared.settings["storage"]["file_dir"],
image_owner,
image_filename,
)
image_stores = shared.etcd_client.get_prefix(
shared.settings["etcd"]["image_store_prefix"],
value_in_json=True,
)
user_image_store = next(
filter(
lambda s, store_name=image_store_name: s.value[
"name"
]
== store_name,
image_stores,
)
)
image_store_pool = user_image_store.value["attributes"][
"pool"
]
except Exception as e:
logger.exception(e)
else:
# At least our basic data is available
qemu_img_convert_command = [
"qemu-img",
"convert",
"-f",
"qcow2",
"-O",
"raw",
image_full_path,
"image.raw",
]
if qemu_img_type(image_full_path) == "qcow2":
try:
# Convert .qcow2 to .raw
sp.check_output(qemu_img_convert_command,)
except sp.CalledProcessError:
logger.exception(
"Image convertion from .qcow2 to .raw failed."
)
else:
# Import and Protect
r_status = shared.storage_handler.import_image(
src="image.raw", dest=image_uuid, protect=True
)
if r_status:
# Everything is successfully done
image.value["status"] = "CREATED"
shared.etcd_client.put(
image.key, json.dumps(image.value)
)
finally:
try:
os.remove("image.raw")
except Exception:
pass
else:
                # The user-provided image is either not found or of an invalid format
image.value["status"] = "INVALID_IMAGE"
shared.etcd_client.put(
image.key, json.dumps(image.value)
)
if __name__ == "__main__":
    main(vars(arg_parser.parse_args()))

View file

@ -0,0 +1,3 @@
import logging
logger = logging.getLogger(__name__)

View file

@ -0,0 +1,95 @@
import os
import argparse
from flask import Flask, request
from flask_restful import Resource, Api
from werkzeug.exceptions import HTTPException
from uncloud.common.shared import shared
app = Flask(__name__)
api = Api(app)
app.logger.handlers.clear()
DEFAULT_PORT=1234
arg_parser = argparse.ArgumentParser('metadata', add_help=False)
arg_parser.add_argument('--port', '-p', default=DEFAULT_PORT, help='By default bind to port {}'.format(DEFAULT_PORT))
@app.errorhandler(Exception)
def handle_exception(e):
app.logger.error(e)
# pass through HTTP errors
if isinstance(e, HTTPException):
return e
# now you're handling non-HTTP exceptions only
return {"message": "Server Error"}, 500
def get_vm_entry(mac_addr):
return next(
filter(
lambda vm: mac_addr in list(zip(*vm.network))[1],
shared.vm_pool.vms,
),
None,
)
# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
def ipv62mac(ipv6):
# remove subnet info if given
subnet_index = ipv6.find("/")
if subnet_index != -1:
ipv6 = ipv6[:subnet_index]
ipv6_parts = ipv6.split(":")
mac_parts = list()
for ipv6_part in ipv6_parts[-4:]:
while len(ipv6_part) < 4:
ipv6_part = "0" + ipv6_part
mac_parts.append(ipv6_part[:2])
mac_parts.append(ipv6_part[-2:])
# modify parts to match MAC value
mac_parts[0] = "%02x" % (int(mac_parts[0], 16) ^ 2)
del mac_parts[4]
del mac_parts[3]
return ":".join(mac_parts)
class Root(Resource):
@staticmethod
def get():
data = get_vm_entry(ipv62mac(request.remote_addr))
if not data:
return (
{"message": "Metadata for such VM does not exists."},
404,
)
else:
etcd_key = os.path.join(
shared.settings["etcd"]["user_prefix"],
data.value["owner_realm"],
data.value["owner"],
"key",
)
etcd_entry = shared.etcd_client.get_prefix(
etcd_key, value_in_json=True
)
user_personal_ssh_keys = [key.value for key in etcd_entry]
data.value["metadata"]["ssh-keys"] += user_personal_ssh_keys
return data.value["metadata"], 200
api.add_resource(Root, "/")
def main(arguments):
port = arguments['port']
debug = arguments['debug']
app.run(debug=debug, host="::", port=port)

View file

@ -0,0 +1,195 @@
The network base - experimental
We want to have one "main" network for convenience.
We want to be able to create networks automatically, once a new
customer is created -> need hooks!
Mapping:
- each network is a "virtual" network. We use vxlan by default, but
could be any technology!
- we need a counter for vxlan mappings / network IDs -> cannot use
Model in etcd:
/v1/networks/
Tests
see
https://vincent.bernat.ch/en/blog/2017-vxlan-linux
# local 2001:db8:1::1 \
netid=100
dev=wlp2s0
dev=wlp0s20f3
ip -6 link add vxlan${netid} type vxlan \
id ${netid} \
dstport 4789 \
group ff05::${netid} \
dev ${dev} \
ttl 5
[root@diamond ~]# ip addr add 2a0a:e5c0:5::1/48 dev vxlan100
root@manager:~/.ssh# ip addr add 2a0a:e5c0:5::2/48 dev vxlan100
root@manager:~/.ssh# ping -c3 2a0a:e5c0:5::1
PING 2a0a:e5c0:5::1(2a0a:e5c0:5::1) 56 data bytes
64 bytes from 2a0a:e5c0:5::1: icmp_seq=1 ttl=64 time=15.6 ms
64 bytes from 2a0a:e5c0:5::1: icmp_seq=2 ttl=64 time=30.3 ms
64 bytes from 2a0a:e5c0:5::1: icmp_seq=3 ttl=64 time=84.4 ms
--- 2a0a:e5c0:5::1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2003ms
rtt min/avg/max/mdev = 15.580/43.437/84.417/29.594 ms
--> work even via wifi
--------------------------------------------------------------------------------
Creating a network:
1) part of the initialisation / demo data (?)
We should probably provide some demo sets that can easily be used.
2) manual/hook based request
- hosts might have different network interfaces (?)
-> this will make things very tricky -> don't support it
- endpoint needs only support
--------------------------------------------------------------------------------
IPAM
IP address management (IPAM) is related to networks, but needs to be
decoupled to allow pure L2 networks.
From a customer point of view, we probably want to do something like:
- ORDERING an IPv6 network can include creating a virtual network and
an IPAM service
Maybe "orders" should always be the first class citizen and ucloud
internally "hooks" or binds things together.
--------------------------------------------------------------------------------
testing / hacking:
- starting etcd as storage
[18:07] diamond:~% etcdctl put /v1/network/200 "{ some_network }"
OK
[18:08] diamond:~% etcdctl watch -w=json --prefix /v1/network
{"Header":{"cluster_id":14841639068965178418,"member_id":10276657743932975437,"revision":6,"raft_term":2},"Events":[{"kv":{"key":"L3YxL25ldHdvcmsvMjAw","create_revision":5,"mod_revision":6,"version":2,"value":"eyBzb21lX25ldHdvcmsgfQ=="}}],"CompactRevision":0,"Canceled":false,"Created":false}
--------------------------------------------------------------------------------
Flow for using and creating networks:
- a network is created -> entry in etcd is created
-> we need to keep a counter/lock so that 2 processes don't create
the same network [Ahmed]
-> nothing to be done on the hosts
- a VM using a network is created
- a VM using a network is scheduled to some host
- the local "spawn a VM" process needs to check whether there is a
vxlan interface existing -> if no, create it before creating the VM.
-> if no, also create the bridge
-> possibly adjusting the MTU (??)
-> both names should be in hexadecimal (i.e. brff01 or vxlanff01)
--> this way they are consistent with the multicast ipv6 address
--> attention, ip -6 link ... id XXX expects DECIMAL input
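
Rough Python sketch of the hex-name / decimal-id handling above (using the ff01 example):

>>> import ipaddress
>>> vxlan_hex = "ff01"              # id as stored/displayed
>>> vxlan_id = int(vxlan_hex, 16)   # 65281 -> what `ip -6 link ... id` expects
>>> "br{}".format(vxlan_hex), "vxlan{}".format(vxlan_hex)
('brff01', 'vxlanff01')
>>> ipaddress.IPv6Network("ff05::/16")[vxlan_id]
IPv6Address('ff05::ff01')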
--------------------------------------------------------------------------------
If we also supply IPAM:
- ipam needs to be created *after* the network is created
- ipam is likely to be coupled to netbox (?)
  --> we need a "get next /64 prefix" function (rough sketch after this list)
- when an ipam service is created in etcd, we need to create a new
radvd instance on all routers (this will be a different service on
BSDs)
- we will need to create a new vxlan device on the routers
- we need to create a new / modify radvd.conf
- only after all of the routers reloaded radvd the ipam service is
available!
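
A rough sketch of what the "get next /64 prefix" helper could look like without netbox
(pure ipaddress; the function name and arguments are made up for illustration):

import ipaddress

def next_free_64(supernet, allocated):
    # supernet e.g. "2a0a:e5c0:5::/48"; allocated: iterable of already used /64 strings
    used = {ipaddress.ip_network(p) for p in allocated}
    for candidate in ipaddress.ip_network(supernet).subnets(new_prefix=64):
        if candidate not in used:
            return candidate
    raise RuntimeError("no free /64 left in {}".format(supernet))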
--------------------------------------------------------------------------------
If the user requests an IPv4 VM:
- we need to get the next free IPv4 address (again, netbox?)
- we need to create a mapping entry on the routers for NAT64
--> this requires the VM to be in a network with IPAM
--> we always assume that the VM embeds itself using EUI64
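
Sketch of the EUI64 assumption, i.e. deriving the VM address from MAC + prefix
(the inverse of the ipv62mac helper in the metadata service; values are examples only):

import ipaddress

def mac2ipv6(prefix, mac):
    # e.g. mac2ipv6("2a0a:e5c0:5::/64", "02:00:00:aa:bb:cc") -> 2a0a:e5c0:5::ff:feaa:bbcc
    parts = [int(p, 16) for p in mac.split(":")]
    parts[0] ^= 0x02                             # flip the universal/local bit
    eui64 = parts[:3] + [0xff, 0xfe] + parts[3:]
    host = 0
    for byte in eui64:
        host = (host << 8) | byte
    net = ipaddress.ip_network(prefix)
    return ipaddress.ip_address(int(net.network_address) + host)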
--------------------------------------------------------------------------------
mac address handling!
Example
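
(no example written down yet; one possible illustration, not necessarily what uncloud
does, is handing out locally administered unicast MACs:)

import random

def random_mac():
    # first octet: locally administered bit set, multicast bit clear
    octets = [0x02] + [random.randint(0x00, 0xff) for _ in range(5)]
    return ":".join("{:02x}".format(o) for o in octets)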
--------------------------------------------------------------------------------
TODOs
- create-vxlan-on-dev.sh -> the multicast group
needs to be ff05:: +int(vxlan_id)
--------------------------------------------------------------------------------
Python hints:
>>> vxlan_id = 3400
>>> b = ipaddress.IPv6Network("ff05::/16")
>>> b[vxlan_id]
IPv6Address('ff05::d48')
we need / should assign hex values for vxlan ids in etcd!
--> easier to read
>>> b[0x3400]
IPv6Address('ff05::3400')
--------------------------------------------------------------------------------
Bridge names are limited to 15 characters
Maximum/highest number of vxlan:
>>> 2**24
16777216
>>> (2**25)-1
33554431
>>> b[33554431]
IPv6Address('ff05::1ff:ffff')
Last interface:
br1ffffff
vxlan1ffffff
root@manager:~/ucloud/network# ip -6 link add vxlan1ffffff type vxlan id 33554431 dstport 4789 group ff05::1ff:ffff dev wlp2s0 ttl 5
Error: argument "33554431" is wrong: invalid id
root@manager:~/ucloud/network# ip -6 link add vxlanffffff type vxlan id 16777215 dstport 4789 group ff05::ff:ffff dev wlp2s0 ttl 5
# id needs to be decimal
root@manager:~# ip -6 link add vxlanff01 type vxlan id ff01 dstport 4789 group ff05::ff01 dev ttl 5
Error: argument "ff01" is wrong: invalid id
root@manager:~# ip -6 link add vxlanff01 type vxlan id 65281 dstport 4789 group ff05::ff01 dev wlp2s0 ttl 5

View file

@ -0,0 +1,24 @@
#!/bin/sh
if [ $# -ne 3 ]; then
echo "$0 brid dev ip"
echo "f.g. $0 100 vxlan100 fd00:/64"
echo "Missing arguments" >&2
exit 1
fi
brid=$1; shift
dev=$1; shift
ip=$1; shift
bridge=br${brid}
sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
if ! ip link show $bridge > /dev/null 2> /dev/null; then
ip link add name $bridge type bridge
ip link set $bridge up
ip link set $dev master $bridge
ip address add $ip dev $bridge
fi
echo $bridge

View file

@ -0,0 +1,22 @@
#!/bin/sh
if [ $# -ne 2 ]; then
echo "$0 tapid dev"
echo "f.g. $0 100 br100"
echo "Missing arguments" >&2
exit 1
fi
tapid=$1; shift
bridge=$1; shift
vxlan=vxlan${tapid}
tap=tap${tapid}
if ! ip link show $tap > /dev/null 2> /dev/null; then
ip tuntap add $tap mode tap user `whoami`
ip link set $tap up
sleep 0.5s
ip link set $tap master $bridge
fi
echo $tap

View file

@ -0,0 +1,26 @@
#!/bin/sh
if [ $# -ne 2 ]; then
echo "$0 vxlanid dev"
echo "f.i. $0 100 eno1"
echo "Missing arguments" >&2
exit 1
fi
netid=$1; shift
dev=$1; shift
vxlan=vxlan${netid}
if ! ip link show $vxlan > /dev/null 2> /dev/null; then
ip -6 link add $vxlan type vxlan \
id $netid \
dstport 4789 \
group ff05::$netid \
dev $dev \
ttl 5
ip link set $dev up
ip link set $vxlan up
fi
echo $vxlan

View file

@ -0,0 +1,13 @@
interface $bridge
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 5;
AdvDefaultLifetime 10;
prefix $prefix { };
RDNSS 2a0a:e5c0:2:1::5 2a0a:e5c0:2:1::6 { AdvRDNSSLifetime 6000; };
DNSSL place6.ungleich.ch { AdvDNSSLLifetime 6000; } ;
};

View file

@ -0,0 +1,3 @@
import logging
logger = logging.getLogger(__name__)

View file

@ -0,0 +1,123 @@
import argparse
import os
from pathlib import Path
from uncloud.vmm import VMM
from uncloud.host.virtualmachine import update_radvd_conf, create_vxlan_br_tap
from . import virtualmachine, logger
###
# Argument parser loaded by scripts/uncloud.
arg_parser = argparse.ArgumentParser('oneshot', add_help=False)
# Actions.
arg_parser.add_argument('--list', action='store_true',
help='list UUID and name of running VMs')
arg_parser.add_argument('--start', nargs=4,
metavar=('NAME', 'IMAGE', 'UPSTREAM_INTERFACE', 'NETWORK'),
help='start a VM using the OS IMAGE (full path), configuring networking on NETWORK IPv6 prefix')
arg_parser.add_argument('--stop', metavar='UUID',
help='stop a VM')
arg_parser.add_argument('--get-status', metavar='UUID',
help='return the status of the VM')
arg_parser.add_argument('--get-vnc', metavar='UUID',
help='return the path of the VNC socket of the VM')
arg_parser.add_argument('--reconfigure-radvd', metavar='NETWORK',
help='regenerate and reload RADVD configuration for NETWORK IPv6 prefix')
# Arguments.
arg_parser.add_argument('--workdir', default=Path.home(),
help='Working directory, defaulting to $HOME')
arg_parser.add_argument('--mac',
help='MAC address of the VM to create (--start)')
arg_parser.add_argument('--memory', type=int,
help='Memory (MB) to allocate (--start)')
arg_parser.add_argument('--cores', type=int,
help='Number of cores to allocate (--start)')
arg_parser.add_argument('--threads', type=int,
help='Number of threads to allocate (--start)')
arg_parser.add_argument('--image-format', choices=['raw', 'qcow2'],
help='Format of OS image (--start)')
arg_parser.add_argument('--accel', choices=['kvm', 'tcg'], default='kvm',
help='QEMU acceleration to use (--start)')
arg_parser.add_argument('--upstream-interface', default='eth0',
help='Name of upstream interface (--start)')
###
# Helpers.
# XXX: check if it is possible to use the type returned by ETCD queries.
class UncloudEntryWrapper:
def __init__(self, value):
self.value = value
def value(self):
return self.value
def status_line(vm):
return "VM: {} {} {}".format(vm.get_uuid(), vm.get_name(), vm.get_status())
###
# Entrypoint.
def main(arguments):
# Initialize VMM.
workdir = arguments['workdir']
vmm = VMM(vmm_backend=workdir)
    # Hardcoded debug values.
net_id = 0
# Build VM configuration.
vm_config = {}
vm_options = [
'mac', 'memory', 'cores', 'threads', 'image', 'image_format',
        'upstream_interface', 'network', 'accel'
]
for option in vm_options:
if arguments.get(option):
vm_config[option] = arguments[option]
vm_config['net_id'] = net_id
# Execute requested VM action.
if arguments['reconfigure_radvd']:
# TODO: check that RADVD is available.
prefix = arguments['reconfigure_radvd']
network = UncloudEntryWrapper({
'id': net_id,
'ipv6': prefix
})
# Make use of uncloud.host.virtualmachine for network configuration.
update_radvd_conf([network])
elif arguments['start']:
# Extract from --start positional arguments. Quite fragile.
vm_config['name'] = arguments['start'][0]
vm_config['image'] = arguments['start'][1]
vm_config['network'] = arguments['start'][2]
vm_config['upstream_interface'] = arguments['start'][3]
vm_config['tap_interface'] = "uc{}".format(len(vmm.discover()))
vm = virtualmachine.VM(vmm, vm_config)
vm.start()
elif arguments['stop']:
vm = virtualmachine.VM(vmm, {'uuid': arguments['stop']})
vm.stop()
elif arguments['get_status']:
vm = virtualmachine.VM(vmm, {'uuid': arguments['get_status']})
print(status_line(vm))
elif arguments['get_vnc']:
vm = virtualmachine.VM(vmm, {'uuid': arguments['get_vnc']})
print(vm.get_vnc_addr())
elif arguments['list']:
vms = vmm.discover()
print("Found {} VMs.".format(len(vms)))
for uuid in vms:
vm = virtualmachine.VM(vmm, {'uuid': uuid})
print(status_line(vm))
else:
        print('Please specify an action: --start, --stop, --list, '
              '--get-status, --get-vnc, --reconfigure-radvd')

View file

@ -0,0 +1,81 @@
import uuid
import os
from uncloud.host.virtualmachine import create_vxlan_br_tap
from uncloud.oneshot import logger
class VM(object):
def __init__(self, vmm, config):
self.config = config
self.vmm = vmm
# Extract VM specs/metadata from configuration.
self.name = config.get('name', 'no-name')
self.memory = config.get('memory', 1024)
self.cores = config.get('cores', 1)
self.threads = config.get('threads', 1)
self.image_format = config.get('image_format', 'qcow2')
self.image = config.get('image')
self.uuid = config.get('uuid', str(uuid.uuid4()))
self.mac = config.get('mac')
self.accel = config.get('accel', 'kvm')
self.net_id = config.get('net_id', 0)
self.upstream_interface = config.get('upstream_interface', 'eth0')
self.tap_interface = config.get('tap_interface', 'uc0')
self.network = config.get('network')
def get_qemu_args(self):
command = (
"-uuid {uuid} -name {name} -machine pc,accel={accel}"
" -drive file={image},format={image_format},if=virtio"
" -device virtio-rng-pci"
" -m {memory} -smp cores={cores},threads={threads}"
" -netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
" -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}"
).format(
uuid=self.uuid, name=self.name, accel=self.accel,
image=self.image, image_format=self.image_format,
memory=self.memory, cores=self.cores, threads=self.threads,
net_id=self.net_id, tap=self.tap_interface, mac=self.mac
)
return command.split(" ")
def start(self):
# Check that VM image is available.
        if not os.path.isfile(self.image):
            logger.error("Image {} does not exist. Aborting.".format(self.image))
            return
# Create Bridge, VXLAN and tap interface for VM.
create_vxlan_br_tap(
self.net_id, self.upstream_interface, self.tap_interface, self.network
)
# Generate config for and run QEMU.
qemu_args = self.get_qemu_args()
logger.debug("QEMU args for VM {}: {}".format(self.uuid, qemu_args))
self.vmm.start(
uuid=self.uuid,
migration=False,
*qemu_args
)
def stop(self):
self.vmm.stop(self.uuid)
def get_status(self):
return self.vmm.get_status(self.uuid)
def get_vnc_addr(self):
return self.vmm.get_vnc(self.uuid)
def get_uuid(self):
return self.uuid
def get_name(self):
success, json = self.vmm.execute_command(self.uuid, 'query-name')
if success:
return json['return']['name']
return None

View file

@ -0,0 +1,3 @@
import logging
logger = logging.getLogger(__name__)

View file

@ -0,0 +1,137 @@
from collections import Counter
from functools import reduce
import bitmath
from uncloud.common.host import HostStatus
from uncloud.common.request import RequestEntry, RequestType
from uncloud.common.vm import VMStatus
from uncloud.common.shared import shared
def accumulated_specs(vms_specs):
if not vms_specs:
return {}
return reduce((lambda x, y: Counter(x) + Counter(y)), vms_specs)
def remaining_resources(host_specs, vms_specs):
# Return remaining resources host_specs - vms
_vms_specs = Counter(vms_specs)
_remaining = Counter(host_specs)
for component in _vms_specs:
if isinstance(_vms_specs[component], str):
_vms_specs[component] = int(
bitmath.parse_string_unsafe(
_vms_specs[component]
).to_MB()
)
elif isinstance(_vms_specs[component], list):
_vms_specs[component] = map(
lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
_vms_specs[component],
)
_vms_specs[component] = reduce(
lambda x, y: x + y, _vms_specs[component], 0
)
for component in _remaining:
if isinstance(_remaining[component], str):
_remaining[component] = int(
bitmath.parse_string_unsafe(
_remaining[component]
).to_MB()
)
elif isinstance(_remaining[component], list):
_remaining[component] = map(
lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
_remaining[component],
)
_remaining[component] = reduce(
lambda x, y: x + y, _remaining[component], 0
)
_remaining.subtract(_vms_specs)
return _remaining
class NoSuitableHostFound(Exception):
"""Exception when no host found that can host a VM."""
def get_suitable_host(vm_specs, hosts=None):
if hosts is None:
hosts = shared.host_pool.by_status(HostStatus.alive)
for host in hosts:
# Filter them by host_name
vms = shared.vm_pool.by_host(host.key)
# Filter them by status
vms = shared.vm_pool.by_status(VMStatus.running, vms)
running_vms_specs = [vm.specs for vm in vms]
# Accumulate all of their combined specs
running_vms_accumulated_specs = accumulated_specs(
running_vms_specs
)
# Find out remaining resources after
# host_specs - already running vm_specs
remaining = remaining_resources(
host.specs, running_vms_accumulated_specs
)
# Find out remaining - new_vm_specs
remaining = remaining_resources(remaining, vm_specs)
if all(map(lambda x: x >= 0, remaining.values())):
return host.key
raise NoSuitableHostFound
def dead_host_detection():
# Bring out your dead! - Monty Python and the Holy Grail
hosts = shared.host_pool.by_status(HostStatus.alive)
dead_hosts_keys = []
for host in hosts:
        # Only check those that claim to be alive
if host.status == HostStatus.alive:
if not host.is_alive():
dead_hosts_keys.append(host.key)
return dead_hosts_keys
def dead_host_mitigation(dead_hosts_keys):
for host_key in dead_hosts_keys:
host = shared.host_pool.get(host_key)
host.declare_dead()
vms_hosted_on_dead_host = shared.vm_pool.by_host(host_key)
for vm in vms_hosted_on_dead_host:
vm.status = "UNKNOWN"
shared.vm_pool.put(vm)
shared.host_pool.put(host)
def assign_host(vm):
vm.hostname = get_suitable_host(vm.specs)
shared.vm_pool.put(vm)
r = RequestEntry.from_scratch(
type=RequestType.StartVM,
uuid=vm.uuid,
hostname=vm.hostname,
request_prefix=shared.settings["etcd"]["request_prefix"],
)
shared.request_pool.put(r)
vm.log.append("VM scheduled for starting")
return vm.hostname

View file

@ -0,0 +1,51 @@
# TODO
# 1. send an email to an email address defined by env['admin-email']
# if resources are finished
# 2. Introduce a status endpoint of the scheduler -
# maybe expose a prometheus compatible output
import argparse
from uncloud.common.request import RequestEntry, RequestType
from uncloud.common.shared import shared
from uncloud.scheduler import logger
from uncloud.scheduler.helper import (dead_host_mitigation, dead_host_detection,
assign_host, NoSuitableHostFound)
arg_parser = argparse.ArgumentParser('scheduler', add_help=False)
def main(arguments):
    # The below while True is necessary for gracefully handling leadership transfer and temporary
    # unavailability in etcd. Why does it work? It works because get_prefix/watch_prefix return
    # iter([]), i.e. an iterator over an empty list, on exceptions (which occur for the reasons
    # mentioned above), and that ends the inner loop immediately. So, by keeping it inside an
    # infinite loop, we try again and again to get the prefix until either success or daemon death.
while True:
for request_iterator in [
shared.etcd_client.get_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
raise_exception=False),
shared.etcd_client.watch_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
raise_exception=False),
]:
for request_event in request_iterator:
dead_host_mitigation(dead_host_detection())
request_entry = RequestEntry(request_event)
if request_entry.type == RequestType.ScheduleVM:
logger.debug('%s, %s', request_entry.key, request_entry.value)
vm_entry = shared.vm_pool.get(request_entry.uuid)
if vm_entry is None:
logger.info('Trying to act on {} but it is deleted'.format(request_entry.uuid))
continue
shared.etcd_client.client.delete(request_entry.key) # consume Request
try:
assign_host(vm_entry)
except NoSuitableHostFound:
vm_entry.add_log('Can\'t schedule VM. No Resource Left.')
shared.vm_pool.put(vm_entry)
logger.info('No Resource Left. Emailing admin....')

View file

@ -0,0 +1,233 @@
import json
import multiprocessing
import sys
import unittest
from datetime import datetime
from os.path import dirname
BASE_DIR = dirname(dirname(__file__))
sys.path.insert(0, BASE_DIR)
from main import (
accumulated_specs,
remaining_resources,
VmPool,
main,
)
from uncloud.config import etcd_client
class TestFunctions(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = etcd_client
cls.host_prefix = "/test/host"
cls.vm_prefix = "/test/vm"
        # These deletions could also be done in
        # tearDown() but it is more appropriate here
        # as it enables us to check the etcd store
        # even after the test has run
cls.client.client.delete_prefix(cls.host_prefix)
cls.client.client.delete_prefix(cls.vm_prefix)
cls.create_hosts(cls)
cls.create_vms(cls)
cls.p = multiprocessing.Process(
target=main, args=[cls.vm_prefix, cls.host_prefix]
)
cls.p.start()
@classmethod
def tearDownClass(cls):
cls.p.terminate()
def create_hosts(self):
host1 = {
"cpu": 32,
"ram": 128,
"hdd": 1024,
"sdd": 0,
"status": "ALIVE",
"last_heartbeat": datetime.utcnow().isoformat(),
}
host2 = {
"cpu": 16,
"ram": 64,
"hdd": 512,
"sdd": 0,
"status": "ALIVE",
"last_heartbeat": datetime.utcnow().isoformat(),
}
host3 = {
"cpu": 16,
"ram": 32,
"hdd": 256,
"sdd": 256,
"status": "ALIVE",
"last_heartbeat": datetime.utcnow().isoformat(),
}
with self.client.client.lock("lock"):
self.client.put(
f"{self.host_prefix}/1", host1, value_in_json=True
)
self.client.put(
f"{self.host_prefix}/2", host2, value_in_json=True
)
self.client.put(
f"{self.host_prefix}/3", host3, value_in_json=True
)
def create_vms(self):
vm1 = json.dumps(
{
"owner": "meow",
"specs": {"cpu": 4, "ram": 8, "hdd": 100, "sdd": 256},
"hostname": "",
"status": "REQUESTED_NEW",
}
)
vm2 = json.dumps(
{
"owner": "meow",
"specs": {"cpu": 16, "ram": 64, "hdd": 512, "sdd": 0},
"hostname": "",
"status": "REQUESTED_NEW",
}
)
vm3 = json.dumps(
{
"owner": "meow",
"specs": {"cpu": 16, "ram": 32, "hdd": 128, "sdd": 0},
"hostname": "",
"status": "REQUESTED_NEW",
}
)
vm4 = json.dumps(
{
"owner": "meow",
"specs": {"cpu": 16, "ram": 64, "hdd": 512, "sdd": 0},
"hostname": "",
"status": "REQUESTED_NEW",
}
)
vm5 = json.dumps(
{
"owner": "meow",
"specs": {"cpu": 2, "ram": 2, "hdd": 10, "sdd": 0},
"hostname": "",
"status": "REQUESTED_NEW",
}
)
vm6 = json.dumps(
{
"owner": "meow",
"specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
"hostname": "",
"status": "REQUESTED_NEW",
}
)
vm7 = json.dumps(
{
"owner": "meow",
"specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
"hostname": "",
"status": "REQUESTED_NEW",
}
)
self.client.put(f"{self.vm_prefix}/1", vm1)
self.client.put(f"{self.vm_prefix}/2", vm2)
self.client.put(f"{self.vm_prefix}/3", vm3)
self.client.put(f"{self.vm_prefix}/4", vm4)
self.client.put(f"{self.vm_prefix}/5", vm5)
self.client.put(f"{self.vm_prefix}/6", vm6)
self.client.put(f"{self.vm_prefix}/7", vm7)
def test_accumulated_specs(self):
vms = [
{"ssd": 10, "cpu": 4, "ram": 8},
{"hdd": 10, "cpu": 4, "ram": 8},
{"cpu": 8, "ram": 32},
]
self.assertEqual(
accumulated_specs(vms),
{"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10},
)
def test_remaining_resources(self):
host_specs = {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
vms_specs = {"ssd": 10, "cpu": 32, "ram": 12, "hdd": 0}
resultant_specs = {"ssd": 0, "cpu": -16, "ram": 36, "hdd": 10}
self.assertEqual(
remaining_resources(host_specs, vms_specs), resultant_specs
)
def test_vmpool(self):
self.p.join(1)
vm_pool = VmPool(self.client, self.vm_prefix)
# vm_pool by host
actual = vm_pool.by_host(vm_pool.vms, f"{self.host_prefix}/3")
ground_truth = [
(
f"{self.vm_prefix}/1",
{
"owner": "meow",
"specs": {
"cpu": 4,
"ram": 8,
"hdd": 100,
"sdd": 256,
},
"hostname": f"{self.host_prefix}/3",
"status": "SCHEDULED_DEPLOY",
},
)
]
self.assertEqual(actual[0], ground_truth[0])
# vm_pool by status
actual = vm_pool.by_status(vm_pool.vms, "REQUESTED_NEW")
ground_truth = [
(
f"{self.vm_prefix}/7",
{
"owner": "meow",
"specs": {
"cpu": 10,
"ram": 22,
"hdd": 146,
"sdd": 0,
},
"hostname": "",
"status": "REQUESTED_NEW",
},
)
]
self.assertEqual(actual[0], ground_truth[0])
# vm_pool by except status
actual = vm_pool.except_status(vm_pool.vms, "SCHEDULED_DEPLOY")
ground_truth = [
(
f"{self.vm_prefix}/7",
{
"owner": "meow",
"specs": {
"cpu": 10,
"ram": 22,
"hdd": 146,
"sdd": 0,
},
"hostname": "",
"status": "REQUESTED_NEW",
},
)
]
self.assertEqual(actual[0], ground_truth[0])
if __name__ == "__main__":
unittest.main()

View file

@ -0,0 +1,83 @@
import sys
import unittest
from datetime import datetime
from os.path import dirname
BASE_DIR = dirname(dirname(__file__))
sys.path.insert(0, BASE_DIR)
from main import dead_host_detection, dead_host_mitigation, config
class TestDeadHostMechanism(unittest.TestCase):
def setUp(self):
self.client = config.etcd_client
self.host_prefix = "/test/host"
self.vm_prefix = "/test/vm"
self.client.client.delete_prefix(self.host_prefix)
self.client.client.delete_prefix(self.vm_prefix)
self.create_hosts()
def create_hosts(self):
host1 = {
"cpu": 32,
"ram": 128,
"hdd": 1024,
"sdd": 0,
"status": "ALIVE",
"last_heartbeat": datetime.utcnow().isoformat(),
}
host2 = {
"cpu": 16,
"ram": 64,
"hdd": 512,
"sdd": 0,
"status": "ALIVE",
"last_heartbeat": datetime(2011, 1, 1).isoformat(),
}
host3 = {"cpu": 16, "ram": 32, "hdd": 256, "sdd": 256}
host4 = {
"cpu": 16,
"ram": 32,
"hdd": 256,
"sdd": 256,
"status": "DEAD",
"last_heartbeat": datetime(2011, 1, 1).isoformat(),
}
with self.client.client.lock("lock"):
self.client.put(
f"{self.host_prefix}/1", host1, value_in_json=True
)
self.client.put(
f"{self.host_prefix}/2", host2, value_in_json=True
)
self.client.put(
f"{self.host_prefix}/3", host3, value_in_json=True
)
self.client.put(
f"{self.host_prefix}/4", host4, value_in_json=True
)
def test_dead_host_detection(self):
hosts = self.client.get_prefix(
self.host_prefix, value_in_json=True
)
deads = dead_host_detection(hosts)
self.assertEqual(deads, ["/test/host/2", "/test/host/3"])
return deads
def test_dead_host_mitigation(self):
deads = self.test_dead_host_detection()
dead_host_mitigation(self.client, deads)
hosts = self.client.get_prefix(
self.host_prefix, value_in_json=True
)
deads = dead_host_detection(hosts)
self.assertEqual(deads, [])
if __name__ == "__main__":
unittest.main()

View file

@ -0,0 +1 @@
VERSION = "0.0.5-30-ge91fd9e"

View file

@ -0,0 +1,284 @@
import os
import subprocess as sp
import logging
import socket
import json
import tempfile
import time
from contextlib import suppress
from multiprocessing import Process
from os.path import join as join_path
from os.path import isdir
logger = logging.getLogger(__name__)
class VMQMPHandles:
def __init__(self, path):
self.path = path
self.sock = socket.socket(socket.AF_UNIX)
self.file = self.sock.makefile()
def __enter__(self):
self.sock.connect(self.path)
# eat qmp greetings
self.file.readline()
# init qmp
self.sock.sendall(b'{ "execute": "qmp_capabilities" }')
self.file.readline()
return self.sock, self.file
def __exit__(self, exc_type, exc_val, exc_tb):
self.file.close()
self.sock.close()
if exc_type:
            logger.error(
                "Couldn't get handle for VM.",
                exc_info=(exc_type, exc_val, exc_tb),
            )
            raise exc_type("Couldn't get handle for VM.") from exc_val
class TransferVM(Process):
def __init__(self, src_uuid, dest_sock_path, host, socket_dir):
self.src_uuid = src_uuid
self.host = host
self.src_sock_path = os.path.join(socket_dir, self.src_uuid)
self.dest_sock_path = dest_sock_path
super().__init__()
def run(self):
with suppress(FileNotFoundError):
os.remove(self.src_sock_path)
command = [
"ssh",
"-nNT",
"-L",
"{}:{}".format(self.src_sock_path, self.dest_sock_path),
"root@{}".format(self.host),
]
try:
p = sp.Popen(command)
except Exception as e:
logger.error(
"Couldn' forward unix socks over ssh.", exc_info=e
)
else:
time.sleep(2)
vmm = VMM()
logger.debug("Executing: ssh forwarding command: %s", command)
vmm.execute_command(
self.src_uuid,
command="migrate",
arguments={"uri": "unix:{}".format(self.src_sock_path)},
)
while p.poll() is None:
success, output = vmm.execute_command(self.src_uuid, command="query-migrate")
if success:
status = output["return"]["status"]
logger.info('Migration Status: {}'.format(status))
if status == "completed":
vmm.stop(self.src_uuid)
return
elif status in ['failed', 'cancelled']:
return
else:
logger.error("Couldn't be able to query VM {} that was in migration".format(self.src_uuid))
return
time.sleep(2)
class VMM:
# Virtual Machine Manager
def __init__(
self,
qemu_path="/usr/bin/qemu-system-x86_64",
vmm_backend=os.path.expanduser("~/uncloud/vmm/"),
):
self.qemu_path = qemu_path
self.vmm_backend = vmm_backend
self.socket_dir = os.path.join(self.vmm_backend, "sock")
if not os.path.isdir(self.vmm_backend):
logger.info(
"{} does not exists. Creating it...".format(
self.vmm_backend
)
)
os.makedirs(self.vmm_backend, exist_ok=True)
if not os.path.isdir(self.socket_dir):
logger.info(
"{} does not exists. Creating it...".format(
self.socket_dir
)
)
os.makedirs(self.socket_dir, exist_ok=True)
def is_running(self, uuid):
sock_path = os.path.join(self.socket_dir, uuid)
try:
sock = socket.socket(socket.AF_UNIX)
sock.connect(sock_path)
recv = sock.recv(4096)
except Exception as err:
            # unix sock doesn't exist or it is closed
            logger.debug(
                "VM {} sock either doesn't exist or is closed. This means the VM is stopped.".format(
uuid
),
exc_info=err,
)
else:
            # if we receive greetings from qmp it means the VM is running
if len(recv) > 0:
return True
with suppress(FileNotFoundError):
os.remove(sock_path)
return False
def start(self, *args, uuid, migration=False):
        # start --> success?
migration_args = ()
if migration:
migration_args = (
"-incoming",
"unix:{}".format(os.path.join(self.socket_dir, uuid)),
)
if self.is_running(uuid):
logger.warning("Cannot start VM. It is already running.")
else:
qmp_arg = (
"-qmp",
"unix:{},server,nowait".format(
join_path(self.socket_dir, uuid)
),
)
vnc_arg = (
"-vnc",
"unix:{}".format(tempfile.NamedTemporaryFile().name),
)
command = [
"sudo",
"-p",
"Enter password to start VM {}: ".format(uuid),
self.qemu_path,
*args,
*qmp_arg,
*migration_args,
*vnc_arg,
"-daemonize",
]
try:
sp.check_output(command, stderr=sp.PIPE)
except sp.CalledProcessError as err:
logger.exception(
"Error occurred while starting VM.\nDetail %s",
err.stderr.decode("utf-8"),
)
else:
sp.check_output(
["sudo", "-p", "Enter password to correct permission for uncloud-vmm's directory",
"chmod", "-R", "o=rwx,g=rwx", self.vmm_backend]
)
# TODO: Find some good way to check whether the virtual machine is up and
                    # running without relying on non-guaranteed ways.
for _ in range(10):
time.sleep(2)
status = self.get_status(uuid)
if status in ["running", "inmigrate"]:
return status
logger.warning(
"Timeout on VM's status. Shutting down VM %s", uuid
)
self.stop(uuid)
            # TODO: What more should we do? The VM can still continue to run in the background.
            # If we had the PID of the VM we could kill it at the OS level.
def execute_command(self, uuid, command, **kwargs):
        # execute_command -> success?, output
try:
with VMQMPHandles(os.path.join(self.socket_dir, uuid)) as (
sock_handle,
file_handle,
):
command_to_execute = {"execute": command, **kwargs}
sock_handle.sendall(
json.dumps(command_to_execute).encode("utf-8")
)
output = file_handle.readline()
except Exception:
logger.exception(
"Error occurred while executing command and getting valid output from qmp"
)
else:
try:
output = json.loads(output)
except Exception:
logger.exception(
"QMP Output isn't valid JSON. %s", output
)
else:
return "return" in output, output
return False, None
def stop(self, uuid):
success, output = self.execute_command(
command="quit", uuid=uuid
)
return success
def get_status(self, uuid):
success, output = self.execute_command(
command="query-status", uuid=uuid
)
if success:
return output["return"]["status"]
else:
# TODO: Think about this for a little more
return "STOPPED"
def discover(self):
vms = [
uuid
for uuid in os.listdir(self.socket_dir)
if not isdir(join_path(self.socket_dir, uuid))
]
return vms
def get_vnc(self, uuid):
success, output = self.execute_command(
uuid, command="query-vnc"
)
if success:
return output["return"]["service"]
return None
def transfer(self, src_uuid, destination_sock_path, host):
p = TransferVM(
src_uuid,
destination_sock_path,
socket_dir=self.socket_dir,
host=host,
)
p.start()
    # TODO: the following method should clean up things that went wrong,
    # e.g. if a VM migration fails or a VM didn't start for a long time
    # (i.e. 15 minutes), we should stop the waiting VM.
def maintenace(self):
pass