Shutdown Source VM (PAUSED) on successfull migration + blackened all .py files
This commit is contained in:
parent
29e938dc74
commit
9bdf4d2180
31 changed files with 1307 additions and 638 deletions
71
setup.py
71
setup.py
|
@ -7,41 +7,48 @@ with open("README.md", "r") as fh:
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import ucloud.version
|
import ucloud.version
|
||||||
|
|
||||||
version = ucloud.version.VERSION
|
version = ucloud.version.VERSION
|
||||||
except:
|
except:
|
||||||
import subprocess
|
import subprocess
|
||||||
c = subprocess.check_output(['git', 'describe'])
|
|
||||||
|
c = subprocess.check_output(["git", "describe"])
|
||||||
version = c.decode("utf-8").strip()
|
version = c.decode("utf-8").strip()
|
||||||
|
|
||||||
|
|
||||||
setup(name='ucloud',
|
setup(
|
||||||
version=version,
|
name="ucloud",
|
||||||
description='All ucloud server components.',
|
version=version,
|
||||||
url='https://code.ungleich.ch/ucloud/ucloud',
|
description="All ucloud server components.",
|
||||||
long_description=long_description,
|
url="https://code.ungleich.ch/ucloud/ucloud",
|
||||||
long_description_content_type='text/markdown',
|
long_description=long_description,
|
||||||
classifiers=[
|
long_description_content_type="text/markdown",
|
||||||
'Development Status :: 3 - Alpha',
|
classifiers=[
|
||||||
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
|
"Development Status :: 3 - Alpha",
|
||||||
'Programming Language :: Python :: 3'
|
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
|
||||||
],
|
"Programming Language :: Python :: 3",
|
||||||
author='ungleich',
|
],
|
||||||
author_email='technik@ungleich.ch',
|
author="ungleich",
|
||||||
packages=find_packages(),
|
author_email="technik@ungleich.ch",
|
||||||
install_requires=[
|
packages=find_packages(),
|
||||||
'requests',
|
install_requires=[
|
||||||
'Flask>=1.1.1',
|
"requests",
|
||||||
'flask-restful',
|
"Flask>=1.1.1",
|
||||||
'bitmath',
|
"flask-restful",
|
||||||
'pyotp',
|
"bitmath",
|
||||||
'sshtunnel',
|
"pyotp",
|
||||||
'sphinx',
|
"sshtunnel",
|
||||||
'pynetbox',
|
"sphinx",
|
||||||
'colorama',
|
"pynetbox",
|
||||||
'sphinx-rtd-theme',
|
"colorama",
|
||||||
'etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3',
|
"sphinx-rtd-theme",
|
||||||
'werkzeug', 'marshmallow'
|
"etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3",
|
||||||
],
|
"werkzeug",
|
||||||
scripts=['scripts/ucloud'],
|
"marshmallow",
|
||||||
data_files=[(os.path.expanduser('~/ucloud/'), ['conf/ucloud.conf'])],
|
],
|
||||||
zip_safe=False)
|
scripts=["scripts/ucloud"],
|
||||||
|
data_files=[
|
||||||
|
(os.path.expanduser("~/ucloud/"), ["conf/ucloud.conf"])
|
||||||
|
],
|
||||||
|
zip_safe=False,
|
||||||
|
)
|
||||||
|
|
|
@ -20,12 +20,16 @@ class Field:
|
||||||
|
|
||||||
def is_valid(self):
|
def is_valid(self):
|
||||||
if self.value == KeyError:
|
if self.value == KeyError:
|
||||||
self.add_error("'{}' field is a required field".format(self.name))
|
self.add_error(
|
||||||
|
"'{}' field is a required field".format(self.name)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
if isinstance(self.value, Optional):
|
if isinstance(self.value, Optional):
|
||||||
pass
|
pass
|
||||||
elif not isinstance(self.value, self.type):
|
elif not isinstance(self.value, self.type):
|
||||||
self.add_error("Incorrect Type for '{}' field".format(self.name))
|
self.add_error(
|
||||||
|
"Incorrect Type for '{}' field".format(self.name)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
self.validation()
|
self.validation()
|
||||||
|
|
||||||
|
@ -49,6 +53,10 @@ class VmUUIDField(Field):
|
||||||
self.validation = self.vm_uuid_validation
|
self.validation = self.vm_uuid_validation
|
||||||
|
|
||||||
def vm_uuid_validation(self):
|
def vm_uuid_validation(self):
|
||||||
r = shared.etcd_client.get(os.path.join(settings['etcd']['vm_prefix'], self.uuid))
|
r = shared.etcd_client.get(
|
||||||
|
os.path.join(settings["etcd"]["vm_prefix"], self.uuid)
|
||||||
|
)
|
||||||
if not r:
|
if not r:
|
||||||
self.add_error("VM with uuid {} does not exists".format(self.uuid))
|
self.add_error(
|
||||||
|
"VM with uuid {} does not exists".format(self.uuid)
|
||||||
|
)
|
||||||
|
|
|
@ -14,4 +14,7 @@ data = {
|
||||||
"attributes": {"list": [], "key": [], "pool": "images"},
|
"attributes": {"list": [], "key": [], "pool": "images"},
|
||||||
}
|
}
|
||||||
|
|
||||||
shared.etcd_client.put(os.path.join(settings['etcd']['image_store_prefix'], uuid4().hex), json.dumps(data))
|
shared.etcd_client.put(
|
||||||
|
os.path.join(settings["etcd"]["image_store_prefix"], uuid4().hex),
|
||||||
|
json.dumps(data),
|
||||||
|
)
|
||||||
|
|
|
@ -16,21 +16,23 @@ logger = logging.getLogger(__name__)
|
||||||
def check_otp(name, realm, token):
|
def check_otp(name, realm, token):
|
||||||
try:
|
try:
|
||||||
data = {
|
data = {
|
||||||
"auth_name": settings['otp']['auth_name'],
|
"auth_name": settings["otp"]["auth_name"],
|
||||||
"auth_token": TOTP(settings['otp']['auth_seed']).now(),
|
"auth_token": TOTP(settings["otp"]["auth_seed"]).now(),
|
||||||
"auth_realm": settings['otp']['auth_realm'],
|
"auth_realm": settings["otp"]["auth_realm"],
|
||||||
"name": name,
|
"name": name,
|
||||||
"realm": realm,
|
"realm": realm,
|
||||||
"token": token,
|
"token": token,
|
||||||
}
|
}
|
||||||
except binascii.Error as err:
|
except binascii.Error as err:
|
||||||
logger.error(
|
logger.error(
|
||||||
"Cannot compute OTP for seed: {}".format(settings['otp']['auth_seed'])
|
"Cannot compute OTP for seed: {}".format(
|
||||||
|
settings["otp"]["auth_seed"]
|
||||||
|
)
|
||||||
)
|
)
|
||||||
return 400
|
return 400
|
||||||
|
|
||||||
response = requests.post(
|
response = requests.post(
|
||||||
settings['otp']['verification_controller_url'], json=data
|
settings["otp"]["verification_controller_url"], json=data
|
||||||
)
|
)
|
||||||
return response.status_code
|
return response.status_code
|
||||||
|
|
||||||
|
@ -43,7 +45,8 @@ def resolve_vm_name(name, owner):
|
||||||
"""
|
"""
|
||||||
result = next(
|
result = next(
|
||||||
filter(
|
filter(
|
||||||
lambda vm: vm.value["owner"] == owner and vm.value["name"] == name,
|
lambda vm: vm.value["owner"] == owner
|
||||||
|
and vm.value["name"] == name,
|
||||||
shared.vm_pool.vms,
|
shared.vm_pool.vms,
|
||||||
),
|
),
|
||||||
None,
|
None,
|
||||||
|
@ -80,18 +83,27 @@ def resolve_image_name(name, etcd_client):
|
||||||
"""
|
"""
|
||||||
store_name, image_name = store_name_and_image_name
|
store_name, image_name = store_name_and_image_name
|
||||||
except Exception:
|
except Exception:
|
||||||
raise ValueError("Image name not in correct format i.e {store_name}:{image_name}")
|
raise ValueError(
|
||||||
|
"Image name not in correct format i.e {store_name}:{image_name}"
|
||||||
|
)
|
||||||
|
|
||||||
images = etcd_client.get_prefix(settings['etcd']['image_prefix'], value_in_json=True)
|
images = etcd_client.get_prefix(
|
||||||
|
settings["etcd"]["image_prefix"], value_in_json=True
|
||||||
|
)
|
||||||
|
|
||||||
# Try to find image with name == image_name and store_name == store_name
|
# Try to find image with name == image_name and store_name == store_name
|
||||||
try:
|
try:
|
||||||
image = next(filter(lambda im: im.value['name'] == image_name
|
image = next(
|
||||||
and im.value['store_name'] == store_name, images))
|
filter(
|
||||||
|
lambda im: im.value["name"] == image_name
|
||||||
|
and im.value["store_name"] == store_name,
|
||||||
|
images,
|
||||||
|
)
|
||||||
|
)
|
||||||
except StopIteration:
|
except StopIteration:
|
||||||
raise KeyError("No image with name {} found.".format(name))
|
raise KeyError("No image with name {} found.".format(name))
|
||||||
else:
|
else:
|
||||||
image_uuid = image.key.split('/')[-1]
|
image_uuid = image.key.split("/")[-1]
|
||||||
|
|
||||||
return image_uuid
|
return image_uuid
|
||||||
|
|
||||||
|
@ -100,7 +112,9 @@ def random_bytes(num=6):
|
||||||
return [random.randrange(256) for _ in range(num)]
|
return [random.randrange(256) for _ in range(num)]
|
||||||
|
|
||||||
|
|
||||||
def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='%02x'):
|
def generate_mac(
|
||||||
|
uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
|
||||||
|
):
|
||||||
mac = random_bytes()
|
mac = random_bytes()
|
||||||
if oui:
|
if oui:
|
||||||
if type(oui) == str:
|
if type(oui) == str:
|
||||||
|
@ -131,7 +145,9 @@ def get_ip_addr(mac_address, device):
|
||||||
and is connected/neighbor of arg:device
|
and is connected/neighbor of arg:device
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
output = sp.check_output(['ip', '-6', 'neigh', 'show', 'dev', device], stderr=sp.PIPE)
|
output = sp.check_output(
|
||||||
|
["ip", "-6", "neigh", "show", "dev", device], stderr=sp.PIPE
|
||||||
|
)
|
||||||
except sp.CalledProcessError:
|
except sp.CalledProcessError:
|
||||||
return None
|
return None
|
||||||
else:
|
else:
|
||||||
|
@ -160,7 +176,7 @@ def mac2ipv6(mac, prefix):
|
||||||
# format output
|
# format output
|
||||||
ipv6_parts = [str(0)] * 4
|
ipv6_parts = [str(0)] * 4
|
||||||
for i in range(0, len(parts), 2):
|
for i in range(0, len(parts), 2):
|
||||||
ipv6_parts.append("".join(parts[i:i + 2]))
|
ipv6_parts.append("".join(parts[i : i + 2]))
|
||||||
|
|
||||||
lower_part = ipaddress.IPv6Address(":".join(ipv6_parts))
|
lower_part = ipaddress.IPv6Address(":".join(ipv6_parts))
|
||||||
prefix = ipaddress.IPv6Address(prefix)
|
prefix = ipaddress.IPv6Address(prefix)
|
||||||
|
|
|
@ -43,7 +43,7 @@ def handle_exception(e):
|
||||||
return e
|
return e
|
||||||
|
|
||||||
# now you're handling non-HTTP exceptions only
|
# now you're handling non-HTTP exceptions only
|
||||||
return {'message': 'Server Error'}, 500
|
return {"message": "Server Error"}, 500
|
||||||
|
|
||||||
|
|
||||||
class CreateVM(Resource):
|
class CreateVM(Resource):
|
||||||
|
@ -55,7 +55,7 @@ class CreateVM(Resource):
|
||||||
validator = schemas.CreateVMSchema(data)
|
validator = schemas.CreateVMSchema(data)
|
||||||
if validator.is_valid():
|
if validator.is_valid():
|
||||||
vm_uuid = uuid4().hex
|
vm_uuid = uuid4().hex
|
||||||
vm_key = join_path(settings['etcd']['vm_prefix'], vm_uuid)
|
vm_key = join_path(settings["etcd"]["vm_prefix"], vm_uuid)
|
||||||
specs = {
|
specs = {
|
||||||
"cpu": validator.specs["cpu"],
|
"cpu": validator.specs["cpu"],
|
||||||
"ram": validator.specs["ram"],
|
"ram": validator.specs["ram"],
|
||||||
|
@ -63,8 +63,12 @@ class CreateVM(Resource):
|
||||||
"hdd": validator.specs["hdd"],
|
"hdd": validator.specs["hdd"],
|
||||||
}
|
}
|
||||||
macs = [generate_mac() for _ in range(len(data["network"]))]
|
macs = [generate_mac() for _ in range(len(data["network"]))]
|
||||||
tap_ids = [counters.increment_etcd_counter(shared.etcd_client, "/v1/counter/tap")
|
tap_ids = [
|
||||||
for _ in range(len(data["network"]))]
|
counters.increment_etcd_counter(
|
||||||
|
shared.etcd_client, "/v1/counter/tap"
|
||||||
|
)
|
||||||
|
for _ in range(len(data["network"]))
|
||||||
|
]
|
||||||
vm_entry = {
|
vm_entry = {
|
||||||
"name": data["vm_name"],
|
"name": data["vm_name"],
|
||||||
"owner": data["name"],
|
"owner": data["name"],
|
||||||
|
@ -77,14 +81,15 @@ class CreateVM(Resource):
|
||||||
"vnc_socket": "",
|
"vnc_socket": "",
|
||||||
"network": list(zip(data["network"], macs, tap_ids)),
|
"network": list(zip(data["network"], macs, tap_ids)),
|
||||||
"metadata": {"ssh-keys": []},
|
"metadata": {"ssh-keys": []},
|
||||||
"in_migration": False
|
"in_migration": False,
|
||||||
}
|
}
|
||||||
shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
|
shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
|
||||||
|
|
||||||
# Create ScheduleVM Request
|
# Create ScheduleVM Request
|
||||||
r = RequestEntry.from_scratch(
|
r = RequestEntry.from_scratch(
|
||||||
type=RequestType.ScheduleVM, uuid=vm_uuid,
|
type=RequestType.ScheduleVM,
|
||||||
request_prefix=settings['etcd']['request_prefix']
|
uuid=vm_uuid,
|
||||||
|
request_prefix=settings["etcd"]["request_prefix"],
|
||||||
)
|
)
|
||||||
shared.request_pool.put(r)
|
shared.request_pool.put(r)
|
||||||
|
|
||||||
|
@ -99,7 +104,7 @@ class VmStatus(Resource):
|
||||||
validator = schemas.VMStatusSchema(data)
|
validator = schemas.VMStatusSchema(data)
|
||||||
if validator.is_valid():
|
if validator.is_valid():
|
||||||
vm = shared.vm_pool.get(
|
vm = shared.vm_pool.get(
|
||||||
join_path(settings['etcd']['vm_prefix'], data["uuid"])
|
join_path(settings["etcd"]["vm_prefix"], data["uuid"])
|
||||||
)
|
)
|
||||||
vm_value = vm.value.copy()
|
vm_value = vm.value.copy()
|
||||||
vm_value["ip"] = []
|
vm_value["ip"] = []
|
||||||
|
@ -107,13 +112,15 @@ class VmStatus(Resource):
|
||||||
network_name, mac, tap = network_mac_and_tap
|
network_name, mac, tap = network_mac_and_tap
|
||||||
network = shared.etcd_client.get(
|
network = shared.etcd_client.get(
|
||||||
join_path(
|
join_path(
|
||||||
settings['etcd']['network_prefix'],
|
settings["etcd"]["network_prefix"],
|
||||||
data["name"],
|
data["name"],
|
||||||
network_name,
|
network_name,
|
||||||
),
|
),
|
||||||
value_in_json=True,
|
value_in_json=True,
|
||||||
)
|
)
|
||||||
ipv6_addr = network.value.get("ipv6").split("::")[0] + "::"
|
ipv6_addr = (
|
||||||
|
network.value.get("ipv6").split("::")[0] + "::"
|
||||||
|
)
|
||||||
vm_value["ip"].append(mac2ipv6(mac, ipv6_addr))
|
vm_value["ip"].append(mac2ipv6(mac, ipv6_addr))
|
||||||
vm.value = vm_value
|
vm.value = vm_value
|
||||||
return vm.value
|
return vm.value
|
||||||
|
@ -128,7 +135,7 @@ class CreateImage(Resource):
|
||||||
validator = schemas.CreateImageSchema(data)
|
validator = schemas.CreateImageSchema(data)
|
||||||
if validator.is_valid():
|
if validator.is_valid():
|
||||||
file_entry = shared.etcd_client.get(
|
file_entry = shared.etcd_client.get(
|
||||||
join_path(settings['etcd']['file_prefix'], data["uuid"])
|
join_path(settings["etcd"]["file_prefix"], data["uuid"])
|
||||||
)
|
)
|
||||||
file_entry_value = json.loads(file_entry.value)
|
file_entry_value = json.loads(file_entry.value)
|
||||||
|
|
||||||
|
@ -141,7 +148,9 @@ class CreateImage(Resource):
|
||||||
"visibility": "public",
|
"visibility": "public",
|
||||||
}
|
}
|
||||||
shared.etcd_client.put(
|
shared.etcd_client.put(
|
||||||
join_path(settings['etcd']['image_prefix'], data["uuid"]),
|
join_path(
|
||||||
|
settings["etcd"]["image_prefix"], data["uuid"]
|
||||||
|
),
|
||||||
json.dumps(image_entry_json),
|
json.dumps(image_entry_json),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -153,11 +162,9 @@ class ListPublicImages(Resource):
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get():
|
def get():
|
||||||
images = shared.etcd_client.get_prefix(
|
images = shared.etcd_client.get_prefix(
|
||||||
settings['etcd']['image_prefix'], value_in_json=True
|
settings["etcd"]["image_prefix"], value_in_json=True
|
||||||
)
|
)
|
||||||
r = {
|
r = {"images": []}
|
||||||
"images": []
|
|
||||||
}
|
|
||||||
for image in images:
|
for image in images:
|
||||||
image_key = "{}:{}".format(
|
image_key = "{}:{}".format(
|
||||||
image.value["store_name"], image.value["name"]
|
image.value["store_name"], image.value["name"]
|
||||||
|
@ -176,7 +183,7 @@ class VMAction(Resource):
|
||||||
|
|
||||||
if validator.is_valid():
|
if validator.is_valid():
|
||||||
vm_entry = shared.vm_pool.get(
|
vm_entry = shared.vm_pool.get(
|
||||||
join_path(settings['etcd']['vm_prefix'], data["uuid"])
|
join_path(settings["etcd"]["vm_prefix"], data["uuid"])
|
||||||
)
|
)
|
||||||
action = data["action"]
|
action = data["action"]
|
||||||
|
|
||||||
|
@ -184,13 +191,19 @@ class VMAction(Resource):
|
||||||
action = "schedule"
|
action = "schedule"
|
||||||
|
|
||||||
if action == "delete" and vm_entry.hostname == "":
|
if action == "delete" and vm_entry.hostname == "":
|
||||||
if shared.storage_handler.is_vm_image_exists(vm_entry.uuid):
|
if shared.storage_handler.is_vm_image_exists(
|
||||||
r_status = shared.storage_handler.delete_vm_image(vm_entry.uuid)
|
vm_entry.uuid
|
||||||
|
):
|
||||||
|
r_status = shared.storage_handler.delete_vm_image(
|
||||||
|
vm_entry.uuid
|
||||||
|
)
|
||||||
if r_status:
|
if r_status:
|
||||||
shared.etcd_client.client.delete(vm_entry.key)
|
shared.etcd_client.client.delete(vm_entry.key)
|
||||||
return {"message": "VM successfully deleted"}
|
return {"message": "VM successfully deleted"}
|
||||||
else:
|
else:
|
||||||
logger.error("Some Error Occurred while deleting VM")
|
logger.error(
|
||||||
|
"Some Error Occurred while deleting VM"
|
||||||
|
)
|
||||||
return {"message": "VM deletion unsuccessfull"}
|
return {"message": "VM deletion unsuccessfull"}
|
||||||
else:
|
else:
|
||||||
shared.etcd_client.client.delete(vm_entry.key)
|
shared.etcd_client.client.delete(vm_entry.key)
|
||||||
|
@ -200,10 +213,13 @@ class VMAction(Resource):
|
||||||
type="{}VM".format(action.title()),
|
type="{}VM".format(action.title()),
|
||||||
uuid=data["uuid"],
|
uuid=data["uuid"],
|
||||||
hostname=vm_entry.hostname,
|
hostname=vm_entry.hostname,
|
||||||
request_prefix=settings['etcd']['request_prefix']
|
request_prefix=settings["etcd"]["request_prefix"],
|
||||||
)
|
)
|
||||||
shared.request_pool.put(r)
|
shared.request_pool.put(r)
|
||||||
return {"message": "VM {} Queued".format(action.title())}, 200
|
return (
|
||||||
|
{"message": "VM {} Queued".format(action.title())},
|
||||||
|
200,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
return validator.get_errors(), 400
|
return validator.get_errors(), 400
|
||||||
|
|
||||||
|
@ -216,15 +232,21 @@ class VMMigration(Resource):
|
||||||
|
|
||||||
if validator.is_valid():
|
if validator.is_valid():
|
||||||
vm = shared.vm_pool.get(data["uuid"])
|
vm = shared.vm_pool.get(data["uuid"])
|
||||||
r = RequestEntry.from_scratch(type=RequestType.InitVMMigration,
|
r = RequestEntry.from_scratch(
|
||||||
uuid=vm.uuid,
|
type=RequestType.InitVMMigration,
|
||||||
hostname=join_path(
|
uuid=vm.uuid,
|
||||||
settings['etcd']['host_prefix'], validator.destination.value
|
hostname=join_path(
|
||||||
),
|
settings["etcd"]["host_prefix"],
|
||||||
request_prefix=settings['etcd']['request_prefix'])
|
validator.destination.value,
|
||||||
|
),
|
||||||
|
request_prefix=settings["etcd"]["request_prefix"],
|
||||||
|
)
|
||||||
|
|
||||||
shared.request_pool.put(r)
|
shared.request_pool.put(r)
|
||||||
return {"message": "VM Migration Initialization Queued"}, 200
|
return (
|
||||||
|
{"message": "VM Migration Initialization Queued"},
|
||||||
|
200,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
return validator.get_errors(), 400
|
return validator.get_errors(), 400
|
||||||
|
|
||||||
|
@ -237,10 +259,12 @@ class ListUserVM(Resource):
|
||||||
|
|
||||||
if validator.is_valid():
|
if validator.is_valid():
|
||||||
vms = shared.etcd_client.get_prefix(
|
vms = shared.etcd_client.get_prefix(
|
||||||
settings['etcd']['vm_prefix'], value_in_json=True
|
settings["etcd"]["vm_prefix"], value_in_json=True
|
||||||
)
|
)
|
||||||
return_vms = []
|
return_vms = []
|
||||||
user_vms = filter(lambda v: v.value["owner"] == data["name"], vms)
|
user_vms = filter(
|
||||||
|
lambda v: v.value["owner"] == data["name"], vms
|
||||||
|
)
|
||||||
for vm in user_vms:
|
for vm in user_vms:
|
||||||
return_vms.append(
|
return_vms.append(
|
||||||
{
|
{
|
||||||
|
@ -249,9 +273,7 @@ class ListUserVM(Resource):
|
||||||
"specs": vm.value["specs"],
|
"specs": vm.value["specs"],
|
||||||
"status": vm.value["status"],
|
"status": vm.value["status"],
|
||||||
"hostname": vm.value["hostname"],
|
"hostname": vm.value["hostname"],
|
||||||
"vnc_socket": None
|
"vnc_socket": vm.value.get("vnc_socket", None),
|
||||||
if vm.value.get("vnc_socket", None) is None
|
|
||||||
else vm.value["vnc_socket"],
|
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
if return_vms:
|
if return_vms:
|
||||||
|
@ -270,11 +292,13 @@ class ListUserFiles(Resource):
|
||||||
|
|
||||||
if validator.is_valid():
|
if validator.is_valid():
|
||||||
files = shared.etcd_client.get_prefix(
|
files = shared.etcd_client.get_prefix(
|
||||||
settings['etcd']['file_prefix'], value_in_json=True
|
settings["etcd"]["file_prefix"], value_in_json=True
|
||||||
)
|
)
|
||||||
return_files = []
|
return_files = []
|
||||||
user_files = list(
|
user_files = list(
|
||||||
filter(lambda f: f.value["owner"] == data["name"], files)
|
filter(
|
||||||
|
lambda f: f.value["owner"] == data["name"], files
|
||||||
|
)
|
||||||
)
|
)
|
||||||
for file in user_files:
|
for file in user_files:
|
||||||
return_files.append(
|
return_files.append(
|
||||||
|
@ -294,14 +318,18 @@ class CreateHost(Resource):
|
||||||
data = request.json
|
data = request.json
|
||||||
validator = schemas.CreateHostSchema(data)
|
validator = schemas.CreateHostSchema(data)
|
||||||
if validator.is_valid():
|
if validator.is_valid():
|
||||||
host_key = join_path(settings['etcd']['host_prefix'], uuid4().hex)
|
host_key = join_path(
|
||||||
|
settings["etcd"]["host_prefix"], uuid4().hex
|
||||||
|
)
|
||||||
host_entry = {
|
host_entry = {
|
||||||
"specs": data["specs"],
|
"specs": data["specs"],
|
||||||
"hostname": data["hostname"],
|
"hostname": data["hostname"],
|
||||||
"status": "DEAD",
|
"status": "DEAD",
|
||||||
"last_heartbeat": "",
|
"last_heartbeat": "",
|
||||||
}
|
}
|
||||||
shared.etcd_client.put(host_key, host_entry, value_in_json=True)
|
shared.etcd_client.put(
|
||||||
|
host_key, host_entry, value_in_json=True
|
||||||
|
)
|
||||||
|
|
||||||
return {"message": "Host Created"}, 200
|
return {"message": "Host Created"}, 200
|
||||||
|
|
||||||
|
@ -333,7 +361,7 @@ class GetSSHKeys(Resource):
|
||||||
|
|
||||||
# {user_prefix}/{realm}/{name}/key/
|
# {user_prefix}/{realm}/{name}/key/
|
||||||
etcd_key = join_path(
|
etcd_key = join_path(
|
||||||
settings['etcd']['user_prefix'],
|
settings["etcd"]["user_prefix"],
|
||||||
data["realm"],
|
data["realm"],
|
||||||
data["name"],
|
data["name"],
|
||||||
"key",
|
"key",
|
||||||
|
@ -343,25 +371,30 @@ class GetSSHKeys(Resource):
|
||||||
)
|
)
|
||||||
|
|
||||||
keys = {
|
keys = {
|
||||||
key.key.split("/")[-1]: key.value for key in etcd_entry
|
key.key.split("/")[-1]: key.value
|
||||||
|
for key in etcd_entry
|
||||||
}
|
}
|
||||||
return {"keys": keys}
|
return {"keys": keys}
|
||||||
else:
|
else:
|
||||||
|
|
||||||
# {user_prefix}/{realm}/{name}/key/{key_name}
|
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||||
etcd_key = join_path(
|
etcd_key = join_path(
|
||||||
settings['etcd']['user_prefix'],
|
settings["etcd"]["user_prefix"],
|
||||||
data["realm"],
|
data["realm"],
|
||||||
data["name"],
|
data["name"],
|
||||||
"key",
|
"key",
|
||||||
data["key_name"],
|
data["key_name"],
|
||||||
)
|
)
|
||||||
etcd_entry = shared.etcd_client.get(etcd_key, value_in_json=True)
|
etcd_entry = shared.etcd_client.get(
|
||||||
|
etcd_key, value_in_json=True
|
||||||
|
)
|
||||||
|
|
||||||
if etcd_entry:
|
if etcd_entry:
|
||||||
return {
|
return {
|
||||||
"keys": {
|
"keys": {
|
||||||
etcd_entry.key.split("/")[-1]: etcd_entry.value
|
etcd_entry.key.split("/")[
|
||||||
|
-1
|
||||||
|
]: etcd_entry.value
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else:
|
else:
|
||||||
|
@ -379,13 +412,15 @@ class AddSSHKey(Resource):
|
||||||
|
|
||||||
# {user_prefix}/{realm}/{name}/key/{key_name}
|
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||||
etcd_key = join_path(
|
etcd_key = join_path(
|
||||||
settings['etcd']['user_prefix'],
|
settings["etcd"]["user_prefix"],
|
||||||
data["realm"],
|
data["realm"],
|
||||||
data["name"],
|
data["name"],
|
||||||
"key",
|
"key",
|
||||||
data["key_name"],
|
data["key_name"],
|
||||||
)
|
)
|
||||||
etcd_entry = shared.etcd_client.get(etcd_key, value_in_json=True)
|
etcd_entry = shared.etcd_client.get(
|
||||||
|
etcd_key, value_in_json=True
|
||||||
|
)
|
||||||
if etcd_entry:
|
if etcd_entry:
|
||||||
return {
|
return {
|
||||||
"message": "Key with name '{}' already exists".format(
|
"message": "Key with name '{}' already exists".format(
|
||||||
|
@ -394,7 +429,9 @@ class AddSSHKey(Resource):
|
||||||
}
|
}
|
||||||
else:
|
else:
|
||||||
# Key Not Found. It implies user' haven't added any key yet.
|
# Key Not Found. It implies user' haven't added any key yet.
|
||||||
shared.etcd_client.put(etcd_key, data["key"], value_in_json=True)
|
shared.etcd_client.put(
|
||||||
|
etcd_key, data["key"], value_in_json=True
|
||||||
|
)
|
||||||
return {"message": "Key added successfully"}
|
return {"message": "Key added successfully"}
|
||||||
else:
|
else:
|
||||||
return validator.get_errors(), 400
|
return validator.get_errors(), 400
|
||||||
|
@ -409,13 +446,15 @@ class RemoveSSHKey(Resource):
|
||||||
|
|
||||||
# {user_prefix}/{realm}/{name}/key/{key_name}
|
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||||
etcd_key = join_path(
|
etcd_key = join_path(
|
||||||
settings['etcd']['user_prefix'],
|
settings["etcd"]["user_prefix"],
|
||||||
data["realm"],
|
data["realm"],
|
||||||
data["name"],
|
data["name"],
|
||||||
"key",
|
"key",
|
||||||
data["key_name"],
|
data["key_name"],
|
||||||
)
|
)
|
||||||
etcd_entry = shared.etcd_client.get(etcd_key, value_in_json=True)
|
etcd_entry = shared.etcd_client.get(
|
||||||
|
etcd_key, value_in_json=True
|
||||||
|
)
|
||||||
if etcd_entry:
|
if etcd_entry:
|
||||||
shared.etcd_client.client.delete(etcd_key)
|
shared.etcd_client.client.delete(etcd_key)
|
||||||
return {"message": "Key successfully removed."}
|
return {"message": "Key successfully removed."}
|
||||||
|
@ -446,15 +485,17 @@ class CreateNetwork(Resource):
|
||||||
if validator.user.value:
|
if validator.user.value:
|
||||||
try:
|
try:
|
||||||
nb = pynetbox.api(
|
nb = pynetbox.api(
|
||||||
url=settings['netbox']['url'],
|
url=settings["netbox"]["url"],
|
||||||
token=settings['netbox']['token'],
|
token=settings["netbox"]["token"],
|
||||||
)
|
)
|
||||||
nb_prefix = nb.ipam.prefixes.get(
|
nb_prefix = nb.ipam.prefixes.get(
|
||||||
prefix=settings['network']['prefix']
|
prefix=settings["network"]["prefix"]
|
||||||
)
|
)
|
||||||
prefix = nb_prefix.available_prefixes.create(
|
prefix = nb_prefix.available_prefixes.create(
|
||||||
data={
|
data={
|
||||||
"prefix_length": int(settings['network']['prefix_length']),
|
"prefix_length": int(
|
||||||
|
settings["network"]["prefix_length"]
|
||||||
|
),
|
||||||
"description": '{}\'s network "{}"'.format(
|
"description": '{}\'s network "{}"'.format(
|
||||||
data["name"], data["network_name"]
|
data["name"], data["network_name"]
|
||||||
),
|
),
|
||||||
|
@ -463,18 +504,22 @@ class CreateNetwork(Resource):
|
||||||
)
|
)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
app.logger.error(err)
|
app.logger.error(err)
|
||||||
return {"message": "Error occured while creating network."}
|
return {
|
||||||
|
"message": "Error occured while creating network."
|
||||||
|
}
|
||||||
else:
|
else:
|
||||||
network_entry["ipv6"] = prefix["prefix"]
|
network_entry["ipv6"] = prefix["prefix"]
|
||||||
else:
|
else:
|
||||||
network_entry["ipv6"] = "fd00::/64"
|
network_entry["ipv6"] = "fd00::/64"
|
||||||
|
|
||||||
network_key = join_path(
|
network_key = join_path(
|
||||||
settings['etcd']['network_prefix'],
|
settings["etcd"]["network_prefix"],
|
||||||
data['name'],
|
data["name"],
|
||||||
data['network_name'],
|
data["network_name"],
|
||||||
|
)
|
||||||
|
shared.etcd_client.put(
|
||||||
|
network_key, network_entry, value_in_json=True
|
||||||
)
|
)
|
||||||
shared.etcd_client.put(network_key, network_entry, value_in_json=True)
|
|
||||||
return {"message": "Network successfully added."}
|
return {"message": "Network successfully added."}
|
||||||
else:
|
else:
|
||||||
return validator.get_errors(), 400
|
return validator.get_errors(), 400
|
||||||
|
@ -488,9 +533,11 @@ class ListUserNetwork(Resource):
|
||||||
|
|
||||||
if validator.is_valid():
|
if validator.is_valid():
|
||||||
prefix = join_path(
|
prefix = join_path(
|
||||||
settings['etcd']['network_prefix'], data["name"]
|
settings["etcd"]["network_prefix"], data["name"]
|
||||||
|
)
|
||||||
|
networks = shared.etcd_client.get_prefix(
|
||||||
|
prefix, value_in_json=True
|
||||||
)
|
)
|
||||||
networks = shared.etcd_client.get_prefix(prefix, value_in_json=True)
|
|
||||||
user_networks = []
|
user_networks = []
|
||||||
for net in networks:
|
for net in networks:
|
||||||
net.value["name"] = net.key.split("/")[-1]
|
net.value["name"] = net.key.split("/")[-1]
|
||||||
|
@ -524,7 +571,11 @@ api.add_resource(CreateNetwork, "/network/create")
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
image_stores = list(shared.etcd_client.get_prefix(settings['etcd']['image_store_prefix'], value_in_json=True))
|
image_stores = list(
|
||||||
|
shared.etcd_client.get_prefix(
|
||||||
|
settings["etcd"]["image_store_prefix"], value_in_json=True
|
||||||
|
)
|
||||||
|
)
|
||||||
if len(image_stores) == 0:
|
if len(image_stores) == 0:
|
||||||
data = {
|
data = {
|
||||||
"is_public": True,
|
"is_public": True,
|
||||||
|
@ -534,7 +585,12 @@ def main():
|
||||||
"attributes": {"list": [], "key": [], "pool": "images"},
|
"attributes": {"list": [], "key": [], "pool": "images"},
|
||||||
}
|
}
|
||||||
|
|
||||||
shared.etcd_client.put(join_path(settings['etcd']['image_store_prefix'], uuid4().hex), json.dumps(data))
|
shared.etcd_client.put(
|
||||||
|
join_path(
|
||||||
|
settings["etcd"]["image_store_prefix"], uuid4().hex
|
||||||
|
),
|
||||||
|
json.dumps(data),
|
||||||
|
)
|
||||||
|
|
||||||
app.run(host="::", debug=True)
|
app.run(host="::", debug=True)
|
||||||
|
|
||||||
|
|
|
@ -80,7 +80,12 @@ class OTPSchema(BaseSchema):
|
||||||
super().__init__(data=data, fields=_fields)
|
super().__init__(data=data, fields=_fields)
|
||||||
|
|
||||||
def validation(self):
|
def validation(self):
|
||||||
if check_otp(self.name.value, self.realm.value, self.token.value) != 200:
|
if (
|
||||||
|
check_otp(
|
||||||
|
self.name.value, self.realm.value, self.token.value
|
||||||
|
)
|
||||||
|
!= 200
|
||||||
|
):
|
||||||
self.add_error("Wrong Credentials")
|
self.add_error("Wrong Credentials")
|
||||||
|
|
||||||
|
|
||||||
|
@ -92,7 +97,9 @@ class CreateImageSchema(BaseSchema):
|
||||||
# Fields
|
# Fields
|
||||||
self.uuid = Field("uuid", str, data.get("uuid", KeyError))
|
self.uuid = Field("uuid", str, data.get("uuid", KeyError))
|
||||||
self.name = Field("name", str, data.get("name", KeyError))
|
self.name = Field("name", str, data.get("name", KeyError))
|
||||||
self.image_store = Field("image_store", str, data.get("image_store", KeyError))
|
self.image_store = Field(
|
||||||
|
"image_store", str, data.get("image_store", KeyError)
|
||||||
|
)
|
||||||
|
|
||||||
# Validations
|
# Validations
|
||||||
self.uuid.validation = self.file_uuid_validation
|
self.uuid.validation = self.file_uuid_validation
|
||||||
|
@ -103,34 +110,52 @@ class CreateImageSchema(BaseSchema):
|
||||||
super().__init__(data, fields)
|
super().__init__(data, fields)
|
||||||
|
|
||||||
def file_uuid_validation(self):
|
def file_uuid_validation(self):
|
||||||
file_entry = shared.etcd_client.get(os.path.join(settings['etcd']['file_prefix'], self.uuid.value))
|
file_entry = shared.etcd_client.get(
|
||||||
|
os.path.join(
|
||||||
|
settings["etcd"]["file_prefix"], self.uuid.value
|
||||||
|
)
|
||||||
|
)
|
||||||
if file_entry is None:
|
if file_entry is None:
|
||||||
self.add_error(
|
self.add_error(
|
||||||
"Image File with uuid '{}' Not Found".format(self.uuid.value)
|
"Image File with uuid '{}' Not Found".format(
|
||||||
|
self.uuid.value
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
def image_store_name_validation(self):
|
def image_store_name_validation(self):
|
||||||
image_stores = list(shared.etcd_client.get_prefix(settings['etcd']['image_store_prefix']))
|
image_stores = list(
|
||||||
|
shared.etcd_client.get_prefix(
|
||||||
|
settings["etcd"]["image_store_prefix"]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
image_store = next(
|
image_store = next(
|
||||||
filter(
|
filter(
|
||||||
lambda s: json.loads(s.value)["name"] == self.image_store.value,
|
lambda s: json.loads(s.value)["name"]
|
||||||
|
== self.image_store.value,
|
||||||
image_stores,
|
image_stores,
|
||||||
),
|
),
|
||||||
None,
|
None,
|
||||||
)
|
)
|
||||||
if not image_store:
|
if not image_store:
|
||||||
self.add_error("Store '{}' does not exists".format(self.image_store.value))
|
self.add_error(
|
||||||
|
"Store '{}' does not exists".format(
|
||||||
|
self.image_store.value
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# Host Operations
|
# Host Operations
|
||||||
|
|
||||||
|
|
||||||
class CreateHostSchema(OTPSchema):
|
class CreateHostSchema(OTPSchema):
|
||||||
def __init__(self, data):
|
def __init__(self, data):
|
||||||
self.parsed_specs = {}
|
self.parsed_specs = {}
|
||||||
# Fields
|
# Fields
|
||||||
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
||||||
self.hostname = Field("hostname", str, data.get("hostname", KeyError))
|
self.hostname = Field(
|
||||||
|
"hostname", str, data.get("hostname", KeyError)
|
||||||
|
)
|
||||||
|
|
||||||
# Validation
|
# Validation
|
||||||
self.specs.validation = self.specs_validation
|
self.specs.validation = self.specs_validation
|
||||||
|
@ -142,22 +167,28 @@ class CreateHostSchema(OTPSchema):
|
||||||
def specs_validation(self):
|
def specs_validation(self):
|
||||||
ALLOWED_BASE = 10
|
ALLOWED_BASE = 10
|
||||||
|
|
||||||
_cpu = self.specs.value.get('cpu', KeyError)
|
_cpu = self.specs.value.get("cpu", KeyError)
|
||||||
_ram = self.specs.value.get('ram', KeyError)
|
_ram = self.specs.value.get("ram", KeyError)
|
||||||
_os_ssd = self.specs.value.get('os-ssd', KeyError)
|
_os_ssd = self.specs.value.get("os-ssd", KeyError)
|
||||||
_hdd = self.specs.value.get('hdd', KeyError)
|
_hdd = self.specs.value.get("hdd", KeyError)
|
||||||
|
|
||||||
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
||||||
self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
|
self.add_error(
|
||||||
|
"You must specify CPU, RAM and OS-SSD in your specs"
|
||||||
|
)
|
||||||
return None
|
return None
|
||||||
try:
|
try:
|
||||||
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
||||||
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
||||||
|
|
||||||
if parsed_ram.base != ALLOWED_BASE:
|
if parsed_ram.base != ALLOWED_BASE:
|
||||||
self.add_error("Your specified RAM is not in correct units")
|
self.add_error(
|
||||||
|
"Your specified RAM is not in correct units"
|
||||||
|
)
|
||||||
if parsed_os_ssd.base != ALLOWED_BASE:
|
if parsed_os_ssd.base != ALLOWED_BASE:
|
||||||
self.add_error("Your specified OS-SSD is not in correct units")
|
self.add_error(
|
||||||
|
"Your specified OS-SSD is not in correct units"
|
||||||
|
)
|
||||||
|
|
||||||
if _cpu < 1:
|
if _cpu < 1:
|
||||||
self.add_error("CPU must be atleast 1")
|
self.add_error("CPU must be atleast 1")
|
||||||
|
@ -172,7 +203,9 @@ class CreateHostSchema(OTPSchema):
|
||||||
for hdd in _hdd:
|
for hdd in _hdd:
|
||||||
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
||||||
if _parsed_hdd.base != ALLOWED_BASE:
|
if _parsed_hdd.base != ALLOWED_BASE:
|
||||||
self.add_error("Your specified HDD is not in correct units")
|
self.add_error(
|
||||||
|
"Your specified HDD is not in correct units"
|
||||||
|
)
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
parsed_hdd.append(str(_parsed_hdd))
|
parsed_hdd.append(str(_parsed_hdd))
|
||||||
|
@ -183,15 +216,17 @@ class CreateHostSchema(OTPSchema):
|
||||||
else:
|
else:
|
||||||
if self.get_errors():
|
if self.get_errors():
|
||||||
self.specs = {
|
self.specs = {
|
||||||
'cpu': _cpu,
|
"cpu": _cpu,
|
||||||
'ram': str(parsed_ram),
|
"ram": str(parsed_ram),
|
||||||
'os-ssd': str(parsed_os_ssd),
|
"os-ssd": str(parsed_os_ssd),
|
||||||
'hdd': parsed_hdd
|
"hdd": parsed_hdd,
|
||||||
}
|
}
|
||||||
|
|
||||||
def validation(self):
|
def validation(self):
|
||||||
if self.realm.value != "ungleich-admin":
|
if self.realm.value != "ungleich-admin":
|
||||||
self.add_error("Invalid Credentials/Insufficient Permission")
|
self.add_error(
|
||||||
|
"Invalid Credentials/Insufficient Permission"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# VM Operations
|
# VM Operations
|
||||||
|
@ -203,9 +238,13 @@ class CreateVMSchema(OTPSchema):
|
||||||
|
|
||||||
# Fields
|
# Fields
|
||||||
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
||||||
self.vm_name = Field("vm_name", str, data.get("vm_name", KeyError))
|
self.vm_name = Field(
|
||||||
|
"vm_name", str, data.get("vm_name", KeyError)
|
||||||
|
)
|
||||||
self.image = Field("image", str, data.get("image", KeyError))
|
self.image = Field("image", str, data.get("image", KeyError))
|
||||||
self.network = Field("network", list, data.get("network", KeyError))
|
self.network = Field(
|
||||||
|
"network", list, data.get("network", KeyError)
|
||||||
|
)
|
||||||
|
|
||||||
# Validation
|
# Validation
|
||||||
self.image.validation = self.image_validation
|
self.image.validation = self.image_validation
|
||||||
|
@ -219,17 +258,25 @@ class CreateVMSchema(OTPSchema):
|
||||||
|
|
||||||
def image_validation(self):
|
def image_validation(self):
|
||||||
try:
|
try:
|
||||||
image_uuid = helper.resolve_image_name(self.image.value, shared.etcd_client)
|
image_uuid = helper.resolve_image_name(
|
||||||
|
self.image.value, shared.etcd_client
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.exception("Cannot resolve image name = %s", self.image.value)
|
logger.exception(
|
||||||
|
"Cannot resolve image name = %s", self.image.value
|
||||||
|
)
|
||||||
self.add_error(str(e))
|
self.add_error(str(e))
|
||||||
else:
|
else:
|
||||||
self.image_uuid = image_uuid
|
self.image_uuid = image_uuid
|
||||||
|
|
||||||
def vm_name_validation(self):
|
def vm_name_validation(self):
|
||||||
if resolve_vm_name(name=self.vm_name.value, owner=self.name.value):
|
if resolve_vm_name(
|
||||||
|
name=self.vm_name.value, owner=self.name.value
|
||||||
|
):
|
||||||
self.add_error(
|
self.add_error(
|
||||||
'VM with same name "{}" already exists'.format(self.vm_name.value)
|
'VM with same name "{}" already exists'.format(
|
||||||
|
self.vm_name.value
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
def network_validation(self):
|
def network_validation(self):
|
||||||
|
@ -237,32 +284,46 @@ class CreateVMSchema(OTPSchema):
|
||||||
|
|
||||||
if _network:
|
if _network:
|
||||||
for net in _network:
|
for net in _network:
|
||||||
network = shared.etcd_client.get(os.path.join(settings['etcd']['network_prefix'],
|
network = shared.etcd_client.get(
|
||||||
self.name.value,
|
os.path.join(
|
||||||
net), value_in_json=True)
|
settings["etcd"]["network_prefix"],
|
||||||
|
self.name.value,
|
||||||
|
net,
|
||||||
|
),
|
||||||
|
value_in_json=True,
|
||||||
|
)
|
||||||
if not network:
|
if not network:
|
||||||
self.add_error("Network with name {} does not exists" \
|
self.add_error(
|
||||||
.format(net))
|
"Network with name {} does not exists".format(
|
||||||
|
net
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
def specs_validation(self):
|
def specs_validation(self):
|
||||||
ALLOWED_BASE = 10
|
ALLOWED_BASE = 10
|
||||||
|
|
||||||
_cpu = self.specs.value.get('cpu', KeyError)
|
_cpu = self.specs.value.get("cpu", KeyError)
|
||||||
_ram = self.specs.value.get('ram', KeyError)
|
_ram = self.specs.value.get("ram", KeyError)
|
||||||
_os_ssd = self.specs.value.get('os-ssd', KeyError)
|
_os_ssd = self.specs.value.get("os-ssd", KeyError)
|
||||||
_hdd = self.specs.value.get('hdd', KeyError)
|
_hdd = self.specs.value.get("hdd", KeyError)
|
||||||
|
|
||||||
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
||||||
self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
|
self.add_error(
|
||||||
|
"You must specify CPU, RAM and OS-SSD in your specs"
|
||||||
|
)
|
||||||
return None
|
return None
|
||||||
try:
|
try:
|
||||||
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
||||||
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
||||||
|
|
||||||
if parsed_ram.base != ALLOWED_BASE:
|
if parsed_ram.base != ALLOWED_BASE:
|
||||||
self.add_error("Your specified RAM is not in correct units")
|
self.add_error(
|
||||||
|
"Your specified RAM is not in correct units"
|
||||||
|
)
|
||||||
if parsed_os_ssd.base != ALLOWED_BASE:
|
if parsed_os_ssd.base != ALLOWED_BASE:
|
||||||
self.add_error("Your specified OS-SSD is not in correct units")
|
self.add_error(
|
||||||
|
"Your specified OS-SSD is not in correct units"
|
||||||
|
)
|
||||||
|
|
||||||
if _cpu < 1:
|
if _cpu < 1:
|
||||||
self.add_error("CPU must be atleast 1")
|
self.add_error("CPU must be atleast 1")
|
||||||
|
@ -277,7 +338,9 @@ class CreateVMSchema(OTPSchema):
|
||||||
for hdd in _hdd:
|
for hdd in _hdd:
|
||||||
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
||||||
if _parsed_hdd.base != ALLOWED_BASE:
|
if _parsed_hdd.base != ALLOWED_BASE:
|
||||||
self.add_error("Your specified HDD is not in correct units")
|
self.add_error(
|
||||||
|
"Your specified HDD is not in correct units"
|
||||||
|
)
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
parsed_hdd.append(str(_parsed_hdd))
|
parsed_hdd.append(str(_parsed_hdd))
|
||||||
|
@ -288,21 +351,24 @@ class CreateVMSchema(OTPSchema):
|
||||||
else:
|
else:
|
||||||
if self.get_errors():
|
if self.get_errors():
|
||||||
self.specs = {
|
self.specs = {
|
||||||
'cpu': _cpu,
|
"cpu": _cpu,
|
||||||
'ram': str(parsed_ram),
|
"ram": str(parsed_ram),
|
||||||
'os-ssd': str(parsed_os_ssd),
|
"os-ssd": str(parsed_os_ssd),
|
||||||
'hdd': parsed_hdd
|
"hdd": parsed_hdd,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class VMStatusSchema(OTPSchema):
|
class VMStatusSchema(OTPSchema):
|
||||||
def __init__(self, data):
|
def __init__(self, data):
|
||||||
data["uuid"] = (
|
data["uuid"] = (
|
||||||
resolve_vm_name(
|
resolve_vm_name(
|
||||||
name=data.get("vm_name", None),
|
name=data.get("vm_name", None),
|
||||||
owner=(data.get("in_support_of", None) or data.get("name", None)),
|
owner=(
|
||||||
)
|
data.get("in_support_of", None)
|
||||||
or KeyError
|
or data.get("name", None)
|
||||||
|
),
|
||||||
|
)
|
||||||
|
or KeyError
|
||||||
)
|
)
|
||||||
self.uuid = VmUUIDField(data)
|
self.uuid = VmUUIDField(data)
|
||||||
|
|
||||||
|
@ -313,7 +379,8 @@ class VMStatusSchema(OTPSchema):
|
||||||
def validation(self):
|
def validation(self):
|
||||||
vm = shared.vm_pool.get(self.uuid.value)
|
vm = shared.vm_pool.get(self.uuid.value)
|
||||||
if not (
|
if not (
|
||||||
vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
|
vm.value["owner"] == self.name.value
|
||||||
|
or self.realm.value == "ungleich-admin"
|
||||||
):
|
):
|
||||||
self.add_error("Invalid User")
|
self.add_error("Invalid User")
|
||||||
|
|
||||||
|
@ -321,11 +388,14 @@ class VMStatusSchema(OTPSchema):
|
||||||
class VmActionSchema(OTPSchema):
|
class VmActionSchema(OTPSchema):
|
||||||
def __init__(self, data):
|
def __init__(self, data):
|
||||||
data["uuid"] = (
|
data["uuid"] = (
|
||||||
resolve_vm_name(
|
resolve_vm_name(
|
||||||
name=data.get("vm_name", None),
|
name=data.get("vm_name", None),
|
||||||
owner=(data.get("in_support_of", None) or data.get("name", None)),
|
owner=(
|
||||||
)
|
data.get("in_support_of", None)
|
||||||
or KeyError
|
or data.get("name", None)
|
||||||
|
),
|
||||||
|
)
|
||||||
|
or KeyError
|
||||||
)
|
)
|
||||||
self.uuid = VmUUIDField(data)
|
self.uuid = VmUUIDField(data)
|
||||||
self.action = Field("action", str, data.get("action", KeyError))
|
self.action = Field("action", str, data.get("action", KeyError))
|
||||||
|
@ -340,20 +410,23 @@ class VmActionSchema(OTPSchema):
|
||||||
allowed_actions = ["start", "stop", "delete"]
|
allowed_actions = ["start", "stop", "delete"]
|
||||||
if self.action.value not in allowed_actions:
|
if self.action.value not in allowed_actions:
|
||||||
self.add_error(
|
self.add_error(
|
||||||
"Invalid Action. Allowed Actions are {}".format(allowed_actions)
|
"Invalid Action. Allowed Actions are {}".format(
|
||||||
|
allowed_actions
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
def validation(self):
|
def validation(self):
|
||||||
vm = shared.vm_pool.get(self.uuid.value)
|
vm = shared.vm_pool.get(self.uuid.value)
|
||||||
if not (
|
if not (
|
||||||
vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
|
vm.value["owner"] == self.name.value
|
||||||
|
or self.realm.value == "ungleich-admin"
|
||||||
):
|
):
|
||||||
self.add_error("Invalid User")
|
self.add_error("Invalid User")
|
||||||
|
|
||||||
if (
|
if (
|
||||||
self.action.value == "start"
|
self.action.value == "start"
|
||||||
and vm.status == VMStatus.running
|
and vm.status == VMStatus.running
|
||||||
and vm.hostname != ""
|
and vm.hostname != ""
|
||||||
):
|
):
|
||||||
self.add_error("VM Already Running")
|
self.add_error("VM Already Running")
|
||||||
|
|
||||||
|
@ -367,15 +440,20 @@ class VmActionSchema(OTPSchema):
|
||||||
class VmMigrationSchema(OTPSchema):
|
class VmMigrationSchema(OTPSchema):
|
||||||
def __init__(self, data):
|
def __init__(self, data):
|
||||||
data["uuid"] = (
|
data["uuid"] = (
|
||||||
resolve_vm_name(
|
resolve_vm_name(
|
||||||
name=data.get("vm_name", None),
|
name=data.get("vm_name", None),
|
||||||
owner=(data.get("in_support_of", None) or data.get("name", None)),
|
owner=(
|
||||||
)
|
data.get("in_support_of", None)
|
||||||
or KeyError
|
or data.get("name", None)
|
||||||
|
),
|
||||||
|
)
|
||||||
|
or KeyError
|
||||||
)
|
)
|
||||||
|
|
||||||
self.uuid = VmUUIDField(data)
|
self.uuid = VmUUIDField(data)
|
||||||
self.destination = Field("destination", str, data.get("destination", KeyError))
|
self.destination = Field(
|
||||||
|
"destination", str, data.get("destination", KeyError)
|
||||||
|
)
|
||||||
|
|
||||||
self.destination.validation = self.destination_validation
|
self.destination.validation = self.destination_validation
|
||||||
|
|
||||||
|
@ -384,9 +462,18 @@ class VmMigrationSchema(OTPSchema):
|
||||||
|
|
||||||
def destination_validation(self):
|
def destination_validation(self):
|
||||||
hostname = self.destination.value
|
hostname = self.destination.value
|
||||||
host = next(filter(lambda h: h.hostname == hostname, shared.host_pool.hosts), None)
|
host = next(
|
||||||
|
filter(
|
||||||
|
lambda h: h.hostname == hostname, shared.host_pool.hosts
|
||||||
|
),
|
||||||
|
None,
|
||||||
|
)
|
||||||
if not host:
|
if not host:
|
||||||
self.add_error("No Such Host ({}) exists".format(self.destination.value))
|
self.add_error(
|
||||||
|
"No Such Host ({}) exists".format(
|
||||||
|
self.destination.value
|
||||||
|
)
|
||||||
|
)
|
||||||
elif host.status != HostStatus.alive:
|
elif host.status != HostStatus.alive:
|
||||||
self.add_error("Destination Host is dead")
|
self.add_error("Destination Host is dead")
|
||||||
else:
|
else:
|
||||||
|
@ -395,20 +482,27 @@ class VmMigrationSchema(OTPSchema):
|
||||||
def validation(self):
|
def validation(self):
|
||||||
vm = shared.vm_pool.get(self.uuid.value)
|
vm = shared.vm_pool.get(self.uuid.value)
|
||||||
if not (
|
if not (
|
||||||
vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
|
vm.value["owner"] == self.name.value
|
||||||
|
or self.realm.value == "ungleich-admin"
|
||||||
):
|
):
|
||||||
self.add_error("Invalid User")
|
self.add_error("Invalid User")
|
||||||
|
|
||||||
if vm.status != VMStatus.running:
|
if vm.status != VMStatus.running:
|
||||||
self.add_error("Can't migrate non-running VM")
|
self.add_error("Can't migrate non-running VM")
|
||||||
|
|
||||||
if vm.hostname == os.path.join(settings['etcd']['host_prefix'], self.destination.value):
|
if vm.hostname == os.path.join(
|
||||||
self.add_error("Destination host couldn't be same as Source Host")
|
settings["etcd"]["host_prefix"], self.destination.value
|
||||||
|
):
|
||||||
|
self.add_error(
|
||||||
|
"Destination host couldn't be same as Source Host"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class AddSSHSchema(OTPSchema):
|
class AddSSHSchema(OTPSchema):
|
||||||
def __init__(self, data):
|
def __init__(self, data):
|
||||||
self.key_name = Field("key_name", str, data.get("key_name", KeyError))
|
self.key_name = Field(
|
||||||
|
"key_name", str, data.get("key_name", KeyError)
|
||||||
|
)
|
||||||
self.key = Field("key", str, data.get("key_name", KeyError))
|
self.key = Field("key", str, data.get("key_name", KeyError))
|
||||||
|
|
||||||
fields = [self.key_name, self.key]
|
fields = [self.key_name, self.key]
|
||||||
|
@ -417,7 +511,9 @@ class AddSSHSchema(OTPSchema):
|
||||||
|
|
||||||
class RemoveSSHSchema(OTPSchema):
|
class RemoveSSHSchema(OTPSchema):
|
||||||
def __init__(self, data):
|
def __init__(self, data):
|
||||||
self.key_name = Field("key_name", str, data.get("key_name", KeyError))
|
self.key_name = Field(
|
||||||
|
"key_name", str, data.get("key_name", KeyError)
|
||||||
|
)
|
||||||
|
|
||||||
fields = [self.key_name]
|
fields = [self.key_name]
|
||||||
super().__init__(data=data, fields=fields)
|
super().__init__(data=data, fields=fields)
|
||||||
|
@ -425,7 +521,9 @@ class RemoveSSHSchema(OTPSchema):
|
||||||
|
|
||||||
class GetSSHSchema(OTPSchema):
|
class GetSSHSchema(OTPSchema):
|
||||||
def __init__(self, data):
|
def __init__(self, data):
|
||||||
self.key_name = Field("key_name", str, data.get("key_name", None))
|
self.key_name = Field(
|
||||||
|
"key_name", str, data.get("key_name", None)
|
||||||
|
)
|
||||||
|
|
||||||
fields = [self.key_name]
|
fields = [self.key_name]
|
||||||
super().__init__(data=data, fields=fields)
|
super().__init__(data=data, fields=fields)
|
||||||
|
@ -433,7 +531,9 @@ class GetSSHSchema(OTPSchema):
|
||||||
|
|
||||||
class CreateNetwork(OTPSchema):
|
class CreateNetwork(OTPSchema):
|
||||||
def __init__(self, data):
|
def __init__(self, data):
|
||||||
self.network_name = Field("network_name", str, data.get("network_name", KeyError))
|
self.network_name = Field(
|
||||||
|
"network_name", str, data.get("network_name", KeyError)
|
||||||
|
)
|
||||||
self.type = Field("type", str, data.get("type", KeyError))
|
self.type = Field("type", str, data.get("type", KeyError))
|
||||||
self.user = Field("user", bool, bool(data.get("user", False)))
|
self.user = Field("user", bool, bool(data.get("user", False)))
|
||||||
|
|
||||||
|
@ -444,15 +544,26 @@ class CreateNetwork(OTPSchema):
|
||||||
super().__init__(data, fields=fields)
|
super().__init__(data, fields=fields)
|
||||||
|
|
||||||
def network_name_validation(self):
|
def network_name_validation(self):
|
||||||
network = shared.etcd_client.get(os.path.join(settings['etcd']['network_prefix'],
|
network = shared.etcd_client.get(
|
||||||
self.name.value,
|
os.path.join(
|
||||||
self.network_name.value),
|
settings["etcd"]["network_prefix"],
|
||||||
value_in_json=True)
|
self.name.value,
|
||||||
|
self.network_name.value,
|
||||||
|
),
|
||||||
|
value_in_json=True,
|
||||||
|
)
|
||||||
if network:
|
if network:
|
||||||
self.add_error("Network with name {} already exists" \
|
self.add_error(
|
||||||
.format(self.network_name.value))
|
"Network with name {} already exists".format(
|
||||||
|
self.network_name.value
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
def network_type_validation(self):
|
def network_type_validation(self):
|
||||||
supported_network_types = ["vxlan"]
|
supported_network_types = ["vxlan"]
|
||||||
if self.type.value not in supported_network_types:
|
if self.type.value not in supported_network_types:
|
||||||
self.add_error("Unsupported Network Type. Supported network types are {}".format(supported_network_types))
|
self.add_error(
|
||||||
|
"Unsupported Network Type. Supported network types are {}".format(
|
||||||
|
supported_network_types
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
|
@ -8,7 +8,7 @@ from functools import wraps
|
||||||
|
|
||||||
from . import logger
|
from . import logger
|
||||||
|
|
||||||
PseudoEtcdMeta = namedtuple('PseudoEtcdMeta', ['key'])
|
PseudoEtcdMeta = namedtuple("PseudoEtcdMeta", ["key"])
|
||||||
|
|
||||||
|
|
||||||
class EtcdEntry:
|
class EtcdEntry:
|
||||||
|
@ -16,8 +16,8 @@ class EtcdEntry:
|
||||||
# value: str
|
# value: str
|
||||||
|
|
||||||
def __init__(self, meta, value, value_in_json=False):
|
def __init__(self, meta, value, value_in_json=False):
|
||||||
self.key = meta.key.decode('utf-8')
|
self.key = meta.key.decode("utf-8")
|
||||||
self.value = value.decode('utf-8')
|
self.value = value.decode("utf-8")
|
||||||
|
|
||||||
if value_in_json:
|
if value_in_json:
|
||||||
self.value = json.loads(self.value)
|
self.value = json.loads(self.value)
|
||||||
|
@ -29,11 +29,18 @@ def readable_errors(func):
|
||||||
try:
|
try:
|
||||||
return func(*args, **kwargs)
|
return func(*args, **kwargs)
|
||||||
except etcd3.exceptions.ConnectionFailedError as err:
|
except etcd3.exceptions.ConnectionFailedError as err:
|
||||||
raise etcd3.exceptions.ConnectionFailedError('etcd connection failed.') from err
|
raise etcd3.exceptions.ConnectionFailedError(
|
||||||
|
"etcd connection failed."
|
||||||
|
) from err
|
||||||
except etcd3.exceptions.ConnectionTimeoutError as err:
|
except etcd3.exceptions.ConnectionTimeoutError as err:
|
||||||
raise etcd3.exceptions.ConnectionTimeoutError('etcd connection timeout.') from err
|
raise etcd3.exceptions.ConnectionTimeoutError(
|
||||||
|
"etcd connection timeout."
|
||||||
|
) from err
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception('Some etcd error occured. See syslog for details.')
|
logger.exception(
|
||||||
|
"Some etcd error occured. See syslog for details."
|
||||||
|
)
|
||||||
|
|
||||||
return wrapper
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
@ -56,7 +63,7 @@ class Etcd3Wrapper:
|
||||||
_value = json.dumps(_value)
|
_value = json.dumps(_value)
|
||||||
|
|
||||||
if not isinstance(_key, str):
|
if not isinstance(_key, str):
|
||||||
_key = _key.decode('utf-8')
|
_key = _key.decode("utf-8")
|
||||||
|
|
||||||
return self.client.put(_key, _value, **kwargs)
|
return self.client.put(_key, _value, **kwargs)
|
||||||
|
|
||||||
|
@ -70,18 +77,25 @@ class Etcd3Wrapper:
|
||||||
|
|
||||||
@readable_errors
|
@readable_errors
|
||||||
def watch_prefix(self, key, timeout=0, value_in_json=False):
|
def watch_prefix(self, key, timeout=0, value_in_json=False):
|
||||||
timeout_event = EtcdEntry(PseudoEtcdMeta(key=b'TIMEOUT'),
|
timeout_event = EtcdEntry(
|
||||||
value=str.encode(json.dumps({'status': 'TIMEOUT',
|
PseudoEtcdMeta(key=b"TIMEOUT"),
|
||||||
'type': 'TIMEOUT'})),
|
value=str.encode(
|
||||||
value_in_json=value_in_json)
|
json.dumps({"status": "TIMEOUT", "type": "TIMEOUT"})
|
||||||
|
),
|
||||||
|
value_in_json=value_in_json,
|
||||||
|
)
|
||||||
|
|
||||||
event_queue = queue.Queue()
|
event_queue = queue.Queue()
|
||||||
|
|
||||||
def add_event_to_queue(event):
|
def add_event_to_queue(event):
|
||||||
if hasattr(event, 'events'):
|
if hasattr(event, "events"):
|
||||||
for e in event.events:
|
for e in event.events:
|
||||||
if e.value:
|
if e.value:
|
||||||
event_queue.put(EtcdEntry(e, e.value, value_in_json=value_in_json))
|
event_queue.put(
|
||||||
|
EtcdEntry(
|
||||||
|
e, e.value, value_in_json=value_in_json
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
self.client.add_watch_prefix_callback(key, add_event_to_queue)
|
self.client.add_watch_prefix_callback(key, add_event_to_queue)
|
||||||
|
|
||||||
|
@ -96,4 +110,8 @@ class Etcd3Wrapper:
|
||||||
|
|
||||||
class PsuedoEtcdEntry(EtcdEntry):
|
class PsuedoEtcdEntry(EtcdEntry):
|
||||||
def __init__(self, key, value, value_in_json=False):
|
def __init__(self, key, value, value_in_json=False):
|
||||||
super().__init__(PseudoEtcdMeta(key=key.encode('utf-8')), value, value_in_json=value_in_json)
|
super().__init__(
|
||||||
|
PseudoEtcdMeta(key=key.encode("utf-8")),
|
||||||
|
value,
|
||||||
|
value_in_json=value_in_json,
|
||||||
|
)
|
||||||
|
|
|
@ -29,7 +29,9 @@ class HostEntry(SpecificEtcdEntryBase):
|
||||||
self.last_heartbeat = time.strftime("%Y-%m-%d %H:%M:%S")
|
self.last_heartbeat = time.strftime("%Y-%m-%d %H:%M:%S")
|
||||||
|
|
||||||
def is_alive(self):
|
def is_alive(self):
|
||||||
last_heartbeat = datetime.strptime(self.last_heartbeat, "%Y-%m-%d %H:%M:%S")
|
last_heartbeat = datetime.strptime(
|
||||||
|
self.last_heartbeat, "%Y-%m-%d %H:%M:%S"
|
||||||
|
)
|
||||||
delta = datetime.now() - last_heartbeat
|
delta = datetime.now() - last_heartbeat
|
||||||
if delta.total_seconds() > 60:
|
if delta.total_seconds() > 60:
|
||||||
return False
|
return False
|
||||||
|
|
|
@ -7,21 +7,21 @@ class NoTracebackStreamHandler(logging.StreamHandler):
|
||||||
info, cache = record.exc_info, record.exc_text
|
info, cache = record.exc_info, record.exc_text
|
||||||
record.exc_info, record.exc_text = None, None
|
record.exc_info, record.exc_text = None, None
|
||||||
|
|
||||||
if record.levelname in ['WARNING', 'WARN']:
|
if record.levelname in ["WARNING", "WARN"]:
|
||||||
color = colorama.Fore.LIGHTYELLOW_EX
|
color = colorama.Fore.LIGHTYELLOW_EX
|
||||||
elif record.levelname == 'ERROR':
|
elif record.levelname == "ERROR":
|
||||||
color = colorama.Fore.LIGHTRED_EX
|
color = colorama.Fore.LIGHTRED_EX
|
||||||
elif record.levelname == 'INFO':
|
elif record.levelname == "INFO":
|
||||||
color = colorama.Fore.LIGHTGREEN_EX
|
color = colorama.Fore.LIGHTGREEN_EX
|
||||||
elif record.levelname == 'CRITICAL':
|
elif record.levelname == "CRITICAL":
|
||||||
color = colorama.Fore.LIGHTCYAN_EX
|
color = colorama.Fore.LIGHTCYAN_EX
|
||||||
else:
|
else:
|
||||||
color = colorama.Fore.WHITE
|
color = colorama.Fore.WHITE
|
||||||
|
|
||||||
try:
|
try:
|
||||||
print(color, end='', flush=True)
|
print(color, end="", flush=True)
|
||||||
super().handle(record)
|
super().handle(record)
|
||||||
finally:
|
finally:
|
||||||
record.exc_info = info
|
record.exc_info = info
|
||||||
record.exc_text = cache
|
record.exc_text = cache
|
||||||
print(colorama.Style.RESET_ALL, end='', flush=True)
|
print(colorama.Style.RESET_ALL, end="", flush=True)
|
||||||
|
|
|
@ -11,7 +11,9 @@ def random_bytes(num=6):
|
||||||
return [random.randrange(256) for _ in range(num)]
|
return [random.randrange(256) for _ in range(num)]
|
||||||
|
|
||||||
|
|
||||||
def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='%02x'):
|
def generate_mac(
|
||||||
|
uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
|
||||||
|
):
|
||||||
mac = random_bytes()
|
mac = random_bytes()
|
||||||
if oui:
|
if oui:
|
||||||
if type(oui) == str:
|
if type(oui) == str:
|
||||||
|
@ -30,35 +32,51 @@ def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='
|
||||||
|
|
||||||
|
|
||||||
def create_dev(script, _id, dev, ip=None):
|
def create_dev(script, _id, dev, ip=None):
|
||||||
command = ['sudo', '-p', 'Enter password to create network devices for vm: ',
|
command = [
|
||||||
script, str(_id), dev]
|
"sudo",
|
||||||
|
"-p",
|
||||||
|
"Enter password to create network devices for vm: ",
|
||||||
|
script,
|
||||||
|
str(_id),
|
||||||
|
dev,
|
||||||
|
]
|
||||||
if ip:
|
if ip:
|
||||||
command.append(ip)
|
command.append(ip)
|
||||||
try:
|
try:
|
||||||
output = sp.check_output(command, stderr=sp.PIPE)
|
output = sp.check_output(command, stderr=sp.PIPE)
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception('Creation of interface %s failed.', dev)
|
logger.exception("Creation of interface %s failed.", dev)
|
||||||
return None
|
return None
|
||||||
else:
|
else:
|
||||||
return output.decode('utf-8').strip()
|
return output.decode("utf-8").strip()
|
||||||
|
|
||||||
|
|
||||||
def delete_network_interface(iface):
|
def delete_network_interface(iface):
|
||||||
try:
|
try:
|
||||||
sp.check_output(
|
sp.check_output(
|
||||||
[
|
[
|
||||||
'sudo', '-p', 'Enter password to remove {} network device: '.format(iface),
|
"sudo",
|
||||||
'ip', 'link', 'del', iface
|
"-p",
|
||||||
], stderr=sp.PIPE
|
"Enter password to remove {} network device: ".format(
|
||||||
|
iface
|
||||||
|
),
|
||||||
|
"ip",
|
||||||
|
"link",
|
||||||
|
"del",
|
||||||
|
iface,
|
||||||
|
],
|
||||||
|
stderr=sp.PIPE,
|
||||||
)
|
)
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception('Interface %s Deletion failed', iface)
|
logger.exception("Interface %s Deletion failed", iface)
|
||||||
|
|
||||||
|
|
||||||
def find_free_port():
|
def find_free_port():
|
||||||
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
|
with closing(
|
||||||
|
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
|
) as s:
|
||||||
try:
|
try:
|
||||||
s.bind(('', 0))
|
s.bind(("", 0))
|
||||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||||
except Exception:
|
except Exception:
|
||||||
return None
|
return None
|
||||||
|
|
|
@ -17,7 +17,6 @@ class RequestType:
|
||||||
|
|
||||||
|
|
||||||
class RequestEntry(SpecificEtcdEntryBase):
|
class RequestEntry(SpecificEtcdEntryBase):
|
||||||
|
|
||||||
def __init__(self, e):
|
def __init__(self, e):
|
||||||
self.destination_sock_path = None
|
self.destination_sock_path = None
|
||||||
self.destination_host_key = None
|
self.destination_host_key = None
|
||||||
|
@ -30,8 +29,11 @@ class RequestEntry(SpecificEtcdEntryBase):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def from_scratch(cls, request_prefix, **kwargs):
|
def from_scratch(cls, request_prefix, **kwargs):
|
||||||
e = PsuedoEtcdEntry(join(request_prefix, uuid4().hex),
|
e = PsuedoEtcdEntry(
|
||||||
value=json.dumps(kwargs).encode("utf-8"), value_in_json=True)
|
join(request_prefix, uuid4().hex),
|
||||||
|
value=json.dumps(kwargs).encode("utf-8"),
|
||||||
|
value_in_json=True,
|
||||||
|
)
|
||||||
return cls(e)
|
return cls(e)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -14,7 +14,7 @@ class StorageUnit(fields.Field):
|
||||||
class SpecsSchema(Schema):
|
class SpecsSchema(Schema):
|
||||||
cpu = fields.Int()
|
cpu = fields.Int()
|
||||||
ram = StorageUnit()
|
ram = StorageUnit()
|
||||||
os_ssd = StorageUnit(data_key='os-ssd', attribute='os-ssd')
|
os_ssd = StorageUnit(data_key="os-ssd", attribute="os-ssd")
|
||||||
hdd = fields.List(StorageUnit())
|
hdd = fields.List(StorageUnit())
|
||||||
|
|
||||||
|
|
||||||
|
@ -29,11 +29,13 @@ class VMSchema(Schema):
|
||||||
image_uuid = fields.Str()
|
image_uuid = fields.Str()
|
||||||
hostname = fields.Str()
|
hostname = fields.Str()
|
||||||
metadata = fields.Dict()
|
metadata = fields.Dict()
|
||||||
network = fields.List(fields.Tuple((fields.Str(), fields.Str(), fields.Int())))
|
network = fields.List(
|
||||||
|
fields.Tuple((fields.Str(), fields.Str(), fields.Int()))
|
||||||
|
)
|
||||||
in_migration = fields.Bool()
|
in_migration = fields.Bool()
|
||||||
|
|
||||||
|
|
||||||
class NetworkSchema(Schema):
|
class NetworkSchema(Schema):
|
||||||
_id = fields.Int(data_key='id', attribute='id')
|
_id = fields.Int(data_key="id", attribute="id")
|
||||||
_type = fields.Str(data_key='type', attribute='type')
|
_type = fields.Str(data_key="type", attribute="type")
|
||||||
ipv6 = fields.Str()
|
ipv6 = fields.Str()
|
||||||
|
|
|
@ -11,7 +11,7 @@ from ucloud.settings import settings as config
|
||||||
|
|
||||||
|
|
||||||
class ImageStorageHandler(ABC):
|
class ImageStorageHandler(ABC):
|
||||||
handler_name = 'base'
|
handler_name = "base"
|
||||||
|
|
||||||
def __init__(self, image_base, vm_base):
|
def __init__(self, image_base, vm_base):
|
||||||
self.image_base = image_base
|
self.image_base = image_base
|
||||||
|
@ -55,9 +55,9 @@ class ImageStorageHandler(ABC):
|
||||||
try:
|
try:
|
||||||
sp.check_output(command, stderr=sp.PIPE)
|
sp.check_output(command, stderr=sp.PIPE)
|
||||||
except sp.CalledProcessError as e:
|
except sp.CalledProcessError as e:
|
||||||
_stderr = e.stderr.decode('utf-8').strip()
|
_stderr = e.stderr.decode("utf-8").strip()
|
||||||
if report:
|
if report:
|
||||||
logger.exception('%s:- %s', error_origin, _stderr)
|
logger.exception("%s:- %s", error_origin, _stderr)
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
@ -72,14 +72,16 @@ class ImageStorageHandler(ABC):
|
||||||
|
|
||||||
|
|
||||||
class FileSystemBasedImageStorageHandler(ImageStorageHandler):
|
class FileSystemBasedImageStorageHandler(ImageStorageHandler):
|
||||||
handler_name = 'Filesystem'
|
handler_name = "Filesystem"
|
||||||
|
|
||||||
def import_image(self, src, dest, protect=False):
|
def import_image(self, src, dest, protect=False):
|
||||||
dest = join_path(self.image_base, dest)
|
dest = join_path(self.image_base, dest)
|
||||||
try:
|
try:
|
||||||
shutil.copy(src, dest)
|
shutil.copy(src, dest)
|
||||||
if protect:
|
if protect:
|
||||||
os.chmod(dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
|
os.chmod(
|
||||||
|
dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.exception(e)
|
logger.exception(e)
|
||||||
return False
|
return False
|
||||||
|
@ -97,7 +99,14 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):
|
||||||
|
|
||||||
def resize_vm_image(self, path, size):
|
def resize_vm_image(self, path, size):
|
||||||
path = join_path(self.vm_base, path)
|
path = join_path(self.vm_base, path)
|
||||||
command = ["qemu-img", "resize", "-f", "raw", path, "{}M".format(size)]
|
command = [
|
||||||
|
"qemu-img",
|
||||||
|
"resize",
|
||||||
|
"-f",
|
||||||
|
"raw",
|
||||||
|
path,
|
||||||
|
"{}M".format(size),
|
||||||
|
]
|
||||||
if self.execute_command(command):
|
if self.execute_command(command):
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
|
@ -126,15 +135,25 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):
|
||||||
|
|
||||||
|
|
||||||
class CEPHBasedImageStorageHandler(ImageStorageHandler):
|
class CEPHBasedImageStorageHandler(ImageStorageHandler):
|
||||||
handler_name = 'Ceph'
|
handler_name = "Ceph"
|
||||||
|
|
||||||
def import_image(self, src, dest, protect=False):
|
def import_image(self, src, dest, protect=False):
|
||||||
dest = join_path(self.image_base, dest)
|
dest = join_path(self.image_base, dest)
|
||||||
import_command = ["rbd", "import", src, dest]
|
import_command = ["rbd", "import", src, dest]
|
||||||
commands = [import_command]
|
commands = [import_command]
|
||||||
if protect:
|
if protect:
|
||||||
snap_create_command = ["rbd", "snap", "create", "{}@protected".format(dest)]
|
snap_create_command = [
|
||||||
snap_protect_command = ["rbd", "snap", "protect", "{}@protected".format(dest)]
|
"rbd",
|
||||||
|
"snap",
|
||||||
|
"create",
|
||||||
|
"{}@protected".format(dest),
|
||||||
|
]
|
||||||
|
snap_protect_command = [
|
||||||
|
"rbd",
|
||||||
|
"snap",
|
||||||
|
"protect",
|
||||||
|
"{}@protected".format(dest),
|
||||||
|
]
|
||||||
commands.append(snap_create_command)
|
commands.append(snap_create_command)
|
||||||
commands.append(snap_protect_command)
|
commands.append(snap_protect_command)
|
||||||
|
|
||||||
|
@ -174,16 +193,16 @@ class CEPHBasedImageStorageHandler(ImageStorageHandler):
|
||||||
|
|
||||||
|
|
||||||
def get_storage_handler():
|
def get_storage_handler():
|
||||||
__storage_backend = config['storage']['storage_backend']
|
__storage_backend = config["storage"]["storage_backend"]
|
||||||
if __storage_backend == 'filesystem':
|
if __storage_backend == "filesystem":
|
||||||
return FileSystemBasedImageStorageHandler(
|
return FileSystemBasedImageStorageHandler(
|
||||||
vm_base=config['storage']['vm_dir'],
|
vm_base=config["storage"]["vm_dir"],
|
||||||
image_base=config['storage']['image_dir']
|
image_base=config["storage"]["image_dir"],
|
||||||
)
|
)
|
||||||
elif __storage_backend == 'ceph':
|
elif __storage_backend == "ceph":
|
||||||
return CEPHBasedImageStorageHandler(
|
return CEPHBasedImageStorageHandler(
|
||||||
vm_base=config['storage']['ceph_vm_pool'],
|
vm_base=config["storage"]["ceph_vm_pool"],
|
||||||
image_base=config['storage']['ceph_image_pool']
|
image_base=config["storage"]["ceph_image_pool"],
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
raise Exception('Unknown Image Storage Handler')
|
raise Exception("Unknown Image Storage Handler")
|
||||||
|
|
|
@ -13,13 +13,12 @@ class VMStatus:
|
||||||
|
|
||||||
|
|
||||||
def declare_stopped(vm):
|
def declare_stopped(vm):
|
||||||
vm['hostname'] = ''
|
vm["hostname"] = ""
|
||||||
vm['in_migration'] = False
|
vm["in_migration"] = False
|
||||||
vm['status'] = VMStatus.stopped
|
vm["status"] = VMStatus.stopped
|
||||||
|
|
||||||
|
|
||||||
class VMEntry(SpecificEtcdEntryBase):
|
class VMEntry(SpecificEtcdEntryBase):
|
||||||
|
|
||||||
def __init__(self, e):
|
def __init__(self, e):
|
||||||
self.owner = None # type: str
|
self.owner = None # type: str
|
||||||
self.specs = None # type: dict
|
self.specs = None # type: dict
|
||||||
|
@ -48,7 +47,9 @@ class VMEntry(SpecificEtcdEntryBase):
|
||||||
|
|
||||||
def add_log(self, msg):
|
def add_log(self, msg):
|
||||||
self.log = self.log[:5]
|
self.log = self.log[:5]
|
||||||
self.log.append("{} - {}".format(datetime.now().isoformat(), msg))
|
self.log.append(
|
||||||
|
"{} - {}".format(datetime.now().isoformat(), msg)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class VmPool:
|
class VmPool:
|
||||||
|
|
|
@ -5,31 +5,41 @@ from ucloud.shared import shared
|
||||||
|
|
||||||
|
|
||||||
def update_config(section, kwargs):
|
def update_config(section, kwargs):
|
||||||
uncloud_config = shared.etcd_client.get(settings.config_key, value_in_json=True)
|
uncloud_config = shared.etcd_client.get(
|
||||||
|
settings.config_key, value_in_json=True
|
||||||
|
)
|
||||||
if not uncloud_config:
|
if not uncloud_config:
|
||||||
uncloud_config = {}
|
uncloud_config = {}
|
||||||
else:
|
else:
|
||||||
uncloud_config = uncloud_config.value
|
uncloud_config = uncloud_config.value
|
||||||
|
|
||||||
uncloud_config[section] = kwargs
|
uncloud_config[section] = kwargs
|
||||||
shared.etcd_client.put(settings.config_key, uncloud_config, value_in_json=True)
|
shared.etcd_client.put(
|
||||||
|
settings.config_key, uncloud_config, value_in_json=True
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def configure_parser(parser):
|
def configure_parser(parser):
|
||||||
configure_subparsers = parser.add_subparsers(dest="subcommand")
|
configure_subparsers = parser.add_subparsers(dest="subcommand")
|
||||||
|
|
||||||
otp_parser = configure_subparsers.add_parser("otp")
|
otp_parser = configure_subparsers.add_parser("otp")
|
||||||
otp_parser.add_argument("--verification-controller-url",
|
otp_parser.add_argument(
|
||||||
required=True, metavar="URL")
|
"--verification-controller-url", required=True, metavar="URL"
|
||||||
otp_parser.add_argument("--auth-name", required=True,
|
)
|
||||||
metavar="OTP-NAME")
|
otp_parser.add_argument(
|
||||||
otp_parser.add_argument("--auth-realm", required=True,
|
"--auth-name", required=True, metavar="OTP-NAME"
|
||||||
metavar="OTP-REALM")
|
)
|
||||||
otp_parser.add_argument("--auth-seed", required=True,
|
otp_parser.add_argument(
|
||||||
metavar="OTP-SEED")
|
"--auth-realm", required=True, metavar="OTP-REALM"
|
||||||
|
)
|
||||||
|
otp_parser.add_argument(
|
||||||
|
"--auth-seed", required=True, metavar="OTP-SEED"
|
||||||
|
)
|
||||||
|
|
||||||
network_parser = configure_subparsers.add_parser("network")
|
network_parser = configure_subparsers.add_parser("network")
|
||||||
network_parser.add_argument("--prefix-length", required=True, type=int)
|
network_parser.add_argument(
|
||||||
|
"--prefix-length", required=True, type=int
|
||||||
|
)
|
||||||
network_parser.add_argument("--prefix", required=True)
|
network_parser.add_argument("--prefix", required=True)
|
||||||
network_parser.add_argument("--vxlan-phy-dev", required=True)
|
network_parser.add_argument("--vxlan-phy-dev", required=True)
|
||||||
|
|
||||||
|
@ -38,25 +48,31 @@ def configure_parser(parser):
|
||||||
netbox_parser.add_argument("--token", required=True)
|
netbox_parser.add_argument("--token", required=True)
|
||||||
|
|
||||||
ssh_parser = configure_subparsers.add_parser("ssh")
|
ssh_parser = configure_subparsers.add_parser("ssh")
|
||||||
ssh_parser.add_argument('--username', default="root")
|
ssh_parser.add_argument("--username", default="root")
|
||||||
ssh_parser.add_argument('--private-key-path',
|
ssh_parser.add_argument(
|
||||||
default=os.path.expanduser("~/.ssh/id_rsa"))
|
"--private-key-path",
|
||||||
|
default=os.path.expanduser("~/.ssh/id_rsa"),
|
||||||
|
)
|
||||||
|
|
||||||
storage_parser = configure_subparsers.add_parser("storage")
|
storage_parser = configure_subparsers.add_parser("storage")
|
||||||
storage_parser.add_argument('--file-dir', required=True)
|
storage_parser.add_argument("--file-dir", required=True)
|
||||||
storage_parser_subparsers = storage_parser.add_subparsers(dest="storage_backend")
|
storage_parser_subparsers = storage_parser.add_subparsers(
|
||||||
|
dest="storage_backend"
|
||||||
filesystem_storage_parser = storage_parser_subparsers.add_parser("filesystem")
|
)
|
||||||
filesystem_storage_parser.add_argument('--vm-dir', required=True)
|
|
||||||
filesystem_storage_parser.add_argument('--image-dir', required=True)
|
filesystem_storage_parser = storage_parser_subparsers.add_parser(
|
||||||
|
"filesystem"
|
||||||
|
)
|
||||||
|
filesystem_storage_parser.add_argument("--vm-dir", required=True)
|
||||||
|
filesystem_storage_parser.add_argument("--image-dir", required=True)
|
||||||
|
|
||||||
ceph_storage_parser = storage_parser_subparsers.add_parser("ceph")
|
ceph_storage_parser = storage_parser_subparsers.add_parser("ceph")
|
||||||
ceph_storage_parser.add_argument('--ceph-vm-pool', required=True)
|
ceph_storage_parser.add_argument("--ceph-vm-pool", required=True)
|
||||||
ceph_storage_parser.add_argument('--ceph-image-pool', required=True)
|
ceph_storage_parser.add_argument("--ceph-image-pool", required=True)
|
||||||
|
|
||||||
|
|
||||||
def main(**kwargs):
|
def main(**kwargs):
|
||||||
subcommand = kwargs.pop('subcommand')
|
subcommand = kwargs.pop("subcommand")
|
||||||
if not subcommand:
|
if not subcommand:
|
||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
|
|
|
@ -17,9 +17,9 @@
|
||||||
|
|
||||||
# -- Project information -----------------------------------------------------
|
# -- Project information -----------------------------------------------------
|
||||||
|
|
||||||
project = 'ucloud'
|
project = "ucloud"
|
||||||
copyright = '2019, ungleich'
|
copyright = "2019, ungleich"
|
||||||
author = 'ungleich'
|
author = "ungleich"
|
||||||
|
|
||||||
# -- General configuration ---------------------------------------------------
|
# -- General configuration ---------------------------------------------------
|
||||||
|
|
||||||
|
@ -27,12 +27,12 @@ author = 'ungleich'
|
||||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||||
# ones.
|
# ones.
|
||||||
extensions = [
|
extensions = [
|
||||||
'sphinx.ext.autodoc',
|
"sphinx.ext.autodoc",
|
||||||
'sphinx_rtd_theme',
|
"sphinx_rtd_theme",
|
||||||
]
|
]
|
||||||
|
|
||||||
# Add any paths that contain templates here, relative to this directory.
|
# Add any paths that contain templates here, relative to this directory.
|
||||||
templates_path = ['_templates']
|
templates_path = ["_templates"]
|
||||||
|
|
||||||
# List of patterns, relative to source directory, that match files and
|
# List of patterns, relative to source directory, that match files and
|
||||||
# directories to ignore when looking for source files.
|
# directories to ignore when looking for source files.
|
||||||
|
@ -50,4 +50,4 @@ html_theme = "sphinx_rtd_theme"
|
||||||
# Add any paths that contain custom static files (such as style sheets) here,
|
# Add any paths that contain custom static files (such as style sheets) here,
|
||||||
# relative to this directory. They are copied after the builtin static files,
|
# relative to this directory. They are copied after the builtin static files,
|
||||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||||
html_static_path = ['_static']
|
html_static_path = ["_static"]
|
||||||
|
|
|
@ -21,7 +21,8 @@ def sha512sum(file: str):
|
||||||
ELSE:
|
ELSE:
|
||||||
return None
|
return None
|
||||||
"""
|
"""
|
||||||
if not isinstance(file, str): raise TypeError
|
if not isinstance(file, str):
|
||||||
|
raise TypeError
|
||||||
try:
|
try:
|
||||||
output = sp.check_output(["sha512sum", file], stderr=sp.PIPE)
|
output = sp.check_output(["sha512sum", file], stderr=sp.PIPE)
|
||||||
except sp.CalledProcessError as e:
|
except sp.CalledProcessError as e:
|
||||||
|
@ -49,23 +50,25 @@ def track_file(file, base_dir):
|
||||||
file_path = pathlib.Path(file).parts[-1]
|
file_path = pathlib.Path(file).parts[-1]
|
||||||
|
|
||||||
# Create Entry
|
# Create Entry
|
||||||
entry_key = os.path.join(settings['etcd']['file_prefix'], str(file_id))
|
entry_key = os.path.join(
|
||||||
|
settings["etcd"]["file_prefix"], str(file_id)
|
||||||
|
)
|
||||||
entry_value = {
|
entry_value = {
|
||||||
"filename": file_path,
|
"filename": file_path,
|
||||||
"owner": owner,
|
"owner": owner,
|
||||||
"sha512sum": sha512sum(file),
|
"sha512sum": sha512sum(file),
|
||||||
"creation_date": creation_date,
|
"creation_date": creation_date,
|
||||||
"size": os.path.getsize(file)
|
"size": os.path.getsize(file),
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.info("Tracking %s", file)
|
logger.info("Tracking %s", file)
|
||||||
|
|
||||||
shared.etcd_client.put(entry_key, entry_value, value_in_json=True)
|
shared.etcd_client.put(entry_key, entry_value, value_in_json=True)
|
||||||
os.setxattr(file, 'user.utracked', b'True')
|
os.setxattr(file, "user.utracked", b"True")
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
base_dir = settings['storage']['file_dir']
|
base_dir = settings["storage"]["file_dir"]
|
||||||
|
|
||||||
# Recursively Get All Files and Folder below BASE_DIR
|
# Recursively Get All Files and Folder below BASE_DIR
|
||||||
files = glob.glob("{}/**".format(base_dir), recursive=True)
|
files = glob.glob("{}/**".format(base_dir), recursive=True)
|
||||||
|
@ -76,7 +79,7 @@ def main():
|
||||||
untracked_files = []
|
untracked_files = []
|
||||||
for file in files:
|
for file in files:
|
||||||
try:
|
try:
|
||||||
os.getxattr(file, 'user.utracked')
|
os.getxattr(file, "user.utracked")
|
||||||
except OSError:
|
except OSError:
|
||||||
track_file(file, base_dir)
|
track_file(file, base_dir)
|
||||||
untracked_files.append(file)
|
untracked_files.append(file)
|
||||||
|
|
|
@ -15,49 +15,79 @@ from . import virtualmachine, logger
|
||||||
def update_heartbeat(hostname):
|
def update_heartbeat(hostname):
|
||||||
"""Update Last HeartBeat Time for :param hostname: in etcd"""
|
"""Update Last HeartBeat Time for :param hostname: in etcd"""
|
||||||
host_pool = shared.host_pool
|
host_pool = shared.host_pool
|
||||||
this_host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
|
this_host = next(
|
||||||
|
filter(lambda h: h.hostname == hostname, host_pool.hosts), None
|
||||||
|
)
|
||||||
while True:
|
while True:
|
||||||
this_host.update_heartbeat()
|
this_host.update_heartbeat()
|
||||||
host_pool.put(this_host)
|
host_pool.put(this_host)
|
||||||
time.sleep(10)
|
time.sleep(10)
|
||||||
|
|
||||||
|
|
||||||
def maintenance():
|
def maintenance(host):
|
||||||
vmm = VMM()
|
vmm = VMM()
|
||||||
running_vms = vmm.discover()
|
running_vms = vmm.discover()
|
||||||
for vm_uuid in running_vms:
|
for vm_uuid in running_vms:
|
||||||
if vmm.is_running(vm_uuid) and vmm.get_status(vm_uuid) == 'running':
|
if (
|
||||||
vm = shared.vm_pool.get(join_path(settings['etcd']['vm_prefix'], vm_uuid))
|
vmm.is_running(vm_uuid)
|
||||||
|
and vmm.get_status(vm_uuid) == "running"
|
||||||
|
):
|
||||||
|
vm = shared.vm_pool.get(
|
||||||
|
join_path(settings["etcd"]["vm_prefix"], vm_uuid)
|
||||||
|
)
|
||||||
vm.status = VMStatus.running
|
vm.status = VMStatus.running
|
||||||
|
vm.vnc_socket = vmm.get_vnc(vm_uuid)
|
||||||
|
vm.hostname = host
|
||||||
shared.vm_pool.put(vm)
|
shared.vm_pool.put(vm)
|
||||||
|
|
||||||
|
|
||||||
def main(hostname):
|
def main(hostname):
|
||||||
host_pool = shared.host_pool
|
host_pool = shared.host_pool
|
||||||
host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
|
host = next(
|
||||||
assert host is not None, "No such host with name = {}".format(hostname)
|
filter(lambda h: h.hostname == hostname, host_pool.hosts), None
|
||||||
|
)
|
||||||
|
assert host is not None, "No such host with name = {}".format(
|
||||||
|
hostname
|
||||||
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
heartbeat_updating_process = mp.Process(target=update_heartbeat, args=(hostname,))
|
heartbeat_updating_process = mp.Process(
|
||||||
|
target=update_heartbeat, args=(hostname,)
|
||||||
|
)
|
||||||
heartbeat_updating_process.start()
|
heartbeat_updating_process.start()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
raise Exception('ucloud-host heartbeat updating mechanism is not working') from e
|
raise Exception(
|
||||||
|
"ucloud-host heartbeat updating mechanism is not working"
|
||||||
|
) from e
|
||||||
|
|
||||||
for events_iterator in [
|
for events_iterator in [
|
||||||
shared.etcd_client.get_prefix(settings['etcd']['request_prefix'], value_in_json=True),
|
shared.etcd_client.get_prefix(
|
||||||
shared.etcd_client.watch_prefix(settings['etcd']['request_prefix'], timeout=10, value_in_json=True),
|
settings["etcd"]["request_prefix"], value_in_json=True
|
||||||
|
),
|
||||||
|
shared.etcd_client.watch_prefix(
|
||||||
|
settings["etcd"]["request_prefix"],
|
||||||
|
timeout=10,
|
||||||
|
value_in_json=True,
|
||||||
|
),
|
||||||
]:
|
]:
|
||||||
for request_event in events_iterator:
|
for request_event in events_iterator:
|
||||||
request_event = RequestEntry(request_event)
|
request_event = RequestEntry(request_event)
|
||||||
|
|
||||||
if request_event.type == "TIMEOUT":
|
if request_event.type == "TIMEOUT":
|
||||||
maintenance()
|
maintenance(host.key)
|
||||||
|
|
||||||
if request_event.hostname == host.key:
|
if request_event.hostname == host.key:
|
||||||
logger.debug("VM Request: %s", request_event)
|
logger.debug("VM Request: %s", request_event)
|
||||||
|
|
||||||
shared.request_pool.client.client.delete(request_event.key)
|
shared.request_pool.client.client.delete(
|
||||||
vm_entry = shared.etcd_client.get(join_path(settings['etcd']['vm_prefix'], request_event.uuid))
|
request_event.key
|
||||||
|
)
|
||||||
|
vm_entry = shared.etcd_client.get(
|
||||||
|
join_path(
|
||||||
|
settings["etcd"]["vm_prefix"],
|
||||||
|
request_event.uuid,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
if vm_entry:
|
if vm_entry:
|
||||||
vm = virtualmachine.VM(vm_entry)
|
vm = virtualmachine.VM(vm_entry)
|
||||||
|
@ -70,23 +100,35 @@ def main(hostname):
|
||||||
elif request_event.type == RequestType.DeleteVM:
|
elif request_event.type == RequestType.DeleteVM:
|
||||||
vm.delete()
|
vm.delete()
|
||||||
|
|
||||||
elif request_event.type == RequestType.InitVMMigration:
|
elif (
|
||||||
|
request_event.type
|
||||||
|
== RequestType.InitVMMigration
|
||||||
|
):
|
||||||
vm.start(destination_host_key=host.key)
|
vm.start(destination_host_key=host.key)
|
||||||
|
|
||||||
elif request_event.type == RequestType.TransferVM:
|
elif request_event.type == RequestType.TransferVM:
|
||||||
host = host_pool.get(request_event.destination_host_key)
|
host = host_pool.get(
|
||||||
|
request_event.destination_host_key
|
||||||
|
)
|
||||||
if host:
|
if host:
|
||||||
vm.migrate(destination_host=host.hostname,
|
vm.migrate(
|
||||||
destination_sock_path=request_event.destination_sock_path)
|
destination_host=host.hostname,
|
||||||
|
destination_sock_path=request_event.destination_sock_path,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
logger.error('Host %s not found!', request_event.destination_host_key)
|
logger.error(
|
||||||
|
"Host %s not found!",
|
||||||
|
request_event.destination_host_key,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
logger.info("VM Entry missing")
|
logger.info("VM Entry missing")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
argparser = argparse.ArgumentParser()
|
argparser = argparse.ArgumentParser()
|
||||||
argparser.add_argument("hostname", help="Name of this host. e.g uncloud1.ungleich.ch")
|
argparser.add_argument(
|
||||||
|
"hostname", help="Name of this host. e.g uncloud1.ungleich.ch"
|
||||||
|
)
|
||||||
args = argparser.parse_args()
|
args = argparser.parse_args()
|
||||||
mp.set_start_method('spawn')
|
mp.set_start_method("spawn")
|
||||||
main(args.hostname)
|
main(args.hostname)
|
||||||
|
|
|
@ -26,10 +26,7 @@ LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
# Mapping host architecture to any additional architectures it can
|
# Mapping host architecture to any additional architectures it can
|
||||||
# support which often includes its 32 bit cousin.
|
# support which often includes its 32 bit cousin.
|
||||||
ADDITIONAL_ARCHES = {
|
ADDITIONAL_ARCHES = {"x86_64": "i386", "aarch64": "armhf"}
|
||||||
"x86_64": "i386",
|
|
||||||
"aarch64": "armhf"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def kvm_available(target_arch=None):
|
def kvm_available(target_arch=None):
|
||||||
|
@ -81,10 +78,17 @@ class QEMUMachine(object):
|
||||||
# vm is guaranteed to be shut down here
|
# vm is guaranteed to be shut down here
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, binary, args=None, wrapper=None, name=None,
|
def __init__(
|
||||||
test_dir="/var/tmp", monitor_address=None,
|
self,
|
||||||
socket_scm_helper=None):
|
binary,
|
||||||
'''
|
args=None,
|
||||||
|
wrapper=None,
|
||||||
|
name=None,
|
||||||
|
test_dir="/var/tmp",
|
||||||
|
monitor_address=None,
|
||||||
|
socket_scm_helper=None,
|
||||||
|
):
|
||||||
|
"""
|
||||||
Initialize a QEMUMachine
|
Initialize a QEMUMachine
|
||||||
|
|
||||||
@param binary: path to the qemu binary
|
@param binary: path to the qemu binary
|
||||||
|
@ -95,7 +99,7 @@ class QEMUMachine(object):
|
||||||
@param monitor_address: address for QMP monitor
|
@param monitor_address: address for QMP monitor
|
||||||
@param socket_scm_helper: helper program, required for send_fd_scm()
|
@param socket_scm_helper: helper program, required for send_fd_scm()
|
||||||
@note: Qemu process is not started until launch() is used.
|
@note: Qemu process is not started until launch() is used.
|
||||||
'''
|
"""
|
||||||
if args is None:
|
if args is None:
|
||||||
args = []
|
args = []
|
||||||
if wrapper is None:
|
if wrapper is None:
|
||||||
|
@ -109,7 +113,9 @@ class QEMUMachine(object):
|
||||||
self._qemu_log_file = None
|
self._qemu_log_file = None
|
||||||
self._popen = None
|
self._popen = None
|
||||||
self._binary = binary
|
self._binary = binary
|
||||||
self._args = list(args) # Force copy args in case we modify them
|
self._args = list(
|
||||||
|
args
|
||||||
|
) # Force copy args in case we modify them
|
||||||
self._wrapper = wrapper
|
self._wrapper = wrapper
|
||||||
self._events = []
|
self._events = []
|
||||||
self._iolog = None
|
self._iolog = None
|
||||||
|
@ -137,26 +143,24 @@ class QEMUMachine(object):
|
||||||
|
|
||||||
# This can be used to add an unused monitor instance.
|
# This can be used to add an unused monitor instance.
|
||||||
def add_monitor_null(self):
|
def add_monitor_null(self):
|
||||||
self._args.append('-monitor')
|
self._args.append("-monitor")
|
||||||
self._args.append('null')
|
self._args.append("null")
|
||||||
|
|
||||||
def add_fd(self, fd, fdset, opaque, opts=''):
|
def add_fd(self, fd, fdset, opaque, opts=""):
|
||||||
"""
|
"""
|
||||||
Pass a file descriptor to the VM
|
Pass a file descriptor to the VM
|
||||||
"""
|
"""
|
||||||
options = ['fd=%d' % fd,
|
options = ["fd=%d" % fd, "set=%d" % fdset, "opaque=%s" % opaque]
|
||||||
'set=%d' % fdset,
|
|
||||||
'opaque=%s' % opaque]
|
|
||||||
if opts:
|
if opts:
|
||||||
options.append(opts)
|
options.append(opts)
|
||||||
|
|
||||||
# This did not exist before 3.4, but since then it is
|
# This did not exist before 3.4, but since then it is
|
||||||
# mandatory for our purpose
|
# mandatory for our purpose
|
||||||
if hasattr(os, 'set_inheritable'):
|
if hasattr(os, "set_inheritable"):
|
||||||
os.set_inheritable(fd, True)
|
os.set_inheritable(fd, True)
|
||||||
|
|
||||||
self._args.append('-add-fd')
|
self._args.append("-add-fd")
|
||||||
self._args.append(','.join(options))
|
self._args.append(",".join(options))
|
||||||
return self
|
return self
|
||||||
|
|
||||||
# Exactly one of fd and file_path must be given.
|
# Exactly one of fd and file_path must be given.
|
||||||
|
@ -168,18 +172,21 @@ class QEMUMachine(object):
|
||||||
if self._socket_scm_helper is None:
|
if self._socket_scm_helper is None:
|
||||||
raise QEMUMachineError("No path to socket_scm_helper set")
|
raise QEMUMachineError("No path to socket_scm_helper set")
|
||||||
if not os.path.exists(self._socket_scm_helper):
|
if not os.path.exists(self._socket_scm_helper):
|
||||||
raise QEMUMachineError("%s does not exist" %
|
raise QEMUMachineError(
|
||||||
self._socket_scm_helper)
|
"%s does not exist" % self._socket_scm_helper
|
||||||
|
)
|
||||||
|
|
||||||
# This did not exist before 3.4, but since then it is
|
# This did not exist before 3.4, but since then it is
|
||||||
# mandatory for our purpose
|
# mandatory for our purpose
|
||||||
if hasattr(os, 'set_inheritable'):
|
if hasattr(os, "set_inheritable"):
|
||||||
os.set_inheritable(self._qmp.get_sock_fd(), True)
|
os.set_inheritable(self._qmp.get_sock_fd(), True)
|
||||||
if fd is not None:
|
if fd is not None:
|
||||||
os.set_inheritable(fd, True)
|
os.set_inheritable(fd, True)
|
||||||
|
|
||||||
fd_param = ["%s" % self._socket_scm_helper,
|
fd_param = [
|
||||||
"%d" % self._qmp.get_sock_fd()]
|
"%s" % self._socket_scm_helper,
|
||||||
|
"%d" % self._qmp.get_sock_fd(),
|
||||||
|
]
|
||||||
|
|
||||||
if file_path is not None:
|
if file_path is not None:
|
||||||
assert fd is None
|
assert fd is None
|
||||||
|
@ -188,9 +195,14 @@ class QEMUMachine(object):
|
||||||
assert fd is not None
|
assert fd is not None
|
||||||
fd_param.append(str(fd))
|
fd_param.append(str(fd))
|
||||||
|
|
||||||
devnull = open(os.path.devnull, 'rb')
|
devnull = open(os.path.devnull, "rb")
|
||||||
proc = subprocess.Popen(fd_param, stdin=devnull, stdout=subprocess.PIPE,
|
proc = subprocess.Popen(
|
||||||
stderr=subprocess.STDOUT, close_fds=False)
|
fd_param,
|
||||||
|
stdin=devnull,
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.STDOUT,
|
||||||
|
close_fds=False,
|
||||||
|
)
|
||||||
output = proc.communicate()[0]
|
output = proc.communicate()[0]
|
||||||
if output:
|
if output:
|
||||||
LOG.debug(output)
|
LOG.debug(output)
|
||||||
|
@ -231,24 +243,29 @@ class QEMUMachine(object):
|
||||||
if isinstance(self._monitor_address, tuple):
|
if isinstance(self._monitor_address, tuple):
|
||||||
moncdev = "socket,id=mon,host=%s,port=%s" % (
|
moncdev = "socket,id=mon,host=%s,port=%s" % (
|
||||||
self._monitor_address[0],
|
self._monitor_address[0],
|
||||||
self._monitor_address[1])
|
self._monitor_address[1],
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
moncdev = 'socket,id=mon,path=%s' % self._vm_monitor
|
moncdev = "socket,id=mon,path=%s" % self._vm_monitor
|
||||||
args = ['-chardev', moncdev,
|
args = ["-chardev", moncdev, "-mon", "chardev=mon,mode=control"]
|
||||||
'-mon', 'chardev=mon,mode=control']
|
|
||||||
if self._machine is not None:
|
if self._machine is not None:
|
||||||
args.extend(['-machine', self._machine])
|
args.extend(["-machine", self._machine])
|
||||||
if self._console_set:
|
if self._console_set:
|
||||||
self._console_address = os.path.join(self._temp_dir,
|
self._console_address = os.path.join(
|
||||||
self._name + "-console.sock")
|
self._temp_dir, self._name + "-console.sock"
|
||||||
chardev = ('socket,id=console,path=%s,server,nowait' %
|
)
|
||||||
self._console_address)
|
chardev = (
|
||||||
args.extend(['-chardev', chardev])
|
"socket,id=console,path=%s,server,nowait"
|
||||||
|
% self._console_address
|
||||||
|
)
|
||||||
|
args.extend(["-chardev", chardev])
|
||||||
if self._console_device_type is None:
|
if self._console_device_type is None:
|
||||||
args.extend(['-serial', 'chardev:console'])
|
args.extend(["-serial", "chardev:console"])
|
||||||
else:
|
else:
|
||||||
device = '%s,chardev=console' % self._console_device_type
|
device = (
|
||||||
args.extend(['-device', device])
|
"%s,chardev=console" % self._console_device_type
|
||||||
|
)
|
||||||
|
args.extend(["-device", device])
|
||||||
return args
|
return args
|
||||||
|
|
||||||
def _pre_launch(self):
|
def _pre_launch(self):
|
||||||
|
@ -256,13 +273,17 @@ class QEMUMachine(object):
|
||||||
if self._monitor_address is not None:
|
if self._monitor_address is not None:
|
||||||
self._vm_monitor = self._monitor_address
|
self._vm_monitor = self._monitor_address
|
||||||
else:
|
else:
|
||||||
self._vm_monitor = os.path.join(self._temp_dir,
|
self._vm_monitor = os.path.join(
|
||||||
self._name + "-monitor.sock")
|
self._temp_dir, self._name + "-monitor.sock"
|
||||||
self._qemu_log_path = os.path.join(self._temp_dir, self._name + ".log")
|
)
|
||||||
self._qemu_log_file = open(self._qemu_log_path, 'wb')
|
self._qemu_log_path = os.path.join(
|
||||||
|
self._temp_dir, self._name + ".log"
|
||||||
|
)
|
||||||
|
self._qemu_log_file = open(self._qemu_log_path, "wb")
|
||||||
|
|
||||||
self._qmp = qmp.QEMUMonitorProtocol(self._vm_monitor,
|
self._qmp = qmp.QEMUMonitorProtocol(
|
||||||
server=True)
|
self._vm_monitor, server=True
|
||||||
|
)
|
||||||
|
|
||||||
def _post_launch(self):
|
def _post_launch(self):
|
||||||
self._qmp.accept()
|
self._qmp.accept()
|
||||||
|
@ -289,7 +310,7 @@ class QEMUMachine(object):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if self._launched:
|
if self._launched:
|
||||||
raise QEMUMachineError('VM already launched')
|
raise QEMUMachineError("VM already launched")
|
||||||
|
|
||||||
self._iolog = None
|
self._iolog = None
|
||||||
self._qemu_full_args = None
|
self._qemu_full_args = None
|
||||||
|
@ -299,11 +320,11 @@ class QEMUMachine(object):
|
||||||
except:
|
except:
|
||||||
self.shutdown()
|
self.shutdown()
|
||||||
|
|
||||||
LOG.debug('Error launching VM')
|
LOG.debug("Error launching VM")
|
||||||
if self._qemu_full_args:
|
if self._qemu_full_args:
|
||||||
LOG.debug('Command: %r', ' '.join(self._qemu_full_args))
|
LOG.debug("Command: %r", " ".join(self._qemu_full_args))
|
||||||
if self._iolog:
|
if self._iolog:
|
||||||
LOG.debug('Output: %r', self._iolog)
|
LOG.debug("Output: %r", self._iolog)
|
||||||
raise Exception(self._iolog)
|
raise Exception(self._iolog)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
@ -311,17 +332,25 @@ class QEMUMachine(object):
|
||||||
"""
|
"""
|
||||||
Launch the VM and establish a QMP connection
|
Launch the VM and establish a QMP connection
|
||||||
"""
|
"""
|
||||||
devnull = open(os.path.devnull, 'rb')
|
devnull = open(os.path.devnull, "rb")
|
||||||
self._pre_launch()
|
self._pre_launch()
|
||||||
self._qemu_full_args = (self._wrapper + [self._binary] +
|
self._qemu_full_args = (
|
||||||
self._base_args() + self._args)
|
self._wrapper
|
||||||
LOG.debug('VM launch command: %r', ' '.join(self._qemu_full_args))
|
+ [self._binary]
|
||||||
self._popen = subprocess.Popen(self._qemu_full_args,
|
+ self._base_args()
|
||||||
stdin=devnull,
|
+ self._args
|
||||||
stdout=self._qemu_log_file,
|
)
|
||||||
stderr=subprocess.STDOUT,
|
LOG.debug(
|
||||||
shell=False,
|
"VM launch command: %r", " ".join(self._qemu_full_args)
|
||||||
close_fds=False)
|
)
|
||||||
|
self._popen = subprocess.Popen(
|
||||||
|
self._qemu_full_args,
|
||||||
|
stdin=devnull,
|
||||||
|
stdout=self._qemu_log_file,
|
||||||
|
stderr=subprocess.STDOUT,
|
||||||
|
shell=False,
|
||||||
|
close_fds=False,
|
||||||
|
)
|
||||||
self._post_launch()
|
self._post_launch()
|
||||||
|
|
||||||
def wait(self):
|
def wait(self):
|
||||||
|
@ -339,7 +368,7 @@ class QEMUMachine(object):
|
||||||
"""
|
"""
|
||||||
if self.is_running():
|
if self.is_running():
|
||||||
try:
|
try:
|
||||||
self._qmp.cmd('quit')
|
self._qmp.cmd("quit")
|
||||||
self._qmp.close()
|
self._qmp.close()
|
||||||
except:
|
except:
|
||||||
self._popen.kill()
|
self._popen.kill()
|
||||||
|
@ -350,11 +379,11 @@ class QEMUMachine(object):
|
||||||
|
|
||||||
exitcode = self.exitcode()
|
exitcode = self.exitcode()
|
||||||
if exitcode is not None and exitcode < 0:
|
if exitcode is not None and exitcode < 0:
|
||||||
msg = 'qemu received signal %i: %s'
|
msg = "qemu received signal %i: %s"
|
||||||
if self._qemu_full_args:
|
if self._qemu_full_args:
|
||||||
command = ' '.join(self._qemu_full_args)
|
command = " ".join(self._qemu_full_args)
|
||||||
else:
|
else:
|
||||||
command = ''
|
command = ""
|
||||||
LOG.warn(msg, -exitcode, command)
|
LOG.warn(msg, -exitcode, command)
|
||||||
|
|
||||||
self._launched = False
|
self._launched = False
|
||||||
|
@ -366,7 +395,7 @@ class QEMUMachine(object):
|
||||||
qmp_args = dict()
|
qmp_args = dict()
|
||||||
for key, value in args.items():
|
for key, value in args.items():
|
||||||
if conv_keys:
|
if conv_keys:
|
||||||
qmp_args[key.replace('_', '-')] = value
|
qmp_args[key.replace("_", "-")] = value
|
||||||
else:
|
else:
|
||||||
qmp_args[key] = value
|
qmp_args[key] = value
|
||||||
|
|
||||||
|
@ -427,7 +456,9 @@ class QEMUMachine(object):
|
||||||
try:
|
try:
|
||||||
for key in match:
|
for key in match:
|
||||||
if key in event:
|
if key in event:
|
||||||
if not QEMUMachine.event_match(event[key], match[key]):
|
if not QEMUMachine.event_match(
|
||||||
|
event[key], match[key]
|
||||||
|
):
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
|
@ -458,8 +489,9 @@ class QEMUMachine(object):
|
||||||
|
|
||||||
def _match(event):
|
def _match(event):
|
||||||
for name, match in events:
|
for name, match in events:
|
||||||
if (event['event'] == name and
|
if event["event"] == name and self.event_match(
|
||||||
self.event_match(event, match)):
|
event, match
|
||||||
|
):
|
||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
@ -531,7 +563,8 @@ class QEMUMachine(object):
|
||||||
Returns a socket connected to the console
|
Returns a socket connected to the console
|
||||||
"""
|
"""
|
||||||
if self._console_socket is None:
|
if self._console_socket is None:
|
||||||
self._console_socket = socket.socket(socket.AF_UNIX,
|
self._console_socket = socket.socket(
|
||||||
socket.SOCK_STREAM)
|
socket.AF_UNIX, socket.SOCK_STREAM
|
||||||
|
)
|
||||||
self._console_socket.connect(self._console_address)
|
self._console_socket.connect(self._console_address)
|
||||||
return self._console_socket
|
return self._console_socket
|
||||||
|
|
|
@ -32,7 +32,7 @@ class QMPTimeoutError(QMPError):
|
||||||
|
|
||||||
class QEMUMonitorProtocol(object):
|
class QEMUMonitorProtocol(object):
|
||||||
#: Logger object for debugging messages
|
#: Logger object for debugging messages
|
||||||
logger = logging.getLogger('QMP')
|
logger = logging.getLogger("QMP")
|
||||||
#: Socket's error class
|
#: Socket's error class
|
||||||
error = socket.error
|
error = socket.error
|
||||||
#: Socket's timeout
|
#: Socket's timeout
|
||||||
|
@ -55,7 +55,9 @@ class QEMUMonitorProtocol(object):
|
||||||
self.__sock = self.__get_sock()
|
self.__sock = self.__get_sock()
|
||||||
self.__sockfile = None
|
self.__sockfile = None
|
||||||
if server:
|
if server:
|
||||||
self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
self.__sock.setsockopt(
|
||||||
|
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
|
||||||
|
)
|
||||||
self.__sock.bind(self.__address)
|
self.__sock.bind(self.__address)
|
||||||
self.__sock.listen(1)
|
self.__sock.listen(1)
|
||||||
|
|
||||||
|
@ -71,7 +73,7 @@ class QEMUMonitorProtocol(object):
|
||||||
if greeting is None or "QMP" not in greeting:
|
if greeting is None or "QMP" not in greeting:
|
||||||
raise QMPConnectError
|
raise QMPConnectError
|
||||||
# Greeting seems ok, negotiate capabilities
|
# Greeting seems ok, negotiate capabilities
|
||||||
resp = self.cmd('qmp_capabilities')
|
resp = self.cmd("qmp_capabilities")
|
||||||
if "return" in resp:
|
if "return" in resp:
|
||||||
return greeting
|
return greeting
|
||||||
raise QMPCapabilitiesError
|
raise QMPCapabilitiesError
|
||||||
|
@ -82,7 +84,7 @@ class QEMUMonitorProtocol(object):
|
||||||
if not data:
|
if not data:
|
||||||
return
|
return
|
||||||
resp = json.loads(data)
|
resp = json.loads(data)
|
||||||
if 'event' in resp:
|
if "event" in resp:
|
||||||
self.logger.debug("<<< %s", resp)
|
self.logger.debug("<<< %s", resp)
|
||||||
self.__events.append(resp)
|
self.__events.append(resp)
|
||||||
if not only_event:
|
if not only_event:
|
||||||
|
@ -165,7 +167,7 @@ class QEMUMonitorProtocol(object):
|
||||||
"""
|
"""
|
||||||
self.logger.debug(">>> %s", qmp_cmd)
|
self.logger.debug(">>> %s", qmp_cmd)
|
||||||
try:
|
try:
|
||||||
self.__sock.sendall(json.dumps(qmp_cmd).encode('utf-8'))
|
self.__sock.sendall(json.dumps(qmp_cmd).encode("utf-8"))
|
||||||
except socket.error as err:
|
except socket.error as err:
|
||||||
if err[0] == errno.EPIPE:
|
if err[0] == errno.EPIPE:
|
||||||
return
|
return
|
||||||
|
@ -182,11 +184,11 @@ class QEMUMonitorProtocol(object):
|
||||||
@param args: command arguments (dict)
|
@param args: command arguments (dict)
|
||||||
@param cmd_id: command id (dict, list, string or int)
|
@param cmd_id: command id (dict, list, string or int)
|
||||||
"""
|
"""
|
||||||
qmp_cmd = {'execute': name}
|
qmp_cmd = {"execute": name}
|
||||||
if args:
|
if args:
|
||||||
qmp_cmd['arguments'] = args
|
qmp_cmd["arguments"] = args
|
||||||
if cmd_id:
|
if cmd_id:
|
||||||
qmp_cmd['id'] = cmd_id
|
qmp_cmd["id"] = cmd_id
|
||||||
return self.cmd_obj(qmp_cmd)
|
return self.cmd_obj(qmp_cmd)
|
||||||
|
|
||||||
def command(self, cmd, **kwds):
|
def command(self, cmd, **kwds):
|
||||||
|
@ -195,8 +197,8 @@ class QEMUMonitorProtocol(object):
|
||||||
"""
|
"""
|
||||||
ret = self.cmd(cmd, kwds)
|
ret = self.cmd(cmd, kwds)
|
||||||
if "error" in ret:
|
if "error" in ret:
|
||||||
raise Exception(ret['error']['desc'])
|
raise Exception(ret["error"]["desc"])
|
||||||
return ret['return']
|
return ret["return"]
|
||||||
|
|
||||||
def pull_event(self, wait=False):
|
def pull_event(self, wait=False):
|
||||||
"""
|
"""
|
||||||
|
|
|
@ -23,10 +23,6 @@ from ucloud.vmm import VMM
|
||||||
from marshmallow import ValidationError
|
from marshmallow import ValidationError
|
||||||
|
|
||||||
|
|
||||||
def maintenance():
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class VM:
|
class VM:
|
||||||
def __init__(self, vm_entry):
|
def __init__(self, vm_entry):
|
||||||
self.schema = VMSchema()
|
self.schema = VMSchema()
|
||||||
|
@ -35,23 +31,30 @@ class VM:
|
||||||
try:
|
try:
|
||||||
self.vm = self.schema.loads(vm_entry.value)
|
self.vm = self.schema.loads(vm_entry.value)
|
||||||
except ValidationError:
|
except ValidationError:
|
||||||
logger.exception('Couldn\'t validate VM Entry', vm_entry.value)
|
logger.exception(
|
||||||
|
"Couldn't validate VM Entry", vm_entry.value
|
||||||
|
)
|
||||||
self.vm = None
|
self.vm = None
|
||||||
else:
|
else:
|
||||||
self.uuid = vm_entry.key.split('/')[-1]
|
self.uuid = vm_entry.key.split("/")[-1]
|
||||||
self.host_key = self.vm['hostname']
|
self.host_key = self.vm["hostname"]
|
||||||
|
|
||||||
def get_qemu_args(self):
|
def get_qemu_args(self):
|
||||||
command = (
|
command = (
|
||||||
'-name {owner}_{name}'
|
"-name {owner}_{name}"
|
||||||
' -drive file={file},format=raw,if=virtio,cache=none'
|
" -drive file={file},format=raw,if=virtio,cache=none"
|
||||||
' -device virtio-rng-pci'
|
" -device virtio-rng-pci"
|
||||||
' -m {memory} -smp cores={cores},threads={threads}'
|
" -m {memory} -smp cores={cores},threads={threads}"
|
||||||
).format(owner=self.vm['owner'], name=self.vm['name'],
|
).format(
|
||||||
memory=int(self.vm['specs']['ram'].to_MB()), cores=self.vm['specs']['cpu'],
|
owner=self.vm["owner"],
|
||||||
threads=1, file=shared.storage_handler.qemu_path_string(self.uuid))
|
name=self.vm["name"],
|
||||||
|
memory=int(self.vm["specs"]["ram"].to_MB()),
|
||||||
|
cores=self.vm["specs"]["cpu"],
|
||||||
|
threads=1,
|
||||||
|
file=shared.storage_handler.qemu_path_string(self.uuid),
|
||||||
|
)
|
||||||
|
|
||||||
return command.split(' ')
|
return command.split(" ")
|
||||||
|
|
||||||
def start(self, destination_host_key=None):
|
def start(self, destination_host_key=None):
|
||||||
migration = False
|
migration = False
|
||||||
|
@ -63,24 +66,34 @@ class VM:
|
||||||
network_args = self.create_network_dev()
|
network_args = self.create_network_dev()
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
declare_stopped(self.vm)
|
declare_stopped(self.vm)
|
||||||
self.vm['log'].append('Cannot Setup Network Properly')
|
self.vm["log"].append("Cannot Setup Network Properly")
|
||||||
logger.error('Cannot Setup Network Properly for vm %s', self.uuid, exc_info=err)
|
logger.error(
|
||||||
|
"Cannot Setup Network Properly for vm %s",
|
||||||
|
self.uuid,
|
||||||
|
exc_info=err,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
self.vmm.start(uuid=self.uuid, migration=migration,
|
self.vmm.start(
|
||||||
*self.get_qemu_args(), *network_args)
|
uuid=self.uuid,
|
||||||
|
migration=migration,
|
||||||
|
*self.get_qemu_args(),
|
||||||
|
*network_args
|
||||||
|
)
|
||||||
|
|
||||||
status = self.vmm.get_status(self.uuid)
|
status = self.vmm.get_status(self.uuid)
|
||||||
if status == 'running':
|
if status == "running":
|
||||||
self.vm['status'] = VMStatus.running
|
self.vm["status"] = VMStatus.running
|
||||||
self.vm['vnc_socket'] = self.vmm.get_vnc(self.uuid)
|
self.vm["vnc_socket"] = self.vmm.get_vnc(self.uuid)
|
||||||
elif status == 'inmigrate':
|
elif status == "inmigrate":
|
||||||
r = RequestEntry.from_scratch(
|
r = RequestEntry.from_scratch(
|
||||||
type=RequestType.TransferVM, # Transfer VM
|
type=RequestType.TransferVM, # Transfer VM
|
||||||
hostname=self.host_key, # Which VM should get this request. It is source host
|
hostname=self.host_key, # Which VM should get this request. It is source host
|
||||||
uuid=self.uuid, # uuid of VM
|
uuid=self.uuid, # uuid of VM
|
||||||
destination_sock_path=join_path(self.vmm.socket_dir, self.uuid),
|
destination_sock_path=join_path(
|
||||||
|
self.vmm.socket_dir, self.uuid
|
||||||
|
),
|
||||||
destination_host_key=destination_host_key, # Where source host transfer VM
|
destination_host_key=destination_host_key, # Where source host transfer VM
|
||||||
request_prefix=settings['etcd']['request_prefix']
|
request_prefix=settings["etcd"]["request_prefix"],
|
||||||
)
|
)
|
||||||
shared.request_pool.put(r)
|
shared.request_pool.put(r)
|
||||||
else:
|
else:
|
||||||
|
@ -96,15 +109,22 @@ class VM:
|
||||||
self.sync()
|
self.sync()
|
||||||
|
|
||||||
def migrate(self, destination_host, destination_sock_path):
|
def migrate(self, destination_host, destination_sock_path):
|
||||||
self.vmm.transfer(src_uuid=self.uuid, destination_sock_path=destination_sock_path,
|
self.vmm.transfer(
|
||||||
host=destination_host)
|
src_uuid=self.uuid,
|
||||||
|
destination_sock_path=destination_sock_path,
|
||||||
|
host=destination_host,
|
||||||
|
)
|
||||||
|
|
||||||
def create_network_dev(self):
|
def create_network_dev(self):
|
||||||
command = ''
|
command = ""
|
||||||
for network_mac_and_tap in self.vm['network']:
|
for network_mac_and_tap in self.vm["network"]:
|
||||||
network_name, mac, tap = network_mac_and_tap
|
network_name, mac, tap = network_mac_and_tap
|
||||||
|
|
||||||
_key = os.path.join(settings['etcd']['network_prefix'], self.vm['owner'], network_name)
|
_key = os.path.join(
|
||||||
|
settings["etcd"]["network_prefix"],
|
||||||
|
self.vm["owner"],
|
||||||
|
network_name,
|
||||||
|
)
|
||||||
network = shared.etcd_client.get(_key, value_in_json=True)
|
network = shared.etcd_client.get(_key, value_in_json=True)
|
||||||
network_schema = NetworkSchema()
|
network_schema = NetworkSchema()
|
||||||
try:
|
try:
|
||||||
|
@ -112,49 +132,64 @@ class VM:
|
||||||
except ValidationError:
|
except ValidationError:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if network['type'] == "vxlan":
|
if network["type"] == "vxlan":
|
||||||
tap = create_vxlan_br_tap(_id=network['id'],
|
tap = create_vxlan_br_tap(
|
||||||
_dev=settings['network']['vxlan_phy_dev'],
|
_id=network["id"],
|
||||||
tap_id=tap,
|
_dev=settings["network"]["vxlan_phy_dev"],
|
||||||
ip=network['ipv6'])
|
tap_id=tap,
|
||||||
|
ip=network["ipv6"],
|
||||||
|
)
|
||||||
|
|
||||||
all_networks = shared.etcd_client.get_prefix(settings['etcd']['network_prefix'],
|
all_networks = shared.etcd_client.get_prefix(
|
||||||
value_in_json=True)
|
settings["etcd"]["network_prefix"],
|
||||||
|
value_in_json=True,
|
||||||
|
)
|
||||||
|
|
||||||
if ipaddress.ip_network(network['ipv6']).is_global:
|
if ipaddress.ip_network(network["ipv6"]).is_global:
|
||||||
update_radvd_conf(all_networks)
|
update_radvd_conf(all_networks)
|
||||||
|
|
||||||
command += '-netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no' \
|
command += (
|
||||||
' -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}' \
|
"-netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
|
||||||
.format(tap=tap, net_id=network['id'], mac=mac)
|
" -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}".format(
|
||||||
|
tap=tap, net_id=network["id"], mac=mac
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
return command.split(' ')
|
return command.split(" ")
|
||||||
|
|
||||||
def delete_network_dev(self):
|
def delete_network_dev(self):
|
||||||
try:
|
try:
|
||||||
for network in self.vm['network']:
|
for network in self.vm["network"]:
|
||||||
network_name = network[0]
|
network_name = network[0]
|
||||||
_ = network[1] # tap_mac
|
_ = network[1] # tap_mac
|
||||||
tap_id = network[2]
|
tap_id = network[2]
|
||||||
|
|
||||||
delete_network_interface('tap{}'.format(tap_id))
|
delete_network_interface("tap{}".format(tap_id))
|
||||||
|
|
||||||
owners_vms = shared.vm_pool.by_owner(self.vm['owner'])
|
owners_vms = shared.vm_pool.by_owner(self.vm["owner"])
|
||||||
owners_running_vms = shared.vm_pool.by_status(VMStatus.running,
|
owners_running_vms = shared.vm_pool.by_status(
|
||||||
_vms=owners_vms)
|
VMStatus.running, _vms=owners_vms
|
||||||
|
)
|
||||||
|
|
||||||
networks = map(
|
networks = map(
|
||||||
lambda n: n[0], map(lambda vm: vm.network, owners_running_vms)
|
lambda n: n[0],
|
||||||
|
map(lambda vm: vm.network, owners_running_vms),
|
||||||
)
|
)
|
||||||
networks_in_use_by_user_vms = [vm[0] for vm in networks]
|
networks_in_use_by_user_vms = [vm[0] for vm in networks]
|
||||||
if network_name not in networks_in_use_by_user_vms:
|
if network_name not in networks_in_use_by_user_vms:
|
||||||
network_entry = resolve_network(network[0], self.vm['owner'])
|
network_entry = resolve_network(
|
||||||
|
network[0], self.vm["owner"]
|
||||||
|
)
|
||||||
if network_entry:
|
if network_entry:
|
||||||
network_type = network_entry.value["type"]
|
network_type = network_entry.value["type"]
|
||||||
network_id = network_entry.value["id"]
|
network_id = network_entry.value["id"]
|
||||||
if network_type == "vxlan":
|
if network_type == "vxlan":
|
||||||
delete_network_interface('br{}'.format(network_id))
|
delete_network_interface(
|
||||||
delete_network_interface('vxlan{}'.format(network_id))
|
"br{}".format(network_id)
|
||||||
|
)
|
||||||
|
delete_network_interface(
|
||||||
|
"vxlan{}".format(network_id)
|
||||||
|
)
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception("Exception in network interface deletion")
|
logger.exception("Exception in network interface deletion")
|
||||||
|
|
||||||
|
@ -163,15 +198,21 @@ class VM:
|
||||||
# File Already exists. No Problem Continue
|
# File Already exists. No Problem Continue
|
||||||
logger.debug("Image for vm %s exists", self.uuid)
|
logger.debug("Image for vm %s exists", self.uuid)
|
||||||
else:
|
else:
|
||||||
if shared.storage_handler.make_vm_image(src=self.vm['image_uuid'], dest=self.uuid):
|
if shared.storage_handler.make_vm_image(
|
||||||
if not shared.storage_handler.resize_vm_image(path=self.uuid,
|
src=self.vm["image_uuid"], dest=self.uuid
|
||||||
size=int(self.vm['specs']['os-ssd'].to_MB())):
|
):
|
||||||
self.vm['status'] = VMStatus.error
|
if not shared.storage_handler.resize_vm_image(
|
||||||
|
path=self.uuid,
|
||||||
|
size=int(self.vm["specs"]["os-ssd"].to_MB()),
|
||||||
|
):
|
||||||
|
self.vm["status"] = VMStatus.error
|
||||||
else:
|
else:
|
||||||
logger.info("New VM Created")
|
logger.info("New VM Created")
|
||||||
|
|
||||||
def sync(self):
|
def sync(self):
|
||||||
shared.etcd_client.put(self.key, self.schema.dump(self.vm), value_in_json=True)
|
shared.etcd_client.put(
|
||||||
|
self.key, self.schema.dump(self.vm), value_in_json=True
|
||||||
|
)
|
||||||
|
|
||||||
def delete(self):
|
def delete(self):
|
||||||
self.stop()
|
self.stop()
|
||||||
|
@ -186,50 +227,77 @@ class VM:
|
||||||
|
|
||||||
def resolve_network(network_name, network_owner):
|
def resolve_network(network_name, network_owner):
|
||||||
network = shared.etcd_client.get(
|
network = shared.etcd_client.get(
|
||||||
join_path(settings['etcd']['network_prefix'], network_owner, network_name), value_in_json=True
|
join_path(
|
||||||
|
settings["etcd"]["network_prefix"],
|
||||||
|
network_owner,
|
||||||
|
network_name,
|
||||||
|
),
|
||||||
|
value_in_json=True,
|
||||||
)
|
)
|
||||||
return network
|
return network
|
||||||
|
|
||||||
|
|
||||||
def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
|
def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
|
||||||
network_script_base = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'network')
|
network_script_base = os.path.join(
|
||||||
vxlan = create_dev(script=os.path.join(network_script_base, 'create-vxlan.sh'),
|
os.path.dirname(os.path.dirname(__file__)), "network"
|
||||||
_id=_id, dev=_dev)
|
)
|
||||||
|
vxlan = create_dev(
|
||||||
|
script=os.path.join(network_script_base, "create-vxlan.sh"),
|
||||||
|
_id=_id,
|
||||||
|
dev=_dev,
|
||||||
|
)
|
||||||
if vxlan:
|
if vxlan:
|
||||||
bridge = create_dev(script=os.path.join(network_script_base, 'create-bridge.sh'),
|
bridge = create_dev(
|
||||||
_id=_id, dev=vxlan, ip=ip)
|
script=os.path.join(
|
||||||
|
network_script_base, "create-bridge.sh"
|
||||||
|
),
|
||||||
|
_id=_id,
|
||||||
|
dev=vxlan,
|
||||||
|
ip=ip,
|
||||||
|
)
|
||||||
if bridge:
|
if bridge:
|
||||||
tap = create_dev(script=os.path.join(network_script_base, 'create-tap.sh'),
|
tap = create_dev(
|
||||||
_id=str(tap_id), dev=bridge)
|
script=os.path.join(
|
||||||
|
network_script_base, "create-tap.sh"
|
||||||
|
),
|
||||||
|
_id=str(tap_id),
|
||||||
|
dev=bridge,
|
||||||
|
)
|
||||||
if tap:
|
if tap:
|
||||||
return tap
|
return tap
|
||||||
|
|
||||||
|
|
||||||
def update_radvd_conf(all_networks):
|
def update_radvd_conf(all_networks):
|
||||||
network_script_base = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'network')
|
network_script_base = os.path.join(
|
||||||
|
os.path.dirname(os.path.dirname(__file__)), "network"
|
||||||
|
)
|
||||||
|
|
||||||
networks = {
|
networks = {
|
||||||
net.value['ipv6']: net.value['id']
|
net.value["ipv6"]: net.value["id"]
|
||||||
for net in all_networks
|
for net in all_networks
|
||||||
if net.value.get('ipv6') and ipaddress.ip_network(net.value.get('ipv6')).is_global
|
if net.value.get("ipv6")
|
||||||
|
and ipaddress.ip_network(net.value.get("ipv6")).is_global
|
||||||
}
|
}
|
||||||
radvd_template = open(os.path.join(network_script_base,
|
radvd_template = open(
|
||||||
'radvd-template.conf'), 'r').read()
|
os.path.join(network_script_base, "radvd-template.conf"), "r"
|
||||||
|
).read()
|
||||||
radvd_template = Template(radvd_template)
|
radvd_template = Template(radvd_template)
|
||||||
|
|
||||||
content = [
|
content = [
|
||||||
radvd_template.safe_substitute(
|
radvd_template.safe_substitute(
|
||||||
bridge='br{}'.format(networks[net]),
|
bridge="br{}".format(networks[net]), prefix=net
|
||||||
prefix=net
|
|
||||||
)
|
)
|
||||||
for net in networks if networks.get(net)
|
for net in networks
|
||||||
|
if networks.get(net)
|
||||||
]
|
]
|
||||||
with open('/etc/radvd.conf', 'w') as radvd_conf:
|
with open("/etc/radvd.conf", "w") as radvd_conf:
|
||||||
radvd_conf.writelines(content)
|
radvd_conf.writelines(content)
|
||||||
try:
|
try:
|
||||||
sp.check_output(['systemctl', 'restart', 'radvd'])
|
sp.check_output(["systemctl", "restart", "radvd"])
|
||||||
except sp.CalledProcessError:
|
except sp.CalledProcessError:
|
||||||
try:
|
try:
|
||||||
sp.check_output(['service', 'radvd', 'restart'])
|
sp.check_output(["service", "radvd", "restart"])
|
||||||
except sp.CalledProcessError as err:
|
except sp.CalledProcessError as err:
|
||||||
raise err.__class__('Cannot start/restart radvd service', err.cmd) from err
|
raise err.__class__(
|
||||||
|
"Cannot start/restart radvd service", err.cmd
|
||||||
|
) from err
|
||||||
|
|
|
@ -9,7 +9,13 @@ from ucloud.imagescanner import logger
|
||||||
|
|
||||||
|
|
||||||
def qemu_img_type(path):
|
def qemu_img_type(path):
|
||||||
qemu_img_info_command = ["qemu-img", "info", "--output", "json", path]
|
qemu_img_info_command = [
|
||||||
|
"qemu-img",
|
||||||
|
"info",
|
||||||
|
"--output",
|
||||||
|
"json",
|
||||||
|
path,
|
||||||
|
]
|
||||||
try:
|
try:
|
||||||
qemu_img_info = sp.check_output(qemu_img_info_command)
|
qemu_img_info = sp.check_output(qemu_img_info_command)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
@ -22,32 +28,57 @@ def qemu_img_type(path):
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
# We want to get images entries that requests images to be created
|
# We want to get images entries that requests images to be created
|
||||||
images = shared.etcd_client.get_prefix(settings['etcd']['image_prefix'], value_in_json=True)
|
images = shared.etcd_client.get_prefix(
|
||||||
images_to_be_created = list(filter(lambda im: im.value['status'] == 'TO_BE_CREATED', images))
|
settings["etcd"]["image_prefix"], value_in_json=True
|
||||||
|
)
|
||||||
|
images_to_be_created = list(
|
||||||
|
filter(lambda im: im.value["status"] == "TO_BE_CREATED", images)
|
||||||
|
)
|
||||||
|
|
||||||
for image in images_to_be_created:
|
for image in images_to_be_created:
|
||||||
try:
|
try:
|
||||||
image_uuid = image.key.split('/')[-1]
|
image_uuid = image.key.split("/")[-1]
|
||||||
image_owner = image.value['owner']
|
image_owner = image.value["owner"]
|
||||||
image_filename = image.value['filename']
|
image_filename = image.value["filename"]
|
||||||
image_store_name = image.value['store_name']
|
image_store_name = image.value["store_name"]
|
||||||
image_full_path = join_path(settings['storage']['file_dir'], image_owner, image_filename)
|
image_full_path = join_path(
|
||||||
|
settings["storage"]["file_dir"],
|
||||||
|
image_owner,
|
||||||
|
image_filename,
|
||||||
|
)
|
||||||
|
|
||||||
image_stores = shared.etcd_client.get_prefix(settings['etcd']['image_store_prefix'],
|
image_stores = shared.etcd_client.get_prefix(
|
||||||
value_in_json=True)
|
settings["etcd"]["image_store_prefix"],
|
||||||
user_image_store = next(filter(
|
value_in_json=True,
|
||||||
lambda s, store_name=image_store_name: s.value["name"] == store_name,
|
)
|
||||||
image_stores
|
user_image_store = next(
|
||||||
))
|
filter(
|
||||||
|
lambda s, store_name=image_store_name: s.value[
|
||||||
|
"name"
|
||||||
|
]
|
||||||
|
== store_name,
|
||||||
|
image_stores,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
image_store_pool = user_image_store.value['attributes']['pool']
|
image_store_pool = user_image_store.value["attributes"][
|
||||||
|
"pool"
|
||||||
|
]
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.exception(e)
|
logger.exception(e)
|
||||||
else:
|
else:
|
||||||
# At least our basic data is available
|
# At least our basic data is available
|
||||||
qemu_img_convert_command = ["qemu-img", "convert", "-f", "qcow2",
|
qemu_img_convert_command = [
|
||||||
"-O", "raw", image_full_path, "image.raw"]
|
"qemu-img",
|
||||||
|
"convert",
|
||||||
|
"-f",
|
||||||
|
"qcow2",
|
||||||
|
"-O",
|
||||||
|
"raw",
|
||||||
|
image_full_path,
|
||||||
|
"image.raw",
|
||||||
|
]
|
||||||
|
|
||||||
if qemu_img_type(image_full_path) == "qcow2":
|
if qemu_img_type(image_full_path) == "qcow2":
|
||||||
try:
|
try:
|
||||||
|
@ -55,16 +86,20 @@ def main():
|
||||||
sp.check_output(qemu_img_convert_command,)
|
sp.check_output(qemu_img_convert_command,)
|
||||||
|
|
||||||
except sp.CalledProcessError:
|
except sp.CalledProcessError:
|
||||||
logger.exception('Image convertion from .qcow2 to .raw failed.')
|
logger.exception(
|
||||||
|
"Image convertion from .qcow2 to .raw failed."
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# Import and Protect
|
# Import and Protect
|
||||||
r_status = shared.storage_handler.import_image(src="image.raw",
|
r_status = shared.storage_handler.import_image(
|
||||||
dest=image_uuid,
|
src="image.raw", dest=image_uuid, protect=True
|
||||||
protect=True)
|
)
|
||||||
if r_status:
|
if r_status:
|
||||||
# Everything is successfully done
|
# Everything is successfully done
|
||||||
image.value["status"] = "CREATED"
|
image.value["status"] = "CREATED"
|
||||||
shared.etcd_client.put(image.key, json.dumps(image.value))
|
shared.etcd_client.put(
|
||||||
|
image.key, json.dumps(image.value)
|
||||||
|
)
|
||||||
finally:
|
finally:
|
||||||
try:
|
try:
|
||||||
os.remove("image.raw")
|
os.remove("image.raw")
|
||||||
|
@ -74,7 +109,9 @@ def main():
|
||||||
else:
|
else:
|
||||||
# The user provided image is either not found or of invalid format
|
# The user provided image is either not found or of invalid format
|
||||||
image.value["status"] = "INVALID_IMAGE"
|
image.value["status"] = "INVALID_IMAGE"
|
||||||
shared.etcd_client.put(image.key, json.dumps(image.value))
|
shared.etcd_client.put(
|
||||||
|
image.key, json.dumps(image.value)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|
|
@ -21,33 +21,39 @@ def handle_exception(e):
|
||||||
return e
|
return e
|
||||||
|
|
||||||
# now you're handling non-HTTP exceptions only
|
# now you're handling non-HTTP exceptions only
|
||||||
return {'message': 'Server Error'}, 500
|
return {"message": "Server Error"}, 500
|
||||||
|
|
||||||
|
|
||||||
def get_vm_entry(mac_addr):
|
def get_vm_entry(mac_addr):
|
||||||
return next(filter(lambda vm: mac_addr in list(zip(*vm.network))[1], shared.vm_pool.vms), None)
|
return next(
|
||||||
|
filter(
|
||||||
|
lambda vm: mac_addr in list(zip(*vm.network))[1],
|
||||||
|
shared.vm_pool.vms,
|
||||||
|
),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
|
# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
|
||||||
def ipv62mac(ipv6):
|
def ipv62mac(ipv6):
|
||||||
# remove subnet info if given
|
# remove subnet info if given
|
||||||
subnet_index = ipv6.find('/')
|
subnet_index = ipv6.find("/")
|
||||||
if subnet_index != -1:
|
if subnet_index != -1:
|
||||||
ipv6 = ipv6[:subnet_index]
|
ipv6 = ipv6[:subnet_index]
|
||||||
|
|
||||||
ipv6_parts = ipv6.split(':')
|
ipv6_parts = ipv6.split(":")
|
||||||
mac_parts = list()
|
mac_parts = list()
|
||||||
for ipv6_part in ipv6_parts[-4:]:
|
for ipv6_part in ipv6_parts[-4:]:
|
||||||
while len(ipv6_part) < 4:
|
while len(ipv6_part) < 4:
|
||||||
ipv6_part = '0' + ipv6_part
|
ipv6_part = "0" + ipv6_part
|
||||||
mac_parts.append(ipv6_part[:2])
|
mac_parts.append(ipv6_part[:2])
|
||||||
mac_parts.append(ipv6_part[-2:])
|
mac_parts.append(ipv6_part[-2:])
|
||||||
|
|
||||||
# modify parts to match MAC value
|
# modify parts to match MAC value
|
||||||
mac_parts[0] = '%02x' % (int(mac_parts[0], 16) ^ 2)
|
mac_parts[0] = "%02x" % (int(mac_parts[0], 16) ^ 2)
|
||||||
del mac_parts[4]
|
del mac_parts[4]
|
||||||
del mac_parts[3]
|
del mac_parts[3]
|
||||||
return ':'.join(mac_parts)
|
return ":".join(mac_parts)
|
||||||
|
|
||||||
|
|
||||||
class Root(Resource):
|
class Root(Resource):
|
||||||
|
@ -56,19 +62,27 @@ class Root(Resource):
|
||||||
data = get_vm_entry(ipv62mac(request.remote_addr))
|
data = get_vm_entry(ipv62mac(request.remote_addr))
|
||||||
|
|
||||||
if not data:
|
if not data:
|
||||||
return {'message': 'Metadata for such VM does not exists.'}, 404
|
return (
|
||||||
|
{"message": "Metadata for such VM does not exists."},
|
||||||
|
404,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
etcd_key = os.path.join(settings['etcd']['user_prefix'],
|
etcd_key = os.path.join(
|
||||||
data.value['owner_realm'],
|
settings["etcd"]["user_prefix"],
|
||||||
data.value['owner'], 'key')
|
data.value["owner_realm"],
|
||||||
etcd_entry = shared.etcd_client.get_prefix(etcd_key, value_in_json=True)
|
data.value["owner"],
|
||||||
|
"key",
|
||||||
|
)
|
||||||
|
etcd_entry = shared.etcd_client.get_prefix(
|
||||||
|
etcd_key, value_in_json=True
|
||||||
|
)
|
||||||
user_personal_ssh_keys = [key.value for key in etcd_entry]
|
user_personal_ssh_keys = [key.value for key in etcd_entry]
|
||||||
data.value['metadata']['ssh-keys'] += user_personal_ssh_keys
|
data.value["metadata"]["ssh-keys"] += user_personal_ssh_keys
|
||||||
return data.value['metadata'], 200
|
return data.value["metadata"], 200
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def post():
|
def post():
|
||||||
return {'message': 'Previous Implementation is deprecated.'}
|
return {"message": "Previous Implementation is deprecated."}
|
||||||
# data = etcd_client.get("/v1/metadata/{}".format(request.remote_addr), value_in_json=True)
|
# data = etcd_client.get("/v1/metadata/{}".format(request.remote_addr), value_in_json=True)
|
||||||
# print(data)
|
# print(data)
|
||||||
# if data:
|
# if data:
|
||||||
|
@ -94,12 +108,12 @@ class Root(Resource):
|
||||||
# data, value_in_json=True)
|
# data, value_in_json=True)
|
||||||
|
|
||||||
|
|
||||||
api.add_resource(Root, '/')
|
api.add_resource(Root, "/")
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
app.run(debug=True, host="::", port="80")
|
app.run(debug=True, host="::", port="80")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
|
@ -24,17 +24,35 @@ def remaining_resources(host_specs, vms_specs):
|
||||||
|
|
||||||
for component in _vms_specs:
|
for component in _vms_specs:
|
||||||
if isinstance(_vms_specs[component], str):
|
if isinstance(_vms_specs[component], str):
|
||||||
_vms_specs[component] = int(bitmath.parse_string_unsafe(_vms_specs[component]).to_MB())
|
_vms_specs[component] = int(
|
||||||
|
bitmath.parse_string_unsafe(
|
||||||
|
_vms_specs[component]
|
||||||
|
).to_MB()
|
||||||
|
)
|
||||||
elif isinstance(_vms_specs[component], list):
|
elif isinstance(_vms_specs[component], list):
|
||||||
_vms_specs[component] = map(lambda x: int(bitmath.parse_string_unsafe(x).to_MB()), _vms_specs[component])
|
_vms_specs[component] = map(
|
||||||
_vms_specs[component] = reduce(lambda x, y: x + y, _vms_specs[component], 0)
|
lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
|
||||||
|
_vms_specs[component],
|
||||||
|
)
|
||||||
|
_vms_specs[component] = reduce(
|
||||||
|
lambda x, y: x + y, _vms_specs[component], 0
|
||||||
|
)
|
||||||
|
|
||||||
for component in _remaining:
|
for component in _remaining:
|
||||||
if isinstance(_remaining[component], str):
|
if isinstance(_remaining[component], str):
|
||||||
_remaining[component] = int(bitmath.parse_string_unsafe(_remaining[component]).to_MB())
|
_remaining[component] = int(
|
||||||
|
bitmath.parse_string_unsafe(
|
||||||
|
_remaining[component]
|
||||||
|
).to_MB()
|
||||||
|
)
|
||||||
elif isinstance(_remaining[component], list):
|
elif isinstance(_remaining[component], list):
|
||||||
_remaining[component] = map(lambda x: int(bitmath.parse_string_unsafe(x).to_MB()), _remaining[component])
|
_remaining[component] = map(
|
||||||
_remaining[component] = reduce(lambda x, y: x + y, _remaining[component], 0)
|
lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
|
||||||
|
_remaining[component],
|
||||||
|
)
|
||||||
|
_remaining[component] = reduce(
|
||||||
|
lambda x, y: x + y, _remaining[component], 0
|
||||||
|
)
|
||||||
|
|
||||||
_remaining.subtract(_vms_specs)
|
_remaining.subtract(_vms_specs)
|
||||||
|
|
||||||
|
@ -59,11 +77,15 @@ def get_suitable_host(vm_specs, hosts=None):
|
||||||
running_vms_specs = [vm.specs for vm in vms]
|
running_vms_specs = [vm.specs for vm in vms]
|
||||||
|
|
||||||
# Accumulate all of their combined specs
|
# Accumulate all of their combined specs
|
||||||
running_vms_accumulated_specs = accumulated_specs(running_vms_specs)
|
running_vms_accumulated_specs = accumulated_specs(
|
||||||
|
running_vms_specs
|
||||||
|
)
|
||||||
|
|
||||||
# Find out remaining resources after
|
# Find out remaining resources after
|
||||||
# host_specs - already running vm_specs
|
# host_specs - already running vm_specs
|
||||||
remaining = remaining_resources(host.specs, running_vms_accumulated_specs)
|
remaining = remaining_resources(
|
||||||
|
host.specs, running_vms_accumulated_specs
|
||||||
|
)
|
||||||
|
|
||||||
# Find out remaining - new_vm_specs
|
# Find out remaining - new_vm_specs
|
||||||
remaining = remaining_resources(remaining, vm_specs)
|
remaining = remaining_resources(remaining, vm_specs)
|
||||||
|
@ -95,7 +117,7 @@ def dead_host_mitigation(dead_hosts_keys):
|
||||||
|
|
||||||
vms_hosted_on_dead_host = shared.vm_pool.by_host(host_key)
|
vms_hosted_on_dead_host = shared.vm_pool.by_host(host_key)
|
||||||
for vm in vms_hosted_on_dead_host:
|
for vm in vms_hosted_on_dead_host:
|
||||||
vm.status = 'UNKNOWN'
|
vm.status = "UNKNOWN"
|
||||||
shared.vm_pool.put(vm)
|
shared.vm_pool.put(vm)
|
||||||
shared.host_pool.put(host)
|
shared.host_pool.put(host)
|
||||||
|
|
||||||
|
@ -104,10 +126,12 @@ def assign_host(vm):
|
||||||
vm.hostname = get_suitable_host(vm.specs)
|
vm.hostname = get_suitable_host(vm.specs)
|
||||||
shared.vm_pool.put(vm)
|
shared.vm_pool.put(vm)
|
||||||
|
|
||||||
r = RequestEntry.from_scratch(type=RequestType.StartVM,
|
r = RequestEntry.from_scratch(
|
||||||
uuid=vm.uuid,
|
type=RequestType.StartVM,
|
||||||
hostname=vm.hostname,
|
uuid=vm.uuid,
|
||||||
request_prefix=settings['etcd']['request_prefix'])
|
hostname=vm.hostname,
|
||||||
|
request_prefix=settings["etcd"]["request_prefix"],
|
||||||
|
)
|
||||||
shared.request_pool.put(r)
|
shared.request_pool.put(r)
|
||||||
|
|
||||||
vm.log.append("VM scheduled for starting")
|
vm.log.append("VM scheduled for starting")
|
||||||
|
|
|
@ -7,8 +7,13 @@
|
||||||
from ucloud.common.request import RequestEntry, RequestType
|
from ucloud.common.request import RequestEntry, RequestType
|
||||||
from ucloud.shared import shared
|
from ucloud.shared import shared
|
||||||
from ucloud.settings import settings
|
from ucloud.settings import settings
|
||||||
from .helper import (get_suitable_host, dead_host_mitigation, dead_host_detection,
|
from .helper import (
|
||||||
assign_host, NoSuitableHostFound)
|
get_suitable_host,
|
||||||
|
dead_host_mitigation,
|
||||||
|
dead_host_detection,
|
||||||
|
assign_host,
|
||||||
|
NoSuitableHostFound,
|
||||||
|
)
|
||||||
from . import logger
|
from . import logger
|
||||||
|
|
||||||
|
|
||||||
|
@ -16,8 +21,14 @@ def main():
|
||||||
pending_vms = []
|
pending_vms = []
|
||||||
|
|
||||||
for request_iterator in [
|
for request_iterator in [
|
||||||
shared.etcd_client.get_prefix(settings['etcd']['request_prefix'], value_in_json=True),
|
shared.etcd_client.get_prefix(
|
||||||
shared.etcd_client.watch_prefix(settings['etcd']['request_prefix'], timeout=5, value_in_json=True),
|
settings["etcd"]["request_prefix"], value_in_json=True
|
||||||
|
),
|
||||||
|
shared.etcd_client.watch_prefix(
|
||||||
|
settings["etcd"]["request_prefix"],
|
||||||
|
timeout=5,
|
||||||
|
value_in_json=True,
|
||||||
|
),
|
||||||
]:
|
]:
|
||||||
for request_event in request_iterator:
|
for request_event in request_iterator:
|
||||||
request_entry = RequestEntry(request_event)
|
request_entry = RequestEntry(request_event)
|
||||||
|
@ -41,25 +52,39 @@ def main():
|
||||||
# on our behalf.
|
# on our behalf.
|
||||||
while pending_vms:
|
while pending_vms:
|
||||||
pending_vm_entry = pending_vms.pop()
|
pending_vm_entry = pending_vms.pop()
|
||||||
r = RequestEntry.from_scratch(type="ScheduleVM",
|
r = RequestEntry.from_scratch(
|
||||||
uuid=pending_vm_entry.uuid,
|
type="ScheduleVM",
|
||||||
hostname=pending_vm_entry.hostname,
|
uuid=pending_vm_entry.uuid,
|
||||||
request_prefix=settings['etcd']['request_prefix'])
|
hostname=pending_vm_entry.hostname,
|
||||||
|
request_prefix=settings["etcd"][
|
||||||
|
"request_prefix"
|
||||||
|
],
|
||||||
|
)
|
||||||
shared.request_pool.put(r)
|
shared.request_pool.put(r)
|
||||||
|
|
||||||
elif request_entry.type == RequestType.ScheduleVM:
|
elif request_entry.type == RequestType.ScheduleVM:
|
||||||
logger.debug("%s, %s", request_entry.key, request_entry.value)
|
logger.debug(
|
||||||
|
"%s, %s", request_entry.key, request_entry.value
|
||||||
|
)
|
||||||
|
|
||||||
vm_entry = shared.vm_pool.get(request_entry.uuid)
|
vm_entry = shared.vm_pool.get(request_entry.uuid)
|
||||||
if vm_entry is None:
|
if vm_entry is None:
|
||||||
logger.info("Trying to act on {} but it is deleted".format(request_entry.uuid))
|
logger.info(
|
||||||
|
"Trying to act on {} but it is deleted".format(
|
||||||
|
request_entry.uuid
|
||||||
|
)
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
shared.etcd_client.client.delete(request_entry.key) # consume Request
|
shared.etcd_client.client.delete(
|
||||||
|
request_entry.key
|
||||||
|
) # consume Request
|
||||||
|
|
||||||
try:
|
try:
|
||||||
assign_host(vm_entry)
|
assign_host(vm_entry)
|
||||||
except NoSuitableHostFound:
|
except NoSuitableHostFound:
|
||||||
vm_entry.add_log("Can't schedule VM. No Resource Left.")
|
vm_entry.add_log(
|
||||||
|
"Can't schedule VM. No Resource Left."
|
||||||
|
)
|
||||||
shared.vm_pool.put(vm_entry)
|
shared.vm_pool.put(vm_entry)
|
||||||
|
|
||||||
pending_vms.append(vm_entry)
|
pending_vms.append(vm_entry)
|
||||||
|
|
|
@ -70,9 +70,15 @@ class TestFunctions(unittest.TestCase):
|
||||||
"last_heartbeat": datetime.utcnow().isoformat(),
|
"last_heartbeat": datetime.utcnow().isoformat(),
|
||||||
}
|
}
|
||||||
with self.client.client.lock("lock"):
|
with self.client.client.lock("lock"):
|
||||||
self.client.put(f"{self.host_prefix}/1", host1, value_in_json=True)
|
self.client.put(
|
||||||
self.client.put(f"{self.host_prefix}/2", host2, value_in_json=True)
|
f"{self.host_prefix}/1", host1, value_in_json=True
|
||||||
self.client.put(f"{self.host_prefix}/3", host3, value_in_json=True)
|
)
|
||||||
|
self.client.put(
|
||||||
|
f"{self.host_prefix}/2", host2, value_in_json=True
|
||||||
|
)
|
||||||
|
self.client.put(
|
||||||
|
f"{self.host_prefix}/3", host3, value_in_json=True
|
||||||
|
)
|
||||||
|
|
||||||
def create_vms(self):
|
def create_vms(self):
|
||||||
vm1 = json.dumps(
|
vm1 = json.dumps(
|
||||||
|
@ -146,15 +152,17 @@ class TestFunctions(unittest.TestCase):
|
||||||
{"cpu": 8, "ram": 32},
|
{"cpu": 8, "ram": 32},
|
||||||
]
|
]
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
accumulated_specs(vms), {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
|
accumulated_specs(vms),
|
||||||
|
{"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10},
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_remaining_resources(self):
|
def test_remaining_resources(self):
|
||||||
host_specs = {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
|
host_specs = {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
|
||||||
vms_specs = {"ssd": 10, "cpu": 32, "ram": 12, "hdd": 0}
|
vms_specs = {"ssd": 10, "cpu": 32, "ram": 12, "hdd": 0}
|
||||||
resultant_specs = {"ssd": 0, "cpu": -16, "ram": 36, "hdd": 10}
|
resultant_specs = {"ssd": 0, "cpu": -16, "ram": 36, "hdd": 10}
|
||||||
self.assertEqual(remaining_resources(host_specs, vms_specs),
|
self.assertEqual(
|
||||||
resultant_specs)
|
remaining_resources(host_specs, vms_specs), resultant_specs
|
||||||
|
)
|
||||||
|
|
||||||
def test_vmpool(self):
|
def test_vmpool(self):
|
||||||
self.p.join(1)
|
self.p.join(1)
|
||||||
|
@ -167,7 +175,12 @@ class TestFunctions(unittest.TestCase):
|
||||||
f"{self.vm_prefix}/1",
|
f"{self.vm_prefix}/1",
|
||||||
{
|
{
|
||||||
"owner": "meow",
|
"owner": "meow",
|
||||||
"specs": {"cpu": 4, "ram": 8, "hdd": 100, "sdd": 256},
|
"specs": {
|
||||||
|
"cpu": 4,
|
||||||
|
"ram": 8,
|
||||||
|
"hdd": 100,
|
||||||
|
"sdd": 256,
|
||||||
|
},
|
||||||
"hostname": f"{self.host_prefix}/3",
|
"hostname": f"{self.host_prefix}/3",
|
||||||
"status": "SCHEDULED_DEPLOY",
|
"status": "SCHEDULED_DEPLOY",
|
||||||
},
|
},
|
||||||
|
@ -182,7 +195,12 @@ class TestFunctions(unittest.TestCase):
|
||||||
f"{self.vm_prefix}/7",
|
f"{self.vm_prefix}/7",
|
||||||
{
|
{
|
||||||
"owner": "meow",
|
"owner": "meow",
|
||||||
"specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
|
"specs": {
|
||||||
|
"cpu": 10,
|
||||||
|
"ram": 22,
|
||||||
|
"hdd": 146,
|
||||||
|
"sdd": 0,
|
||||||
|
},
|
||||||
"hostname": "",
|
"hostname": "",
|
||||||
"status": "REQUESTED_NEW",
|
"status": "REQUESTED_NEW",
|
||||||
},
|
},
|
||||||
|
@ -197,7 +215,12 @@ class TestFunctions(unittest.TestCase):
|
||||||
f"{self.vm_prefix}/7",
|
f"{self.vm_prefix}/7",
|
||||||
{
|
{
|
||||||
"owner": "meow",
|
"owner": "meow",
|
||||||
"specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
|
"specs": {
|
||||||
|
"cpu": 10,
|
||||||
|
"ram": 22,
|
||||||
|
"hdd": 146,
|
||||||
|
"sdd": 0,
|
||||||
|
},
|
||||||
"hostname": "",
|
"hostname": "",
|
||||||
"status": "REQUESTED_NEW",
|
"status": "REQUESTED_NEW",
|
||||||
},
|
},
|
||||||
|
|
|
@ -6,11 +6,7 @@ from os.path import dirname
|
||||||
BASE_DIR = dirname(dirname(__file__))
|
BASE_DIR = dirname(dirname(__file__))
|
||||||
sys.path.insert(0, BASE_DIR)
|
sys.path.insert(0, BASE_DIR)
|
||||||
|
|
||||||
from main import (
|
from main import dead_host_detection, dead_host_mitigation, config
|
||||||
dead_host_detection,
|
|
||||||
dead_host_mitigation,
|
|
||||||
config
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class TestDeadHostMechanism(unittest.TestCase):
|
class TestDeadHostMechanism(unittest.TestCase):
|
||||||
|
@ -52,13 +48,23 @@ class TestDeadHostMechanism(unittest.TestCase):
|
||||||
"last_heartbeat": datetime(2011, 1, 1).isoformat(),
|
"last_heartbeat": datetime(2011, 1, 1).isoformat(),
|
||||||
}
|
}
|
||||||
with self.client.client.lock("lock"):
|
with self.client.client.lock("lock"):
|
||||||
self.client.put(f"{self.host_prefix}/1", host1, value_in_json=True)
|
self.client.put(
|
||||||
self.client.put(f"{self.host_prefix}/2", host2, value_in_json=True)
|
f"{self.host_prefix}/1", host1, value_in_json=True
|
||||||
self.client.put(f"{self.host_prefix}/3", host3, value_in_json=True)
|
)
|
||||||
self.client.put(f"{self.host_prefix}/4", host4, value_in_json=True)
|
self.client.put(
|
||||||
|
f"{self.host_prefix}/2", host2, value_in_json=True
|
||||||
|
)
|
||||||
|
self.client.put(
|
||||||
|
f"{self.host_prefix}/3", host3, value_in_json=True
|
||||||
|
)
|
||||||
|
self.client.put(
|
||||||
|
f"{self.host_prefix}/4", host4, value_in_json=True
|
||||||
|
)
|
||||||
|
|
||||||
def test_dead_host_detection(self):
|
def test_dead_host_detection(self):
|
||||||
hosts = self.client.get_prefix(self.host_prefix, value_in_json=True)
|
hosts = self.client.get_prefix(
|
||||||
|
self.host_prefix, value_in_json=True
|
||||||
|
)
|
||||||
deads = dead_host_detection(hosts)
|
deads = dead_host_detection(hosts)
|
||||||
self.assertEqual(deads, ["/test/host/2", "/test/host/3"])
|
self.assertEqual(deads, ["/test/host/2", "/test/host/3"])
|
||||||
return deads
|
return deads
|
||||||
|
@ -66,7 +72,9 @@ class TestDeadHostMechanism(unittest.TestCase):
|
||||||
def test_dead_host_mitigation(self):
|
def test_dead_host_mitigation(self):
|
||||||
deads = self.test_dead_host_detection()
|
deads = self.test_dead_host_detection()
|
||||||
dead_host_mitigation(self.client, deads)
|
dead_host_mitigation(self.client, deads)
|
||||||
hosts = self.client.get_prefix(self.host_prefix, value_in_json=True)
|
hosts = self.client.get_prefix(
|
||||||
|
self.host_prefix, value_in_json=True
|
||||||
|
)
|
||||||
deads = dead_host_detection(hosts)
|
deads = dead_host_detection(hosts)
|
||||||
self.assertEqual(deads, [])
|
self.assertEqual(deads, [])
|
||||||
|
|
||||||
|
|
|
@ -14,18 +14,22 @@ class CustomConfigParser(configparser.RawConfigParser):
|
||||||
result = super().__getitem__(key)
|
result = super().__getitem__(key)
|
||||||
except KeyError as err:
|
except KeyError as err:
|
||||||
raise KeyError(
|
raise KeyError(
|
||||||
'Key \'{}\' not found in configuration. Make sure you configure ucloud.'.format(key)
|
"Key '{}' not found in configuration. Make sure you configure ucloud.".format(
|
||||||
|
key
|
||||||
|
)
|
||||||
) from err
|
) from err
|
||||||
else:
|
else:
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
class Settings(object):
|
class Settings(object):
|
||||||
def __init__(self, config_key='/uncloud/config/'):
|
def __init__(self, config_key="/uncloud/config/"):
|
||||||
conf_name = 'ucloud.conf'
|
conf_name = "ucloud.conf"
|
||||||
conf_dir = os.environ.get('UCLOUD_CONF_DIR', os.path.expanduser('~/ucloud/'))
|
conf_dir = os.environ.get(
|
||||||
|
"UCLOUD_CONF_DIR", os.path.expanduser("~/ucloud/")
|
||||||
|
)
|
||||||
self.config_file = os.path.join(conf_dir, conf_name)
|
self.config_file = os.path.join(conf_dir, conf_name)
|
||||||
|
|
||||||
self.config_parser = CustomConfigParser(allow_no_value=True)
|
self.config_parser = CustomConfigParser(allow_no_value=True)
|
||||||
self.config_key = config_key
|
self.config_key = config_key
|
||||||
|
|
||||||
|
@ -33,43 +37,55 @@ class Settings(object):
|
||||||
try:
|
try:
|
||||||
self.config_parser.read(self.config_file)
|
self.config_parser.read(self.config_file)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
logger.error('%s', err)
|
logger.error("%s", err)
|
||||||
|
|
||||||
def get_etcd_client(self):
|
def get_etcd_client(self):
|
||||||
args = tuple()
|
args = tuple()
|
||||||
try:
|
try:
|
||||||
kwargs = {
|
kwargs = {
|
||||||
'host': self.config_parser.get('etcd', 'url'),
|
"host": self.config_parser.get("etcd", "url"),
|
||||||
'port': self.config_parser.get('etcd', 'port'),
|
"port": self.config_parser.get("etcd", "port"),
|
||||||
'ca_cert': self.config_parser.get('etcd', 'ca_cert'),
|
"ca_cert": self.config_parser.get("etcd", "ca_cert"),
|
||||||
'cert_cert': self.config_parser.get('etcd', 'cert_cert'),
|
"cert_cert": self.config_parser.get(
|
||||||
'cert_key': self.config_parser.get('etcd', 'cert_key')
|
"etcd", "cert_cert"
|
||||||
|
),
|
||||||
|
"cert_key": self.config_parser.get("etcd", "cert_key"),
|
||||||
}
|
}
|
||||||
except configparser.Error as err:
|
except configparser.Error as err:
|
||||||
raise configparser.Error('{} in config file {}'.format(err.message, self.config_file)) from err
|
raise configparser.Error(
|
||||||
|
"{} in config file {}".format(
|
||||||
|
err.message, self.config_file
|
||||||
|
)
|
||||||
|
) from err
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
wrapper = Etcd3Wrapper(*args, **kwargs)
|
wrapper = Etcd3Wrapper(*args, **kwargs)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
logger.error('etcd connection not successfull. Please check your config file.'
|
logger.error(
|
||||||
'\nDetails: %s\netcd connection parameters: %s', err, kwargs)
|
"etcd connection not successfull. Please check your config file."
|
||||||
|
"\nDetails: %s\netcd connection parameters: %s",
|
||||||
|
err,
|
||||||
|
kwargs,
|
||||||
|
)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
return wrapper
|
return wrapper
|
||||||
|
|
||||||
def read_internal_values(self):
|
def read_internal_values(self):
|
||||||
self.config_parser.read_dict({
|
self.config_parser.read_dict(
|
||||||
'etcd': {
|
{
|
||||||
'file_prefix': '/files/',
|
"etcd": {
|
||||||
'host_prefix': '/hosts/',
|
"file_prefix": "/files/",
|
||||||
'image_prefix': '/images/',
|
"host_prefix": "/hosts/",
|
||||||
'image_store_prefix': '/imagestore/',
|
"image_prefix": "/images/",
|
||||||
'network_prefix': '/networks/',
|
"image_store_prefix": "/imagestore/",
|
||||||
'request_prefix': '/requests/',
|
"network_prefix": "/networks/",
|
||||||
'user_prefix': '/users/',
|
"request_prefix": "/requests/",
|
||||||
'vm_prefix': '/vms/',
|
"user_prefix": "/users/",
|
||||||
|
"vm_prefix": "/vms/",
|
||||||
|
}
|
||||||
}
|
}
|
||||||
})
|
)
|
||||||
|
|
||||||
def read_config_file_values(self, config_file):
|
def read_config_file_values(self, config_file):
|
||||||
try:
|
try:
|
||||||
|
@ -77,18 +93,26 @@ class Settings(object):
|
||||||
with open(config_file, "r") as config_file_handle:
|
with open(config_file, "r") as config_file_handle:
|
||||||
self.config_parser.read_file(config_file_handle)
|
self.config_parser.read_file(config_file_handle)
|
||||||
except FileNotFoundError:
|
except FileNotFoundError:
|
||||||
sys.exit('Configuration file {} not found!'.format(config_file))
|
sys.exit(
|
||||||
|
"Configuration file {} not found!".format(config_file)
|
||||||
|
)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
logger.exception(err)
|
logger.exception(err)
|
||||||
sys.exit("Error occurred while reading configuration file")
|
sys.exit("Error occurred while reading configuration file")
|
||||||
|
|
||||||
def read_values_from_etcd(self):
|
def read_values_from_etcd(self):
|
||||||
etcd_client = self.get_etcd_client()
|
etcd_client = self.get_etcd_client()
|
||||||
config_from_etcd = etcd_client.get(self.config_key, value_in_json=True)
|
config_from_etcd = etcd_client.get(
|
||||||
|
self.config_key, value_in_json=True
|
||||||
|
)
|
||||||
if config_from_etcd:
|
if config_from_etcd:
|
||||||
self.config_parser.read_dict(config_from_etcd.value)
|
self.config_parser.read_dict(config_from_etcd.value)
|
||||||
else:
|
else:
|
||||||
raise KeyError("Key '{}' not found in etcd. Please configure ucloud.".format(self.config_key))
|
raise KeyError(
|
||||||
|
"Key '{}' not found in etcd. Please configure ucloud.".format(
|
||||||
|
self.config_key
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
def __getitem__(self, key):
|
def __getitem__(self, key):
|
||||||
self.read_values_from_etcd()
|
self.read_values_from_etcd()
|
||||||
|
|
|
@ -12,15 +12,19 @@ class Shared:
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def host_pool(self):
|
def host_pool(self):
|
||||||
return HostPool(self.etcd_client, settings['etcd']['host_prefix'])
|
return HostPool(
|
||||||
|
self.etcd_client, settings["etcd"]["host_prefix"]
|
||||||
|
)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def vm_pool(self):
|
def vm_pool(self):
|
||||||
return VmPool(self.etcd_client, settings['etcd']['vm_prefix'])
|
return VmPool(self.etcd_client, settings["etcd"]["vm_prefix"])
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def request_pool(self):
|
def request_pool(self):
|
||||||
return RequestPool(self.etcd_client, settings['etcd']['request_prefix'])
|
return RequestPool(
|
||||||
|
self.etcd_client, settings["etcd"]["request_prefix"]
|
||||||
|
)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def storage_handler(self):
|
def storage_handler(self):
|
||||||
|
|
|
@ -37,7 +37,9 @@ class VMQMPHandles:
|
||||||
self.sock.close()
|
self.sock.close()
|
||||||
|
|
||||||
if exc_type:
|
if exc_type:
|
||||||
logger.error('Couldn\'t get handle for VM.', exc_type, exc_val, exc_tb)
|
logger.error(
|
||||||
|
"Couldn't get handle for VM.", exc_type, exc_val, exc_tb
|
||||||
|
)
|
||||||
raise exc_type("Couldn't get handle for VM.") from exc_type
|
raise exc_type("Couldn't get handle for VM.") from exc_type
|
||||||
|
|
||||||
|
|
||||||
|
@ -54,29 +56,46 @@ class TransferVM(Process):
|
||||||
with suppress(FileNotFoundError):
|
with suppress(FileNotFoundError):
|
||||||
os.remove(self.src_sock_path)
|
os.remove(self.src_sock_path)
|
||||||
|
|
||||||
command = ['ssh', '-nNT', '-L', '{}:{}'.format(self.src_sock_path, self.dest_sock_path),
|
command = [
|
||||||
'root@{}'.format(self.host)]
|
"ssh",
|
||||||
|
"-nNT",
|
||||||
|
"-L",
|
||||||
|
"{}:{}".format(self.src_sock_path, self.dest_sock_path),
|
||||||
|
"root@{}".format(self.host),
|
||||||
|
]
|
||||||
|
|
||||||
try:
|
try:
|
||||||
p = sp.Popen(command)
|
p = sp.Popen(command)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error('Couldn\' forward unix socks over ssh.', exc_info=e)
|
logger.error(
|
||||||
|
"Couldn' forward unix socks over ssh.", exc_info=e
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
time.sleep(2)
|
time.sleep(2)
|
||||||
vmm = VMM()
|
vmm = VMM()
|
||||||
logger.debug('Executing: ssh forwarding command: %s', command)
|
logger.debug(
|
||||||
vmm.execute_command(self.src_uuid, command='migrate',
|
"Executing: ssh forwarding command: %s", command
|
||||||
arguments={'uri': 'unix:{}'.format(self.src_sock_path)})
|
)
|
||||||
|
vmm.execute_command(
|
||||||
|
self.src_uuid,
|
||||||
|
command="migrate",
|
||||||
|
arguments={"uri": "unix:{}".format(self.src_sock_path)},
|
||||||
|
)
|
||||||
|
|
||||||
while p.poll() is None:
|
while p.poll() is None:
|
||||||
success, output = vmm.execute_command(self.src_uuid, command='query-migrate')
|
success, output = vmm.execute_command(
|
||||||
|
self.src_uuid, command="query-migrate"
|
||||||
|
)
|
||||||
if success:
|
if success:
|
||||||
status = output['return']['status']
|
status = output["return"]["status"]
|
||||||
if status != 'active':
|
|
||||||
print('Migration Status: ', status)
|
if status != "active":
|
||||||
|
print("Migration Status: ", status)
|
||||||
|
if status == "completed":
|
||||||
|
vmm.stop(self.src_uuid)
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
print('Migration Status: ', status)
|
print("Migration Status: ", status)
|
||||||
else:
|
else:
|
||||||
return
|
return
|
||||||
time.sleep(0.2)
|
time.sleep(0.2)
|
||||||
|
@ -84,18 +103,29 @@ class TransferVM(Process):
|
||||||
|
|
||||||
class VMM:
|
class VMM:
|
||||||
# Virtual Machine Manager
|
# Virtual Machine Manager
|
||||||
def __init__(self, qemu_path='/usr/bin/qemu-system-x86_64',
|
def __init__(
|
||||||
vmm_backend=os.path.expanduser('~/ucloud/vmm/')):
|
self,
|
||||||
|
qemu_path="/usr/bin/qemu-system-x86_64",
|
||||||
|
vmm_backend=os.path.expanduser("~/ucloud/vmm/"),
|
||||||
|
):
|
||||||
self.qemu_path = qemu_path
|
self.qemu_path = qemu_path
|
||||||
self.vmm_backend = vmm_backend
|
self.vmm_backend = vmm_backend
|
||||||
self.socket_dir = os.path.join(self.vmm_backend, 'sock')
|
self.socket_dir = os.path.join(self.vmm_backend, "sock")
|
||||||
|
|
||||||
if not os.path.isdir(self.vmm_backend):
|
if not os.path.isdir(self.vmm_backend):
|
||||||
logger.info('{} does not exists. Creating it...'.format(self.vmm_backend))
|
logger.info(
|
||||||
|
"{} does not exists. Creating it...".format(
|
||||||
|
self.vmm_backend
|
||||||
|
)
|
||||||
|
)
|
||||||
os.makedirs(self.vmm_backend, exist_ok=True)
|
os.makedirs(self.vmm_backend, exist_ok=True)
|
||||||
|
|
||||||
if not os.path.isdir(self.socket_dir):
|
if not os.path.isdir(self.socket_dir):
|
||||||
logger.info('{} does not exists. Creating it...'.format(self.socket_dir))
|
logger.info(
|
||||||
|
"{} does not exists. Creating it...".format(
|
||||||
|
self.socket_dir
|
||||||
|
)
|
||||||
|
)
|
||||||
os.makedirs(self.socket_dir, exist_ok=True)
|
os.makedirs(self.socket_dir, exist_ok=True)
|
||||||
|
|
||||||
def is_running(self, uuid):
|
def is_running(self, uuid):
|
||||||
|
@ -106,8 +136,12 @@ class VMM:
|
||||||
recv = sock.recv(4096)
|
recv = sock.recv(4096)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
# unix sock doesn't exists or it is closed
|
# unix sock doesn't exists or it is closed
|
||||||
logger.debug('VM {} sock either don\' exists or it is closed. It mean VM is stopped.'.format(uuid),
|
logger.debug(
|
||||||
exc_info=err)
|
"VM {} sock either don' exists or it is closed. It mean VM is stopped.".format(
|
||||||
|
uuid
|
||||||
|
),
|
||||||
|
exc_info=err,
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
# if we receive greetings from qmp it mean VM is running
|
# if we receive greetings from qmp it mean VM is running
|
||||||
if len(recv) > 0:
|
if len(recv) > 0:
|
||||||
|
@ -122,36 +156,67 @@ class VMM:
|
||||||
# start --> sucess?
|
# start --> sucess?
|
||||||
migration_args = ()
|
migration_args = ()
|
||||||
if migration:
|
if migration:
|
||||||
migration_args = ('-incoming', 'unix:{}'.format(os.path.join(self.socket_dir, uuid)))
|
migration_args = (
|
||||||
|
"-incoming",
|
||||||
|
"unix:{}".format(os.path.join(self.socket_dir, uuid)),
|
||||||
|
)
|
||||||
|
|
||||||
if self.is_running(uuid):
|
if self.is_running(uuid):
|
||||||
logger.warning('Cannot start VM. It is already running.')
|
logger.warning("Cannot start VM. It is already running.")
|
||||||
else:
|
else:
|
||||||
qmp_arg = ('-qmp', 'unix:{},server,nowait'.format(join_path(self.vmm_backend, uuid)))
|
qmp_arg = (
|
||||||
vnc_arg = ('-vnc', 'unix:{}'.format(tempfile.NamedTemporaryFile().name))
|
"-qmp",
|
||||||
|
"unix:{},server,nowait".format(
|
||||||
|
join_path(self.vmm_backend, uuid)
|
||||||
|
),
|
||||||
|
)
|
||||||
|
vnc_arg = (
|
||||||
|
"-vnc",
|
||||||
|
"unix:{}".format(tempfile.NamedTemporaryFile().name),
|
||||||
|
)
|
||||||
|
|
||||||
command = ['sudo', '-p', 'Enter password to start VM {}: '.format(uuid),
|
command = [
|
||||||
self.qemu_path, *args, *qmp_arg, *migration_args, *vnc_arg, '-daemonize']
|
"sudo",
|
||||||
|
"-p",
|
||||||
|
"Enter password to start VM {}: ".format(uuid),
|
||||||
|
self.qemu_path,
|
||||||
|
*args,
|
||||||
|
*qmp_arg,
|
||||||
|
*migration_args,
|
||||||
|
*vnc_arg,
|
||||||
|
"-daemonize",
|
||||||
|
]
|
||||||
try:
|
try:
|
||||||
sp.check_output(command, stderr=sp.PIPE)
|
sp.check_output(command, stderr=sp.PIPE)
|
||||||
except sp.CalledProcessError as err:
|
except sp.CalledProcessError as err:
|
||||||
logger.exception('Error occurred while starting VM.\nDetail %s', err.stderr.decode('utf-8'))
|
logger.exception(
|
||||||
|
"Error occurred while starting VM.\nDetail %s",
|
||||||
|
err.stderr.decode("utf-8"),
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
with suppress(sp.CalledProcessError):
|
with suppress(sp.CalledProcessError):
|
||||||
sp.check_output([
|
sp.check_output(
|
||||||
'sudo', '-p',
|
[
|
||||||
'Enter password to correct permission for uncloud-vmm\'s directory',
|
"sudo",
|
||||||
'chmod', '-R', 'o=rwx,g=rwx', self.vmm_backend
|
"-p",
|
||||||
])
|
"Enter password to correct permission for uncloud-vmm's directory",
|
||||||
|
"chmod",
|
||||||
|
"-R",
|
||||||
|
"o=rwx,g=rwx",
|
||||||
|
self.vmm_backend,
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
# TODO: Find some good way to check whether the virtual machine is up and
|
# TODO: Find some good way to check whether the virtual machine is up and
|
||||||
# running without relying on non-guarenteed ways.
|
# running without relying on non-guarenteed ways.
|
||||||
for _ in range(10):
|
for _ in range(10):
|
||||||
time.sleep(2)
|
time.sleep(2)
|
||||||
status = self.get_status(uuid)
|
status = self.get_status(uuid)
|
||||||
if status in ['running', 'inmigrate']:
|
if status in ["running", "inmigrate"]:
|
||||||
return status
|
return status
|
||||||
logger.warning('Timeout on VM\'s status. Shutting down VM %s', uuid)
|
logger.warning(
|
||||||
|
"Timeout on VM's status. Shutting down VM %s", uuid
|
||||||
|
)
|
||||||
self.stop(uuid)
|
self.stop(uuid)
|
||||||
# TODO: What should we do more. VM can still continue to run in background.
|
# TODO: What should we do more. VM can still continue to run in background.
|
||||||
# If we have pid of vm we can kill it using OS.
|
# If we have pid of vm we can kill it using OS.
|
||||||
|
@ -159,55 +224,73 @@ class VMM:
|
||||||
def execute_command(self, uuid, command, **kwargs):
|
def execute_command(self, uuid, command, **kwargs):
|
||||||
# execute_command -> sucess?, output
|
# execute_command -> sucess?, output
|
||||||
try:
|
try:
|
||||||
with VMQMPHandles(os.path.join(self.vmm_backend, uuid)) as (sock_handle, file_handle):
|
with VMQMPHandles(os.path.join(self.vmm_backend, uuid)) as (
|
||||||
command_to_execute = {
|
sock_handle,
|
||||||
'execute': command,
|
file_handle,
|
||||||
**kwargs
|
):
|
||||||
}
|
command_to_execute = {"execute": command, **kwargs}
|
||||||
sock_handle.sendall(json.dumps(command_to_execute).encode('utf-8'))
|
sock_handle.sendall(
|
||||||
|
json.dumps(command_to_execute).encode("utf-8")
|
||||||
|
)
|
||||||
output = file_handle.readline()
|
output = file_handle.readline()
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception('Error occurred while executing command and getting valid output from qmp')
|
logger.exception(
|
||||||
|
"Error occurred while executing command and getting valid output from qmp"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
output = json.loads(output)
|
output = json.loads(output)
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.exception('QMP Output isn\'t valid JSON. %s', output)
|
logger.exception(
|
||||||
|
"QMP Output isn't valid JSON. %s", output
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
return 'return' in output, output
|
return "return" in output, output
|
||||||
return False, None
|
return False, None
|
||||||
|
|
||||||
def stop(self, uuid):
|
def stop(self, uuid):
|
||||||
success, output = self.execute_command(command='quit', uuid=uuid)
|
success, output = self.execute_command(
|
||||||
|
command="quit", uuid=uuid
|
||||||
|
)
|
||||||
return success
|
return success
|
||||||
|
|
||||||
def get_status(self, uuid):
    """Return the QMP run state of VM *uuid* (e.g. ``"running"``).

    Falls back to ``"STOPPED"`` when the VM cannot be queried at all
    (dead process, missing socket, malformed reply).
    """
    ok, reply = self.execute_command(command="query-status", uuid=uuid)
    if not ok:
        # TODO: Think about this for a little more
        return "STOPPED"
    return reply["return"]["status"]
||||||
|
|
||||||
def discover(self):
    """Return the UUIDs of VMs running on this host.

    Each live VM leaves a non-directory entry (its control socket)
    named after its UUID directly under the backend directory.
    """
    return [
        entry
        for entry in os.listdir(self.vmm_backend)
        if not isdir(join_path(self.vmm_backend, entry))
    ]
||||||
|
|
||||||
def get_vnc(self, uuid):
    """Return the VNC service port of VM *uuid*, or None if unavailable."""
    ok, reply = self.execute_command(uuid, command="query-vnc")
    if not ok:
        return None
    return reply["return"]["service"]
||||||
|
|
||||||
def transfer(self, src_uuid, destination_sock_path, host):
    """Start migrating VM *src_uuid* to *destination_sock_path* on *host*.

    Spawns a TransferVM worker process and returns immediately; the
    migration itself proceeds asynchronously in that process.
    """
    worker = TransferVM(
        src_uuid,
        destination_sock_path,
        socket_dir=self.socket_dir,
        host=host,
    )
    worker.start()
||||||
|
|
||||||
# TODO: the following method should clean things that went wrong
# e.g If VM migration fails or didn't start for long time
# i.e 15 minutes we should stop the waiting VM.
def maintenace(self):
    # NOTE(review): name is misspelled ("maintenance" intended); kept
    # as-is for backward compatibility with any existing callers.
    # Currently a no-op placeholder for the cleanup described above.
    pass
||||||
|
|
Loading…
Reference in a new issue