Remove ucloud_common and put its files under the ucloud.common subpackage.

Remove the individual config.py used by every component and merge them into a single ucloud/config.py.
Use /etc/ucloud/ucloud.conf for environment variables (see the sketch below).
A lot of refactoring.
Make the ucloud repo a package and the individual components subpackages, to avoid code duplication.
Improved logging.
ahmadbilalkhalid 2019-11-18 22:39:57 +05:00
commit 6fa77bce4d
51 changed files with 890 additions and 567 deletions
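For orientation, here is a minimal sketch of the single shared configuration module this commit introduces. It assumes python-decouple's Config/RepositoryEnv (the old code already imported decouple) is now pointed at /etc/ucloud/ucloud.conf instead of per-component .env files; the name env_vars and the get(..., cast=...) calls are taken from the diff below, while the etcd_client, request_pool, vm_pool and host_pool objects that the real ucloud/config.py also builds are omitted here.

# ucloud/config.py -- sketch only, not the actual implementation
from decouple import Config, RepositoryEnv

# Single source of settings for every component, backed by /etc/ucloud/ucloud.conf.
env_vars = Config(RepositoryEnv("/etc/ucloud/ucloud.conf"))

# Components read values exactly as in the diff below, for example:
#   env_vars.get("VM_PREFIX")
#   env_vars.get("PREFIX_LENGTH", cast=int)

The new import paths (common.counters, common.request, common.vm, from . import schemas) reflect the move of the former ucloud_common repository into a common subpackage of the ucloud package, and "Improved logging" shows up here as from api import logger, presumably a per-subpackage logger such as logging.getLogger(__name__) defined in api/__init__.py.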


@@ -1,35 +1,19 @@
import json
import subprocess
import os
import pynetbox
import decouple
import schemas
import subprocess
from uuid import uuid4
import pynetbox
from flask import Flask, request
from flask_restful import Resource, Api
from ucloud_common.vm import VMStatus
from ucloud_common.request import RequestEntry, RequestType
from helper import (generate_mac, get_ip_addr, get_etcd_counter,
increment_etcd_counter, mac2ipv6)
from config import (
etcd_client,
WITHOUT_CEPH,
VM_PREFIX,
HOST_PREFIX,
FILE_PREFIX,
IMAGE_PREFIX,
NETWORK_PREFIX,
logging,
REQUEST_POOL,
VM_POOL,
HOST_POOL,
)
from common import counters
from common.request import RequestEntry, RequestType
from common.vm import VMStatus
from config import (etcd_client, request_pool, vm_pool, host_pool, env_vars)
from . import schemas
from .helper import generate_mac, mac2ipv6
from api import logger
app = Flask(__name__)
api = Api(app)
@@ -42,12 +26,12 @@ class CreateVM(Resource):
validator = schemas.CreateVMSchema(data)
if validator.is_valid():
vm_uuid = uuid4().hex
vm_key = os.path.join(VM_PREFIX, vm_uuid)
vm_key = os.path.join(env_vars.get("VM_PREFIX"), vm_uuid)
specs = {
'cpu': validator.specs['cpu'],
'ram': validator.specs['ram'],
'os-ssd': validator.specs['os-ssd'],
'hdd': validator.specs['hdd']
"cpu": validator.specs["cpu"],
"ram": validator.specs["ram"],
"os-ssd": validator.specs["os-ssd"],
"hdd": validator.specs["hdd"],
}
macs = [generate_mac() for i in range(len(data["network"]))]
vm_entry = {
@@ -61,15 +45,16 @@ class CreateVM(Resource):
"log": [],
"vnc_socket": "",
"network": list(zip(data["network"], macs)),
"metadata": {
"ssh-keys": []
},
"metadata": {"ssh-keys": []},
}
etcd_client.put(vm_key, vm_entry, value_in_json=True)
# Create ScheduleVM Request
r = RequestEntry.from_scratch(type=RequestType.ScheduleVM, uuid=vm_uuid)
REQUEST_POOL.put(r)
r = RequestEntry.from_scratch(
type=RequestType.ScheduleVM, uuid=vm_uuid,
request_prefix=env_vars.get("REQUEST_PREFIX")
)
request_pool.put(r)
return {"message": "VM Creation Queued"}, 200
return validator.get_errors(), 400
@@ -81,13 +66,21 @@ class VmStatus(Resource):
data = request.json
validator = schemas.VMStatusSchema(data)
if validator.is_valid():
vm = VM_POOL.get(os.path.join(VM_PREFIX, data["uuid"]))
vm = vm_pool.get(
os.path.join(env_vars.get("VM_PREFIX"), data["uuid"])
)
vm_value = vm.value.copy()
vm_value["ip"] = []
for network_and_mac in vm.network:
network_name, mac = network_and_mac
network = etcd_client.get(os.path.join(NETWORK_PREFIX, data["name"], network_name),
value_in_json=True)
network = etcd_client.get(
os.path.join(
env_vars.get("NETWORK_PREFIX"),
data["name"],
network_name,
),
value_in_json=True,
)
ipv6_addr = network.value.get("ipv6").split("::")[0] + "::"
vm_value["ip"].append(mac2ipv6(mac, ipv6_addr))
vm.value = vm_value
@@ -102,7 +95,9 @@ class CreateImage(Resource):
data = request.json
validator = schemas.CreateImageSchema(data)
if validator.is_valid():
file_entry = etcd_client.get(os.path.join(FILE_PREFIX, data["uuid"]))
file_entry = etcd_client.get(
os.path.join(env_vars.get("FILE_PREFIX"), data["uuid"])
)
file_entry_value = json.loads(file_entry.value)
image_entry_json = {
@@ -114,7 +109,8 @@ class CreateImage(Resource):
"visibility": "public",
}
etcd_client.put(
os.path.join(IMAGE_PREFIX, data["uuid"]), json.dumps(image_entry_json)
os.path.join(env_vars.get("IMAGE_PREFIX"), data["uuid"]),
json.dumps(image_entry_json),
)
return {"message": "Image queued for creation."}
@@ -124,15 +120,18 @@ class CreateImage(Resource):
class ListPublicImages(Resource):
@staticmethod
def get():
images = etcd_client.get_prefix(IMAGE_PREFIX, value_in_json=True)
images = etcd_client.get_prefix(
env_vars.get("IMAGE_PREFIX"), value_in_json=True
)
r = {}
r["images"] = []
for image in images:
image_key = "{}:{}".format(image.value["store_name"], image.value["name"])
r["images"].append({
"name":image_key,
"status": image.value["status"]
})
image_key = "{}:{}".format(
image.value["store_name"], image.value["name"]
)
r["images"].append(
{"name": image_key, "status": image.value["status"]}
)
return r, 200
@@ -143,34 +142,47 @@ class VMAction(Resource):
validator = schemas.VmActionSchema(data)
if validator.is_valid():
vm_entry = VM_POOL.get(os.path.join(VM_PREFIX, data["uuid"]))
vm_entry = vm_pool.get(
os.path.join(env_vars.get("VM_PREFIX"), data["uuid"])
)
action = data["action"]
if action == "start":
vm_entry.status = VMStatus.requested_start
VM_POOL.put(vm_entry)
vm_pool.put(vm_entry)
action = "schedule"
if action == "delete" and vm_entry.hostname == "":
try:
path_without_protocol = vm_entry.path[vm_entry.path.find(":") + 1 :]
path_without_protocol = vm_entry.path[
vm_entry.path.find(":") + 1:
]
if WITHOUT_CEPH:
if env_vars.get("WITHOUT_CEPH"):
command_to_delete = [
"rm", "-rf",
"rm",
"-rf",
os.path.join("/var/vm", vm_entry.uuid),
]
else:
command_to_delete = ["rbd", "rm", path_without_protocol]
command_to_delete = [
"rbd",
"rm",
path_without_protocol,
]
subprocess.check_output(command_to_delete, stderr=subprocess.PIPE)
subprocess.check_output(
command_to_delete, stderr=subprocess.PIPE
)
except subprocess.CalledProcessError as e:
if "No such file" in e.stderr.decode("utf-8"):
etcd_client.client.delete(vm_entry.key)
return {"message": "VM successfully deleted"}
else:
logging.exception(e)
return {"message": "Some error occurred while deleting VM"}
logger.exception(e)
return {
"message": "Some error occurred while deleting VM"
}
else:
etcd_client.client.delete(vm_entry.key)
return {"message": "VM successfully deleted"}
@@ -179,8 +191,9 @@ class VMAction(Resource):
type="{}VM".format(action.title()),
uuid=data["uuid"],
hostname=vm_entry.hostname,
request_prefix=env_vars.get("REQUEST_PREFIX")
)
REQUEST_POOL.put(r)
request_pool.put(r)
return {"message": "VM {} Queued".format(action.title())}, 200
else:
return validator.get_errors(), 400
@@ -193,15 +206,18 @@ class VMMigration(Resource):
validator = schemas.VmMigrationSchema(data)
if validator.is_valid():
vm = VM_POOL.get(data["uuid"])
vm = vm_pool.get(data["uuid"])
r = RequestEntry.from_scratch(
type=RequestType.ScheduleVM,
uuid=vm.uuid,
destination=os.path.join(HOST_PREFIX, data["destination"]),
destination=os.path.join(
env_vars.get("HOST_PREFIX"), data["destination"]
),
migration=True,
request_prefix=env_vars.get("REQUEST_PREFIX")
)
REQUEST_POOL.put(r)
request_pool.put(r)
return {"message": "VM Migration Initialization Queued"}, 200
else:
return validator.get_errors(), 400
@@ -214,7 +230,9 @@ class ListUserVM(Resource):
validator = schemas.OTPSchema(data)
if validator.is_valid():
vms = etcd_client.get_prefix(VM_PREFIX, value_in_json=True)
vms = etcd_client.get_prefix(
env_vars.get("VM_PREFIX"), value_in_json=True
)
return_vms = []
user_vms = filter(lambda v: v.value["owner"] == data["name"], vms)
for vm in user_vms:
@@ -246,9 +264,13 @@ class ListUserFiles(Resource):
validator = schemas.OTPSchema(data)
if validator.is_valid():
files = etcd_client.get_prefix(FILE_PREFIX, value_in_json=True)
files = etcd_client.get_prefix(
env_vars.get("FILE_PREFIX"), value_in_json=True
)
return_files = []
user_files = list(filter(lambda f: f.value["owner"] == data["name"], files))
user_files = list(
filter(lambda f: f.value["owner"] == data["name"], files)
)
for file in user_files:
return_files.append(
{
@@ -267,7 +289,7 @@ class CreateHost(Resource):
data = request.json
validator = schemas.CreateHostSchema(data)
if validator.is_valid():
host_key = os.path.join(HOST_PREFIX, uuid4().hex)
host_key = os.path.join(env_vars.get("HOST_PREFIX"), uuid4().hex)
host_entry = {
"specs": data["specs"],
"hostname": data["hostname"],
@@ -284,7 +306,7 @@ class CreateHost(Resource):
class ListHost(Resource):
@staticmethod
def get():
hosts = HOST_POOL.hosts
hosts = host_pool.hosts
r = {
host.key: {
"status": host.status,
@@ -305,21 +327,38 @@ class GetSSHKeys(Resource):
if not validator.key_name.value:
# {user_prefix}/{realm}/{name}/key/
etcd_key = os.path.join(decouple.config("USER_PREFIX"), data["realm"],
data["name"], "key")
etcd_entry = etcd_client.get_prefix(etcd_key, value_in_json=True)
keys = {key.key.split("/")[-1]: key.value for key in etcd_entry}
etcd_key = os.path.join(
env_vars.get('USER_PREFIX'),
data["realm"],
data["name"],
"key",
)
etcd_entry = etcd_client.get_prefix(
etcd_key, value_in_json=True
)
keys = {
key.key.split("/")[-1]: key.value for key in etcd_entry
}
return {"keys": keys}
else:
# {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = os.path.join(decouple.config("USER_PREFIX"), data["realm"],
data["name"], "key", data["key_name"])
etcd_key = os.path.join(
env_vars.get('USER_PREFIX'),
data["realm"],
data["name"],
"key",
data["key_name"],
)
etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
if etcd_entry:
return {"keys": {etcd_entry.key.split("/")[-1]: etcd_entry.value}}
return {
"keys": {
etcd_entry.key.split("/")[-1]: etcd_entry.value
}
}
else:
return {"keys": {}}
else:
@@ -332,13 +371,22 @@ class AddSSHKey(Resource):
data = request.json
validator = schemas.AddSSHSchema(data)
if validator.is_valid():
# {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = os.path.join(USER_PREFIX, data["realm"], data["name"],
"key", data["key_name"])
etcd_key = os.path.join(
env_vars.get("USER_PREFIX"),
data["realm"],
data["name"],
"key",
data["key_name"],
)
etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
if etcd_entry:
return {"message": "Key with name '{}' already exists".format(data["key_name"])}
return {
"message": "Key with name '{}' already exists".format(
data["key_name"]
)
}
else:
# Key Not Found. It implies the user hasn't added any key yet.
etcd_client.put(etcd_key, data["key"], value_in_json=True)
@@ -353,16 +401,25 @@ class RemoveSSHKey(Resource):
data = request.json
validator = schemas.RemoveSSHSchema(data)
if validator.is_valid():
# {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = os.path.join(USER_PREFIX, data["realm"], data["name"],
"key", data["key_name"])
etcd_key = os.path.join(
env_vars.get("USER_PREFIX"),
data["realm"],
data["name"],
"key",
data["key_name"],
)
etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
if etcd_entry:
etcd_client.client.delete(etcd_key)
return {"message": "Key successfully removed."}
else:
return {"message": "No Key with name '{}' Exists at all.".format(data["key_name"])}
return {
"message": "No Key with name '{}' Exists at all.".format(
data["key_name"]
)
}
else:
return validator.get_errors(), 400
@@ -374,29 +431,42 @@ class CreateNetwork(Resource):
validator = schemas.CreateNetwork(data)
if validator.is_valid():
network_entry = {
"id": increment_etcd_counter(etcd_client, "/v1/counter/vxlan"),
"id": counters.increment_etcd_counter(
etcd_client, "/v1/counter/vxlan"
),
"type": data["type"],
}
if validator.user.value:
nb = pynetbox.api(url=decouple.config("NETBOX_URL"),
token=decouple.config("NETBOX_TOKEN"))
nb_prefix = nb.ipam.prefixes.get(prefix=decouple.config("PREFIX"))
nb = pynetbox.api(
url=env_vars.get("NETBOX_URL"),
token=env_vars.get("NETBOX_TOKEN"),
)
nb_prefix = nb.ipam.prefixes.get(
prefix=env_vars.get("PREFIX")
)
prefix = nb_prefix.available_prefixes.create(data=
{
"prefix_length": decouple.config("PREFIX_LENGTH", cast=int),
"description": "{}'s network \"{}\"".format(data["name"],
data["network_name"]),
"is_pool": True
prefix = nb_prefix.available_prefixes.create(
data={
"prefix_length": env_vars.get(
"PREFIX_LENGTH", cast=int
),
"description": '{}\'s network "{}"'.format(
data["name"], data["network_name"]
),
"is_pool": True,
}
)
network_entry["ipv6"] = prefix["prefix"]
else:
network_entry["ipv6"] = "fd00::/64"
network_key = os.path.join(NETWORK_PREFIX, data["name"], data["network_name"])
network_key = os.path.join(
env_vars.get("NETWORK_PREFIX"),
data["name"],
data["network_name"],
)
etcd_client.put(network_key, network_entry, value_in_json=True)
return {"message": "Network successfully added."}
else:
@@ -410,7 +480,9 @@ class ListUserNetwork(Resource):
validator = schemas.OTPSchema(data)
if validator.is_valid():
prefix = os.path.join(NETWORK_PREFIX, data["name"])
prefix = os.path.join(
env_vars.get("NETWORK_PREFIX"), data["name"]
)
networks = etcd_client.get_prefix(prefix, value_in_json=True)
user_networks = []
for net in networks:
@@ -443,5 +515,20 @@ api.add_resource(ListHost, "/host/list")
api.add_resource(CreateNetwork, "/network/create")
if __name__ == "__main__":
def main():
data = {
"is_public": True,
"type": "ceph",
"name": "images",
"description": "first ever public image-store",
"attributes": {"list": [], "key": [], "pool": "images"},
}
etcd_client.put(os.path.join(env_vars.get('IMAGE_STORE_PREFIX'), uuid4().hex), json.dumps(data))
app.run(host="::", debug=True)
if __name__ == "__main__":
main()