uncloud cli converted to argparse

ahmadbilalkhalid 2020-01-03 18:38:59 +05:00
parent 50fb135726
commit 3296e524cc
13 changed files with 284 additions and 287 deletions


@@ -1,7 +1,11 @@
 [etcd]
 url = localhost
 port = 2379
 ca_cert
 cert_cert
 cert_key
 
+[client]
+name = replace_me
+realm = replace_me
+seed = replace_me
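
The new [client] section appears to hold the OTP credentials (name, realm, seed) that the argparse-based cli component and the API's OTP schemas expect; like the [etcd] values, it is readable through the Settings wrapper. An illustrative read, assuming an uncloud configuration file is already in place:

from uncloud.settings import settings

# values come from uncloud.conf (or from etcd once `uncloud configure` has run)
otp_credentials = {
    'name': settings['client']['name'],
    'realm': settings['client']['realm'],
    'seed': settings['client']['seed'],
}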


@@ -1,14 +1,13 @@
 #!/usr/bin/env python3
-import argparse
 import logging
-import importlib
-import multiprocessing as mp
 import sys
-from logging.handlers import SysLogHandler
-from uncloud.configure.main import configure_parser
+import importlib
+import argparse
+import multiprocessing as mp
 from uncloud import UncloudException
+from contextlib import suppress
 
 
 def exception_hook(exc_type, exc_value, exc_traceback):
     logging.getLogger(__name__).error(
@@ -19,40 +18,25 @@ def exception_hook(exc_type, exc_value, exc_traceback):
 sys.excepthook = exception_hook
 
 if __name__ == '__main__':
     # Setting up root logger
     logger = logging.getLogger()
     logger.setLevel(logging.DEBUG)
 
-    parent_parser = argparse.ArgumentParser(add_help=False)
-    parent_parser.add_argument("--debug", "-d", action='store_true')
-
     arg_parser = argparse.ArgumentParser()
-    subparsers = arg_parser.add_subparsers(dest="command")
-
-    api_parser = subparsers.add_parser("api", parents=[parent_parser])
-    api_parser.add_argument("--port", "-p")
-
-    host_parser = subparsers.add_parser("host")
-    host_parser.add_argument("--hostname", required=True)
-
-    scheduler_parser = subparsers.add_parser("scheduler", parents=[parent_parser])
-
-    filescanner_parser = subparsers.add_parser("filescanner")
-    imagescanner_parser = subparsers.add_parser("imagescanner")
-
-    metadata_parser = subparsers.add_parser("metadata")
-    metadata_parser.add_argument("--port", "-p")
-
-    config_parser = subparsers.add_parser("configure")
-    configure_parser(config_parser)
+    subparsers = arg_parser.add_subparsers(dest='command')
+
+    parent_parser = argparse.ArgumentParser(add_help=False)
+    parent_parser.add_argument('--debug', '-d', action='store_true', default=False,
+                               help='More verbose logging')
+
+    for component in ['api', 'scheduler', 'host', 'filescanner', 'imagescanner',
+                      'metadata', 'configure', 'cli']:
+        mod = importlib.import_module('uncloud.{}.main'.format(component))
+        parser = getattr(mod, 'arg_parser')
+        subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser])
 
     args = arg_parser.parse_args()
     if not args.command:
         arg_parser.print_help()
     else:
@@ -62,12 +46,11 @@ if __name__ == '__main__':
         # errors out, so the following command configure multiprocessing
         # module to not inherit anything from parent.
         mp.set_start_method('spawn')
 
         arguments = vars(args)
         try:
             name = arguments.pop('command')
-            mod = importlib.import_module("uncloud.{}.main".format(name))
-            main = getattr(mod, "main")
+            mod = importlib.import_module('uncloud.{}.main'.format(name))
+            main = getattr(mod, 'main')
             main(**arguments)
         except UncloudException as err:
             logger.error(err)
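
The change above removes the hard-coded per-component subparsers: every uncloud.<component>.main module now exposes a module-level arg_parser built with add_help=False, and the entry point merges those into one top-level parser and dispatches to the chosen component's main(**kwargs). A minimal self-contained sketch of that aggregation/dispatch pattern (the shortened component list is illustrative only):

import argparse
import importlib

# flags shared by every component, attached via parents=[...]
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('--debug', '-d', action='store_true', default=False)

arg_parser = argparse.ArgumentParser()
subparsers = arg_parser.add_subparsers(dest='command')

for component in ['api', 'host']:  # shortened list for the sketch
    # each component module defines e.g. argparse.ArgumentParser('api', add_help=False),
    # so parser.prog carries the component name
    mod = importlib.import_module('uncloud.{}.main'.format(component))
    component_parser = getattr(mod, 'arg_parser')
    subparsers.add_parser(name=component_parser.prog,
                          parents=[component_parser, parent_parser])

args = arg_parser.parse_args()
if args.command:
    arguments = vars(args)
    name = arguments.pop('command')
    # the component's main() receives the remaining flags as kwargs,
    # e.g. main(debug=False, port=None) for the api component
    mod = importlib.import_module('uncloud.{}.main'.format(name))
    getattr(mod, 'main')(**arguments)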


@@ -40,7 +40,7 @@ setup(
         "pynetbox",
         "colorama",
         "etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3",
-        "marshmallow",
+        "marshmallow"
     ],
     scripts=["scripts/uncloud"],
     data_files=[


@@ -1,6 +1,7 @@
 import json
 import pynetbox
 import logging
+import argparse
 
 from uuid import uuid4
 from os.path import join as join_path
@@ -14,7 +15,6 @@ from uncloud.common.vm import VMStatus
 from uncloud.common.request import RequestEntry, RequestType
 from uncloud.settings import settings
 from uncloud.shared import shared
-
 from . import schemas
 from .helper import generate_mac, mac2ipv6
 from uncloud import UncloudException
@@ -25,6 +25,9 @@ app = Flask(__name__)
 api = Api(app)
 app.logger.handlers.clear()
 
+arg_parser = argparse.ArgumentParser('api', add_help=False)
+arg_parser.add_argument('--port', '-p')
+
 
 @app.errorhandler(Exception)
 def handle_exception(e):
@ -34,11 +37,11 @@ def handle_exception(e):
return e return e
# now you're handling non-HTTP exceptions only # now you're handling non-HTTP exceptions only
return {"message": "Server Error"}, 500 return {'message': 'Server Error'}, 500
class CreateVM(Resource): class CreateVM(Resource):
"""API Request to Handle Creation of VM""" '''API Request to Handle Creation of VM'''
@staticmethod @staticmethod
def post(): def post():
@ -46,33 +49,33 @@ class CreateVM(Resource):
validator = schemas.CreateVMSchema(data) validator = schemas.CreateVMSchema(data)
if validator.is_valid(): if validator.is_valid():
vm_uuid = uuid4().hex vm_uuid = uuid4().hex
vm_key = join_path(settings["etcd"]["vm_prefix"], vm_uuid) vm_key = join_path(settings['etcd']['vm_prefix'], vm_uuid)
specs = { specs = {
"cpu": validator.specs["cpu"], 'cpu': validator.specs['cpu'],
"ram": validator.specs["ram"], 'ram': validator.specs['ram'],
"os-ssd": validator.specs["os-ssd"], 'os-ssd': validator.specs['os-ssd'],
"hdd": validator.specs["hdd"], 'hdd': validator.specs['hdd'],
} }
macs = [generate_mac() for _ in range(len(data["network"]))] macs = [generate_mac() for _ in range(len(data['network']))]
tap_ids = [ tap_ids = [
counters.increment_etcd_counter( counters.increment_etcd_counter(
shared.etcd_client, "/v1/counter/tap" shared.etcd_client, '/v1/counter/tap'
) )
for _ in range(len(data["network"])) for _ in range(len(data['network']))
] ]
vm_entry = { vm_entry = {
"name": data["vm_name"], 'name': data['vm_name'],
"owner": data["name"], 'owner': data['name'],
"owner_realm": data["realm"], 'owner_realm': data['realm'],
"specs": specs, 'specs': specs,
"hostname": "", 'hostname': '',
"status": VMStatus.stopped, 'status': VMStatus.stopped,
"image_uuid": validator.image_uuid, 'image_uuid': validator.image_uuid,
"log": [], 'log': [],
"vnc_socket": "", 'vnc_socket': '',
"network": list(zip(data["network"], macs, tap_ids)), 'network': list(zip(data['network'], macs, tap_ids)),
"metadata": {"ssh-keys": []}, 'metadata': {'ssh-keys': []},
"in_migration": False, 'in_migration': False,
} }
shared.etcd_client.put(vm_key, vm_entry, value_in_json=True) shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
@ -80,11 +83,11 @@ class CreateVM(Resource):
r = RequestEntry.from_scratch( r = RequestEntry.from_scratch(
type=RequestType.ScheduleVM, type=RequestType.ScheduleVM,
uuid=vm_uuid, uuid=vm_uuid,
request_prefix=settings["etcd"]["request_prefix"], request_prefix=settings['etcd']['request_prefix'],
) )
shared.request_pool.put(r) shared.request_pool.put(r)
return {"message": "VM Creation Queued"}, 200 return {'message': 'VM Creation Queued'}, 200
return validator.get_errors(), 400 return validator.get_errors(), 400
@ -95,24 +98,24 @@ class VmStatus(Resource):
validator = schemas.VMStatusSchema(data) validator = schemas.VMStatusSchema(data)
if validator.is_valid(): if validator.is_valid():
vm = shared.vm_pool.get( vm = shared.vm_pool.get(
join_path(settings["etcd"]["vm_prefix"], data["uuid"]) join_path(settings['etcd']['vm_prefix'], data['uuid'])
) )
vm_value = vm.value.copy() vm_value = vm.value.copy()
vm_value["ip"] = [] vm_value['ip'] = []
for network_mac_and_tap in vm.network: for network_mac_and_tap in vm.network:
network_name, mac, tap = network_mac_and_tap network_name, mac, tap = network_mac_and_tap
network = shared.etcd_client.get( network = shared.etcd_client.get(
join_path( join_path(
settings["etcd"]["network_prefix"], settings['etcd']['network_prefix'],
data["name"], data['name'],
network_name, network_name,
), ),
value_in_json=True, value_in_json=True,
) )
ipv6_addr = ( ipv6_addr = (
network.value.get("ipv6").split("::")[0] + "::" network.value.get('ipv6').split('::')[0] + '::'
) )
vm_value["ip"].append(mac2ipv6(mac, ipv6_addr)) vm_value['ip'].append(mac2ipv6(mac, ipv6_addr))
vm.value = vm_value vm.value = vm_value
return vm.value return vm.value
else: else:
@ -126,26 +129,26 @@ class CreateImage(Resource):
validator = schemas.CreateImageSchema(data) validator = schemas.CreateImageSchema(data)
if validator.is_valid(): if validator.is_valid():
file_entry = shared.etcd_client.get( file_entry = shared.etcd_client.get(
join_path(settings["etcd"]["file_prefix"], data["uuid"]) join_path(settings['etcd']['file_prefix'], data['uuid'])
) )
file_entry_value = json.loads(file_entry.value) file_entry_value = json.loads(file_entry.value)
image_entry_json = { image_entry_json = {
"status": "TO_BE_CREATED", 'status': 'TO_BE_CREATED',
"owner": file_entry_value["owner"], 'owner': file_entry_value['owner'],
"filename": file_entry_value["filename"], 'filename': file_entry_value['filename'],
"name": data["name"], 'name': data['name'],
"store_name": data["image_store"], 'store_name': data['image_store'],
"visibility": "public", 'visibility': 'public',
} }
shared.etcd_client.put( shared.etcd_client.put(
join_path( join_path(
settings["etcd"]["image_prefix"], data["uuid"] settings['etcd']['image_prefix'], data['uuid']
), ),
json.dumps(image_entry_json), json.dumps(image_entry_json),
) )
return {"message": "Image queued for creation."} return {'message': 'Image queued for creation.'}
return validator.get_errors(), 400 return validator.get_errors(), 400
@ -153,15 +156,15 @@ class ListPublicImages(Resource):
@staticmethod @staticmethod
def get(): def get():
images = shared.etcd_client.get_prefix( images = shared.etcd_client.get_prefix(
settings["etcd"]["image_prefix"], value_in_json=True settings['etcd']['image_prefix'], value_in_json=True
) )
r = {"images": []} r = {'images': []}
for image in images: for image in images:
image_key = "{}:{}".format( image_key = '{}:{}'.format(
image.value["store_name"], image.value["name"] image.value['store_name'], image.value['name']
) )
r["images"].append( r['images'].append(
{"name": image_key, "status": image.value["status"]} {'name': image_key, 'status': image.value['status']}
) )
return r, 200 return r, 200
@ -174,14 +177,14 @@ class VMAction(Resource):
if validator.is_valid(): if validator.is_valid():
vm_entry = shared.vm_pool.get( vm_entry = shared.vm_pool.get(
join_path(settings["etcd"]["vm_prefix"], data["uuid"]) join_path(settings['etcd']['vm_prefix'], data['uuid'])
) )
action = data["action"] action = data['action']
if action == "start": if action == 'start':
action = "schedule" action = 'schedule'
if action == "delete" and vm_entry.hostname == "": if action == 'delete' and vm_entry.hostname == '':
if shared.storage_handler.is_vm_image_exists( if shared.storage_handler.is_vm_image_exists(
vm_entry.uuid vm_entry.uuid
): ):
@ -190,25 +193,25 @@ class VMAction(Resource):
) )
if r_status: if r_status:
shared.etcd_client.client.delete(vm_entry.key) shared.etcd_client.client.delete(vm_entry.key)
return {"message": "VM successfully deleted"} return {'message': 'VM successfully deleted'}
else: else:
logger.error( logger.error(
"Some Error Occurred while deleting VM" 'Some Error Occurred while deleting VM'
) )
return {"message": "VM deletion unsuccessfull"} return {'message': 'VM deletion unsuccessfull'}
else: else:
shared.etcd_client.client.delete(vm_entry.key) shared.etcd_client.client.delete(vm_entry.key)
return {"message": "VM successfully deleted"} return {'message': 'VM successfully deleted'}
r = RequestEntry.from_scratch( r = RequestEntry.from_scratch(
type="{}VM".format(action.title()), type='{}VM'.format(action.title()),
uuid=data["uuid"], uuid=data['uuid'],
hostname=vm_entry.hostname, hostname=vm_entry.hostname,
request_prefix=settings["etcd"]["request_prefix"], request_prefix=settings['etcd']['request_prefix'],
) )
shared.request_pool.put(r) shared.request_pool.put(r)
return ( return (
{"message": "VM {} Queued".format(action.title())}, {'message': 'VM {} Queued'.format(action.title())},
200, 200,
) )
else: else:
@ -222,20 +225,20 @@ class VMMigration(Resource):
validator = schemas.VmMigrationSchema(data) validator = schemas.VmMigrationSchema(data)
if validator.is_valid(): if validator.is_valid():
vm = shared.vm_pool.get(data["uuid"]) vm = shared.vm_pool.get(data['uuid'])
r = RequestEntry.from_scratch( r = RequestEntry.from_scratch(
type=RequestType.InitVMMigration, type=RequestType.InitVMMigration,
uuid=vm.uuid, uuid=vm.uuid,
hostname=join_path( hostname=join_path(
settings["etcd"]["host_prefix"], settings['etcd']['host_prefix'],
validator.destination.value, validator.destination.value,
), ),
request_prefix=settings["etcd"]["request_prefix"], request_prefix=settings['etcd']['request_prefix'],
) )
shared.request_pool.put(r) shared.request_pool.put(r)
return ( return (
{"message": "VM Migration Initialization Queued"}, {'message': 'VM Migration Initialization Queued'},
200, 200,
) )
else: else:
@ -250,26 +253,26 @@ class ListUserVM(Resource):
if validator.is_valid(): if validator.is_valid():
vms = shared.etcd_client.get_prefix( vms = shared.etcd_client.get_prefix(
settings["etcd"]["vm_prefix"], value_in_json=True settings['etcd']['vm_prefix'], value_in_json=True
) )
return_vms = [] return_vms = []
user_vms = filter( user_vms = filter(
lambda v: v.value["owner"] == data["name"], vms lambda v: v.value['owner'] == data['name'], vms
) )
for vm in user_vms: for vm in user_vms:
return_vms.append( return_vms.append(
{ {
"name": vm.value["name"], 'name': vm.value['name'],
"vm_uuid": vm.key.split("/")[-1], 'vm_uuid': vm.key.split('/')[-1],
"specs": vm.value["specs"], 'specs': vm.value['specs'],
"status": vm.value["status"], 'status': vm.value['status'],
"hostname": vm.value["hostname"], 'hostname': vm.value['hostname'],
"vnc_socket": vm.value.get("vnc_socket", None), 'vnc_socket': vm.value.get('vnc_socket', None),
} }
) )
if return_vms: if return_vms:
return {"message": return_vms}, 200 return {'message': return_vms}, 200
return {"message": "No VM found"}, 404 return {'message': 'No VM found'}, 404
else: else:
return validator.get_errors(), 400 return validator.get_errors(), 400
@ -283,22 +286,22 @@ class ListUserFiles(Resource):
if validator.is_valid(): if validator.is_valid():
files = shared.etcd_client.get_prefix( files = shared.etcd_client.get_prefix(
settings["etcd"]["file_prefix"], value_in_json=True settings['etcd']['file_prefix'], value_in_json=True
) )
return_files = [] return_files = []
user_files = list( user_files = list(
filter( filter(
lambda f: f.value["owner"] == data["name"], files lambda f: f.value['owner'] == data['name'], files
) )
) )
for file in user_files: for file in user_files:
return_files.append( return_files.append(
{ {
"filename": file.value["filename"], 'filename': file.value['filename'],
"uuid": file.key.split("/")[-1], 'uuid': file.key.split('/')[-1],
} }
) )
return {"message": return_files}, 200 return {'message': return_files}, 200
else: else:
return validator.get_errors(), 400 return validator.get_errors(), 400
@ -310,19 +313,19 @@ class CreateHost(Resource):
validator = schemas.CreateHostSchema(data) validator = schemas.CreateHostSchema(data)
if validator.is_valid(): if validator.is_valid():
host_key = join_path( host_key = join_path(
settings["etcd"]["host_prefix"], uuid4().hex settings['etcd']['host_prefix'], uuid4().hex
) )
host_entry = { host_entry = {
"specs": data["specs"], 'specs': data['specs'],
"hostname": data["hostname"], 'hostname': data['hostname'],
"status": "DEAD", 'status': 'DEAD',
"last_heartbeat": "", 'last_heartbeat': '',
} }
shared.etcd_client.put( shared.etcd_client.put(
host_key, host_entry, value_in_json=True host_key, host_entry, value_in_json=True
) )
return {"message": "Host Created"}, 200 return {'message': 'Host Created'}, 200
return validator.get_errors(), 400 return validator.get_errors(), 400
@ -333,9 +336,9 @@ class ListHost(Resource):
hosts = shared.host_pool.hosts hosts = shared.host_pool.hosts
r = { r = {
host.key: { host.key: {
"status": host.status, 'status': host.status,
"specs": host.specs, 'specs': host.specs,
"hostname": host.hostname, 'hostname': host.hostname,
} }
for host in hosts for host in hosts
} }
@ -352,29 +355,29 @@ class GetSSHKeys(Resource):
# {user_prefix}/{realm}/{name}/key/ # {user_prefix}/{realm}/{name}/key/
etcd_key = join_path( etcd_key = join_path(
settings["etcd"]["user_prefix"], settings['etcd']['user_prefix'],
data["realm"], data['realm'],
data["name"], data['name'],
"key", 'key',
) )
etcd_entry = shared.etcd_client.get_prefix( etcd_entry = shared.etcd_client.get_prefix(
etcd_key, value_in_json=True etcd_key, value_in_json=True
) )
keys = { keys = {
key.key.split("/")[-1]: key.value key.key.split('/')[-1]: key.value
for key in etcd_entry for key in etcd_entry
} }
return {"keys": keys} return {'keys': keys}
else: else:
# {user_prefix}/{realm}/{name}/key/{key_name} # {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = join_path( etcd_key = join_path(
settings["etcd"]["user_prefix"], settings['etcd']['user_prefix'],
data["realm"], data['realm'],
data["name"], data['name'],
"key", 'key',
data["key_name"], data['key_name'],
) )
etcd_entry = shared.etcd_client.get( etcd_entry = shared.etcd_client.get(
etcd_key, value_in_json=True etcd_key, value_in_json=True
@ -382,14 +385,14 @@ class GetSSHKeys(Resource):
if etcd_entry: if etcd_entry:
return { return {
"keys": { 'keys': {
etcd_entry.key.split("/")[ etcd_entry.key.split('/')[
-1 -1
]: etcd_entry.value ]: etcd_entry.value
} }
} }
else: else:
return {"keys": {}} return {'keys': {}}
else: else:
return validator.get_errors(), 400 return validator.get_errors(), 400
@ -403,27 +406,27 @@ class AddSSHKey(Resource):
# {user_prefix}/{realm}/{name}/key/{key_name} # {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = join_path( etcd_key = join_path(
settings["etcd"]["user_prefix"], settings['etcd']['user_prefix'],
data["realm"], data['realm'],
data["name"], data['name'],
"key", 'key',
data["key_name"], data['key_name'],
) )
etcd_entry = shared.etcd_client.get( etcd_entry = shared.etcd_client.get(
etcd_key, value_in_json=True etcd_key, value_in_json=True
) )
if etcd_entry: if etcd_entry:
return { return {
"message": "Key with name '{}' already exists".format( 'message': 'Key with name "{}" already exists'.format(
data["key_name"] data['key_name']
) )
} }
else: else:
# Key Not Found. It implies user' haven't added any key yet. # Key Not Found. It implies user' haven't added any key yet.
shared.etcd_client.put( shared.etcd_client.put(
etcd_key, data["key"], value_in_json=True etcd_key, data['key'], value_in_json=True
) )
return {"message": "Key added successfully"} return {'message': 'Key added successfully'}
else: else:
return validator.get_errors(), 400 return validator.get_errors(), 400
@ -437,22 +440,22 @@ class RemoveSSHKey(Resource):
# {user_prefix}/{realm}/{name}/key/{key_name} # {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = join_path( etcd_key = join_path(
settings["etcd"]["user_prefix"], settings['etcd']['user_prefix'],
data["realm"], data['realm'],
data["name"], data['name'],
"key", 'key',
data["key_name"], data['key_name'],
) )
etcd_entry = shared.etcd_client.get( etcd_entry = shared.etcd_client.get(
etcd_key, value_in_json=True etcd_key, value_in_json=True
) )
if etcd_entry: if etcd_entry:
shared.etcd_client.client.delete(etcd_key) shared.etcd_client.client.delete(etcd_key)
return {"message": "Key successfully removed."} return {'message': 'Key successfully removed.'}
else: else:
return { return {
"message": "No Key with name '{}' Exists at all.".format( 'message': 'No Key with name "{}" Exists at all.'.format(
data["key_name"] data['key_name']
) )
} }
else: else:
@ -468,50 +471,50 @@ class CreateNetwork(Resource):
if validator.is_valid(): if validator.is_valid():
network_entry = { network_entry = {
"id": counters.increment_etcd_counter( 'id': counters.increment_etcd_counter(
shared.etcd_client, "/v1/counter/vxlan" shared.etcd_client, '/v1/counter/vxlan'
), ),
"type": data["type"], 'type': data['type'],
} }
if validator.user.value: if validator.user.value:
try: try:
nb = pynetbox.api( nb = pynetbox.api(
url=settings["netbox"]["url"], url=settings['netbox']['url'],
token=settings["netbox"]["token"], token=settings['netbox']['token'],
) )
nb_prefix = nb.ipam.prefixes.get( nb_prefix = nb.ipam.prefixes.get(
prefix=settings["network"]["prefix"] prefix=settings['network']['prefix']
) )
prefix = nb_prefix.available_prefixes.create( prefix = nb_prefix.available_prefixes.create(
data={ data={
"prefix_length": int( 'prefix_length': int(
settings["network"]["prefix_length"] settings['network']['prefix_length']
), ),
"description": '{}\'s network "{}"'.format( 'description': '{}\'s network "{}"'.format(
data["name"], data["network_name"] data['name'], data['network_name']
), ),
"is_pool": True, 'is_pool': True,
} }
) )
except Exception as err: except Exception as err:
app.logger.error(err) app.logger.error(err)
return { return {
"message": "Error occured while creating network." 'message': 'Error occured while creating network.'
} }
else: else:
network_entry["ipv6"] = prefix["prefix"] network_entry['ipv6'] = prefix['prefix']
else: else:
network_entry["ipv6"] = "fd00::/64" network_entry['ipv6'] = 'fd00::/64'
network_key = join_path( network_key = join_path(
settings["etcd"]["network_prefix"], settings['etcd']['network_prefix'],
data["name"], data['name'],
data["network_name"], data['network_name'],
) )
shared.etcd_client.put( shared.etcd_client.put(
network_key, network_entry, value_in_json=True network_key, network_entry, value_in_json=True
) )
return {"message": "Network successfully added."} return {'message': 'Network successfully added.'}
else: else:
return validator.get_errors(), 400 return validator.get_errors(), 400
@ -524,48 +527,48 @@ class ListUserNetwork(Resource):
if validator.is_valid(): if validator.is_valid():
prefix = join_path( prefix = join_path(
settings["etcd"]["network_prefix"], data["name"] settings['etcd']['network_prefix'], data['name']
) )
networks = shared.etcd_client.get_prefix( networks = shared.etcd_client.get_prefix(
prefix, value_in_json=True prefix, value_in_json=True
) )
user_networks = [] user_networks = []
for net in networks: for net in networks:
net.value["name"] = net.key.split("/")[-1] net.value['name'] = net.key.split('/')[-1]
user_networks.append(net.value) user_networks.append(net.value)
return {"networks": user_networks}, 200 return {'networks': user_networks}, 200
else: else:
return validator.get_errors(), 400 return validator.get_errors(), 400
api.add_resource(CreateVM, "/vm/create") api.add_resource(CreateVM, '/vm/create')
api.add_resource(VmStatus, "/vm/status") api.add_resource(VmStatus, '/vm/status')
api.add_resource(VMAction, "/vm/action") api.add_resource(VMAction, '/vm/action')
api.add_resource(VMMigration, "/vm/migrate") api.add_resource(VMMigration, '/vm/migrate')
api.add_resource(CreateImage, "/image/create") api.add_resource(CreateImage, '/image/create')
api.add_resource(ListPublicImages, "/image/list-public") api.add_resource(ListPublicImages, '/image/list-public')
api.add_resource(ListUserVM, "/user/vms") api.add_resource(ListUserVM, '/user/vms')
api.add_resource(ListUserFiles, "/user/files") api.add_resource(ListUserFiles, '/user/files')
api.add_resource(ListUserNetwork, "/user/networks") api.add_resource(ListUserNetwork, '/user/networks')
api.add_resource(AddSSHKey, "/user/add-ssh") api.add_resource(AddSSHKey, '/user/add-ssh')
api.add_resource(RemoveSSHKey, "/user/remove-ssh") api.add_resource(RemoveSSHKey, '/user/remove-ssh')
api.add_resource(GetSSHKeys, "/user/get-ssh") api.add_resource(GetSSHKeys, '/user/get-ssh')
api.add_resource(CreateHost, "/host/create") api.add_resource(CreateHost, '/host/create')
api.add_resource(ListHost, "/host/list") api.add_resource(ListHost, '/host/list')
api.add_resource(CreateNetwork, "/network/create") api.add_resource(CreateNetwork, '/network/create')
def main(debug=False, port=None): def main(debug=False, port=None):
try: try:
image_stores = list( image_stores = list(
shared.etcd_client.get_prefix( shared.etcd_client.get_prefix(
settings["etcd"]["image_store_prefix"], value_in_json=True settings['etcd']['image_store_prefix'], value_in_json=True
) )
) )
except KeyError: except KeyError:
@ -576,27 +579,27 @@ def main(debug=False, port=None):
# #
# if not image_stores: # if not image_stores:
# data = { # data = {
# "is_public": True, # 'is_public': True,
# "type": "ceph", # 'type': 'ceph',
# "name": "images", # 'name': 'images',
# "description": "first ever public image-store", # 'description': 'first ever public image-store',
# "attributes": {"list": [], "key": [], "pool": "images"}, # 'attributes': {'list': [], 'key': [], 'pool': 'images'},
# } # }
# shared.etcd_client.put( # shared.etcd_client.put(
# join_path( # join_path(
# settings["etcd"]["image_store_prefix"], uuid4().hex # settings['etcd']['image_store_prefix'], uuid4().hex
# ), # ),
# json.dumps(data), # json.dumps(data),
# ) # )
try: try:
app.run(host="::", app.run(host='::',
port=port, port=port,
debug=debug) debug=debug)
except OSError as e: except OSError as e:
raise UncloudException("Failed to start Flask: {}".format(e)) raise UncloudException('Failed to start Flask: {}'.format(e))
if __name__ == "__main__": if __name__ == '__main__':
main() main()


@@ -322,7 +322,7 @@ class CreateVMSchema(OTPSchema):
                 "Your specified OS-SSD is not in correct units"
             )
 
-        if _cpu < 1:
+        if int(_cpu) < 1:
             self.add_error("CPU must be atleast 1")
 
         if parsed_ram < bitmath.GB(1):
@@ -528,9 +528,7 @@ class GetSSHSchema(OTPSchema):
 class CreateNetwork(OTPSchema):
     def __init__(self, data):
-        self.network_name = Field(
-            "network_name", str, data.get("network_name", KeyError)
-        )
+        self.network_name = Field("network_name", str, data.get("network_name", KeyError))
         self.type = Field("type", str, data.get("type", KeyError))
         self.user = Field("user", bool, bool(data.get("user", False)))
@@ -541,14 +539,10 @@ class CreateNetwork(OTPSchema):
         super().__init__(data, fields=fields)
 
     def network_name_validation(self):
-        network = shared.etcd_client.get(
-            os.path.join(
-                settings["etcd"]["network_prefix"],
-                self.name.value,
-                self.network_name.value,
-            ),
-            value_in_json=True,
-        )
+        print(self.name.value, self.network_name.value)
+        key = os.path.join(settings["etcd"]["network_prefix"], self.name.value, self.network_name.value)
+        print(key)
+        network = shared.etcd_client.get(key, value_in_json=True)
         if network:
             self.add_error(
                 "Network with name {} already exists".format(


@@ -0,0 +1,13 @@
+import argparse
+
+
+class BaseParser:
+    def __init__(self, command):
+        self.arg_parser = argparse.ArgumentParser(command, add_help=False)
+        self.subparser = self.arg_parser.add_subparsers(dest='{}_subcommand'.format(command))
+        self.common_args = {'add_help': False}
+
+        methods = [attr for attr in dir(self) if not attr.startswith('__')
+                   and type(getattr(self, attr)).__name__ == 'method']
+        for method in methods:
+            getattr(self, method)(**self.common_args)
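
BaseParser collects every non-dunder method on the instance and calls each one with **self.common_args (i.e. add_help=False), so a subclass can declare one method per subcommand and have all of them registered at construction time. A hypothetical subclass to illustrate the intended usage, given the BaseParser class above (the vm parser and its flags are assumptions, not part of this commit):

class VMParser(BaseParser):
    def __init__(self):
        super().__init__('vm')

    # every public method is invoked by BaseParser.__init__ with
    # **self.common_args, i.e. add_help=False
    def start(self, **kwargs):
        p = self.subparser.add_parser('start', **kwargs)
        p.add_argument('--vm-uuid', required=True)

    def stop(self, **kwargs):
        p = self.subparser.add_parser('stop', **kwargs)
        p.add_argument('--vm-uuid', required=True)


arg_parser = VMParser().arg_parser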


@@ -1,8 +1,43 @@
 import os
+import argparse
 
 from uncloud.settings import settings
 from uncloud.shared import shared
 
+arg_parser = argparse.ArgumentParser('configure', add_help=False)
+configure_subparsers = arg_parser.add_subparsers(dest='subcommand')
+
+otp_parser = configure_subparsers.add_parser('otp')
+otp_parser.add_argument('--verification-controller-url', required=True, metavar='URL')
+otp_parser.add_argument('--auth-name', required=True, metavar='OTP-NAME')
+otp_parser.add_argument('--auth-realm', required=True, metavar='OTP-REALM')
+otp_parser.add_argument('--auth-seed', required=True, metavar='OTP-SEED')
+
+network_parser = configure_subparsers.add_parser('network')
+network_parser.add_argument('--prefix-length', required=True, type=int)
+network_parser.add_argument('--prefix', required=True)
+network_parser.add_argument('--vxlan-phy-dev', required=True)
+
+netbox_parser = configure_subparsers.add_parser('netbox')
+netbox_parser.add_argument('--url', required=True)
+netbox_parser.add_argument('--token', required=True)
+
+ssh_parser = configure_subparsers.add_parser('ssh')
+ssh_parser.add_argument('--username', default='root')
+ssh_parser.add_argument('--private-key-path', default=os.path.expanduser('~/.ssh/id_rsa'),)
+
+storage_parser = configure_subparsers.add_parser('storage')
+storage_parser.add_argument('--file-dir', required=True)
+storage_parser_subparsers = storage_parser.add_subparsers(dest='storage_backend')
+
+filesystem_storage_parser = storage_parser_subparsers.add_parser('filesystem')
+filesystem_storage_parser.add_argument('--vm-dir', required=True)
+filesystem_storage_parser.add_argument('--image-dir', required=True)
+
+ceph_storage_parser = storage_parser_subparsers.add_parser('ceph')
+ceph_storage_parser.add_argument('--ceph-vm-pool', required=True)
+ceph_storage_parser.add_argument('--ceph-image-pool', required=True)
+
 
 def update_config(section, kwargs):
     uncloud_config = shared.etcd_client.get(
@@ -19,61 +54,9 @@ def update_config(section, kwargs):
     )
 
 
-def configure_parser(parser):
-    configure_subparsers = parser.add_subparsers(dest="subcommand")
-
-    otp_parser = configure_subparsers.add_parser("otp")
-    otp_parser.add_argument(
-        "--verification-controller-url", required=True, metavar="URL"
-    )
-    otp_parser.add_argument(
-        "--auth-name", required=True, metavar="OTP-NAME"
-    )
-    otp_parser.add_argument(
-        "--auth-realm", required=True, metavar="OTP-REALM"
-    )
-    otp_parser.add_argument(
-        "--auth-seed", required=True, metavar="OTP-SEED"
-    )
-
-    network_parser = configure_subparsers.add_parser("network")
-    network_parser.add_argument(
-        "--prefix-length", required=True, type=int
-    )
-    network_parser.add_argument("--prefix", required=True)
-    network_parser.add_argument("--vxlan-phy-dev", required=True)
-
-    netbox_parser = configure_subparsers.add_parser("netbox")
-    netbox_parser.add_argument("--url", required=True)
-    netbox_parser.add_argument("--token", required=True)
-
-    ssh_parser = configure_subparsers.add_parser("ssh")
-    ssh_parser.add_argument("--username", default="root")
-    ssh_parser.add_argument(
-        "--private-key-path",
-        default=os.path.expanduser("~/.ssh/id_rsa"),
-    )
-
-    storage_parser = configure_subparsers.add_parser("storage")
-    storage_parser.add_argument("--file-dir", required=True)
-    storage_parser_subparsers = storage_parser.add_subparsers(
-        dest="storage_backend"
-    )
-    filesystem_storage_parser = storage_parser_subparsers.add_parser(
-        "filesystem"
-    )
-    filesystem_storage_parser.add_argument("--vm-dir", required=True)
-    filesystem_storage_parser.add_argument("--image-dir", required=True)
-
-    ceph_storage_parser = storage_parser_subparsers.add_parser("ceph")
-    ceph_storage_parser.add_argument("--ceph-vm-pool", required=True)
-    ceph_storage_parser.add_argument("--ceph-image-pool", required=True)
-
 
 def main(**kwargs):
-    subcommand = kwargs.pop("subcommand")
+    subcommand = kwargs.pop('subcommand')
     if not subcommand:
-        pass
+        arg_parser.print_help()
     else:
         update_config(subcommand, kwargs)
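
With the parsers now defined at module level, the configure component's flow from argv to update_config is easy to trace. A small sketch of that flow (the argument values are placeholders; the otp, netbox, ssh and storage subcommands work the same way):

from uncloud.configure.main import arg_parser, main

args = arg_parser.parse_args([
    'network',
    '--prefix', 'fd00::/48',
    '--prefix-length', '64',
    '--vxlan-phy-dev', 'eth0',
])
# vars(args) == {'subcommand': 'network', 'prefix': 'fd00::/48',
#                'prefix_length': 64, 'vxlan_phy_dev': 'eth0'}
main(**vars(args))  # pops 'subcommand' and hands the rest to update_config('network', ...)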


@@ -3,6 +3,7 @@ import os
 import pathlib
 import subprocess as sp
 import time
+import argparse
 
 from uuid import uuid4
@@ -11,6 +12,9 @@ from uncloud.settings import settings
 from uncloud.shared import shared
 
+arg_parser = argparse.ArgumentParser('filescanner', add_help=False)
+
 
 def sha512sum(file: str):
     """Use sha512sum utility to compute sha512 sum of arg:file


@@ -1,6 +1,7 @@
 import argparse
 import multiprocessing as mp
 import time
 
 from uuid import uuid4
 from uncloud.common.request import RequestEntry, RequestType
@@ -12,6 +13,9 @@ from os.path import join as join_path
 from . import virtualmachine, logger
 
+arg_parser = argparse.ArgumentParser('host', add_help=False)
+arg_parser.add_argument('--hostname', required=True)
+
 
 def update_heartbeat(hostname):
     """Update Last HeartBeat Time for :param hostname: in etcd"""


@@ -1,5 +1,6 @@
 import json
 import os
+import argparse
 import subprocess as sp
 
 from os.path import join as join_path
@@ -8,6 +9,9 @@ from uncloud.shared import shared
 from uncloud.imagescanner import logger
 
+arg_parser = argparse.ArgumentParser('imagescanner', add_help=False)
+
 
 def qemu_img_type(path):
     qemu_img_info_command = [
         "qemu-img",


@@ -1,4 +1,5 @@
 import os
+import argparse
 
 from flask import Flask, request
 from flask_restful import Resource, Api
@@ -12,6 +13,9 @@ api = Api(app)
 app.logger.handlers.clear()
 
+arg_parser = argparse.ArgumentParser('metadata', add_help=False)
+arg_parser.add_argument('--port', '-p', default=80, help='By default bind to port 80')
+
 
 @app.errorhandler(Exception)
 def handle_exception(e):


@@ -4,17 +4,16 @@
 # 2. Introduce a status endpoint of the scheduler -
 # maybe expose a prometheus compatible output
 
+import argparse
+
 from uncloud.common.request import RequestEntry, RequestType
 from uncloud.shared import shared
 from uncloud.settings import settings
-from .helper import (
-    dead_host_mitigation,
-    dead_host_detection,
-    assign_host,
-    NoSuitableHostFound,
-)
+from .helper import (dead_host_mitigation, dead_host_detection, assign_host, NoSuitableHostFound)
 from . import logger
 
+arg_parser = argparse.ArgumentParser('scheduler', add_help=False)
+
 
 def main(debug=False):
     for request_iterator in [


@@ -3,6 +3,8 @@ import logging
 import sys
 import os
 
+from datetime import datetime
+
 from uncloud.common.etcd_wrapper import Etcd3Wrapper
 
 logger = logging.getLogger(__name__)
@@ -29,10 +31,13 @@ class Settings(object):
             "UCLOUD_CONF_DIR", os.path.expanduser("~/uncloud/")
         )
         self.config_file = os.path.join(conf_dir, conf_name)
 
         self.config_parser = CustomConfigParser(allow_no_value=True)
         self.config_key = config_key
+        # this is used to cache config from etcd for 1 minutes. Without this we
+        # would make a lot of requests to etcd which slows down everything.
+        self.last_config_update = datetime.fromtimestamp(0)
+
         self.read_internal_values()
         try:
             self.config_parser.read(self.config_file)
@@ -102,25 +107,22 @@ class Settings(object):
     def read_values_from_etcd(self):
         etcd_client = self.get_etcd_client()
-        config_from_etcd = etcd_client.get(
-            self.config_key, value_in_json=True
-        )
-        if config_from_etcd:
-            self.config_parser.read_dict(config_from_etcd.value)
-        else:
-            raise KeyError(
-                "Key '{}' not found in etcd. Please configure uncloud.".format(
-                    self.config_key
-                )
-            )
+        if (datetime.utcnow() - self.last_config_update).total_seconds() > 60:
+            config_from_etcd = etcd_client.get(self.config_key, value_in_json=True)
+            if config_from_etcd:
+                self.config_parser.read_dict(config_from_etcd.value)
+                self.last_config_update = datetime.utcnow()
+            else:
+                raise KeyError("Key '{}' not found in etcd. Please configure uncloud.".format(self.config_key))
 
     def __getitem__(self, key):
         # Allow failing to read from etcd if we have
         # it locally
-        try:
-            self.read_values_from_etcd()
-        except KeyError as e:
-            pass
+        if key not in self.config_parser.sections():
+            try:
+                self.read_values_from_etcd()
+            except KeyError as e:
+                pass
         return self.config_parser[key]
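
The change above makes Settings cache its etcd reads: __getitem__ only goes back to etcd when the requested section is missing locally, and read_values_from_etcd refuses to refetch more than once every 60 seconds. The same time-based guard in isolation, as a standalone sketch (not uncloud code):

from datetime import datetime


class CachedConfig:
    """Standalone sketch of the refresh guard used in Settings above."""

    def __init__(self, fetch, max_age_seconds=60):
        self._fetch = fetch                    # callable returning a config dict
        self._max_age = max_age_seconds
        self._data = {}
        self._last_update = datetime.fromtimestamp(0)  # epoch, so the first read always refreshes

    def refresh(self):
        if (datetime.utcnow() - self._last_update).total_seconds() > self._max_age:
            value = self._fetch()
            if value:
                self._data.update(value)
                self._last_update = datetime.utcnow()
            else:
                raise KeyError('no config found in backing store')

    def __getitem__(self, key):
        # only hit the backing store when the key is not cached locally
        if key not in self._data:
            try:
                self.refresh()
            except KeyError:
                pass
        return self._data[key]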