uncloud CLI converted to argparse

This commit is contained in:
ahmadbilalkhalid 2020-01-03 18:38:59 +05:00
parent 50fb135726
commit 3296e524cc
13 changed files with 284 additions and 287 deletions

View file

@ -1,7 +1,11 @@
[etcd]
url = localhost
port = 2379
ca_cert
cert_cert
cert_key
[client]
name = replace_me
realm = replace_me
seed = replace_me

View file

@ -1,14 +1,13 @@
#!/usr/bin/env python3
import argparse
import logging
import importlib
import multiprocessing as mp
import sys
from logging.handlers import SysLogHandler
from uncloud.configure.main import configure_parser
import importlib
import argparse
import multiprocessing as mp
from uncloud import UncloudException
from contextlib import suppress
def exception_hook(exc_type, exc_value, exc_traceback):
logging.getLogger(__name__).error(
@ -19,40 +18,25 @@ def exception_hook(exc_type, exc_value, exc_traceback):
sys.excepthook = exception_hook
if __name__ == '__main__':
# Setting up root logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument("--debug", "-d", action='store_true')
arg_parser = argparse.ArgumentParser()
subparsers = arg_parser.add_subparsers(dest='command')
subparsers = arg_parser.add_subparsers(dest="command")
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('--debug', '-d', action='store_true', default=False,
help='More verbose logging')
api_parser = subparsers.add_parser("api", parents=[parent_parser])
api_parser.add_argument("--port", "-p")
for component in ['api', 'scheduler', 'host', 'filescanner', 'imagescanner',
'metadata', 'configure', 'cli']:
mod = importlib.import_module('uncloud.{}.main'.format(component))
parser = getattr(mod, 'arg_parser')
subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser])
host_parser = subparsers.add_parser("host")
host_parser.add_argument("--hostname", required=True)
scheduler_parser = subparsers.add_parser("scheduler", parents=[parent_parser])
filescanner_parser = subparsers.add_parser("filescanner")
imagescanner_parser = subparsers.add_parser("imagescanner")
metadata_parser = subparsers.add_parser("metadata")
metadata_parser.add_argument("--port", "-p")
config_parser = subparsers.add_parser("configure")
configure_parser(config_parser)
args = arg_parser.parse_args()
if not args.command:
arg_parser.print_help()
else:
@ -62,12 +46,11 @@ if __name__ == '__main__':
# errors out, so the following command configure multiprocessing
# module to not inherit anything from parent.
mp.set_start_method('spawn')
arguments = vars(args)
try:
name = arguments.pop('command')
mod = importlib.import_module("uncloud.{}.main".format(name))
main = getattr(mod, "main")
mod = importlib.import_module('uncloud.{}.main'.format(name))
main = getattr(mod, 'main')
main(**arguments)
except UncloudException as err:
logger.error(err)

View file

@ -40,7 +40,7 @@ setup(
"pynetbox",
"colorama",
"etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3",
"marshmallow",
"marshmallow"
],
scripts=["scripts/uncloud"],
data_files=[

View file

@ -1,6 +1,7 @@
import json
import pynetbox
import logging
import argparse
from uuid import uuid4
from os.path import join as join_path
@ -14,7 +15,6 @@ from uncloud.common.vm import VMStatus
from uncloud.common.request import RequestEntry, RequestType
from uncloud.settings import settings
from uncloud.shared import shared
from . import schemas
from .helper import generate_mac, mac2ipv6
from uncloud import UncloudException
@ -25,6 +25,9 @@ app = Flask(__name__)
api = Api(app)
app.logger.handlers.clear()
arg_parser = argparse.ArgumentParser('api', add_help=False)
arg_parser.add_argument('--port', '-p')
@app.errorhandler(Exception)
def handle_exception(e):
@ -34,11 +37,11 @@ def handle_exception(e):
return e
# now you're handling non-HTTP exceptions only
return {"message": "Server Error"}, 500
return {'message': 'Server Error'}, 500
class CreateVM(Resource):
"""API Request to Handle Creation of VM"""
'''API Request to Handle Creation of VM'''
@staticmethod
def post():
@ -46,33 +49,33 @@ class CreateVM(Resource):
validator = schemas.CreateVMSchema(data)
if validator.is_valid():
vm_uuid = uuid4().hex
vm_key = join_path(settings["etcd"]["vm_prefix"], vm_uuid)
vm_key = join_path(settings['etcd']['vm_prefix'], vm_uuid)
specs = {
"cpu": validator.specs["cpu"],
"ram": validator.specs["ram"],
"os-ssd": validator.specs["os-ssd"],
"hdd": validator.specs["hdd"],
'cpu': validator.specs['cpu'],
'ram': validator.specs['ram'],
'os-ssd': validator.specs['os-ssd'],
'hdd': validator.specs['hdd'],
}
macs = [generate_mac() for _ in range(len(data["network"]))]
macs = [generate_mac() for _ in range(len(data['network']))]
tap_ids = [
counters.increment_etcd_counter(
shared.etcd_client, "/v1/counter/tap"
shared.etcd_client, '/v1/counter/tap'
)
for _ in range(len(data["network"]))
for _ in range(len(data['network']))
]
vm_entry = {
"name": data["vm_name"],
"owner": data["name"],
"owner_realm": data["realm"],
"specs": specs,
"hostname": "",
"status": VMStatus.stopped,
"image_uuid": validator.image_uuid,
"log": [],
"vnc_socket": "",
"network": list(zip(data["network"], macs, tap_ids)),
"metadata": {"ssh-keys": []},
"in_migration": False,
'name': data['vm_name'],
'owner': data['name'],
'owner_realm': data['realm'],
'specs': specs,
'hostname': '',
'status': VMStatus.stopped,
'image_uuid': validator.image_uuid,
'log': [],
'vnc_socket': '',
'network': list(zip(data['network'], macs, tap_ids)),
'metadata': {'ssh-keys': []},
'in_migration': False,
}
shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
@ -80,11 +83,11 @@ class CreateVM(Resource):
r = RequestEntry.from_scratch(
type=RequestType.ScheduleVM,
uuid=vm_uuid,
request_prefix=settings["etcd"]["request_prefix"],
request_prefix=settings['etcd']['request_prefix'],
)
shared.request_pool.put(r)
return {"message": "VM Creation Queued"}, 200
return {'message': 'VM Creation Queued'}, 200
return validator.get_errors(), 400
@ -95,24 +98,24 @@ class VmStatus(Resource):
validator = schemas.VMStatusSchema(data)
if validator.is_valid():
vm = shared.vm_pool.get(
join_path(settings["etcd"]["vm_prefix"], data["uuid"])
join_path(settings['etcd']['vm_prefix'], data['uuid'])
)
vm_value = vm.value.copy()
vm_value["ip"] = []
vm_value['ip'] = []
for network_mac_and_tap in vm.network:
network_name, mac, tap = network_mac_and_tap
network = shared.etcd_client.get(
join_path(
settings["etcd"]["network_prefix"],
data["name"],
settings['etcd']['network_prefix'],
data['name'],
network_name,
),
value_in_json=True,
)
ipv6_addr = (
network.value.get("ipv6").split("::")[0] + "::"
network.value.get('ipv6').split('::')[0] + '::'
)
vm_value["ip"].append(mac2ipv6(mac, ipv6_addr))
vm_value['ip'].append(mac2ipv6(mac, ipv6_addr))
vm.value = vm_value
return vm.value
else:
@ -126,26 +129,26 @@ class CreateImage(Resource):
validator = schemas.CreateImageSchema(data)
if validator.is_valid():
file_entry = shared.etcd_client.get(
join_path(settings["etcd"]["file_prefix"], data["uuid"])
join_path(settings['etcd']['file_prefix'], data['uuid'])
)
file_entry_value = json.loads(file_entry.value)
image_entry_json = {
"status": "TO_BE_CREATED",
"owner": file_entry_value["owner"],
"filename": file_entry_value["filename"],
"name": data["name"],
"store_name": data["image_store"],
"visibility": "public",
'status': 'TO_BE_CREATED',
'owner': file_entry_value['owner'],
'filename': file_entry_value['filename'],
'name': data['name'],
'store_name': data['image_store'],
'visibility': 'public',
}
shared.etcd_client.put(
join_path(
settings["etcd"]["image_prefix"], data["uuid"]
settings['etcd']['image_prefix'], data['uuid']
),
json.dumps(image_entry_json),
)
return {"message": "Image queued for creation."}
return {'message': 'Image queued for creation.'}
return validator.get_errors(), 400
@ -153,15 +156,15 @@ class ListPublicImages(Resource):
@staticmethod
def get():
images = shared.etcd_client.get_prefix(
settings["etcd"]["image_prefix"], value_in_json=True
settings['etcd']['image_prefix'], value_in_json=True
)
r = {"images": []}
r = {'images': []}
for image in images:
image_key = "{}:{}".format(
image.value["store_name"], image.value["name"]
image_key = '{}:{}'.format(
image.value['store_name'], image.value['name']
)
r["images"].append(
{"name": image_key, "status": image.value["status"]}
r['images'].append(
{'name': image_key, 'status': image.value['status']}
)
return r, 200
@ -174,14 +177,14 @@ class VMAction(Resource):
if validator.is_valid():
vm_entry = shared.vm_pool.get(
join_path(settings["etcd"]["vm_prefix"], data["uuid"])
join_path(settings['etcd']['vm_prefix'], data['uuid'])
)
action = data["action"]
action = data['action']
if action == "start":
action = "schedule"
if action == 'start':
action = 'schedule'
if action == "delete" and vm_entry.hostname == "":
if action == 'delete' and vm_entry.hostname == '':
if shared.storage_handler.is_vm_image_exists(
vm_entry.uuid
):
@ -190,25 +193,25 @@ class VMAction(Resource):
)
if r_status:
shared.etcd_client.client.delete(vm_entry.key)
return {"message": "VM successfully deleted"}
return {'message': 'VM successfully deleted'}
else:
logger.error(
"Some Error Occurred while deleting VM"
'Some Error Occurred while deleting VM'
)
return {"message": "VM deletion unsuccessfull"}
return {'message': 'VM deletion unsuccessfull'}
else:
shared.etcd_client.client.delete(vm_entry.key)
return {"message": "VM successfully deleted"}
return {'message': 'VM successfully deleted'}
r = RequestEntry.from_scratch(
type="{}VM".format(action.title()),
uuid=data["uuid"],
type='{}VM'.format(action.title()),
uuid=data['uuid'],
hostname=vm_entry.hostname,
request_prefix=settings["etcd"]["request_prefix"],
request_prefix=settings['etcd']['request_prefix'],
)
shared.request_pool.put(r)
return (
{"message": "VM {} Queued".format(action.title())},
{'message': 'VM {} Queued'.format(action.title())},
200,
)
else:
@ -222,20 +225,20 @@ class VMMigration(Resource):
validator = schemas.VmMigrationSchema(data)
if validator.is_valid():
vm = shared.vm_pool.get(data["uuid"])
vm = shared.vm_pool.get(data['uuid'])
r = RequestEntry.from_scratch(
type=RequestType.InitVMMigration,
uuid=vm.uuid,
hostname=join_path(
settings["etcd"]["host_prefix"],
settings['etcd']['host_prefix'],
validator.destination.value,
),
request_prefix=settings["etcd"]["request_prefix"],
request_prefix=settings['etcd']['request_prefix'],
)
shared.request_pool.put(r)
return (
{"message": "VM Migration Initialization Queued"},
{'message': 'VM Migration Initialization Queued'},
200,
)
else:
@ -250,26 +253,26 @@ class ListUserVM(Resource):
if validator.is_valid():
vms = shared.etcd_client.get_prefix(
settings["etcd"]["vm_prefix"], value_in_json=True
settings['etcd']['vm_prefix'], value_in_json=True
)
return_vms = []
user_vms = filter(
lambda v: v.value["owner"] == data["name"], vms
lambda v: v.value['owner'] == data['name'], vms
)
for vm in user_vms:
return_vms.append(
{
"name": vm.value["name"],
"vm_uuid": vm.key.split("/")[-1],
"specs": vm.value["specs"],
"status": vm.value["status"],
"hostname": vm.value["hostname"],
"vnc_socket": vm.value.get("vnc_socket", None),
'name': vm.value['name'],
'vm_uuid': vm.key.split('/')[-1],
'specs': vm.value['specs'],
'status': vm.value['status'],
'hostname': vm.value['hostname'],
'vnc_socket': vm.value.get('vnc_socket', None),
}
)
if return_vms:
return {"message": return_vms}, 200
return {"message": "No VM found"}, 404
return {'message': return_vms}, 200
return {'message': 'No VM found'}, 404
else:
return validator.get_errors(), 400
@ -283,22 +286,22 @@ class ListUserFiles(Resource):
if validator.is_valid():
files = shared.etcd_client.get_prefix(
settings["etcd"]["file_prefix"], value_in_json=True
settings['etcd']['file_prefix'], value_in_json=True
)
return_files = []
user_files = list(
filter(
lambda f: f.value["owner"] == data["name"], files
lambda f: f.value['owner'] == data['name'], files
)
)
for file in user_files:
return_files.append(
{
"filename": file.value["filename"],
"uuid": file.key.split("/")[-1],
'filename': file.value['filename'],
'uuid': file.key.split('/')[-1],
}
)
return {"message": return_files}, 200
return {'message': return_files}, 200
else:
return validator.get_errors(), 400
@ -310,19 +313,19 @@ class CreateHost(Resource):
validator = schemas.CreateHostSchema(data)
if validator.is_valid():
host_key = join_path(
settings["etcd"]["host_prefix"], uuid4().hex
settings['etcd']['host_prefix'], uuid4().hex
)
host_entry = {
"specs": data["specs"],
"hostname": data["hostname"],
"status": "DEAD",
"last_heartbeat": "",
'specs': data['specs'],
'hostname': data['hostname'],
'status': 'DEAD',
'last_heartbeat': '',
}
shared.etcd_client.put(
host_key, host_entry, value_in_json=True
)
return {"message": "Host Created"}, 200
return {'message': 'Host Created'}, 200
return validator.get_errors(), 400
@ -333,9 +336,9 @@ class ListHost(Resource):
hosts = shared.host_pool.hosts
r = {
host.key: {
"status": host.status,
"specs": host.specs,
"hostname": host.hostname,
'status': host.status,
'specs': host.specs,
'hostname': host.hostname,
}
for host in hosts
}
@ -352,29 +355,29 @@ class GetSSHKeys(Resource):
# {user_prefix}/{realm}/{name}/key/
etcd_key = join_path(
settings["etcd"]["user_prefix"],
data["realm"],
data["name"],
"key",
settings['etcd']['user_prefix'],
data['realm'],
data['name'],
'key',
)
etcd_entry = shared.etcd_client.get_prefix(
etcd_key, value_in_json=True
)
keys = {
key.key.split("/")[-1]: key.value
key.key.split('/')[-1]: key.value
for key in etcd_entry
}
return {"keys": keys}
return {'keys': keys}
else:
# {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = join_path(
settings["etcd"]["user_prefix"],
data["realm"],
data["name"],
"key",
data["key_name"],
settings['etcd']['user_prefix'],
data['realm'],
data['name'],
'key',
data['key_name'],
)
etcd_entry = shared.etcd_client.get(
etcd_key, value_in_json=True
@ -382,14 +385,14 @@ class GetSSHKeys(Resource):
if etcd_entry:
return {
"keys": {
etcd_entry.key.split("/")[
'keys': {
etcd_entry.key.split('/')[
-1
]: etcd_entry.value
}
}
else:
return {"keys": {}}
return {'keys': {}}
else:
return validator.get_errors(), 400
@ -403,27 +406,27 @@ class AddSSHKey(Resource):
# {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = join_path(
settings["etcd"]["user_prefix"],
data["realm"],
data["name"],
"key",
data["key_name"],
settings['etcd']['user_prefix'],
data['realm'],
data['name'],
'key',
data['key_name'],
)
etcd_entry = shared.etcd_client.get(
etcd_key, value_in_json=True
)
if etcd_entry:
return {
"message": "Key with name '{}' already exists".format(
data["key_name"]
'message': 'Key with name "{}" already exists'.format(
data['key_name']
)
}
else:
# Key Not Found. It implies the user hasn't added any key yet.
shared.etcd_client.put(
etcd_key, data["key"], value_in_json=True
etcd_key, data['key'], value_in_json=True
)
return {"message": "Key added successfully"}
return {'message': 'Key added successfully'}
else:
return validator.get_errors(), 400
@ -437,22 +440,22 @@ class RemoveSSHKey(Resource):
# {user_prefix}/{realm}/{name}/key/{key_name}
etcd_key = join_path(
settings["etcd"]["user_prefix"],
data["realm"],
data["name"],
"key",
data["key_name"],
settings['etcd']['user_prefix'],
data['realm'],
data['name'],
'key',
data['key_name'],
)
etcd_entry = shared.etcd_client.get(
etcd_key, value_in_json=True
)
if etcd_entry:
shared.etcd_client.client.delete(etcd_key)
return {"message": "Key successfully removed."}
return {'message': 'Key successfully removed.'}
else:
return {
"message": "No Key with name '{}' Exists at all.".format(
data["key_name"]
'message': 'No Key with name "{}" Exists at all.'.format(
data['key_name']
)
}
else:
@ -468,50 +471,50 @@ class CreateNetwork(Resource):
if validator.is_valid():
network_entry = {
"id": counters.increment_etcd_counter(
shared.etcd_client, "/v1/counter/vxlan"
'id': counters.increment_etcd_counter(
shared.etcd_client, '/v1/counter/vxlan'
),
"type": data["type"],
'type': data['type'],
}
if validator.user.value:
try:
nb = pynetbox.api(
url=settings["netbox"]["url"],
token=settings["netbox"]["token"],
url=settings['netbox']['url'],
token=settings['netbox']['token'],
)
nb_prefix = nb.ipam.prefixes.get(
prefix=settings["network"]["prefix"]
prefix=settings['network']['prefix']
)
prefix = nb_prefix.available_prefixes.create(
data={
"prefix_length": int(
settings["network"]["prefix_length"]
'prefix_length': int(
settings['network']['prefix_length']
),
"description": '{}\'s network "{}"'.format(
data["name"], data["network_name"]
'description': '{}\'s network "{}"'.format(
data['name'], data['network_name']
),
"is_pool": True,
'is_pool': True,
}
)
except Exception as err:
app.logger.error(err)
return {
"message": "Error occured while creating network."
'message': 'Error occured while creating network.'
}
else:
network_entry["ipv6"] = prefix["prefix"]
network_entry['ipv6'] = prefix['prefix']
else:
network_entry["ipv6"] = "fd00::/64"
network_entry['ipv6'] = 'fd00::/64'
network_key = join_path(
settings["etcd"]["network_prefix"],
data["name"],
data["network_name"],
settings['etcd']['network_prefix'],
data['name'],
data['network_name'],
)
shared.etcd_client.put(
network_key, network_entry, value_in_json=True
)
return {"message": "Network successfully added."}
return {'message': 'Network successfully added.'}
else:
return validator.get_errors(), 400
@ -524,48 +527,48 @@ class ListUserNetwork(Resource):
if validator.is_valid():
prefix = join_path(
settings["etcd"]["network_prefix"], data["name"]
settings['etcd']['network_prefix'], data['name']
)
networks = shared.etcd_client.get_prefix(
prefix, value_in_json=True
)
user_networks = []
for net in networks:
net.value["name"] = net.key.split("/")[-1]
net.value['name'] = net.key.split('/')[-1]
user_networks.append(net.value)
return {"networks": user_networks}, 200
return {'networks': user_networks}, 200
else:
return validator.get_errors(), 400
api.add_resource(CreateVM, "/vm/create")
api.add_resource(VmStatus, "/vm/status")
api.add_resource(CreateVM, '/vm/create')
api.add_resource(VmStatus, '/vm/status')
api.add_resource(VMAction, "/vm/action")
api.add_resource(VMMigration, "/vm/migrate")
api.add_resource(VMAction, '/vm/action')
api.add_resource(VMMigration, '/vm/migrate')
api.add_resource(CreateImage, "/image/create")
api.add_resource(ListPublicImages, "/image/list-public")
api.add_resource(CreateImage, '/image/create')
api.add_resource(ListPublicImages, '/image/list-public')
api.add_resource(ListUserVM, "/user/vms")
api.add_resource(ListUserFiles, "/user/files")
api.add_resource(ListUserNetwork, "/user/networks")
api.add_resource(ListUserVM, '/user/vms')
api.add_resource(ListUserFiles, '/user/files')
api.add_resource(ListUserNetwork, '/user/networks')
api.add_resource(AddSSHKey, "/user/add-ssh")
api.add_resource(RemoveSSHKey, "/user/remove-ssh")
api.add_resource(GetSSHKeys, "/user/get-ssh")
api.add_resource(AddSSHKey, '/user/add-ssh')
api.add_resource(RemoveSSHKey, '/user/remove-ssh')
api.add_resource(GetSSHKeys, '/user/get-ssh')
api.add_resource(CreateHost, "/host/create")
api.add_resource(ListHost, "/host/list")
api.add_resource(CreateHost, '/host/create')
api.add_resource(ListHost, '/host/list')
api.add_resource(CreateNetwork, "/network/create")
api.add_resource(CreateNetwork, '/network/create')
def main(debug=False, port=None):
try:
image_stores = list(
shared.etcd_client.get_prefix(
settings["etcd"]["image_store_prefix"], value_in_json=True
settings['etcd']['image_store_prefix'], value_in_json=True
)
)
except KeyError:
@ -576,27 +579,27 @@ def main(debug=False, port=None):
#
# if not image_stores:
# data = {
# "is_public": True,
# "type": "ceph",
# "name": "images",
# "description": "first ever public image-store",
# "attributes": {"list": [], "key": [], "pool": "images"},
# 'is_public': True,
# 'type': 'ceph',
# 'name': 'images',
# 'description': 'first ever public image-store',
# 'attributes': {'list': [], 'key': [], 'pool': 'images'},
# }
# shared.etcd_client.put(
# join_path(
# settings["etcd"]["image_store_prefix"], uuid4().hex
# settings['etcd']['image_store_prefix'], uuid4().hex
# ),
# json.dumps(data),
# )
try:
app.run(host="::",
app.run(host='::',
port=port,
debug=debug)
except OSError as e:
raise UncloudException("Failed to start Flask: {}".format(e))
raise UncloudException('Failed to start Flask: {}'.format(e))
if __name__ == "__main__":
if __name__ == '__main__':
main()

View file

@ -322,7 +322,7 @@ class CreateVMSchema(OTPSchema):
"Your specified OS-SSD is not in correct units"
)
if _cpu < 1:
if int(_cpu) < 1:
self.add_error("CPU must be atleast 1")
if parsed_ram < bitmath.GB(1):
@ -528,9 +528,7 @@ class GetSSHSchema(OTPSchema):
class CreateNetwork(OTPSchema):
def __init__(self, data):
self.network_name = Field(
"network_name", str, data.get("network_name", KeyError)
)
self.network_name = Field("network_name", str, data.get("network_name", KeyError))
self.type = Field("type", str, data.get("type", KeyError))
self.user = Field("user", bool, bool(data.get("user", False)))
@ -541,14 +539,10 @@ class CreateNetwork(OTPSchema):
super().__init__(data, fields=fields)
def network_name_validation(self):
network = shared.etcd_client.get(
os.path.join(
settings["etcd"]["network_prefix"],
self.name.value,
self.network_name.value,
),
value_in_json=True,
)
print(self.name.value, self.network_name.value)
key = os.path.join(settings["etcd"]["network_prefix"], self.name.value, self.network_name.value)
print(key)
network = shared.etcd_client.get(key, value_in_json=True)
if network:
self.add_error(
"Network with name {} already exists".format(

View file

@ -0,0 +1,13 @@
import argparse
class BaseParser:
    """Base for per-component CLI argument parsers.

    Builds an ``argparse`` parser named after *command* with a subparser
    group whose dest is ``'<command>_subcommand'``, then invokes every
    public method found on the instance, forwarding ``common_args`` —
    presumably so subclasses register their subcommands simply by
    defining methods (TODO confirm against subclasses).
    """

    def __init__(self, command):
        self.arg_parser = argparse.ArgumentParser(command, add_help=False)
        self.subparser = self.arg_parser.add_subparsers(dest='{}_subcommand'.format(command))
        # Keyword arguments forwarded to every registration hook below.
        self.common_args = {'add_help': False}

        # dir() yields attributes in sorted order; call each public
        # bound method as a subcommand-registration hook.
        for attr in dir(self):
            if attr.startswith('__'):
                continue
            member = getattr(self, attr)
            if type(member).__name__ == 'method':
                member(**self.common_args)

View file

@ -1,8 +1,43 @@
import os
import argparse
from uncloud.settings import settings
from uncloud.shared import shared
# Module-level parser for the `configure` component. add_help is
# disabled because this parser is composed into the top-level uncloud
# CLI via parents=[...].
arg_parser = argparse.ArgumentParser('configure', add_help=False)
configure_subparsers = arg_parser.add_subparsers(dest='subcommand')

# `configure otp` — credentials for the OTP verification controller.
otp_parser = configure_subparsers.add_parser('otp')
otp_parser.add_argument('--verification-controller-url', required=True, metavar='URL')
otp_parser.add_argument('--auth-name', required=True, metavar='OTP-NAME')
otp_parser.add_argument('--auth-realm', required=True, metavar='OTP-REALM')
otp_parser.add_argument('--auth-seed', required=True, metavar='OTP-SEED')

# `configure network` — IPv6 prefix and VXLAN physical device settings.
network_parser = configure_subparsers.add_parser('network')
network_parser.add_argument('--prefix-length', required=True, type=int)
network_parser.add_argument('--prefix', required=True)
network_parser.add_argument('--vxlan-phy-dev', required=True)

# `configure netbox` — NetBox API endpoint and token.
netbox_parser = configure_subparsers.add_parser('netbox')
netbox_parser.add_argument('--url', required=True)
netbox_parser.add_argument('--token', required=True)

# `configure ssh` — SSH credentials used to reach hosts.
ssh_parser = configure_subparsers.add_parser('ssh')
ssh_parser.add_argument('--username', default='root')
ssh_parser.add_argument('--private-key-path', default=os.path.expanduser('~/.ssh/id_rsa'),)

# `configure storage <backend>` — file directory plus a nested
# subcommand selecting the storage backend (filesystem or ceph).
storage_parser = configure_subparsers.add_parser('storage')
storage_parser.add_argument('--file-dir', required=True)
storage_parser_subparsers = storage_parser.add_subparsers(dest='storage_backend')

filesystem_storage_parser = storage_parser_subparsers.add_parser('filesystem')
filesystem_storage_parser.add_argument('--vm-dir', required=True)
filesystem_storage_parser.add_argument('--image-dir', required=True)

ceph_storage_parser = storage_parser_subparsers.add_parser('ceph')
ceph_storage_parser.add_argument('--ceph-vm-pool', required=True)
ceph_storage_parser.add_argument('--ceph-image-pool', required=True)
def update_config(section, kwargs):
uncloud_config = shared.etcd_client.get(
@ -19,61 +54,9 @@ def update_config(section, kwargs):
)
def configure_parser(parser):
configure_subparsers = parser.add_subparsers(dest="subcommand")
otp_parser = configure_subparsers.add_parser("otp")
otp_parser.add_argument(
"--verification-controller-url", required=True, metavar="URL"
)
otp_parser.add_argument(
"--auth-name", required=True, metavar="OTP-NAME"
)
otp_parser.add_argument(
"--auth-realm", required=True, metavar="OTP-REALM"
)
otp_parser.add_argument(
"--auth-seed", required=True, metavar="OTP-SEED"
)
network_parser = configure_subparsers.add_parser("network")
network_parser.add_argument(
"--prefix-length", required=True, type=int
)
network_parser.add_argument("--prefix", required=True)
network_parser.add_argument("--vxlan-phy-dev", required=True)
netbox_parser = configure_subparsers.add_parser("netbox")
netbox_parser.add_argument("--url", required=True)
netbox_parser.add_argument("--token", required=True)
ssh_parser = configure_subparsers.add_parser("ssh")
ssh_parser.add_argument("--username", default="root")
ssh_parser.add_argument(
"--private-key-path",
default=os.path.expanduser("~/.ssh/id_rsa"),
)
storage_parser = configure_subparsers.add_parser("storage")
storage_parser.add_argument("--file-dir", required=True)
storage_parser_subparsers = storage_parser.add_subparsers(
dest="storage_backend"
)
filesystem_storage_parser = storage_parser_subparsers.add_parser(
"filesystem"
)
filesystem_storage_parser.add_argument("--vm-dir", required=True)
filesystem_storage_parser.add_argument("--image-dir", required=True)
ceph_storage_parser = storage_parser_subparsers.add_parser("ceph")
ceph_storage_parser.add_argument("--ceph-vm-pool", required=True)
ceph_storage_parser.add_argument("--ceph-image-pool", required=True)
def main(**kwargs):
subcommand = kwargs.pop("subcommand")
subcommand = kwargs.pop('subcommand')
if not subcommand:
pass
arg_parser.print_help()
else:
update_config(subcommand, kwargs)

View file

@ -3,6 +3,7 @@ import os
import pathlib
import subprocess as sp
import time
import argparse
from uuid import uuid4
@ -11,6 +12,9 @@ from uncloud.settings import settings
from uncloud.shared import shared
arg_parser = argparse.ArgumentParser('filescanner', add_help=False)
def sha512sum(file: str):
"""Use sha512sum utility to compute sha512 sum of arg:file

View file

@ -1,6 +1,7 @@
import argparse
import multiprocessing as mp
import time
from uuid import uuid4
from uncloud.common.request import RequestEntry, RequestType
@ -12,6 +13,9 @@ from os.path import join as join_path
from . import virtualmachine, logger
arg_parser = argparse.ArgumentParser('host', add_help=False)
arg_parser.add_argument('--hostname', required=True)
def update_heartbeat(hostname):
"""Update Last HeartBeat Time for :param hostname: in etcd"""

View file

@ -1,5 +1,6 @@
import json
import os
import argparse
import subprocess as sp
from os.path import join as join_path
@ -8,6 +9,9 @@ from uncloud.shared import shared
from uncloud.imagescanner import logger
arg_parser = argparse.ArgumentParser('imagescanner', add_help=False)
def qemu_img_type(path):
qemu_img_info_command = [
"qemu-img",

View file

@ -1,4 +1,5 @@
import os
import argparse
from flask import Flask, request
from flask_restful import Resource, Api
@ -12,6 +13,9 @@ api = Api(app)
app.logger.handlers.clear()
arg_parser = argparse.ArgumentParser('metadata', add_help=False)
arg_parser.add_argument('--port', '-p', default=80, help='By default bind to port 80')
@app.errorhandler(Exception)
def handle_exception(e):

View file

@ -4,17 +4,16 @@
# 2. Introduce a status endpoint of the scheduler -
# maybe expose a prometheus compatible output
import argparse
from uncloud.common.request import RequestEntry, RequestType
from uncloud.shared import shared
from uncloud.settings import settings
from .helper import (
dead_host_mitigation,
dead_host_detection,
assign_host,
NoSuitableHostFound,
)
from .helper import (dead_host_mitigation, dead_host_detection, assign_host, NoSuitableHostFound)
from . import logger
arg_parser = argparse.ArgumentParser('scheduler', add_help=False)
def main(debug=False):
for request_iterator in [

View file

@ -3,6 +3,8 @@ import logging
import sys
import os
from datetime import datetime
from uncloud.common.etcd_wrapper import Etcd3Wrapper
logger = logging.getLogger(__name__)
@ -29,10 +31,13 @@ class Settings(object):
"UCLOUD_CONF_DIR", os.path.expanduser("~/uncloud/")
)
self.config_file = os.path.join(conf_dir, conf_name)
self.config_parser = CustomConfigParser(allow_no_value=True)
self.config_key = config_key
# this is used to cache config from etcd for 1 minute. Without this we
# would make a lot of requests to etcd, which slows down everything.
self.last_config_update = datetime.fromtimestamp(0)
self.read_internal_values()
try:
self.config_parser.read(self.config_file)
@ -102,25 +107,22 @@ class Settings(object):
def read_values_from_etcd(self):
etcd_client = self.get_etcd_client()
config_from_etcd = etcd_client.get(
self.config_key, value_in_json=True
)
if config_from_etcd:
self.config_parser.read_dict(config_from_etcd.value)
else:
raise KeyError(
"Key '{}' not found in etcd. Please configure uncloud.".format(
self.config_key
)
)
if (datetime.utcnow() - self.last_config_update).total_seconds() > 60:
config_from_etcd = etcd_client.get(self.config_key, value_in_json=True)
if config_from_etcd:
self.config_parser.read_dict(config_from_etcd.value)
self.last_config_update = datetime.utcnow()
else:
raise KeyError("Key '{}' not found in etcd. Please configure uncloud.".format(self.config_key))
def __getitem__(self, key):
# Allow failing to read from etcd if we have
# it locally
try:
self.read_values_from_etcd()
except KeyError as e:
pass
if key not in self.config_parser.sections():
try:
self.read_values_from_etcd()
except KeyError as e:
pass
return self.config_parser[key]