- {% for message in messages %}
- {% if 'vat_error' in message.tags %}
- {{ message|safe }}
- {% endif %}
- {% endfor %}
-
- {% with cards_len=cards|length %}
- {%trans "Credit Card"%}
-
- {% if cards_len > 0 %}
- {% blocktrans %}Please select one of the cards that you used before or fill in your credit card information below. We are using Stripe for payment and do not store your information in our database.{% endblocktrans %}
- {% else %}
- {% blocktrans %}Please fill in your credit card information below. We are using Stripe for payment and do not store your information in our database.{% endblocktrans %}
- {% endif %}
-
- {% for card in cards %}
- {% trans "Credit Card" %}
- {% trans "Last" %} 4: ***** {{card.last4}}
- {% trans "Type" %}: {{card.brand}}
- {% trans "Expiry" %}: {{card.month}}/{{card.year}}
- >]
- django_contrib_auth_models_AbstractUser -> django_contrib_auth_models_PermissionsMixin
- [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
-
- uncloud_auth_models_User -> django_contrib_auth_models_Group
- [label=" groups (user)"] [arrowhead=dot arrowtail=dot, dir=both];
-
- uncloud_auth_models_User -> django_contrib_auth_models_Permission
- [label=" user_permissions (user)"] [arrowhead=dot arrowtail=dot, dir=both];
-
- uncloud_auth_models_User -> django_contrib_auth_models_AbstractUser
- [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
-
-
- uncloud_pay_models_Product -> uncloud_auth_models_User
- [label=" owner (product)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_pay_models_Product -> uncloud_pay_models_Order
- [label=" order (product)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_vm_models_VMProduct -> uncloud_vm_models_VMHost
- [label=" vmhost (vmproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_vm_models_VMProduct -> uncloud_pay_models_Product
- [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
-
- uncloud_vm_models_VMWithOSProduct -> uncloud_vm_models_VMProduct
- [label=" multi-table\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
-
- uncloud_vm_models_VMDiskImageProduct -> uncloud_auth_models_User
- [label=" owner (vmdiskimageproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_vm_models_VMDiskProduct -> uncloud_auth_models_User
- [label=" owner (vmdiskproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_vm_models_VMDiskProduct -> uncloud_vm_models_VMProduct
- [label=" vm (vmdiskproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_vm_models_VMDiskProduct -> uncloud_vm_models_VMDiskImageProduct
- [label=" image (vmdiskproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_vm_models_VMNetworkCard -> uncloud_vm_models_VMProduct
- [label=" vm (vmnetworkcard)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_vm_models_VMSnapshotProduct -> uncloud_vm_models_VMProduct
- [label=" vm (vmsnapshotproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_vm_models_VMSnapshotProduct -> uncloud_pay_models_Product
- [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
-
-
- uncloud_pay_models_Product -> uncloud_auth_models_User
- [label=" owner (product)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- uncloud_pay_models_Product -> uncloud_pay_models_Order
- [label=" order (product)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- ungleich_service_models_MatrixServiceProduct -> uncloud_vm_models_VMProduct
- [label=" vm (matrixserviceproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
-
- ungleich_service_models_MatrixServiceProduct -> uncloud_pay_models_Product
- [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
-
-
- opennebula_models_VM -> uncloud_auth_models_User
- [label=" owner (vm)"] [arrowhead=none, arrowtail=dot, dir=both];
-
-
-}
diff --git a/models.png b/models.png
deleted file mode 100644
index f9d0c2e..0000000
Binary files a/models.png and /dev/null differ
diff --git a/opennebula/__init__.py b/opennebula/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/opennebula/admin.py b/opennebula/admin.py
deleted file mode 100644
index 8c38f3f..0000000
--- a/opennebula/admin.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from django.contrib import admin
-
-# Register your models here.
diff --git a/opennebula/apps.py b/opennebula/apps.py
deleted file mode 100644
index 0750576..0000000
--- a/opennebula/apps.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from django.apps import AppConfig
-
-
-class OpennebulaConfig(AppConfig):
- name = 'opennebula'
diff --git a/opennebula/management/commands/opennebula-synchosts.py b/opennebula/management/commands/opennebula-synchosts.py
deleted file mode 100644
index 29f9ac1..0000000
--- a/opennebula/management/commands/opennebula-synchosts.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import json
-
-import uncloud.secrets as secrets
-
-from xmlrpc.client import ServerProxy as RPCClient
-
-from django.core.management.base import BaseCommand
-from xmltodict import parse
-from enum import IntEnum
-from opennebula.models import VM as VMModel
-from uncloud_vm.models import VMHost
-from django_auth_ldap.backend import LDAPBackend
-
-
-class HostStates(IntEnum):
- """
- The following flags are copied from
- https://docs.opennebula.org/5.8/integration/system_interfaces/api.html#schemas-for-host
- """
- INIT = 0 # Initial state for enabled hosts
- MONITORING_MONITORED = 1 # Monitoring the host (from monitored)
- MONITORED = 2 # The host has been successfully monitored
- ERROR = 3 # An error occurred while monitoring the host
- DISABLED = 4 # The host is disabled
- MONITORING_ERROR = 5 # Monitoring the host (from error)
- MONITORING_INIT = 6 # Monitoring the host (from init)
- MONITORING_DISABLED = 7 # Monitoring the host (from disabled)
- OFFLINE = 8 # The host is totally offline
-
-
-class Command(BaseCommand):
- help = 'Synchronize Host information from OpenNebula'
-
- def add_arguments(self, parser):
- pass
-
- def handle(self, *args, **options):
- with RPCClient(secrets.OPENNEBULA_URL) as rpc_client:
- success, response, *_ = rpc_client.one.hostpool.info(secrets.OPENNEBULA_USER_PASS)
- if success:
- response = json.loads(json.dumps(parse(response)))
- host_pool = response.get('HOST_POOL', {}).get('HOST', {})
- for host in host_pool:
- host_share = host.get('HOST_SHARE', {})
-
- host_name = host.get('NAME')
- state = int(host.get('STATE', HostStates.OFFLINE.value))
-
- if state == HostStates.MONITORED:
- status = 'active'
- elif state == HostStates.DISABLED:
- status = 'disabled'
- else:
- status = 'unusable'
-
- usable_cores = host_share.get('TOTAL_CPU')
- usable_ram_in_kb = int(host_share.get('TOTAL_MEM', 0))
- usable_ram_in_gb = int(usable_ram_in_kb / 2 ** 20)
-
- # vms cannot be created like this -- Nico, 2020-03-17
- # vms = host.get('VMS', {}) or {}
- # vms = vms.get('ID', []) or []
- # vms = ','.join(vms)
-
- VMHost.objects.update_or_create(
- hostname=host_name,
- defaults={
- 'usable_cores': usable_cores,
- 'usable_ram_in_gb': usable_ram_in_gb,
- 'status': status
- }
- )
- else:
- print(response)
diff --git a/opennebula/management/commands/opennebula-syncvms.py b/opennebula/management/commands/opennebula-syncvms.py
deleted file mode 100644
index 3c12fa9..0000000
--- a/opennebula/management/commands/opennebula-syncvms.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import json
-
-from xmlrpc.client import ServerProxy as RPCClient
-from django_auth_ldap.backend import LDAPBackend
-from django.core.management.base import BaseCommand
-from django.conf import settings
-from xmltodict import parse
-
-from opennebula.models import VM as VMModel
-
-
-class Command(BaseCommand):
- help = 'Synchronize VM information from OpenNebula'
-
- def add_arguments(self, parser):
- pass
-
- def handle(self, *args, **options):
- with RPCClient(settings.OPENNEBULA_URL) as rpc_client:
- success, response, *_ = rpc_client.one.vmpool.infoextended(
- settings.OPENNEBULA_USER_PASS, -2, -1, -1, -1
- )
- if success:
- vms = json.loads(json.dumps(parse(response)))['VM_POOL']['VM']
- unknown_user = set()
-
- backend = LDAPBackend()
-
- for vm in vms:
- vm_id = vm['ID']
- vm_owner = vm['UNAME']
-
- user = backend.populate_user(username=vm_owner)
-
- if not user:
- unknown_user.add(vm_owner)
- else:
- VMModel.objects.update_or_create(
- vmid=vm_id,
- defaults={'data': vm, 'owner': user}
- )
- print('User not found in ldap:', unknown_user)
- else:
- print(response)
diff --git a/opennebula/management/commands/opennebula-to-uncloud.py b/opennebula/management/commands/opennebula-to-uncloud.py
deleted file mode 100644
index 230159a..0000000
--- a/opennebula/management/commands/opennebula-to-uncloud.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import sys
-from datetime import datetime
-
-from django.core.management.base import BaseCommand
-from django.utils import timezone
-from django.contrib.auth import get_user_model
-
-from opennebula.models import VM as VMModel
-from uncloud_vm.models import VMHost, VMProduct, VMNetworkCard, VMDiskImageProduct, VMDiskProduct
-
-from uncloud_pay.models import Order
-
-import logging
-
-log = logging.getLogger(__name__)
-
-def convert_mac_to_int(mac_address: str):
- # Remove octet connecting characters
- mac_address = mac_address.replace(':', '')
- mac_address = mac_address.replace('.', '')
- mac_address = mac_address.replace('-', '')
- mac_address = mac_address.replace(' ', '')
-
- # Parse the resulting number as hexadecimal
- mac_address = int(mac_address, base=16)
-
- return mac_address
-
-
-def get_vm_price(core, ram, ssd_size, hdd_size, n_of_ipv4, n_of_ipv6):
- total = 3 * core + 4 * ram + (3.5 * ssd_size/10.) + (1.5 * hdd_size/100.) + 8 * n_of_ipv4 + 0 * n_of_ipv6
-
- # TODO: Find some reason about the following magical subtraction.
- total -= 8
-
- return total
-
-
-def create_nics(one_vm, vm_product):
- for nic in one_vm.nics:
- mac_address = convert_mac_to_int(nic.get('MAC'))
- ip_address = nic.get('IP', None) or nic.get('IP6_GLOBAL', None)
-
- VMNetworkCard.objects.update_or_create(
- mac_address=mac_address, vm=vm_product, defaults={'ip_address': ip_address}
- )
-
-
-def sync_disk_and_image(one_vm, vm_product, disk_owner):
- """
- a) Check all opennebula disks: add any that are not yet in the uncloud VM
- b) Check all uncloud disks and remove them if they are not in the opennebula VM
- """
-
- vmdisknum = 0
-
- one_disks_extra_data = []
-
- for disk in one_vm.disks:
- vmowner = one_vm.owner
- name = disk.get('image')
- vmdisknum += 1
-
- log.info("Checking disk {} for VM {}".format(name, one_vm))
-
- is_os_image, is_public, status = True, False, 'active'
-
- image_size_in_gb = disk.get('image_size_in_gb')
- disk_size_in_gb = disk.get('size_in_gb')
- storage_class = disk.get('storage_class')
- image_source = disk.get('source')
- image_source_type = disk.get('source_type')
-
- image, _ = VMDiskImageProduct.objects.update_or_create(
- name=name,
- defaults={
- 'owner': disk_owner,
- 'is_os_image': is_os_image,
- 'is_public': is_public,
- 'size_in_gb': image_size_in_gb,
- 'storage_class': storage_class,
- 'image_source': image_source,
- 'image_source_type': image_source_type,
- 'status': status
- }
- )
-
- # identify vmdisk from opennebula - primary mapping key
- extra_data = {
- 'opennebula_vm': one_vm.vmid,
- 'opennebula_size_in_gb': disk_size_in_gb,
- 'opennebula_source': disk.get('opennebula_source'),
- 'opennebula_disk_num': vmdisknum
- }
- # Save for comparing later
- one_disks_extra_data.append(extra_data)
-
- try:
- vm_disk = VMDiskProduct.objects.get(extra_data=extra_data)
- except VMDiskProduct.DoesNotExist:
- vm_disk = VMDiskProduct.objects.create(
- owner=vmowner,
- vm=vm_product,
- image=image,
- size_in_gb=disk_size_in_gb,
- extra_data=extra_data
- )
-
- # Now remove all disks that are not in above extra_data list
- for disk in VMDiskProduct.objects.filter(vm=vm_product):
- extra_data = disk.extra_data
- if not extra_data in one_disks_extra_data:
- log.info("Removing disk {} from VM {}".format(disk, vm_product))
- disk.delete()
-
- disks = [ disk.extra_data for disk in VMDiskProduct.objects.filter(vm=vm_product) ]
- log.info("VM {} has disks: {}".format(vm_product, disks))
-
-class Command(BaseCommand):
- help = 'Migrate Opennebula VM to regular (uncloud) vm'
-
- def add_arguments(self, parser):
- parser.add_argument('--disk-owner', required=True, help="The user who owns the opennebula disks")
-
- def handle(self, *args, **options):
- log.debug("{} {}".format(args, options))
-
- disk_owner = get_user_model().objects.get(username=options['disk_owner'])
-
- for one_vm in VMModel.objects.all():
-
- if not one_vm.last_host:
- log.warning("No VMHost for VM {} - VM might be on hold - skipping".format(one_vm.vmid))
- continue
-
- try:
- vmhost = VMHost.objects.get(hostname=one_vm.last_host)
- except VMHost.DoesNotExist:
- log.error("VMHost {} does not exist, aborting".format(one_vm.last_host))
- raise
-
- cores = one_vm.cores
- ram_in_gb = one_vm.ram_in_gb
- owner = one_vm.owner
- status = 'active'
-
- ssd_size = sum([ disk['size_in_gb'] for disk in one_vm.disks if disk['pool_name'] in ['ssd', 'one'] ])
- hdd_size = sum([ disk['size_in_gb'] for disk in one_vm.disks if disk['pool_name'] in ['hdd'] ])
-
- # List of IPv4 addresses and Global IPv6 addresses
- ipv4, ipv6 = one_vm.ips
-
- # TODO: Insert actual/real creation_date, starting_date, ending_date
- # instead of pseudo one we are putting currently
- creation_date = starting_date = datetime.now(tz=timezone.utc)
-
- # Price calculation based on datacenterlight.ch
- one_time_price = 0
- recurring_period = 'per_month'
- recurring_price = get_vm_price(cores, ram_in_gb,
- ssd_size, hdd_size,
- len(ipv4), len(ipv6))
-
- try:
- vm_product = VMProduct.objects.get(extra_data__opennebula_id=one_vm.vmid)
- except VMProduct.DoesNotExist:
- order = Order.objects.create(
- owner=owner,
- creation_date=creation_date,
- starting_date=starting_date
- )
- vm_product = VMProduct(
- extra_data={ 'opennebula_id': one_vm.vmid },
- name=one_vm.uncloud_name,
- order=order
- )
-
- # we don't use update_or_create, as filtering by json AND setting json
- # at the same time does not work
-
- vm_product.vmhost = vmhost
- vm_product.owner = owner
- vm_product.cores = cores
- vm_product.ram_in_gb = ram_in_gb
- vm_product.status = status
-
- vm_product.save()
-
- # Create VMNetworkCards
- create_nics(one_vm, vm_product)
-
- # Create VMDiskImageProduct and VMDiskProduct
- sync_disk_and_image(one_vm, vm_product, disk_owner=disk_owner)
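The deleted opennebula-to-uncloud command above derives a monthly price from the VM's resources. As a quick illustration of what that formula yields, here is a minimal standalone sketch that copies the coefficients from get_vm_price (the trailing subtraction is the unexplained one flagged in the original TODO):

```python
# Sketch of the pricing formula from the deleted opennebula-to-uncloud.py,
# using the same coefficients as get_vm_price above.
def get_vm_price(core, ram, ssd_size, hdd_size, n_of_ipv4, n_of_ipv6):
    total = (3 * core + 4 * ram
             + 3.5 * ssd_size / 10.0 + 1.5 * hdd_size / 100.0
             + 8 * n_of_ipv4 + 0 * n_of_ipv6)
    return total - 8  # the "magical subtraction" noted in the original TODO

# 2 cores, 4 GB RAM, 50 GB SSD, no HDD, 1 IPv4, 1 IPv6:
# 6 + 16 + 17.5 + 0 + 8 + 0 - 8 = 39.5 per month (per the datacenterlight.ch comment above)
print(get_vm_price(2, 4, 50, 0, 1, 1))  # 39.5
```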
diff --git a/opennebula/migrations/0001_initial.py b/opennebula/migrations/0001_initial.py
deleted file mode 100644
index 9a135c6..0000000
--- a/opennebula/migrations/0001_initial.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Generated by Django 3.1 on 2020-12-13 10:38
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
- initial = True
-
- dependencies = [
- ]
-
- operations = [
- migrations.CreateModel(
- name='VM',
- fields=[
- ('vmid', models.IntegerField(primary_key=True, serialize=False)),
- ('data', models.JSONField()),
- ],
- ),
- ]
diff --git a/opennebula/migrations/__init__.py b/opennebula/migrations/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/opennebula/models.py b/opennebula/models.py
deleted file mode 100644
index f15b845..0000000
--- a/opennebula/models.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import uuid
-from django.db import models
-from django.contrib.auth import get_user_model
-from uncloud_pay.models import Product
-
-# ungleich specific
-storage_class_mapping = {
- 'one': 'ssd',
- 'ssd': 'ssd',
- 'hdd': 'hdd'
-}
-
-class VM(models.Model):
- vmid = models.IntegerField(primary_key=True)
- data = models.JSONField()
-
- @property
- def uncloud_name(self):
- return "opennebula-{}".format(self.vmid)
-
- @property
- def cores(self):
- return int(self.data['TEMPLATE']['VCPU'])
-
- @property
- def ram_in_gb(self):
- return int(self.data['TEMPLATE']['MEMORY'])/1024
-
- @property
- def disks(self):
- """
- If there is no disk then the key DISK does not exist.
-
- If there is only one disk, we have a dictionary in the database.
-
- If there are multiple disks, we have a list of dictionaries in the database.
- """
-
- disks = []
-
- if 'DISK' in self.data['TEMPLATE']:
- if type(self.data['TEMPLATE']['DISK']) is dict:
- disks = [self.data['TEMPLATE']['DISK']]
- else:
- disks = self.data['TEMPLATE']['DISK']
-
- disks = [
- {
- 'size_in_gb': int(d['SIZE'])/1024,
- 'opennebula_source': d['SOURCE'],
- 'opennebula_name': d['IMAGE'],
- 'image_size_in_gb': int(d['ORIGINAL_SIZE'])/1024,
- 'pool_name': d['POOL_NAME'],
- 'image': d['IMAGE'],
- 'source': d['SOURCE'],
- 'source_type': d['TM_MAD'],
- 'storage_class': storage_class_mapping[d['POOL_NAME']]
-
- }
- for d in disks
- ]
-
- return disks
-
- @property
- def last_host(self):
- return ((self.data.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HOSTNAME', None)
-
- @property
- def graphics(self):
- return self.data.get('TEMPLATE', {}).get('GRAPHICS', {})
-
- @property
- def nics(self):
- _nics = self.data.get('TEMPLATE', {}).get('NIC', {})
- if isinstance(_nics, dict):
- _nics = [_nics]
- return _nics
-
- @property
- def ips(self):
- ipv4, ipv6 = [], []
- for nic in self.nics:
- ip = nic.get('IP')
- ip6 = nic.get('IP6_GLOBAL')
- if ip:
- ipv4.append(ip)
- if ip6:
- ipv6.append(ip6)
- return ipv4, ipv6
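The disks property in the deleted opennebula/models.py above has to deal with an xmltodict quirk: a single <DISK> element parses to a dict, while several parse to a list of dicts. A small, hypothetical helper (not part of the original code) showing that normalization idiom in isolation:

```python
# Hypothetical illustration of the dict-vs-list normalization that VM.disks
# performs on xmltodict output; names here are not from the original code.
def as_list(value):
    """Return [] for missing, [value] for a single dict, value unchanged for a list."""
    if value is None:
        return []
    if isinstance(value, dict):
        return [value]
    return list(value)

single = {"TEMPLATE": {"DISK": {"SIZE": "10240", "IMAGE": "alpine"}}}
many = {"TEMPLATE": {"DISK": [{"SIZE": "10240"}, {"SIZE": "20480"}]}}
print(as_list(single["TEMPLATE"].get("DISK")))  # one-element list
print(as_list(many["TEMPLATE"].get("DISK")))    # two-element list
```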
diff --git a/opennebula/serializers.py b/opennebula/serializers.py
deleted file mode 100644
index cd00622..0000000
--- a/opennebula/serializers.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from rest_framework import serializers
-from opennebula.models import VM
-
-
-class OpenNebulaVMSerializer(serializers.HyperlinkedModelSerializer):
- class Meta:
- model = VM
- fields = [ 'vmid', 'owner', 'data',
- 'uncloud_name', 'cores', 'ram_in_gb',
- 'disks', 'nics', 'ips' ]
diff --git a/opennebula/tests.py b/opennebula/tests.py
deleted file mode 100644
index 7ce503c..0000000
--- a/opennebula/tests.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from django.test import TestCase
-
-# Create your tests here.
diff --git a/opennebula/views.py b/opennebula/views.py
deleted file mode 100644
index 688f0b4..0000000
--- a/opennebula/views.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from rest_framework import viewsets, permissions
-
-#from .models import VM
-# from .serializers import OpenNebulaVMSerializer
-
-# class VMViewSet(viewsets.ModelViewSet):
-# permission_classes = [permissions.IsAuthenticated]
-# serializer_class = OpenNebulaVMSerializer
-
-# def get_queryset(self):
-# if self.request.user.is_superuser:
-# obj = VM.objects.all()
-# else:
-# obj = VM.objects.filter(owner=self.request.user)
-
-# return obj
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 8231fd0..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-# Django basics
-Django==3.2.4
-djangorestframework
-django-auth-ldap
-django-bootstrap-v5
-fontawesome-free
-
-psycopg2
-ldap3
-django-allauth
-xmltodict
-parsedatetime
-# The following are for creating graph models
-pyparsing
-pydot
-django-extensions
-
-# PDF creating
-django-hardcopy
-
-# schema support
-pyyaml
-uritemplate
-tldextract
-# Payment & VAT
-vat-validator
-stripe
-
-#Jobs
-django-q
-redis
-
-jinja2
-python-gitlab
diff --git a/resources/ci/.lock b/resources/ci/.lock
deleted file mode 100644
index e69de29..0000000
diff --git a/resources/ci/Dockerfile b/resources/ci/Dockerfile
deleted file mode 100644
index 020b66e..0000000
--- a/resources/ci/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM fedora:latest
-
-RUN dnf install -y python3-devel python3-pip python3-coverage libpq-devel openldap-devel gcc chromium
diff --git a/resources/vat-rates.csv b/resources/vat-rates.csv
deleted file mode 100644
index 17bdb99..0000000
--- a/resources/vat-rates.csv
+++ /dev/null
@@ -1,325 +0,0 @@
-start_date,stop_date,territory_codes,currency_code,rate,rate_type,description
-2011-01-04,,AI,XCD,0,standard,Anguilla (British overseas territory) is exempted of VAT.
-1984-01-01,,AT,EUR,0.2,standard,Austria (member state) standard VAT rate.
-1976-01-01,1984-01-01,AT,EUR,0.18,standard,
-1973-01-01,1976-01-01,AT,EUR,0.16,standard,
-1984-01-01,,"AT-6691
-DE-87491",EUR,0.19,standard,Jungholz (Austrian town) special VAT rate.
-1984-01-01,,"AT-6991
-AT-6992
-AT-6993
-DE-87567
-DE-87568
-DE-87569",EUR,0.19,standard,Mittelberg (Austrian town) special VAT rate.
-1996-01-01,,BE,EUR,0.21,standard,Belgium (member state) standard VAT rate.
-1994-01-01,1996-01-01,BE,EUR,0.205,standard,
-1992-04-01,1994-01-01,BE,EUR,0.195,standard,
-1983-01-01,1992-04-01,BE,EUR,0.19,standard,
-1981-07-01,1983-01-01,BE,EUR,0.17,standard,
-1978-07-01,1981-07-01,BE,EUR,0.16,standard,
-1971-07-01,1978-07-01,BE,EUR,0.18,standard,
-1999-01-01,,BG,BGN,0.2,standard,Bulgaria (member state) standard VAT rate.
-1996-07-01,1999-01-01,BG,BGN,0.22,standard,
-1994-04-01,1996-07-01,BG,BGN,0.18,standard,
-2011-01-04,,BM,BMD,0,standard,Bermuda (British overseas territory) is exempted of VAT.
-2014-01-13,,"CY
-GB-BFPO 57
-GB-BFPO 58
-GB-BFPO 59
-UK-BFPO 57
-UK-BFPO 58
-UK-BFPO 59",EUR,0.19,standard,"Cyprus (member state) standard VAT rate.
-Akrotiri and Dhekelia (British overseas territory) is subjected to Cyprus' standard VAT rate."
-2013-01-14,2014-01-13,CY,EUR,0.18,standard,
-2012-03-01,2013-01-14,CY,EUR,0.17,standard,
-2003-01-01,2012-03-01,CY,EUR,0.15,standard,
-2002-07-01,2003-01-01,CY,EUR,0.13,standard,
-2000-07-01,2002-07-01,CY,EUR,0.1,standard,
-1993-10-01,2000-07-01,CY,EUR,0.08,standard,
-1992-07-01,1993-10-01,CY,EUR,0.05,standard,
-2013-01-01,,CZ,CZK,0.21,standard,Czech Republic (member state) standard VAT rate.
-2010-01-01,2013-01-01,CZ,CZK,0.2,standard,
-2004-05-01,2010-01-01,CZ,CZK,0.19,standard,
-1995-01-01,2004-05-01,CZ,CZK,0.22,standard,
-1993-01-01,1995-01-01,CZ,CZK,0.23,standard,
-2007-01-01,,DE,EUR,0.19,standard,Germany (member state) standard VAT rate.
-1998-04-01,2007-01-01,DE,EUR,0.16,standard,
-1993-01-01,1998-04-01,DE,EUR,0.15,standard,
-1983-07-01,1993-01-01,DE,EUR,0.14,standard,
-1979-07-01,1983-07-01,DE,EUR,0.13,standard,
-1978-01-01,1979-07-01,DE,EUR,0.12,standard,
-1968-07-01,1978-01-01,DE,EUR,0.11,standard,
-1968-01-01,1968-07-01,DE,EUR,0.1,standard,
-2007-01-01,,DE-27498,EUR,0,standard,Heligoland (German island) is exempted of VAT.
-2007-01-01,,"DE-78266
-CH-8238",EUR,0,standard,Busingen am Hochrhein (German territory) is exempted of VAT.
-1992-01-01,,DK,DKK,0.25,standard,Denmark (member state) standard VAT rate.
-1980-06-30,1992-01-01,DK,DKK,0.22,standard,
-1978-10-30,1980-06-30,DK,DKK,0.2025,standard,
-1977-10-03,1978-10-30,DK,DKK,0.18,standard,
-1970-06-29,1977-10-03,DK,DKK,0.15,standard,
-1968-04-01,1970-06-29,DK,DKK,0.125,standard,
-1967-07-03,1968-04-01,DK,DKK,0.1,standard,
-2009-07-01,,EE,EUR,0.2,standard,Estonia (member state) standard VAT rate.
-1993-01-01,2009-07-01,EE,EUR,0.18,standard,
-1991-01-01,1993-01-01,EE,EUR,0.1,standard,
-2016-06-01,,"GR
-EL",EUR,0.24,standard,Greece (member state) standard VAT rate.
-2010-07-01,2016-06-01,"GR
-EL",EUR,0.23,standard,
-2010-03-15,2010-07-01,"GR
-EL",EUR,0.21,standard,
-2005-04-01,2010-03-15,"GR
-EL",EUR,0.19,standard,
-1990-04-28,2005-04-01,"GR
-EL",EUR,0.18,standard,
-1988-01-01,1990-04-28,"GR
-EL",EUR,0.16,standard,
-1987-01-01,1988-01-01,"GR
-EL",EUR,0.18,standard,
-2012-09-01,,ES,EUR,0.21,standard,Spain (member state) standard VAT rate.
-2010-07-01,2012-09-01,ES,EUR,0.18,standard,
-1995-01-01,2010-07-01,ES,EUR,0.16,standard,
-1992-08-01,1995-01-01,ES,EUR,0.15,standard,
-1992-01-01,1992-08-01,ES,EUR,0.13,standard,
-1986-01-01,1992-01-01,ES,EUR,0.12,standard,
-2012-09-01,,"ES-CN
-ES-GC
-ES-TF
-IC",EUR,0,standard,Canary Islands (Spanish autonomous community) is exempted of VAT.
-2012-09-01,,"ES-ML
-ES-CE
-EA",EUR,0,standard,Ceuta and Melilla (Spanish autonomous cities) is exempted of VAT.
-2013-01-01,,FI,EUR,0.24,standard,Finland (member state) standard VAT rate.
-2010-07-01,2013-01-01,FI,EUR,0.23,standard,
-1994-06-01,2010-07-01,FI,EUR,0.22,standard,
-2013-01-01,,"FI-01
-AX",EUR,0,standard,Aland Islands (Finish autonomous region) is exempted of VAT.
-2011-01-04,,FK,FKP,0,standard,Falkland Islands (British overseas territory) is exempted of VAT.
-1992-01-01,,FO,DKK,0,standard,Faroe Islands (Danish autonomous country) is exempted of VAT.
-2014-01-01,,"FR
-MC",EUR,0.2,standard,"France (member state) standard VAT rate.
-Monaco (sovereign city-state) is member of the EU VAT area and subjected to France's standard VAT rate."
-2000-04-01,2014-01-01,"FR
-MC",EUR,0.196,standard,
-1995-08-01,2000-04-01,"FR
-MC",EUR,0.206,standard,
-1982-07-01,1995-08-01,"FR
-MC",EUR,0.186,standard,
-1977-01-01,1982-07-01,"FR
-MC",EUR,0.176,standard,
-1973-01-01,1977-01-01,"FR
-MC",EUR,0.2,standard,
-1970-01-01,1973-01-01,"FR
-MC",EUR,0.23,standard,
-1968-12-01,1970-01-01,"FR
-MC",EUR,0.19,standard,
-1968-01-01,1968-12-01,"FR
-MC",EUR,0.1666,standard,
-2014-01-01,,"FR-BL
-BL",EUR,0,standard,Saint Barthelemy (French overseas collectivity) is exempted of VAT.
-2014-01-01,,"FR-GF
-GF",EUR,0,standard,Guiana (French overseas department) is exempted of VAT.
-2014-01-01,,"FR-GP
-GP",EUR,0.085,standard,Guadeloupe (French overseas department) special VAT rate.
-2014-01-01,,"FR-MF
-MF",EUR,0,standard,Saint Martin (French overseas collectivity) is subjected to France's standard VAT rate.
-2014-01-01,,"FR-MQ
-MQ",EUR,0.085,standard,Martinique (French overseas department) special VAT rate.
-2014-01-01,,"FR-NC
-NC",XPF,0,standard,New Caledonia (French special collectivity) is exempted of VAT.
-2014-01-01,,"FR-PF
-PF",XPF,0,standard,French Polynesia (French overseas collectivity) is exempted of VAT.
-2014-01-01,,"FR-PM
-PM",EUR,0,standard,Saint Pierre and Miquelon (French overseas collectivity) is exempted of VAT.
-2014-01-01,,"FR-RE
-RE",EUR,0.085,standard,Reunion (French overseas department) special VAT rate.
-2014-01-01,,"FR-TF
-TF",EUR,0,standard,French Southern and Antarctic Lands (French overseas territory) is exempted of VAT.
-2014-01-01,,"FR-WF
-WF",XPF,0,standard,Wallis and Futuna (French overseas collectivity) is exempted of VAT.
-2014-01-01,,"FR-YT
-YT",EUR,0,standard,Mayotte (French overseas department) is exempted of VAT.
-2011-01-04,,GG,GBP,0,standard,Guernsey (British Crown dependency) is exempted of VAT.
-2011-01-04,,GI,GIP,0,standard,Gibraltar (British overseas territory) is exempted of VAT.
-1992-01-01,,GL,DKK,0,standard,Greenland (Danish autonomous country) is exempted of VAT.
-2010-07-01,2016-06-01,"GR-34007
-EL-34007",EUR,0.16,standard,Skyros (Greek island) special VAT rate.
-2010-07-01,2016-06-01,"GR-37002
-GR-37003
-GR-37005
-EL-37002
-EL-37003
-EL-37005",EUR,0.16,standard,Northern Sporades (Greek islands) special VAT rate.
-2010-07-01,2016-06-01,"GR-64004
-EL-64004",EUR,0.16,standard,Thasos (Greek island) special VAT rate.
-2010-07-01,2016-06-01,"GR-68002
-EL-68002",EUR,0.16,standard,Samothrace (Greek island) special VAT rate.
-2010-07-01,,"GR-69
-EL-69",EUR,0,standard,Mount Athos (Greek self-governed part) is exempted of VAT.
-2010-07-01,2016-06-01,"GR-81
-EL-81",EUR,0.16,standard,Dodecanese (Greek department) special VAT rate.
-2010-07-01,2016-06-01,"GR-82
-EL-82",EUR,0.16,standard,Cyclades (Greek department) special VAT rate.
-2010-07-01,2016-06-01,"GR-83
-EL-83",EUR,0.16,standard,Lesbos (Greek department) special VAT rate.
-2010-07-01,2016-06-01,"GR-84
-EL-84",EUR,0.16,standard,Samos (Greek department) special VAT rate.
-2010-07-01,2016-06-01,"GR-85
-EL-85",EUR,0.16,standard,Chios (Greek department) special VAT rate.
-2011-01-04,,GS,GBP,0,standard,South Georgia and the South Sandwich Islands (British overseas territory) is exempted of VAT.
-2012-03-01,,HR,HRK,0.25,standard,Croatia (member state) standard VAT rate.
-2009-08-01,2012-03-01,HR,HRK,0.23,standard,
-1998-08-01,2009-08-01,HR,HRK,0.22,standard,
-2012-01-01,,HU,HUF,0.27,standard,Hungary (member state) standard VAT rate.
-2009-07-01,2012-01-01,HU,HUF,0.25,standard,
-2006-01-01,2009-07-01,HU,HUF,0.2,standard,
-1988-01-01,2006-01-01,HU,HUF,0.25,standard,
-2012-01-01,,IE,EUR,0.23,standard,Republic of Ireland (member state) standard VAT rate.
-2010-01-01,2012-01-01,IE,EUR,0.21,standard,
-2008-12-01,2010-01-01,IE,EUR,0.215,standard,
-2002-03-01,2008-12-01,IE,EUR,0.21,standard,
-2001-01-01,2002-03-01,IE,EUR,0.2,standard,
-1991-03-01,2001-01-01,IE,EUR,0.21,standard,
-1990-03-01,1991-03-01,IE,EUR,0.23,standard,
-1986-03-01,1990-03-01,IE,EUR,0.25,standard,
-1983-05-01,1986-03-01,IE,EUR,0.23,standard,
-1983-03-01,1983-05-01,IE,EUR,0.35,standard,
-1982-05-01,1983-03-01,IE,EUR,0.3,standard,
-1980-05-01,1982-05-01,IE,EUR,0.25,standard,
-1976-03-01,1980-05-01,IE,EUR,0.2,standard,
-1973-09-03,1976-03-01,IE,EUR,0.195,standard,
-1972-11-01,1973-09-03,IE,EUR,0.1637,standard,
-2011-01-04,,IO,GBP,0,standard,British Indian Ocean Territory (British overseas territory) is exempted of VAT.
-2013-10-01,,IT,EUR,0.22,standard,Italy (member state) standard VAT rate.
-2011-09-17,2013-10-01,IT,EUR,0.21,standard,
-1997-10-01,2011-09-17,IT,EUR,0.2,standard,
-1988-08-01,1997-10-01,IT,EUR,0.19,standard,
-1982-08-05,1988-08-01,IT,EUR,0.18,standard,
-1981-01-01,1982-08-05,IT,EUR,0.15,standard,
-1980-11-01,1981-01-01,IT,EUR,0.14,standard,
-1980-07-03,1980-11-01,IT,EUR,0.15,standard,
-1977-02-08,1980-07-03,IT,EUR,0.14,standard,
-1973-01-01,1977-02-08,IT,EUR,0.12,standard,
-2013-10-01,,"IT-22060
-CH-6911",CHF,0,standard,Campione (Italian town) is exempted of VAT.
-2013-10-01,,IT-23030,EUR,0,standard,Livigno (Italian town) is exempted of VAT.
-2011-01-04,,JE,GBP,0,standard,Jersey (British Crown dependency) is exempted of VAT.
-2011-01-04,,KY,KYD,0,standard,Cayman Islands (British overseas territory) is exempted of VAT.
-2009-09-01,,LT,EUR,0.21,standard,Lithuania (member state) standard VAT rate.
-2009-01-01,2009-09-01,LT,EUR,0.19,standard,
-1994-05-01,2009-01-01,LT,EUR,0.18,standard,
-2015-01-01,,LU,EUR,0.17,standard,Luxembourg (member state) standard VAT rate.
-1992-01-01,2015-01-01,LU,EUR,0.15,standard,
-1983-07-01,1992-01-01,LU,EUR,0.12,standard,
-1971-01-01,1983-07-01,LU,EUR,0.1,standard,
-1970-01-01,1971-01-01,LU,EUR,0.8,standard,
-2012-07-01,,LV,EUR,0.21,standard,Latvia (member state) standard VAT rate.
-2011-01-01,2012-07-01,LV,EUR,0.22,standard,
-2009-01-01,2011-01-01,LV,EUR,0.21,standard,
-1995-05-01,2009-01-01,LV,EUR,0.18,standard,
-2011-01-04,,MS,XCD,0,standard,Montserrat (British overseas territory) is exempted of VAT.
-2004-01-01,,MT,EUR,0.18,standard,Malta (member state) standard VAT rate.
-1995-01-01,2004-01-01,MT,EUR,0.15,standard,
-2012-10-01,,NL,EUR,0.21,standard,Netherlands (member state) standard VAT rate.
-2001-01-01,2012-10-01,NL,EUR,0.19,standard,
-1992-10-01,2001-01-01,NL,EUR,0.175,standard,
-1989-01-01,1992-10-01,NL,EUR,0.185,standard,
-1986-10-01,1989-01-01,NL,EUR,0.2,standard,
-1984-01-01,1986-10-01,NL,EUR,0.19,standard,
-1976-01-01,1984-01-01,NL,EUR,0.18,standard,
-1973-01-01,1976-01-01,NL,EUR,0.16,standard,
-1971-01-01,1973-01-01,NL,EUR,0.14,standard,
-1969-01-01,1971-01-01,NL,EUR,0.12,standard,
-2012-10-01,,"NL-AW
-AW",AWG,0,standard,Aruba (Dutch country) are exempted of VAT.
-2012-10-01,,"NL-CW
-NL-SX
-CW
-SX",ANG,0,standard,Curacao and Sint Maarten (Dutch countries) are exempted of VAT.
-2012-10-01,,"NL-BQ1
-NL-BQ2
-NL-BQ3
-BQ
-BQ-BO
-BQ-SA
-BQ-SE",USD,0,standard,"Bonaire, Saba and Sint Eustatius (Dutch special municipalities) are exempted of VAT."
-2011-01-01,,PL,PLN,0.23,standard,Poland (member state) standard VAT rate.
-1993-01-08,2011-01-01,PL,PLN,0.22,standard,
-2011-01-04,,PN,NZD,0,standard,Pitcairn Islands (British overseas territory) is exempted of VAT.
-2011-01-01,,PT,EUR,0.23,standard,Portugal (member state) standard VAT rate.
-2010-07-01,2011-01-01,PT,EUR,0.21,standard,
-2008-07-01,2010-07-01,PT,EUR,0.2,standard,
-2005-07-01,2008-07-01,PT,EUR,0.21,standard,
-2002-06-05,2005-07-01,PT,EUR,0.19,standard,
-1995-01-01,2002-06-05,PT,EUR,0.17,standard,
-1992-03-24,1995-01-01,PT,EUR,0.16,standard,
-1988-02-01,1992-03-24,PT,EUR,0.17,standard,
-1986-01-01,1988-02-01,PT,EUR,0.16,standard,
-2011-01-01,,PT-20,EUR,0.18,standard,Azores (Portuguese autonomous region) special VAT rate.
-2011-01-01,,PT-30,EUR,0.22,standard,Madeira (Portuguese autonomous region) special VAT rate.
-2017-01-01,,RO,RON,0.19,standard,Romania (member state) standard VAT rate.
-2016-01-01,2017-01-01,RO,RON,0.2,standard,Romania (member state) standard VAT rate.
-2010-07-01,2016-01-01,RO,RON,0.24,standard,
-2000-01-01,2010-07-01,RO,RON,0.19,standard,
-1998-02-01,2000-01-01,RO,RON,0.22,standard,
-1993-07-01,1998-02-01,RO,RON,0.18,standard,
-1990-07-01,,SE,SEK,0.25,standard,Sweden (member state) standard VAT rate.
-1983-01-01,1990-07-01,SE,SEK,0.2346,standard,
-1981-11-16,1983-01-01,SE,SEK,0.2151,standard,
-1980-09-08,1981-11-16,SE,SEK,0.2346,standard,
-1977-06-01,1980-09-08,SE,SEK,0.2063,standard,
-1971-01-01,1977-06-01,SE,SEK,0.1765,standard,
-1969-01-01,1971-01-01,SE,SEK,0.1111,standard,
-2011-01-04,,"AC
-SH
-SH-AC
-SH-HL",SHP,0,standard,Ascension and Saint Helena (British overseas territory) is exempted of VAT.
-2011-01-04,,"TA
-SH-TA",GBP,0,standard,Tristan da Cunha (British oversea territory) is exempted of VAT.
-2013-07-01,,SI,EUR,0.22,standard,Slovenia (member state) standard VAT rate.
-2002-01-01,2013-07-01,SI,EUR,0.2,standard,
-1999-07-01,2002-01-01,SI,EUR,0.19,standard,
-2011-01-01,,SK,EUR,0.2,standard,Slovakia (member state) standard VAT rate.
-2004-01-01,2011-01-01,SK,EUR,0.19,standard,
-2003-01-01,2004-01-01,SK,EUR,0.2,standard,
-1996-01-01,2003-01-01,SK,EUR,0.23,standard,
-1993-08-01,1996-01-01,SK,EUR,0.25,standard,
-1993-01-01,1993-08-01,SK,EUR,0.23,standard,
-2011-01-04,,TC,USD,0,standard,Turks and Caicos Islands (British overseas territory) is exempted of VAT.
-2011-01-04,,"GB
-UK
-IM",GBP,0.2,standard,"United Kingdom (member state) standard VAT rate.
-Isle of Man (British self-governing dependency) is member of the EU VAT area and subjected to UK's standard VAT rate."
-2010-01-01,2011-01-04,"GB
-UK
-IM",GBP,0.175,standard,
-2008-12-01,2010-01-01,"GB
-UK
-IM",GBP,0.15,standard,
-1991-04-01,2008-12-01,"GB
-UK
-IM",GBP,0.175,standard,
-1979-06-18,1991-04-01,"GB
-UK
-IM",GBP,0.15,standard,
-1974-07-29,1979-06-18,"GB
-UK
-IM",GBP,0.08,standard,
-1973-04-01,1974-07-29,"GB
-UK
-IM",GBP,0.1,standard,
-2011-01-04,,VG,USD,0,standard,British Virgin Islands (British overseas territory) is exempted of VAT.
-2014-01-01,,CP,EUR,0,standard,Clipperton Island (French overseas possession) is exempted of VAT.
-2019-11-15,,CH,CHF,0.077,standard,Switzerland standard VAT (added manually)
-2019-11-15,,MC,EUR,0.196,standard,Monaco standard VAT (added manually)
-2019-11-15,,FR,EUR,0.2,standard,France standard VAT (added manually)
-2019-11-15,,GR,EUR,0.24,standard,Greece standard VAT (added manually)
-2019-11-15,,GB,EUR,0.2,standard,UK standard VAT (added manually)
-2019-12-17,,AD,EUR,0.045,standard,Andorra standard VAT (added manually)
-2019-12-17,,TK,EUR,0.18,standard,Turkey standard VAT (added manually)
-2019-12-17,,IS,EUR,0.24,standard,Iceland standard VAT (added manually)
-2019-12-17,,FX,EUR,0.20,standard,France metropolitan standard VAT (added manually)
-2020-01-04,,CY,EUR,0.19,standard,Cyprus standard VAT (added manually)
-2019-01-04,,IL,EUR,0.23,standard,Ireland standard VAT (added manually)
-2019-01-04,,LI,EUR,0.077,standard,Liechtenstein standard VAT (added manually)
diff --git a/scripts/ucloud b/scripts/ucloud
new file mode 100755
index 0000000..7be6b24
--- /dev/null
+++ b/scripts/ucloud
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+
+import argparse
+import multiprocessing as mp
+import logging
+
+from os.path import join as join_path
+from ucloud.sanity_checks import check
+
+if __name__ == "__main__":
+ arg_parser = argparse.ArgumentParser(prog='ucloud',
+ description='Open Source Cloud Management Software')
+ arg_parser.add_argument('component',
+ choices=['api', 'scheduler', 'host',
+ 'filescanner', 'imagescanner',
+ 'metadata'])
+ arg_parser.add_argument('component_args', nargs='*')
+ args = arg_parser.parse_args()
+
+ logging.basicConfig(
+ level=logging.DEBUG,
+ filename=join_path("/", "etc", "ucloud", "log.txt"),
+ filemode="a",
+ format="%(name)s %(asctime)s: %(levelname)s - %(message)s",
+ datefmt="%d-%b-%y %H:%M:%S",
+ )
+ try:
+ check()
+
+ if args.component == 'api':
+ from ucloud.api.main import main
+
+ main()
+ elif args.component == 'host':
+ from ucloud.host.main import main
+
+ hostname = args.component_args
+ mp.set_start_method('spawn')
+ main(*hostname)
+ elif args.component == 'scheduler':
+ from ucloud.scheduler.main import main
+
+ main()
+ elif args.component == 'filescanner':
+ from ucloud.filescanner.main import main
+
+ main()
+ elif args.component == 'imagescanner':
+ from ucloud.imagescanner.main import main
+
+ main()
+ elif args.component == 'metadata':
+ from ucloud.metadata.main import main
+
+ main()
+
+ except Exception as e:
+ logging.exception(e)
+ print(e)
\ No newline at end of file
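The new scripts/ucloud entry point picks a component from its first positional argument and hands any remaining arguments to that component's main(). Assuming it is installed on PATH (setup.py below lists it under scripts=), typical invocations would look like the following; the hostname is a placeholder:

```
ucloud api                 # start the REST API
ucloud scheduler           # start the scheduler
ucloud host <hostname>     # start the host agent, passing the hostname through
```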
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..9a35f27
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,35 @@
+from setuptools import setup, find_packages
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
+setup(name='ucloud',
+ version='0.0.1',
+ description='All ucloud server components.',
+ url='https://code.ungleich.ch/ucloud/ucloud',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
+ classifiers=[
+ 'Development Status :: 3 - Alpha',
+ 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
+ 'Programming Language :: Python :: 3'
+ ],
+ author='ungleich',
+ author_email='technik@ungleich.ch',
+ packages=find_packages(),
+ install_requires=[
+ 'requests',
+ 'python-decouple',
+ 'flask',
+ 'flask-restful',
+ 'bitmath',
+ 'pyotp',
+ 'sshtunnel',
+ 'sphinx',
+ 'pynetbox',
+ 'sphinx-rtd-theme',
+ 'etcd3_wrapper @ https://code.ungleich.ch/ungleich-public/etcd3_wrapper/repository/master/archive.tar.gz#egg=etcd3_wrapper',
+ 'etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3',
+ ],
+ scripts=['scripts/ucloud'],
+ zip_safe=False)
diff --git a/archive/uncloud_etcd_based/docs/__init__.py b/ucloud/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/docs/__init__.py
rename to ucloud/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/api/README.md b/ucloud/api/README.md
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/api/README.md
rename to ucloud/api/README.md
diff --git a/archive/uncloud_etcd_based/uncloud/api/__init__.py b/ucloud/api/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/api/__init__.py
rename to ucloud/api/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/api/common_fields.py b/ucloud/api/common_fields.py
similarity index 72%
rename from archive/uncloud_etcd_based/uncloud/api/common_fields.py
rename to ucloud/api/common_fields.py
index ba9fb37..e9903ac 100755
--- a/archive/uncloud_etcd_based/uncloud/api/common_fields.py
+++ b/ucloud/api/common_fields.py
@@ -1,6 +1,6 @@
import os
-from uncloud.common.shared import shared
+from ucloud.config import etcd_client, env_vars
class Optional:
@@ -19,16 +19,12 @@ class Field:
def is_valid(self):
if self.value == KeyError:
- self.add_error(
- "'{}' field is a required field".format(self.name)
- )
+ self.add_error("'{}' field is a required field".format(self.name))
else:
if isinstance(self.value, Optional):
pass
elif not isinstance(self.value, self.type):
- self.add_error(
- "Incorrect Type for '{}' field".format(self.name)
- )
+ self.add_error("Incorrect Type for '{}' field".format(self.name))
else:
self.validation()
@@ -52,8 +48,6 @@ class VmUUIDField(Field):
self.validation = self.vm_uuid_validation
def vm_uuid_validation(self):
- r = shared.etcd_client.get(
- os.path.join(shared.settings["etcd"]["vm_prefix"], self.uuid)
- )
+ r = etcd_client.get(os.path.join(env_vars.get('VM_PREFIX'), self.uuid))
if not r:
self.add_error("VM with uuid {} does not exists".format(self.uuid))
diff --git a/ucloud/api/create_image_store.py b/ucloud/api/create_image_store.py
new file mode 100755
index 0000000..17fa63c
--- /dev/null
+++ b/ucloud/api/create_image_store.py
@@ -0,0 +1,16 @@
+import json
+import os
+
+from uuid import uuid4
+
+from ucloud.config import etcd_client, env_vars
+
+data = {
+ "is_public": True,
+ "type": "ceph",
+ "name": "images",
+ "description": "first ever public image-store",
+ "attributes": {"list": [], "key": [], "pool": "images"},
+}
+
+etcd_client.put(os.path.join(env_vars.get('IMAGE_STORE_PREFIX'), uuid4().hex), json.dumps(data))
diff --git a/archive/uncloud_etcd_based/uncloud/api/helper.py b/ucloud/api/helper.py
similarity index 55%
rename from archive/uncloud_etcd_based/uncloud/api/helper.py
rename to ucloud/api/helper.py
index 8ceb3a6..63d2f90 100755
--- a/archive/uncloud_etcd_based/uncloud/api/helper.py
+++ b/ucloud/api/helper.py
@@ -1,51 +1,48 @@
import binascii
import ipaddress
import random
-import logging
+import subprocess as sp
+
import requests
from pyotp import TOTP
-from uncloud.common.shared import shared
-
-logger = logging.getLogger(__name__)
+from ucloud.config import vm_pool, env_vars
def check_otp(name, realm, token):
try:
data = {
- "auth_name": shared.settings["otp"]["auth_name"],
- "auth_token": TOTP(shared.settings["otp"]["auth_seed"]).now(),
- "auth_realm": shared.settings["otp"]["auth_realm"],
+ "auth_name": env_vars.get("AUTH_NAME"),
+ "auth_token": TOTP(env_vars.get("AUTH_SEED")).now(),
+ "auth_realm": env_vars.get("AUTH_REALM"),
"name": name,
"realm": realm,
"token": token,
}
- except binascii.Error as err:
- logger.error(
- "Cannot compute OTP for seed: {}".format(
- shared.settings["otp"]["auth_seed"]
- )
- )
+ except binascii.Error:
return 400
response = requests.post(
- shared.settings["otp"]["verification_controller_url"], json=data
+ "{OTP_SERVER}{OTP_VERIFY_ENDPOINT}".format(
+ OTP_SERVER=env_vars.get("OTP_SERVER", ""),
+ OTP_VERIFY_ENDPOINT=env_vars.get("OTP_VERIFY_ENDPOINT", "verify/"),
+ ),
+ json=data,
)
return response.status_code
def resolve_vm_name(name, owner):
"""Return UUID of Virtual Machine of name == name and owner == owner
-
+
Input: name of vm, owner of vm.
Output: uuid of vm if found otherwise None
"""
result = next(
filter(
- lambda vm: vm.value["owner"] == owner
- and vm.value["name"] == name,
- shared.vm_pool.vms,
+ lambda vm: vm.value["owner"] == owner and vm.value["name"] == name,
+ vm_pool.vms,
),
None,
)
@@ -57,7 +54,7 @@ def resolve_vm_name(name, owner):
def resolve_image_name(name, etcd_client):
"""Return image uuid given its name and its store
-
+
* If the provided name is not in correct format
i.e {store_name}:{image_name} return ValueError
* If no such image found then return KeyError
@@ -73,35 +70,26 @@ def resolve_image_name(name, etcd_client):
"""
Examples, where it would work and where it would raise exception
"images:alpine" --> ["images", "alpine"]
-
+
"images" --> ["images"] it would raise Exception as non enough value to unpack
-
+
"images:alpine:meow" --> ["images", "alpine", "meow"] it would raise Exception
as too many values to unpack
"""
store_name, image_name = store_name_and_image_name
except Exception:
- raise ValueError(
- "Image name not in correct format i.e {store_name}:{image_name}"
- )
+ raise ValueError("Image name not in correct format i.e {store_name}:{image_name}")
- images = etcd_client.get_prefix(
- shared.settings["etcd"]["image_prefix"], value_in_json=True
- )
+ images = etcd_client.get_prefix(env_vars.get('IMAGE_PREFIX'), value_in_json=True)
# Try to find image with name == image_name and store_name == store_name
try:
- image = next(
- filter(
- lambda im: im.value["name"] == image_name
- and im.value["store_name"] == store_name,
- images,
- )
- )
+ image = next(filter(lambda im: im.value['name'] == image_name
+ and im.value['store_name'] == store_name, images))
except StopIteration:
raise KeyError("No image with name {} found.".format(name))
else:
- image_uuid = image.key.split("/")[-1]
+ image_uuid = image.key.split('/')[-1]
return image_uuid
@@ -110,7 +98,7 @@ def random_bytes(num=6):
return [random.randrange(256) for _ in range(num)]
-def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"):
+def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='%02x'):
mac = random_bytes()
if oui:
if type(oui) == str:
@@ -128,6 +116,36 @@ def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="
return separator.join(byte_fmt % b for b in mac)
+def get_ip_addr(mac_address, device):
+ """Return IP address of a device provided its mac address / link local address
+ and the device with which it is connected.
+
+ For Example, if we call get_ip_addr(mac_address="52:54:00:12:34:56", device="br0")
+ the following two scenarios can happen
+ 1. It would return None if we can't find a device whose mac_address is equal
+ to the arg:mac_address, or the mentioned arg:device does not exist, or the ip address
+ we found is local.
+ 2. It would return ip_address of device whose mac_address is equal to arg:mac_address
+ and is connected/neighbor of arg:device
+ """
+ try:
+ output = sp.check_output(['ip', '-6', 'neigh', 'show', 'dev', device], stderr=sp.PIPE)
+ except sp.CalledProcessError:
+ return None
+ else:
+ result = []
+ output = output.strip().decode("utf-8")
+ output = output.split("\n")
+ for entry in output:
+ entry = entry.split()
+ if entry:
+ ip = ipaddress.ip_address(entry[0])
+ mac = entry[2]
+ if ip.is_global and mac_address == mac:
+ result.append(ip)
+ return result
+
+
def mac2ipv6(mac, prefix):
# only accept MACs separated by a colon
parts = mac.split(":")
@@ -140,9 +158,8 @@ def mac2ipv6(mac, prefix):
# format output
ipv6_parts = [str(0)] * 4
for i in range(0, len(parts), 2):
- ipv6_parts.append("".join(parts[i : i + 2]))
+ ipv6_parts.append("".join(parts[i:i + 2]))
lower_part = ipaddress.IPv6Address(":".join(ipv6_parts))
prefix = ipaddress.IPv6Address(prefix)
return str(prefix + int(lower_part))
-
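The tail of mac2ipv6 visible above appends the MAC-derived bytes to four zero groups and adds the result to the prefix; the middle of the function falls outside this hunk, but its shape matches the standard EUI-64 construction. A self-contained sketch of that standard construction (an assumption about intent, not a copy of the elided lines):

```python
import ipaddress

def mac_to_eui64_address(mac: str, prefix: str) -> str:
    """Classic EUI-64: flip the universal/local bit, insert ff:fe, add to prefix."""
    octets = [int(part, 16) for part in mac.split(":")]
    octets[0] ^= 0x02                                  # universal/local bit flip
    eui64 = octets[:3] + [0xFF, 0xFE] + octets[3:]     # 52:54:00:12:34:56 -> 50:54:00:ff:fe:12:34:56
    interface_id = int.from_bytes(bytes(eui64), "big")
    return str(ipaddress.IPv6Address(int(ipaddress.IPv6Address(prefix)) + interface_id))

print(mac_to_eui64_address("52:54:00:12:34:56", "fd00::"))  # fd00::5054:ff:fe12:3456
```

This mirrors how the VmStatus resource in main.py (added later in this diff) combines a NIC's MAC with the network's IPv6 prefix to report VM addresses.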
diff --git a/ucloud/api/main.py b/ucloud/api/main.py
new file mode 100644
index 0000000..1475fb0
--- /dev/null
+++ b/ucloud/api/main.py
@@ -0,0 +1,517 @@
+import json
+import pynetbox
+
+from uuid import uuid4
+from os.path import join as join_path
+
+from flask import Flask, request
+from flask_restful import Resource, Api
+
+from ucloud.common import counters
+from ucloud.common.vm import VMStatus
+from ucloud.common.request import RequestEntry, RequestType
+from ucloud.config import (etcd_client, request_pool, vm_pool, host_pool, env_vars, image_storage_handler)
+from . import schemas
+from .helper import generate_mac, mac2ipv6
+from . import logger
+
+app = Flask(__name__)
+api = Api(app)
+
+
+class CreateVM(Resource):
+ """API Request to Handle Creation of VM"""
+
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateVMSchema(data)
+ if validator.is_valid():
+ vm_uuid = uuid4().hex
+ vm_key = join_path(env_vars.get("VM_PREFIX"), vm_uuid)
+ specs = {
+ "cpu": validator.specs["cpu"],
+ "ram": validator.specs["ram"],
+ "os-ssd": validator.specs["os-ssd"],
+ "hdd": validator.specs["hdd"],
+ }
+ macs = [generate_mac() for _ in range(len(data["network"]))]
+ tap_ids = [counters.increment_etcd_counter(etcd_client, "/v1/counter/tap")
+ for _ in range(len(data["network"]))]
+ vm_entry = {
+ "name": data["vm_name"],
+ "owner": data["name"],
+ "owner_realm": data["realm"],
+ "specs": specs,
+ "hostname": "",
+ "status": VMStatus.stopped,
+ "image_uuid": validator.image_uuid,
+ "log": [],
+ "vnc_socket": "",
+ "network": list(zip(data["network"], macs, tap_ids)),
+ "metadata": {"ssh-keys": []},
+ }
+ etcd_client.put(vm_key, vm_entry, value_in_json=True)
+
+ # Create ScheduleVM Request
+ r = RequestEntry.from_scratch(
+ type=RequestType.ScheduleVM, uuid=vm_uuid,
+ request_prefix=env_vars.get("REQUEST_PREFIX")
+ )
+ request_pool.put(r)
+
+ return {"message": "VM Creation Queued"}, 200
+ return validator.get_errors(), 400
+
+
+class VmStatus(Resource):
+ @staticmethod
+ def get():
+ data = request.json
+ validator = schemas.VMStatusSchema(data)
+ if validator.is_valid():
+ vm = vm_pool.get(
+ join_path(env_vars.get("VM_PREFIX"), data["uuid"])
+ )
+ vm_value = vm.value.copy()
+ vm_value["ip"] = []
+ for network_mac_and_tap in vm.network:
+ network_name, mac, tap = network_mac_and_tap
+ network = etcd_client.get(
+ join_path(
+ env_vars.get("NETWORK_PREFIX"),
+ data["name"],
+ network_name,
+ ),
+ value_in_json=True,
+ )
+ ipv6_addr = network.value.get("ipv6").split("::")[0] + "::"
+ vm_value["ip"].append(mac2ipv6(mac, ipv6_addr))
+ vm.value = vm_value
+ return vm.value
+ else:
+ return validator.get_errors(), 400
+
+
+class CreateImage(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateImageSchema(data)
+ if validator.is_valid():
+ file_entry = etcd_client.get(
+ join_path(env_vars.get("FILE_PREFIX"), data["uuid"])
+ )
+ file_entry_value = json.loads(file_entry.value)
+
+ image_entry_json = {
+ "status": "TO_BE_CREATED",
+ "owner": file_entry_value["owner"],
+ "filename": file_entry_value["filename"],
+ "name": data["name"],
+ "store_name": data["image_store"],
+ "visibility": "public",
+ }
+ etcd_client.put(
+ join_path(env_vars.get("IMAGE_PREFIX"), data["uuid"]),
+ json.dumps(image_entry_json),
+ )
+
+ return {"message": "Image queued for creation."}
+ return validator.get_errors(), 400
+
+
+class ListPublicImages(Resource):
+ @staticmethod
+ def get():
+ images = etcd_client.get_prefix(
+ env_vars.get("IMAGE_PREFIX"), value_in_json=True
+ )
+ r = {
+ "images": []
+ }
+ for image in images:
+ image_key = "{}:{}".format(
+ image.value["store_name"], image.value["name"]
+ )
+ r["images"].append(
+ {"name": image_key, "status": image.value["status"]}
+ )
+ return r, 200
+
+
+class VMAction(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.VmActionSchema(data)
+
+ if validator.is_valid():
+ vm_entry = vm_pool.get(
+ join_path(env_vars.get("VM_PREFIX"), data["uuid"])
+ )
+ action = data["action"]
+
+ if action == "start":
+ action = "schedule"
+
+ if action == "delete" and vm_entry.hostname == "":
+ if image_storage_handler.is_vm_image_exists(vm_entry.uuid):
+ r_status = image_storage_handler.delete_vm_image(vm_entry.uuid)
+ if r_status:
+ etcd_client.client.delete(vm_entry.key)
+ return {"message": "VM successfully deleted"}
+ else:
+ logger.error("Some Error Occurred while deleting VM")
+ return {"message": "VM deletion unsuccessfull"}
+ else:
+ etcd_client.client.delete(vm_entry.key)
+ return {"message": "VM successfully deleted"}
+
+ r = RequestEntry.from_scratch(
+ type="{}VM".format(action.title()),
+ uuid=data["uuid"],
+ hostname=vm_entry.hostname,
+ request_prefix=env_vars.get("REQUEST_PREFIX")
+ )
+ request_pool.put(r)
+ return {"message": "VM {} Queued".format(action.title())}, 200
+ else:
+ return validator.get_errors(), 400
+
+
+class VMMigration(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.VmMigrationSchema(data)
+
+ if validator.is_valid():
+ vm = vm_pool.get(data["uuid"])
+
+ r = RequestEntry.from_scratch(
+ type=RequestType.ScheduleVM,
+ uuid=vm.uuid,
+ destination=join_path(
+ env_vars.get("HOST_PREFIX"), validator.destination.value
+ ),
+ migration=True,
+ request_prefix=env_vars.get("REQUEST_PREFIX")
+ )
+ request_pool.put(r)
+ return {"message": "VM Migration Initialization Queued"}, 200
+ else:
+ return validator.get_errors(), 400
+
+
+class ListUserVM(Resource):
+ @staticmethod
+ def get():
+ data = request.json
+ validator = schemas.OTPSchema(data)
+
+ if validator.is_valid():
+ vms = etcd_client.get_prefix(
+ env_vars.get("VM_PREFIX"), value_in_json=True
+ )
+ return_vms = []
+ user_vms = filter(lambda v: v.value["owner"] == data["name"], vms)
+ for vm in user_vms:
+ return_vms.append(
+ {
+ "name": vm.value["name"],
+ "vm_uuid": vm.key.split("/")[-1],
+ "specs": vm.value["specs"],
+ "status": vm.value["status"],
+ "hostname": vm.value["hostname"],
+ # "mac": vm.value["mac"],
+ "vnc_socket": None
+ if vm.value.get("vnc_socket", None) is None
+ else vm.value["vnc_socket"],
+ }
+ )
+ if return_vms:
+ return {"message": return_vms}, 200
+ return {"message": "No VM found"}, 404
+
+ else:
+ return validator.get_errors(), 400
+
+
+class ListUserFiles(Resource):
+ @staticmethod
+ def get():
+ data = request.json
+ validator = schemas.OTPSchema(data)
+
+ if validator.is_valid():
+ files = etcd_client.get_prefix(
+ env_vars.get("FILE_PREFIX"), value_in_json=True
+ )
+ return_files = []
+ user_files = list(
+ filter(lambda f: f.value["owner"] == data["name"], files)
+ )
+ for file in user_files:
+ return_files.append(
+ {
+ "filename": file.value["filename"],
+ "uuid": file.key.split("/")[-1],
+ }
+ )
+ return {"message": return_files}, 200
+ else:
+ return validator.get_errors(), 400
+
+
+class CreateHost(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateHostSchema(data)
+ if validator.is_valid():
+ host_key = join_path(env_vars.get("HOST_PREFIX"), uuid4().hex)
+ host_entry = {
+ "specs": data["specs"],
+ "hostname": data["hostname"],
+ "status": "DEAD",
+ "last_heartbeat": "",
+ }
+ etcd_client.put(host_key, host_entry, value_in_json=True)
+
+ return {"message": "Host Created"}, 200
+
+ return validator.get_errors(), 400
+
+
+class ListHost(Resource):
+ @staticmethod
+ def get():
+ hosts = host_pool.hosts
+ r = {
+ host.key: {
+ "status": host.status,
+ "specs": host.specs,
+ "hostname": host.hostname,
+ }
+ for host in hosts
+ }
+ return r, 200
+
+
+class GetSSHKeys(Resource):
+ @staticmethod
+ def get():
+ data = request.json
+ validator = schemas.GetSSHSchema(data)
+ if validator.is_valid():
+ if not validator.key_name.value:
+
+ # {user_prefix}/{realm}/{name}/key/
+ etcd_key = join_path(
+ env_vars.get('USER_PREFIX'),
+ data["realm"],
+ data["name"],
+ "key",
+ )
+ etcd_entry = etcd_client.get_prefix(
+ etcd_key, value_in_json=True
+ )
+
+ keys = {
+ key.key.split("/")[-1]: key.value for key in etcd_entry
+ }
+ return {"keys": keys}
+ else:
+
+ # {user_prefix}/{realm}/{name}/key/{key_name}
+ etcd_key = join_path(
+ env_vars.get('USER_PREFIX'),
+ data["realm"],
+ data["name"],
+ "key",
+ data["key_name"],
+ )
+ etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
+
+ if etcd_entry:
+ return {
+ "keys": {
+ etcd_entry.key.split("/")[-1]: etcd_entry.value
+ }
+ }
+ else:
+ return {"keys": {}}
+ else:
+ return validator.get_errors(), 400
+
+
+class AddSSHKey(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.AddSSHSchema(data)
+ if validator.is_valid():
+
+ # {user_prefix}/{realm}/{name}/key/{key_name}
+ etcd_key = join_path(
+ env_vars.get("USER_PREFIX"),
+ data["realm"],
+ data["name"],
+ "key",
+ data["key_name"],
+ )
+ etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
+ if etcd_entry:
+ return {
+ "message": "Key with name '{}' already exists".format(
+ data["key_name"]
+ )
+ }
+ else:
+ # Key Not Found. It implies the user hasn't added any key yet.
+ etcd_client.put(etcd_key, data["key"], value_in_json=True)
+ return {"message": "Key added successfully"}
+ else:
+ return validator.get_errors(), 400
+
+
+class RemoveSSHKey(Resource):
+ @staticmethod
+ def get():
+ data = request.json
+ validator = schemas.RemoveSSHSchema(data)
+ if validator.is_valid():
+
+ # {user_prefix}/{realm}/{name}/key/{key_name}
+ etcd_key = join_path(
+ env_vars.get("USER_PREFIX"),
+ data["realm"],
+ data["name"],
+ "key",
+ data["key_name"],
+ )
+ etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
+ if etcd_entry:
+ etcd_client.client.delete(etcd_key)
+ return {"message": "Key successfully removed."}
+ else:
+ return {
+ "message": "No Key with name '{}' Exists at all.".format(
+ data["key_name"]
+ )
+ }
+ else:
+ return validator.get_errors(), 400
+
+
+class CreateNetwork(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateNetwork(data)
+
+ if validator.is_valid():
+
+ network_entry = {
+ "id": counters.increment_etcd_counter(
+ etcd_client, "/v1/counter/vxlan"
+ ),
+ "type": data["type"],
+ }
+ if validator.user.value:
+ nb = pynetbox.api(
+ url=env_vars.get("NETBOX_URL"),
+ token=env_vars.get("NETBOX_TOKEN"),
+ )
+ nb_prefix = nb.ipam.prefixes.get(
+ prefix=env_vars.get("PREFIX")
+ )
+
+ prefix = nb_prefix.available_prefixes.create(
+ data={
+ "prefix_length": env_vars.get(
+ "PREFIX_LENGTH", cast=int
+ ),
+ "description": '{}\'s network "{}"'.format(
+ data["name"], data["network_name"]
+ ),
+ "is_pool": True,
+ }
+ )
+ network_entry["ipv6"] = prefix["prefix"]
+ else:
+ network_entry["ipv6"] = "fd00::/64"
+
+ network_key = join_path(
+ env_vars.get("NETWORK_PREFIX"),
+ data["name"],
+ data["network_name"],
+ )
+ etcd_client.put(network_key, network_entry, value_in_json=True)
+ return {"message": "Network successfully added."}
+ else:
+ return validator.get_errors(), 400
+
+
+class ListUserNetwork(Resource):
+ @staticmethod
+ def get():
+ data = request.json
+ validator = schemas.OTPSchema(data)
+
+ if validator.is_valid():
+ prefix = join_path(
+ env_vars.get("NETWORK_PREFIX"), data["name"]
+ )
+ networks = etcd_client.get_prefix(prefix, value_in_json=True)
+ user_networks = []
+ for net in networks:
+ net.value["name"] = net.key.split("/")[-1]
+ user_networks.append(net.value)
+ return {"networks": user_networks}, 200
+ else:
+ return validator.get_errors(), 400
+
+
+api.add_resource(CreateVM, "/vm/create")
+api.add_resource(VmStatus, "/vm/status")
+
+api.add_resource(VMAction, "/vm/action")
+api.add_resource(VMMigration, "/vm/migrate")
+
+api.add_resource(CreateImage, "/image/create")
+api.add_resource(ListPublicImages, "/image/list-public")
+
+api.add_resource(ListUserVM, "/user/vms")
+api.add_resource(ListUserFiles, "/user/files")
+api.add_resource(ListUserNetwork, "/user/networks")
+
+api.add_resource(AddSSHKey, "/user/add-ssh")
+api.add_resource(RemoveSSHKey, "/user/remove-ssh")
+api.add_resource(GetSSHKeys, "/user/get-ssh")
+
+api.add_resource(CreateHost, "/host/create")
+api.add_resource(ListHost, "/host/list")
+
+api.add_resource(CreateNetwork, "/network/create")
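+
+# Illustrative client sketch (an assumption for documentation, not code used
+# above): each endpoint expects a JSON body that its schema validates. For the
+# SSH key endpoints that means the OTP fields (name, realm, token) plus
+# key_name/key. Host, port and the values below are placeholders; Flask's
+# development server defaults to port 5000 when app.run() is given no port.
+#
+#   curl -X POST http://localhost:5000/user/add-ssh \
+#        -H 'Content-Type: application/json' \
+#        -d '{"name": "alice", "realm": "ungleich", "token": "123456",
+#             "key_name": "laptop", "key": "ssh-ed25519 AAAA... alice@laptop"}'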
+
+
+def main():
+ image_stores = list(etcd_client.get_prefix(env_vars.get('IMAGE_STORE_PREFIX'), value_in_json=True))
+ if len(image_stores) == 0:
+ data = {
+ "is_public": True,
+ "type": "ceph",
+ "name": "images",
+ "description": "first ever public image-store",
+ "attributes": {"list": [], "key": [], "pool": "images"},
+ }
+
+ etcd_client.put(join_path(env_vars.get('IMAGE_STORE_PREFIX'), uuid4().hex), json.dumps(data))
+
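+ # Listening on "::" serves IPv6 (and usually IPv4 as well on dual-stack
+ # systems); debug=True is only appropriate for development deployments.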
+ app.run(host="::", debug=True)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/archive/uncloud_etcd_based/uncloud/api/schemas.py b/ucloud/api/schemas.py
similarity index 58%
rename from archive/uncloud_etcd_based/uncloud/api/schemas.py
rename to ucloud/api/schemas.py
index 87f20c9..c4f60ca 100755
--- a/archive/uncloud_etcd_based/uncloud/api/schemas.py
+++ b/ucloud/api/schemas.py
@@ -1,6 +1,6 @@
"""
This module contains classes that validate and intercept/modify
-data coming from uncloud-cli (user)
+data coming from ucloud-cli (user)
It was primarily developed as an alternative to argument parser
of Flask_Restful which is going to be deprecated. I also tried
@@ -19,10 +19,10 @@ import os
import bitmath
-from uncloud.common.host import HostStatus
-from uncloud.common.vm import VMStatus
-from uncloud.common.shared import shared
-from . import helper, logger
+from ucloud.common.host import HostStatus
+from ucloud.common.vm import VMStatus
+from ucloud.config import etcd_client, env_vars, vm_pool, host_pool
+from . import helper
from .common_fields import Field, VmUUIDField
from .helper import check_otp, resolve_vm_name
@@ -79,12 +79,7 @@ class OTPSchema(BaseSchema):
super().__init__(data=data, fields=_fields)
def validation(self):
- if (
- check_otp(
- self.name.value, self.realm.value, self.token.value
- )
- != 200
- ):
+ if check_otp(self.name.value, self.realm.value, self.token.value) != 200:
self.add_error("Wrong Credentials")
@@ -96,9 +91,7 @@ class CreateImageSchema(BaseSchema):
# Fields
self.uuid = Field("uuid", str, data.get("uuid", KeyError))
self.name = Field("name", str, data.get("name", KeyError))
- self.image_store = Field(
- "image_store", str, data.get("image_store", KeyError)
- )
+ self.image_store = Field("image_store", str, data.get("image_store", KeyError))
# Validations
self.uuid.validation = self.file_uuid_validation
@@ -109,51 +102,34 @@ class CreateImageSchema(BaseSchema):
super().__init__(data, fields)
def file_uuid_validation(self):
- file_entry = shared.etcd_client.get(
- os.path.join(
- shared.settings["etcd"]["file_prefix"], self.uuid.value
- )
- )
+ file_entry = etcd_client.get(os.path.join(env_vars.get('FILE_PREFIX'), self.uuid.value))
if file_entry is None:
self.add_error(
- "Image File with uuid '{}' Not Found".format(
- self.uuid.value
- )
+ "Image File with uuid '{}' Not Found".format(self.uuid.value)
)
def image_store_name_validation(self):
- image_stores = list(
- shared.etcd_client.get_prefix(
- shared.settings["etcd"]["image_store_prefix"]
- )
- )
+ image_stores = list(etcd_client.get_prefix(env_vars.get('IMAGE_STORE_PREFIX')))
image_store = next(
filter(
- lambda s: json.loads(s.value)["name"]
- == self.image_store.value,
+ lambda s: json.loads(s.value)["name"] == self.image_store.value,
image_stores,
),
None,
)
if not image_store:
- self.add_error(
- "Store '{}' does not exists".format(
- self.image_store.value
- )
- )
+ self.add_error("Store '{}' does not exists".format(self.image_store.value))
# Host Operations
-
class CreateHostSchema(OTPSchema):
def __init__(self, data):
+ self.parsed_specs = {}
# Fields
self.specs = Field("specs", dict, data.get("specs", KeyError))
- self.hostname = Field(
- "hostname", str, data.get("hostname", KeyError)
- )
+ self.hostname = Field("hostname", str, data.get("hostname", KeyError))
# Validation
self.specs.validation = self.specs_validation
@@ -165,28 +141,22 @@ class CreateHostSchema(OTPSchema):
def specs_validation(self):
ALLOWED_BASE = 10
- _cpu = self.specs.value.get("cpu", KeyError)
- _ram = self.specs.value.get("ram", KeyError)
- _os_ssd = self.specs.value.get("os-ssd", KeyError)
- _hdd = self.specs.value.get("hdd", KeyError)
+ _cpu = self.specs.value.get('cpu', KeyError)
+ _ram = self.specs.value.get('ram', KeyError)
+ _os_ssd = self.specs.value.get('os-ssd', KeyError)
+ _hdd = self.specs.value.get('hdd', KeyError)
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
- self.add_error(
- "You must specify CPU, RAM and OS-SSD in your specs"
- )
+ self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
return None
try:
parsed_ram = bitmath.parse_string_unsafe(_ram)
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
if parsed_ram.base != ALLOWED_BASE:
- self.add_error(
- "Your specified RAM is not in correct units"
- )
+ self.add_error("Your specified RAM is not in correct units")
if parsed_os_ssd.base != ALLOWED_BASE:
- self.add_error(
- "Your specified OS-SSD is not in correct units"
- )
+ self.add_error("Your specified OS-SSD is not in correct units")
if _cpu < 1:
self.add_error("CPU must be atleast 1")
@@ -201,9 +171,7 @@ class CreateHostSchema(OTPSchema):
for hdd in _hdd:
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
if _parsed_hdd.base != ALLOWED_BASE:
- self.add_error(
- "Your specified HDD is not in correct units"
- )
+ self.add_error("Your specified HDD is not in correct units")
break
else:
parsed_hdd.append(str(_parsed_hdd))
@@ -214,17 +182,15 @@ class CreateHostSchema(OTPSchema):
else:
if self.get_errors():
self.specs = {
- "cpu": _cpu,
- "ram": str(parsed_ram),
- "os-ssd": str(parsed_os_ssd),
- "hdd": parsed_hdd,
+ 'cpu': _cpu,
+ 'ram': str(parsed_ram),
+ 'os-ssd': str(parsed_os_ssd),
+ 'hdd': parsed_hdd
}
def validation(self):
if self.realm.value != "ungleich-admin":
- self.add_error(
- "Invalid Credentials/Insufficient Permission"
- )
+ self.add_error("Invalid Credentials/Insufficient Permission")
# VM Operations
@@ -232,15 +198,13 @@ class CreateHostSchema(OTPSchema):
class CreateVMSchema(OTPSchema):
def __init__(self, data):
+ self.parsed_specs = {}
+
# Fields
self.specs = Field("specs", dict, data.get("specs", KeyError))
- self.vm_name = Field(
- "vm_name", str, data.get("vm_name", KeyError)
- )
+ self.vm_name = Field("vm_name", str, data.get("vm_name", KeyError))
self.image = Field("image", str, data.get("image", KeyError))
- self.network = Field(
- "network", list, data.get("network", KeyError)
- )
+ self.network = Field("network", list, data.get("network", KeyError))
# Validation
self.image.validation = self.image_validation
@@ -254,25 +218,16 @@ class CreateVMSchema(OTPSchema):
def image_validation(self):
try:
- image_uuid = helper.resolve_image_name(
- self.image.value, shared.etcd_client
- )
+ image_uuid = helper.resolve_image_name(self.image.value, etcd_client)
except Exception as e:
- logger.exception(
- "Cannot resolve image name = %s", self.image.value
- )
self.add_error(str(e))
else:
self.image_uuid = image_uuid
def vm_name_validation(self):
- if resolve_vm_name(
- name=self.vm_name.value, owner=self.name.value
- ):
+ if resolve_vm_name(name=self.vm_name.value, owner=self.name.value):
self.add_error(
- 'VM with same name "{}" already exists'.format(
- self.vm_name.value
- )
+ 'VM with same name "{}" already exists'.format(self.vm_name.value)
)
def network_validation(self):
@@ -280,48 +235,34 @@ class CreateVMSchema(OTPSchema):
if _network:
for net in _network:
- network = shared.etcd_client.get(
- os.path.join(
- shared.settings["etcd"]["network_prefix"],
- self.name.value,
- net,
- ),
- value_in_json=True,
- )
+ network = etcd_client.get(os.path.join(env_vars.get('NETWORK_PREFIX'),
+ self.name.value,
+ net), value_in_json=True)
if not network:
- self.add_error(
- "Network with name {} does not exists".format(
- net
- )
- )
+ self.add_error("Network with name {} does not exists" \
+ .format(net))
def specs_validation(self):
ALLOWED_BASE = 10
- _cpu = self.specs.value.get("cpu", KeyError)
- _ram = self.specs.value.get("ram", KeyError)
- _os_ssd = self.specs.value.get("os-ssd", KeyError)
- _hdd = self.specs.value.get("hdd", KeyError)
+ _cpu = self.specs.value.get('cpu', KeyError)
+ _ram = self.specs.value.get('ram', KeyError)
+ _os_ssd = self.specs.value.get('os-ssd', KeyError)
+ _hdd = self.specs.value.get('hdd', KeyError)
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
- self.add_error(
- "You must specify CPU, RAM and OS-SSD in your specs"
- )
+ self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
return None
try:
parsed_ram = bitmath.parse_string_unsafe(_ram)
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
if parsed_ram.base != ALLOWED_BASE:
- self.add_error(
- "Your specified RAM is not in correct units"
- )
+ self.add_error("Your specified RAM is not in correct units")
if parsed_os_ssd.base != ALLOWED_BASE:
- self.add_error(
- "Your specified OS-SSD is not in correct units"
- )
+ self.add_error("Your specified OS-SSD is not in correct units")
- if int(_cpu) < 1:
+ if _cpu < 1:
self.add_error("CPU must be atleast 1")
if parsed_ram < bitmath.GB(1):
@@ -334,9 +275,7 @@ class CreateVMSchema(OTPSchema):
for hdd in _hdd:
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
if _parsed_hdd.base != ALLOWED_BASE:
- self.add_error(
- "Your specified HDD is not in correct units"
- )
+ self.add_error("Your specified HDD is not in correct units")
break
else:
parsed_hdd.append(str(_parsed_hdd))
@@ -347,24 +286,21 @@ class CreateVMSchema(OTPSchema):
else:
if self.get_errors():
self.specs = {
- "cpu": _cpu,
- "ram": str(parsed_ram),
- "os-ssd": str(parsed_os_ssd),
- "hdd": parsed_hdd,
+ 'cpu': _cpu,
+ 'ram': str(parsed_ram),
+ 'os-ssd': str(parsed_os_ssd),
+ 'hdd': parsed_hdd
}
class VMStatusSchema(OTPSchema):
def __init__(self, data):
data["uuid"] = (
- resolve_vm_name(
- name=data.get("vm_name", None),
- owner=(
- data.get("in_support_of", None)
- or data.get("name", None)
- ),
- )
- or KeyError
+ resolve_vm_name(
+ name=data.get("vm_name", None),
+ owner=(data.get("in_support_of", None) or data.get("name", None)),
+ )
+ or KeyError
)
self.uuid = VmUUIDField(data)
@@ -373,10 +309,9 @@ class VMStatusSchema(OTPSchema):
super().__init__(data, fields)
def validation(self):
- vm = shared.vm_pool.get(self.uuid.value)
+ vm = vm_pool.get(self.uuid.value)
if not (
- vm.value["owner"] == self.name.value
- or self.realm.value == "ungleich-admin"
+ vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
):
self.add_error("Invalid User")
@@ -384,14 +319,11 @@ class VMStatusSchema(OTPSchema):
class VmActionSchema(OTPSchema):
def __init__(self, data):
data["uuid"] = (
- resolve_vm_name(
- name=data.get("vm_name", None),
- owner=(
- data.get("in_support_of", None)
- or data.get("name", None)
- ),
- )
- or KeyError
+ resolve_vm_name(
+ name=data.get("vm_name", None),
+ owner=(data.get("in_support_of", None) or data.get("name", None)),
+ )
+ or KeyError
)
self.uuid = VmUUIDField(data)
self.action = Field("action", str, data.get("action", KeyError))
@@ -406,23 +338,20 @@ class VmActionSchema(OTPSchema):
allowed_actions = ["start", "stop", "delete"]
if self.action.value not in allowed_actions:
self.add_error(
- "Invalid Action. Allowed Actions are {}".format(
- allowed_actions
- )
+ "Invalid Action. Allowed Actions are {}".format(allowed_actions)
)
def validation(self):
- vm = shared.vm_pool.get(self.uuid.value)
+ vm = vm_pool.get(self.uuid.value)
if not (
- vm.value["owner"] == self.name.value
- or self.realm.value == "ungleich-admin"
+ vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
):
self.add_error("Invalid User")
if (
- self.action.value == "start"
- and vm.status == VMStatus.running
- and vm.hostname != ""
+ self.action.value == "start"
+ and vm.status == VMStatus.running
+ and vm.hostname != ""
):
self.add_error("VM Already Running")
@@ -436,20 +365,15 @@ class VmActionSchema(OTPSchema):
class VmMigrationSchema(OTPSchema):
def __init__(self, data):
data["uuid"] = (
- resolve_vm_name(
- name=data.get("vm_name", None),
- owner=(
- data.get("in_support_of", None)
- or data.get("name", None)
- ),
- )
- or KeyError
+ resolve_vm_name(
+ name=data.get("vm_name", None),
+ owner=(data.get("in_support_of", None) or data.get("name", None)),
+ )
+ or KeyError
)
self.uuid = VmUUIDField(data)
- self.destination = Field(
- "destination", str, data.get("destination", KeyError)
- )
+ self.destination = Field("destination", str, data.get("destination", KeyError))
self.destination.validation = self.destination_validation
@@ -458,47 +382,31 @@ class VmMigrationSchema(OTPSchema):
def destination_validation(self):
hostname = self.destination.value
- host = next(
- filter(
- lambda h: h.hostname == hostname, shared.host_pool.hosts
- ),
- None,
- )
+ host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
if not host:
- self.add_error(
- "No Such Host ({}) exists".format(
- self.destination.value
- )
- )
+ self.add_error("No Such Host ({}) exists".format(self.destination.value))
elif host.status != HostStatus.alive:
self.add_error("Destination Host is dead")
else:
self.destination.value = host.key
def validation(self):
- vm = shared.vm_pool.get(self.uuid.value)
+ vm = vm_pool.get(self.uuid.value)
if not (
- vm.value["owner"] == self.name.value
- or self.realm.value == "ungleich-admin"
+ vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
):
self.add_error("Invalid User")
if vm.status != VMStatus.running:
self.add_error("Can't migrate non-running VM")
- if vm.hostname == os.path.join(
- shared.settings["etcd"]["host_prefix"], self.destination.value
- ):
- self.add_error(
- "Destination host couldn't be same as Source Host"
- )
+ if vm.hostname == os.path.join(env_vars.get('HOST_PREFIX'), self.destination.value):
+ self.add_error("Destination host couldn't be same as Source Host")
class AddSSHSchema(OTPSchema):
def __init__(self, data):
- self.key_name = Field(
- "key_name", str, data.get("key_name", KeyError)
- )
+ self.key_name = Field("key_name", str, data.get("key_name", KeyError))
self.key = Field("key", str, data.get("key_name", KeyError))
fields = [self.key_name, self.key]
@@ -507,9 +415,7 @@ class AddSSHSchema(OTPSchema):
class RemoveSSHSchema(OTPSchema):
def __init__(self, data):
- self.key_name = Field(
- "key_name", str, data.get("key_name", KeyError)
- )
+ self.key_name = Field("key_name", str, data.get("key_name", KeyError))
fields = [self.key_name]
super().__init__(data=data, fields=fields)
@@ -517,9 +423,7 @@ class RemoveSSHSchema(OTPSchema):
class GetSSHSchema(OTPSchema):
def __init__(self, data):
- self.key_name = Field(
- "key_name", str, data.get("key_name", None)
- )
+ self.key_name = Field("key_name", str, data.get("key_name", None))
fields = [self.key_name]
super().__init__(data=data, fields=fields)
@@ -538,20 +442,15 @@ class CreateNetwork(OTPSchema):
super().__init__(data, fields=fields)
def network_name_validation(self):
- key = os.path.join(shared.settings["etcd"]["network_prefix"], self.name.value, self.network_name.value)
- network = shared.etcd_client.get(key, value_in_json=True)
+ network = etcd_client.get(os.path.join(env_vars.get('NETWORK_PREFIX'),
+ self.name.value,
+ self.network_name.value),
+ value_in_json=True)
if network:
- self.add_error(
- "Network with name {} already exists".format(
- self.network_name.value
- )
- )
+ self.add_error("Network with name {} already exists" \
+ .format(self.network_name.value))
def network_type_validation(self):
supported_network_types = ["vxlan"]
if self.type.value not in supported_network_types:
- self.add_error(
- "Unsupported Network Type. Supported network types are {}".format(
- supported_network_types
- )
- )
+ self.add_error("Unsupported Network Type. Supported network types are {}".format(supported_network_types))
diff --git a/archive/uncloud_etcd_based/uncloud/common/__init__.py b/ucloud/common/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/common/__init__.py
rename to ucloud/common/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/common/classes.py b/ucloud/common/classes.py
similarity index 93%
rename from archive/uncloud_etcd_based/uncloud/common/classes.py
rename to ucloud/common/classes.py
index 29dffd4..2eae809 100644
--- a/archive/uncloud_etcd_based/uncloud/common/classes.py
+++ b/ucloud/common/classes.py
@@ -1,4 +1,4 @@
-from .etcd_wrapper import EtcdEntry
+from etcd3_wrapper import EtcdEntry
class SpecificEtcdEntryBase:
diff --git a/archive/uncloud_etcd_based/uncloud/common/counters.py b/ucloud/common/counters.py
similarity index 91%
rename from archive/uncloud_etcd_based/uncloud/common/counters.py
rename to ucloud/common/counters.py
index 2d4a8e9..066a870 100644
--- a/archive/uncloud_etcd_based/uncloud/common/counters.py
+++ b/ucloud/common/counters.py
@@ -1,4 +1,4 @@
-from .etcd_wrapper import Etcd3Wrapper
+from etcd3_wrapper import Etcd3Wrapper
def increment_etcd_counter(etcd_client: Etcd3Wrapper, key):
diff --git a/ucloud/common/helpers.py b/ucloud/common/helpers.py
new file mode 100644
index 0000000..1bdf0b4
--- /dev/null
+++ b/ucloud/common/helpers.py
@@ -0,0 +1,54 @@
+import logging
+import socket
+import requests
+import json
+
+from ipaddress import ip_address
+
+from os.path import join as join_path
+
+
+def create_package_loggers(packages, base_path, mode="a"):
+ loggers = {}
+ for pkg in packages:
+ logger = logging.getLogger(pkg)
+ logger_handler = logging.FileHandler(
+ join_path(base_path, "{}.txt".format(pkg)),
+ mode=mode
+ )
+ logger.setLevel(logging.DEBUG)
+ logger_handler.setFormatter(logging.Formatter(fmt="%(asctime)s: %(levelname)s - %(message)s",
+ datefmt="%d-%b-%y %H:%M:%S"))
+ logger.addHandler(logger_handler)
+ loggers[pkg] = logger
+
+
+# TODO: Should be removed as soon as migration
+# mechanism is finalized inside ucloud
+def get_ipv4_address():
+ # If host is connected to internet
+ # Return IPv4 address of machine
+ # Otherwise, return 127.0.0.1
+ with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
+ try:
+ s.connect(("8.8.8.8", 80))
+ except socket.timeout:
+ address = "127.0.0.1"
+ except Exception as e:
+ logging.getLogger().exception(e)
+ address = "127.0.0.1"
+ else:
+ address = s.getsockname()[0]
+
+ return address
+
+
+def get_ipv6_address():
+ try:
+ r = requests.get("https://api6.ipify.org?format=json")
+ content = json.loads(r.content.decode("utf-8"))
+ ip = ip_address(content["ip"]).exploded
+ except Exception as e:
+ logging.exception(e)
+ else:
+ return ip
diff --git a/archive/uncloud_etcd_based/uncloud/common/host.py b/ucloud/common/host.py
similarity index 85%
rename from archive/uncloud_etcd_based/uncloud/common/host.py
rename to ucloud/common/host.py
index f7bb7d5..ccbf7a8 100644
--- a/archive/uncloud_etcd_based/uncloud/common/host.py
+++ b/ucloud/common/host.py
@@ -7,7 +7,7 @@ from .classes import SpecificEtcdEntryBase
class HostStatus:
- """Possible Statuses of uncloud host."""
+ """Possible Statuses of ucloud host."""
alive = "ALIVE"
dead = "DEAD"
@@ -26,13 +26,11 @@ class HostEntry(SpecificEtcdEntryBase):
def update_heartbeat(self):
self.status = HostStatus.alive
- self.last_heartbeat = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
+ self.last_heartbeat = time.strftime("%Y-%m-%d %H:%M:%S")
def is_alive(self):
- last_heartbeat = datetime.strptime(
- self.last_heartbeat, "%Y-%m-%d %H:%M:%S"
- )
- delta = datetime.utcnow() - last_heartbeat
+ last_heartbeat = datetime.strptime(self.last_heartbeat, "%Y-%m-%d %H:%M:%S")
+ delta = datetime.now() - last_heartbeat
if delta.total_seconds() > 60:
return False
return True
diff --git a/archive/uncloud_etcd_based/uncloud/common/request.py b/ucloud/common/request.py
similarity index 74%
rename from archive/uncloud_etcd_based/uncloud/common/request.py
rename to ucloud/common/request.py
index cb0add5..cadac80 100644
--- a/archive/uncloud_etcd_based/uncloud/common/request.py
+++ b/ucloud/common/request.py
@@ -2,8 +2,9 @@ import json
from os.path import join
from uuid import uuid4
-from uncloud.common.etcd_wrapper import EtcdEntry
-from uncloud.common.classes import SpecificEtcdEntryBase
+from etcd3_wrapper.etcd3_wrapper import PsuedoEtcdEntry
+
+from .classes import SpecificEtcdEntryBase
class RequestType:
@@ -17,9 +18,8 @@ class RequestType:
class RequestEntry(SpecificEtcdEntryBase):
+
def __init__(self, e):
- self.destination_sock_path = None
- self.destination_host_key = None
self.type = None # type: str
self.migration = None # type: bool
self.destination = None # type: str
@@ -29,8 +29,8 @@ class RequestEntry(SpecificEtcdEntryBase):
@classmethod
def from_scratch(cls, request_prefix, **kwargs):
- e = EtcdEntry(meta_or_key=join(request_prefix, uuid4().hex),
- value=json.dumps(kwargs).encode('utf-8'), value_in_json=True)
+ e = PsuedoEtcdEntry(join(request_prefix, uuid4().hex),
+ value=json.dumps(kwargs).encode("utf-8"), value_in_json=True)
return cls(e)
diff --git a/archive/uncloud_etcd_based/uncloud/common/storage_handlers.py b/ucloud/common/storage_handlers.py
similarity index 63%
rename from archive/uncloud_etcd_based/uncloud/common/storage_handlers.py
rename to ucloud/common/storage_handlers.py
index 58c2dc2..8b1097a 100644
--- a/archive/uncloud_etcd_based/uncloud/common/storage_handlers.py
+++ b/ucloud/common/storage_handlers.py
@@ -6,20 +6,17 @@ import stat
from abc import ABC
from . import logger
from os.path import join as join_path
-import uncloud.common.shared as shared
class ImageStorageHandler(ABC):
- handler_name = "base"
-
def __init__(self, image_base, vm_base):
self.image_base = image_base
self.vm_base = vm_base
def import_image(self, image_src, image_dest, protect=False):
"""Put an image at the destination
- :param image_src: An Image file
- :param image_dest: A path where :param src: is to be put.
+ :param src: An Image file
+ :param dest: A path where :param src: is to be put.
:param protect: If protect is true then the dest is protect (readonly etc)
The obj must exist on filesystem.
"""
@@ -29,8 +26,8 @@ class ImageStorageHandler(ABC):
def make_vm_image(self, image_path, path):
"""Copy image from src to dest
- :param image_path: A path
- :param path: A path
+ :param src: A path
+ :param dest: A path
src and destination must be on same storage system i.e both on file system or both on CEPH etc.
"""
@@ -46,17 +43,14 @@ class ImageStorageHandler(ABC):
def delete_vm_image(self, path):
raise NotImplementedError()
- def execute_command(self, command, report=True, error_origin=None):
- if not error_origin:
- error_origin = self.handler_name
-
+ def execute_command(self, command, report=True):
command = list(map(str, command))
try:
- sp.check_output(command, stderr=sp.PIPE)
- except sp.CalledProcessError as e:
- _stderr = e.stderr.decode("utf-8").strip()
+ output = sp.check_output(command, stderr=sp.PIPE)
+ except Exception as e:
if report:
- logger.exception("%s:- %s", error_origin, _stderr)
+ print(e)
+ logger.exception(e)
return False
return True
@@ -71,16 +65,12 @@ class ImageStorageHandler(ABC):
class FileSystemBasedImageStorageHandler(ImageStorageHandler):
- handler_name = "Filesystem"
-
def import_image(self, src, dest, protect=False):
dest = join_path(self.image_base, dest)
try:
shutil.copy(src, dest)
if protect:
- os.chmod(
- dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
- )
+ os.chmod(dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
except Exception as e:
logger.exception(e)
return False
@@ -90,7 +80,7 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):
src = join_path(self.image_base, src)
dest = join_path(self.vm_base, dest)
try:
- shutil.copyfile(src, dest)
+ shutil.copy(src, dest)
except Exception as e:
logger.exception(e)
return False
@@ -98,14 +88,7 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):
def resize_vm_image(self, path, size):
path = join_path(self.vm_base, path)
- command = [
- "qemu-img",
- "resize",
- "-f",
- "raw",
- path,
- "{}M".format(size),
- ]
+ command = ["qemu-img", "resize", "-f", "raw", path, "{}M".format(size)]
if self.execute_command(command):
return True
else:
@@ -134,33 +117,17 @@ class FileSystemBasedImageStorageHandler(ImageStorageHandler):
class CEPHBasedImageStorageHandler(ImageStorageHandler):
- handler_name = "Ceph"
-
def import_image(self, src, dest, protect=False):
dest = join_path(self.image_base, dest)
- import_command = ["rbd", "import", src, dest]
- commands = [import_command]
+ command = ["rbd", "import", src, dest]
if protect:
- snap_create_command = [
- "rbd",
- "snap",
- "create",
- "{}@protected".format(dest),
- ]
- snap_protect_command = [
- "rbd",
- "snap",
- "protect",
- "{}@protected".format(dest),
- ]
- commands.append(snap_create_command)
- commands.append(snap_protect_command)
+ snap_create_command = ["rbd", "snap", "create", "{}@protected".format(dest)]
+ snap_protect_command = ["rbd", "snap", "protect", "{}@protected".format(dest)]
- result = True
- for command in commands:
- result = result and self.execute_command(command)
+ return self.execute_command(command) and self.execute_command(snap_create_command) and\
+ self.execute_command(snap_protect_command)
- return result
+ return self.execute_command(command)
def make_vm_image(self, src, dest):
src = join_path(self.image_base, src)
@@ -189,19 +156,3 @@ class CEPHBasedImageStorageHandler(ImageStorageHandler):
path = join_path(self.vm_base, path)
command = ["rbd", "info", path]
return self.execute_command(command, report=False)
-
-
-def get_storage_handler():
- __storage_backend = shared.shared.settings["storage"]["storage_backend"]
- if __storage_backend == "filesystem":
- return FileSystemBasedImageStorageHandler(
- vm_base=shared.shared.settings["storage"]["vm_dir"],
- image_base=shared.shared.settings["storage"]["image_dir"],
- )
- elif __storage_backend == "ceph":
- return CEPHBasedImageStorageHandler(
- vm_base=shared.shared.settings["storage"]["ceph_vm_pool"],
- image_base=shared.shared.settings["storage"]["ceph_image_pool"],
- )
- else:
- raise Exception("Unknown Image Storage Handler")
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/common/vm.py b/ucloud/common/vm.py
similarity index 92%
rename from archive/uncloud_etcd_based/uncloud/common/vm.py
rename to ucloud/common/vm.py
index d11046d..0fb5cea 100644
--- a/archive/uncloud_etcd_based/uncloud/common/vm.py
+++ b/ucloud/common/vm.py
@@ -12,13 +12,8 @@ class VMStatus:
error = "ERROR" # An error occurred that cannot be resolved automatically
-def declare_stopped(vm):
- vm["hostname"] = ""
- vm["in_migration"] = False
- vm["status"] = VMStatus.stopped
-
-
class VMEntry(SpecificEtcdEntryBase):
+
def __init__(self, e):
self.owner = None # type: str
self.specs = None # type: dict
@@ -47,9 +42,7 @@ class VMEntry(SpecificEtcdEntryBase):
def add_log(self, msg):
self.log = self.log[:5]
- self.log.append(
- "{} - {}".format(datetime.now().isoformat(), msg)
- )
+ self.log.append("{} - {}".format(datetime.now().isoformat(), msg))
class VmPool:
diff --git a/ucloud/config.py b/ucloud/config.py
new file mode 100644
index 0000000..7c141a3
--- /dev/null
+++ b/ucloud/config.py
@@ -0,0 +1,41 @@
+from etcd3_wrapper import Etcd3Wrapper
+
+from ucloud.common.host import HostPool
+from ucloud.common.request import RequestPool
+from ucloud.common.vm import VmPool
+from ucloud.common.storage_handlers import FileSystemBasedImageStorageHandler, CEPHBasedImageStorageHandler
+from decouple import Config, RepositoryEnv, RepositoryEmpty
+
+
+# Try importing config, but don't fail if it does not exist
+try:
+ env_vars = Config(RepositoryEnv('/etc/ucloud/ucloud.conf'))
+except FileNotFoundError:
+ env_vars = Config(RepositoryEmpty())
+
+
+etcd_wrapper_args = ()
+etcd_wrapper_kwargs = {
+ 'host': env_vars.get('ETCD_URL', 'localhost'),
+ 'port': env_vars.get('ETCD_PORT', 2379),
+ 'ca_cert': env_vars.get('CA_CERT', None),
+ 'cert_cert': env_vars.get('CERT_CERT', None),
+ 'cert_key': env_vars.get('CERT_KEY', None)
+}
+
+etcd_client = Etcd3Wrapper(*etcd_wrapper_args, **etcd_wrapper_kwargs)
+
+host_pool = HostPool(etcd_client, env_vars.get('HOST_PREFIX'))
+vm_pool = VmPool(etcd_client, env_vars.get('VM_PREFIX'))
+request_pool = RequestPool(etcd_client, env_vars.get('REQUEST_PREFIX'))
+
+running_vms = []
+
+__storage_backend = env_vars.get("STORAGE_BACKEND")
+if __storage_backend == "filesystem":
+ image_storage_handler = FileSystemBasedImageStorageHandler(vm_base=env_vars.get("VM_DIR"),
+ image_base=env_vars.get("IMAGE_DIR"))
+elif __storage_backend == "ceph":
+ image_storage_handler = CEPHBasedImageStorageHandler(vm_base="ssd", image_base="ssd")
+else:
+ raise Exception("Unknown Image Storage Handler")
diff --git a/archive/uncloud_etcd_based/docs/Makefile b/ucloud/docs/Makefile
similarity index 93%
rename from archive/uncloud_etcd_based/docs/Makefile
rename to ucloud/docs/Makefile
index 246b56c..5e7ea85 100644
--- a/archive/uncloud_etcd_based/docs/Makefile
+++ b/ucloud/docs/Makefile
@@ -7,7 +7,7 @@ SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source/
BUILDDIR = build/
-DESTINATION=root@staticweb.ungleich.ch:/home/services/www/ungleichstatic/staticcms.ungleich.ch/www/uncloud/
+DESTINATION=root@staticweb.ungleich.ch:/home/services/www/ungleichstatic/staticcms.ungleich.ch/www/ucloud/
.PHONY: all build clean
diff --git a/archive/uncloud_etcd_based/docs/source/__init__.py b/ucloud/docs/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/__init__.py
rename to ucloud/docs/__init__.py
diff --git a/archive/uncloud_etcd_based/test/__init__.py b/ucloud/docs/source/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/test/__init__.py
rename to ucloud/docs/source/__init__.py
diff --git a/archive/uncloud_etcd_based/docs/source/admin-guide.rst b/ucloud/docs/source/admin-guide
similarity index 72%
rename from archive/uncloud_etcd_based/docs/source/admin-guide.rst
rename to ucloud/docs/source/admin-guide
index b62808d..ec6597d 100644
--- a/archive/uncloud_etcd_based/docs/source/admin-guide.rst
+++ b/ucloud/docs/source/admin-guide
@@ -56,13 +56,40 @@ To start host we created earlier, execute the following command
ucloud host ungleich.ch
-File & image scanners
---------------------------
+Create OS Image
+---------------
-Let's assume we have uploaded an *alpine-uploaded.qcow2* disk images to our
-uncloud server. Currently, our *alpine-untouched.qcow2* is not tracked by
-ucloud. We can only make images from tracked files. So, we need to track the
-file by running File Scanner
+Create ucloud-init ready OS image (Optional)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This step is optional if you just want to test ucloud. However, sooner or later
+you want to create OS images with ucloud-init to properly
+contextualize VMs.
+
+1. Start a VM with OS image on which you want to install ucloud-init
+2. Execute the following command on the started VM
+
+ .. code-block:: sh
+
+ apk add git
+ git clone https://code.ungleich.ch/ucloud/ucloud-init.git
+ cd ucloud-init
+ sh ./install.sh
+3. Congratulations. Your image is now ucloud-init ready.
+
+
+Upload Sample OS Image
+~~~~~~~~~~~~~~~~~~~~~~
+Execute the following to get the sample OS image file.
+
+.. code-block:: sh
+
+ mkdir /var/www/admin
+ (cd /var/www/admin && wget https://cloud.ungleich.ch/s/qTb5dFYW5ii8KsD/download)
+
+Run File Scanner and Image Scanner
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Currently, our uploaded file *alpine-untouched.qcow2* is not tracked by ucloud. We can only make
+images from tracked files. So, we need to track the file by running File Scanner
.. code-block:: sh
diff --git a/archive/uncloud_etcd_based/docs/source/conf.py b/ucloud/docs/source/conf.py
similarity index 90%
rename from archive/uncloud_etcd_based/docs/source/conf.py
rename to ucloud/docs/source/conf.py
index c8138a7..9b133f9 100644
--- a/archive/uncloud_etcd_based/docs/source/conf.py
+++ b/ucloud/docs/source/conf.py
@@ -17,9 +17,9 @@
# -- Project information -----------------------------------------------------
-project = "uncloud"
-copyright = "2019, ungleich"
-author = "ungleich"
+project = 'ucloud'
+copyright = '2019, ungleich'
+author = 'ungleich'
# -- General configuration ---------------------------------------------------
@@ -27,12 +27,12 @@ author = "ungleich"
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
- "sphinx.ext.autodoc",
- "sphinx_rtd_theme",
+ 'sphinx.ext.autodoc',
+ 'sphinx_rtd_theme',
]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ["_templates"]
+templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -50,4 +50,4 @@ html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = ['_static']
diff --git a/archive/uncloud_etcd_based/docs/source/diagram-code/ucloud b/ucloud/docs/source/diagram-code/ucloud
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/diagram-code/ucloud
rename to ucloud/docs/source/diagram-code/ucloud
diff --git a/archive/uncloud_etcd_based/docs/source/images/ucloud.svg b/ucloud/docs/source/images/ucloud.svg
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/images/ucloud.svg
rename to ucloud/docs/source/images/ucloud.svg
diff --git a/archive/uncloud_etcd_based/docs/source/index.rst b/ucloud/docs/source/index.rst
similarity index 90%
rename from archive/uncloud_etcd_based/docs/source/index.rst
rename to ucloud/docs/source/index.rst
index fad1f88..879ac32 100644
--- a/archive/uncloud_etcd_based/docs/source/index.rst
+++ b/ucloud/docs/source/index.rst
@@ -11,12 +11,12 @@ Welcome to ucloud's documentation!
:caption: Contents:
introduction
- setup-install
- vm-images
user-guide
+ setup-install
admin-guide
+ user-guide/how-to-create-an-os-image-for-ucloud
troubleshooting
- hacking
+
Indices and tables
==================
diff --git a/archive/uncloud_etcd_based/docs/source/introduction.rst b/ucloud/docs/source/introduction.rst
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/introduction.rst
rename to ucloud/docs/source/introduction.rst
diff --git a/archive/uncloud_etcd_based/docs/source/misc/todo.rst b/ucloud/docs/source/misc/todo.rst
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/misc/todo.rst
rename to ucloud/docs/source/misc/todo.rst
diff --git a/archive/uncloud_etcd_based/docs/source/setup-install.rst b/ucloud/docs/source/setup-install.rst
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/setup-install.rst
rename to ucloud/docs/source/setup-install.rst
diff --git a/archive/uncloud_etcd_based/docs/source/theory/summary.rst b/ucloud/docs/source/theory/summary.rst
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/theory/summary.rst
rename to ucloud/docs/source/theory/summary.rst
diff --git a/archive/uncloud_etcd_based/docs/source/troubleshooting.rst b/ucloud/docs/source/troubleshooting.rst
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/troubleshooting.rst
rename to ucloud/docs/source/troubleshooting.rst
diff --git a/archive/uncloud_etcd_based/docs/source/user-guide.rst b/ucloud/docs/source/user-guide.rst
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/user-guide.rst
rename to ucloud/docs/source/user-guide.rst
diff --git a/archive/uncloud_etcd_based/docs/source/user-guide/how-to-create-an-os-image-for-ucloud.rst b/ucloud/docs/source/user-guide/how-to-create-an-os-image-for-ucloud.rst
similarity index 100%
rename from archive/uncloud_etcd_based/docs/source/user-guide/how-to-create-an-os-image-for-ucloud.rst
rename to ucloud/docs/source/user-guide/how-to-create-an-os-image-for-ucloud.rst
diff --git a/archive/uncloud_etcd_based/uncloud/filescanner/__init__.py b/ucloud/filescanner/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/filescanner/__init__.py
rename to ucloud/filescanner/__init__.py
diff --git a/ucloud/filescanner/main.py b/ucloud/filescanner/main.py
new file mode 100755
index 0000000..b70cb5b
--- /dev/null
+++ b/ucloud/filescanner/main.py
@@ -0,0 +1,126 @@
+import glob
+import os
+import pathlib
+import subprocess as sp
+import time
+from uuid import uuid4
+
+from . import logger
+from ucloud.config import env_vars, etcd_client
+
+
+def getxattr(file, attr):
+ """Get specified user extended attribute (arg:attr) of a file (arg:file)"""
+ try:
+ attr = "user." + attr
+ value = sp.check_output(['getfattr', file,
+ '--name', attr,
+ '--only-values',
+ '--absolute-names'], stderr=sp.DEVNULL)
+ value = value.decode("utf-8")
+ except sp.CalledProcessError as e:
+ logger.exception(e)
+ value = None
+
+ return value
+
+
+def setxattr(file, attr, value):
+ """Set specified user extended attribute (arg:attr) equal to (arg:value)
+ of a file (arg:file)"""
+
+ attr = "user." + attr
+ sp.check_output(['setfattr', file,
+ '--name', attr,
+ '--value', str(value)])
+
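+# Illustrative usage of the two helpers above (path and value are
+# placeholders); both helpers prepend "user." to the attribute name:
+#
+#   setxattr("/var/www/alice/alpine.qcow2", "utracked", True)
+#   getxattr("/var/www/alice/alpine.qcow2", "utracked")   # -> "True"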
+
+def sha512sum(file: str):
+ """Use sha512sum utility to compute sha512 sum of arg:file
+
+ IF arg:file does not exist:
+ raise FileNotFoundError exception
+ ELSE IF sum successfully computed:
+ return computed sha512 sum
+ ELSE:
+ return None
+ """
+ if not isinstance(file, str): raise TypeError
+ try:
+ output = sp.check_output(["sha512sum", file], stderr=sp.PIPE)
+ except sp.CalledProcessError as e:
+ error = e.stderr.decode("utf-8")
+ if "No such file or directory" in error:
+ raise FileNotFoundError from None
+ else:
+ output = output.decode("utf-8").strip()
+ output = output.split(" ")
+ return output[0]
+ return None
+
+
+try:
+ sp.check_output(['which', 'getfattr'])
+ sp.check_output(['which', 'setfattr'])
+except Exception as e:
+ logger.exception(e)
+ print('Make sure you have getfattr and setfattr available')
+ exit(1)
+
+
+def main():
+ BASE_DIR = env_vars.get("BASE_DIR")
+
+ FILE_PREFIX = env_vars.get("FILE_PREFIX")
+
+ # Recursively Get All Files and Folder below BASE_DIR
+ files = glob.glob("{}/**".format(BASE_DIR), recursive=True)
+
+ # Retain only Files
+ files = list(filter(os.path.isfile, files))
+
+ untracked_files = list(
+ filter(lambda f: not bool(getxattr(f, "utracked")), files)  # getxattr prepends "user." itself
+ )
+
+ tracked_files = list(
+ filter(lambda f: f not in untracked_files, files)
+ )
+ for file in untracked_files:
+ file_id = uuid4()
+
+ # Get Username
+ owner = pathlib.Path(file).parts[3]
+ # Get Creation Date of File
+ # Here, we are assuming that ctime is creation time
+ # which is mostly not true.
+ creation_date = time.ctime(os.stat(file).st_ctime)
+
+ # Get File Size
+ size = os.path.getsize(file)
+
+ # Compute sha512 sum
+ sha_sum = sha512sum(file)
+
+ # File Path excluding base and username
+ file_path = pathlib.Path(file).parts[4:]
+ file_path = os.path.join(*file_path)
+
+ # Create Entry
+ entry_key = os.path.join(FILE_PREFIX, str(file_id))
+ entry_value = {
+ "filename": file_path,
+ "owner": owner,
+ "sha512sum": sha_sum,
+ "creation_date": creation_date,
+ "size": size
+ }
+
+ print("Tracking {}".format(file))
+ # Insert Entry
+ etcd_client.put(entry_key, entry_value, value_in_json=True)
+ setxattr(file, "user.utracked", True)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/archive/uncloud_etcd_based/uncloud/hack/README.org b/ucloud/hack/README.org
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/hack/README.org
rename to ucloud/hack/README.org
diff --git a/archive/uncloud_etcd_based/uncloud/hack/conf.d/ucloud-host b/ucloud/hack/conf.d/ucloud-host
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/hack/conf.d/ucloud-host
rename to ucloud/hack/conf.d/ucloud-host
diff --git a/archive/uncloud_etcd_based/uncloud/hack/nftables.conf b/ucloud/hack/nftables.conf
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/hack/nftables.conf
rename to ucloud/hack/nftables.conf
diff --git a/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-api b/ucloud/hack/rc-scripts/ucloud-api
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-api
rename to ucloud/hack/rc-scripts/ucloud-api
diff --git a/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-host b/ucloud/hack/rc-scripts/ucloud-host
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-host
rename to ucloud/hack/rc-scripts/ucloud-host
diff --git a/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-metadata b/ucloud/hack/rc-scripts/ucloud-metadata
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-metadata
rename to ucloud/hack/rc-scripts/ucloud-metadata
diff --git a/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-scheduler b/ucloud/hack/rc-scripts/ucloud-scheduler
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-scheduler
rename to ucloud/hack/rc-scripts/ucloud-scheduler
diff --git a/archive/uncloud_etcd_based/uncloud/host/__init__.py b/ucloud/host/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/host/__init__.py
rename to ucloud/host/__init__.py
diff --git a/ucloud/host/helper.py b/ucloud/host/helper.py
new file mode 100644
index 0000000..edcb82d
--- /dev/null
+++ b/ucloud/host/helper.py
@@ -0,0 +1,13 @@
+import socket
+from contextlib import closing
+
+
+def find_free_port():
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+ try:
+ s.bind(('', 0))
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ except Exception:
+ return None
+ else:
+ return s.getsockname()[1]
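+
+# Note: the port returned by find_free_port() is only known to be free at the
+# time of the check; the socket is closed on return, so another process could
+# in principle claim the port before the migration target binds it.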
diff --git a/ucloud/host/main.py b/ucloud/host/main.py
new file mode 100755
index 0000000..ccf0a8d
--- /dev/null
+++ b/ucloud/host/main.py
@@ -0,0 +1,143 @@
+import argparse
+import multiprocessing as mp
+import time
+
+from etcd3_wrapper import Etcd3Wrapper
+
+from ucloud.common.request import RequestEntry, RequestType
+from ucloud.config import (vm_pool, request_pool,
+ etcd_client, running_vms,
+ etcd_wrapper_args, etcd_wrapper_kwargs,
+ HostPool, env_vars)
+
+from .helper import find_free_port
+from . import virtualmachine
+from ucloud.host import logger
+
+
+def update_heartbeat(hostname):
+ """Update Last HeartBeat Time for :param hostname: in etcd"""
+ client = Etcd3Wrapper(*etcd_wrapper_args, **etcd_wrapper_kwargs)
+ host_pool = HostPool(client, env_vars.get('HOST_PREFIX'))
+ this_host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
+
+ while True:
+ this_host.update_heartbeat()
+ host_pool.put(this_host)
+ time.sleep(10)
+
+
+def maintenance(host):
+ # Reconcile the VMs actually running on this host with the running_vms list.
+
+ # This captures a successful migration of a VM. Suppose this host is
+ # running "vm1" and a user requests its migration to another host.
+ # On successful migration, the destination host sets the VM's hostname
+ # to itself. So we check whether a VM of this host has been migrated
+ # successfully; if it has, we shut down "vm1" on this host.
+
+ to_be_removed = []
+ for running_vm in running_vms:
+ with vm_pool.get_put(running_vm.key) as vm_entry:
+ if vm_entry.hostname != host.key and not vm_entry.in_migration:
+ running_vm.handle.shutdown()
+ logger.info("VM migration not completed successfully.")
+ to_be_removed.append(running_vm)
+
+ for r in to_be_removed:
+ running_vms.remove(r)
+
+ # To check vm running according to etcd entries
+ alleged_running_vms = vm_pool.by_status("RUNNING", vm_pool.by_host(host.key))
+
+ for vm_entry in alleged_running_vms:
+ _vm = virtualmachine.get_vm(running_vms, vm_entry.key)
+ # Check whether the allegedly running VM is actually in our
+ # running_vms list. If it is said to be running on this host
+ # but it is not, we need to shut it down.
+
+ # This captures a poweroff/shutdown initiated by the user from
+ # inside the VM, or a crash of the VM caused by some process the
+ # user is running.
+ if (_vm and not _vm.handle.is_running()) or not _vm:
+ logger.debug("_vm = %s, is_running() = %s" % (_vm, _vm.handle.is_running()))
+ vm_entry.add_log("""{} is not running but is said to be running.
+ So, shutting it down and declaring it killed.""".format(vm_entry.key))
+ vm_entry.declare_killed()
+ vm_pool.put(vm_entry)
+ if _vm:
+ running_vms.remove(_vm)
+
+
+def main(hostname):
+ heartbeat_updating_process = mp.Process(target=update_heartbeat, args=(hostname,))
+
+ host_pool = HostPool(etcd_client, env_vars.get('HOST_PREFIX'))
+ host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
+ assert host is not None, "No such host with name = {}".format(hostname)
+
+ try:
+ heartbeat_updating_process.start()
+ except Exception as e:
+ logger.info("No Need To Go Further. Our heartbeat updating mechanism is not working")
+ logger.exception(e)
+ exit(-1)
+
+ logger.info("%s Session Started %s", '*' * 5, '*' * 5)
+
+ # Under heavy load, the timeout event does not arrive in a
+ # predictable manner (which is intentional, because we give higher
+ # priority to customers' requests). This delays the heartbeat
+ # update, which the scheduler then misinterprets as the host being
+ # dead when it is actually alive. To ensure the heartbeat is updated
+ # predictably, we run the heartbeat updating mechanism in a separate
+ # process.
+
+ for events_iterator in [
+ etcd_client.get_prefix(env_vars.get('REQUEST_PREFIX'), value_in_json=True),
+ etcd_client.watch_prefix(env_vars.get('REQUEST_PREFIX'), timeout=10, value_in_json=True),
+ ]:
+ for request_event in events_iterator:
+ request_event = RequestEntry(request_event)
+
+ if request_event.type == "TIMEOUT":
+ maintenance(host)
+ continue
+
+ # If the event is directed toward me OR I am destination of a InitVMMigration
+ if request_event.hostname == host.key or request_event.destination == host.key:
+ logger.debug("VM Request: %s", request_event)
+
+ request_pool.client.client.delete(request_event.key)
+ vm_entry = vm_pool.get(request_event.uuid)
+
+ if vm_entry:
+ if request_event.type == RequestType.StartVM:
+ virtualmachine.start(vm_entry)
+
+ elif request_event.type == RequestType.StopVM:
+ virtualmachine.stop(vm_entry)
+
+ elif request_event.type == RequestType.DeleteVM:
+ virtualmachine.delete(vm_entry)
+
+ elif request_event.type == RequestType.InitVMMigration:
+ virtualmachine.start(vm_entry, host.key, find_free_port())
+
+ elif request_event.type == RequestType.TransferVM:
+ virtualmachine.transfer(request_event)
+ else:
+ logger.info("VM Entry missing")
+
+ logger.info("Running VMs %s", running_vms)
+
+
+if __name__ == "__main__":
+ argparser = argparse.ArgumentParser()
+ argparser.add_argument("hostname", help="Name of this host. e.g /v1/host/1")
+ args = argparser.parse_args()
+ mp.set_start_method('spawn')
+ main(args.hostname)
diff --git a/ucloud/host/qmp/__init__.py b/ucloud/host/qmp/__init__.py
new file mode 100755
index 0000000..775b397
--- /dev/null
+++ b/ucloud/host/qmp/__init__.py
@@ -0,0 +1,537 @@
+# QEMU library
+#
+# Copyright (C) 2015-2016 Red Hat Inc.
+# Copyright (C) 2012 IBM Corp.
+#
+# Authors:
+# Fam Zheng
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+#
+# Based on qmp.py.
+#
+
+import errno
+import logging
+import os
+import shutil
+import socket
+import subprocess
+import tempfile
+
+from . import qmp
+
+LOG = logging.getLogger(__name__)
+
+# Mapping host architecture to any additional architectures it can
+# support which often includes its 32 bit cousin.
+ADDITIONAL_ARCHES = {
+ "x86_64": "i386",
+ "aarch64": "armhf"
+}
+
+
+def kvm_available(target_arch=None):
+ host_arch = os.uname()[4]
+ if target_arch and target_arch != host_arch:
+ if target_arch != ADDITIONAL_ARCHES.get(host_arch):
+ return False
+ return os.access("/dev/kvm", os.R_OK | os.W_OK)
+
+
+class QEMUMachineError(Exception):
+ """
+ Exception called when an error in QEMUMachine happens.
+ """
+
+
+class QEMUMachineAddDeviceError(QEMUMachineError):
+ """
+ Exception raised when a request to add a device can not be fulfilled
+
+ The failures are caused by limitations, lack of information or conflicting
+ requests on the QEMUMachine methods. This exception does not represent
+ failures reported by the QEMU binary itself.
+ """
+
+
+class MonitorResponseError(qmp.QMPError):
+ """
+ Represents erroneous QMP monitor reply
+ """
+
+ def __init__(self, reply):
+ try:
+ desc = reply["error"]["desc"]
+ except KeyError:
+ desc = reply
+ super(MonitorResponseError, self).__init__(desc)
+ self.reply = reply
+
+
+class QEMUMachine(object):
+ """
+ A QEMU VM
+
+ Use this object as a context manager to ensure the QEMU process terminates::
+
+ with VM(binary) as vm:
+ ...
+ # vm is guaranteed to be shut down here
+ """
+
+ def __init__(self, binary, args=None, wrapper=None, name=None,
+ test_dir="/var/tmp", monitor_address=None,
+ socket_scm_helper=None):
+ '''
+ Initialize a QEMUMachine
+
+ @param binary: path to the qemu binary
+ @param args: list of extra arguments
+ @param wrapper: list of arguments used as prefix to qemu binary
+ @param name: prefix for socket and log file names (default: qemu-PID)
+ @param test_dir: where to create socket and log file
+ @param monitor_address: address for QMP monitor
+ @param socket_scm_helper: helper program, required for send_fd_scm()
+ @note: Qemu process is not started until launch() is used.
+ '''
+ if args is None:
+ args = []
+ if wrapper is None:
+ wrapper = []
+ if name is None:
+ name = "qemu-%d" % os.getpid()
+ self._name = name
+ self._monitor_address = monitor_address
+ self._vm_monitor = None
+ self._qemu_log_path = None
+ self._qemu_log_file = None
+ self._popen = None
+ self._binary = binary
+ self._args = list(args) # Force copy args in case we modify them
+ self._wrapper = wrapper
+ self._events = []
+ self._iolog = None
+ self._socket_scm_helper = socket_scm_helper
+ self._qmp = None
+ self._qemu_full_args = None
+ self._test_dir = test_dir
+ self._temp_dir = None
+ self._launched = False
+ self._machine = None
+ self._console_set = False
+ self._console_device_type = None
+ self._console_address = None
+ self._console_socket = None
+
+ # just in case logging wasn't configured by the main script:
+ logging.basicConfig(level=logging.DEBUG)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.shutdown()
+ return False
+
+ # This can be used to add an unused monitor instance.
+ def add_monitor_null(self):
+ self._args.append('-monitor')
+ self._args.append('null')
+
+ def add_fd(self, fd, fdset, opaque, opts=''):
+ """
+ Pass a file descriptor to the VM
+ """
+ options = ['fd=%d' % fd,
+ 'set=%d' % fdset,
+ 'opaque=%s' % opaque]
+ if opts:
+ options.append(opts)
+
+ # This did not exist before 3.4, but since then it is
+ # mandatory for our purpose
+ if hasattr(os, 'set_inheritable'):
+ os.set_inheritable(fd, True)
+
+ self._args.append('-add-fd')
+ self._args.append(','.join(options))
+ return self
+
+ # Exactly one of fd and file_path must be given.
+ # (If it is file_path, the helper will open that file and pass its
+ # own fd)
+ def send_fd_scm(self, fd=None, file_path=None):
+ # In iotest.py, the qmp should always use unix socket.
+ assert self._qmp.is_scm_available()
+ if self._socket_scm_helper is None:
+ raise QEMUMachineError("No path to socket_scm_helper set")
+ if not os.path.exists(self._socket_scm_helper):
+ raise QEMUMachineError("%s does not exist" %
+ self._socket_scm_helper)
+
+ # This did not exist before 3.4, but since then it is
+ # mandatory for our purpose
+ if hasattr(os, 'set_inheritable'):
+ os.set_inheritable(self._qmp.get_sock_fd(), True)
+ if fd is not None:
+ os.set_inheritable(fd, True)
+
+ fd_param = ["%s" % self._socket_scm_helper,
+ "%d" % self._qmp.get_sock_fd()]
+
+ if file_path is not None:
+ assert fd is None
+ fd_param.append(file_path)
+ else:
+ assert fd is not None
+ fd_param.append(str(fd))
+
+ devnull = open(os.path.devnull, 'rb')
+ proc = subprocess.Popen(fd_param, stdin=devnull, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, close_fds=False)
+ output = proc.communicate()[0]
+ if output:
+ LOG.debug(output)
+
+ return proc.returncode
+
+ @staticmethod
+ def _remove_if_exists(path):
+ """
+ Remove file object at path if it exists
+ """
+ try:
+ os.remove(path)
+ except OSError as exception:
+ if exception.errno == errno.ENOENT:
+ return
+ raise
+
+ def is_running(self):
+ return self._popen is not None and self._popen.poll() is None
+
+ def exitcode(self):
+ if self._popen is None:
+ return None
+ return self._popen.poll()
+
+ def get_pid(self):
+ if not self.is_running():
+ return None
+ return self._popen.pid
+
+ def _load_io_log(self):
+ if self._qemu_log_path is not None:
+ with open(self._qemu_log_path, "r") as iolog:
+ self._iolog = iolog.read()
+
+ def _base_args(self):
+ if isinstance(self._monitor_address, tuple):
+ moncdev = "socket,id=mon,host=%s,port=%s" % (
+ self._monitor_address[0],
+ self._monitor_address[1])
+ else:
+ moncdev = 'socket,id=mon,path=%s' % self._vm_monitor
+ args = ['-chardev', moncdev,
+ '-mon', 'chardev=mon,mode=control']
+ if self._machine is not None:
+ args.extend(['-machine', self._machine])
+ if self._console_set:
+ self._console_address = os.path.join(self._temp_dir,
+ self._name + "-console.sock")
+ chardev = ('socket,id=console,path=%s,server,nowait' %
+ self._console_address)
+ args.extend(['-chardev', chardev])
+ if self._console_device_type is None:
+ args.extend(['-serial', 'chardev:console'])
+ else:
+ device = '%s,chardev=console' % self._console_device_type
+ args.extend(['-device', device])
+ return args
+
+ def _pre_launch(self):
+ self._temp_dir = tempfile.mkdtemp(dir=self._test_dir)
+ if self._monitor_address is not None:
+ self._vm_monitor = self._monitor_address
+ else:
+ self._vm_monitor = os.path.join(self._temp_dir,
+ self._name + "-monitor.sock")
+ self._qemu_log_path = os.path.join(self._temp_dir, self._name + ".log")
+ self._qemu_log_file = open(self._qemu_log_path, 'wb')
+
+ self._qmp = qmp.QEMUMonitorProtocol(self._vm_monitor,
+ server=True)
+
+ def _post_launch(self):
+ self._qmp.accept()
+
+ def _post_shutdown(self):
+ if self._qemu_log_file is not None:
+ self._qemu_log_file.close()
+ self._qemu_log_file = None
+
+ self._qemu_log_path = None
+
+ if self._console_socket is not None:
+ self._console_socket.close()
+ self._console_socket = None
+
+ if self._temp_dir is not None:
+ shutil.rmtree(self._temp_dir)
+ self._temp_dir = None
+
+ def launch(self):
+ """
+ Launch the VM and make sure we cleanup and expose the
+ command line/output in case of exception
+ """
+
+ if self._launched:
+ raise QEMUMachineError('VM already launched')
+
+ self._iolog = None
+ self._qemu_full_args = None
+ try:
+ self._launch()
+ self._launched = True
+ except:
+ self.shutdown()
+
+ LOG.debug('Error launching VM')
+ if self._qemu_full_args:
+ LOG.debug('Command: %r', ' '.join(self._qemu_full_args))
+ if self._iolog:
+ LOG.debug('Output: %r', self._iolog)
+ raise Exception(self._iolog)
+ raise
+
+ def _launch(self):
+ """
+ Launch the VM and establish a QMP connection
+ """
+ devnull = open(os.path.devnull, 'rb')
+ self._pre_launch()
+ self._qemu_full_args = (self._wrapper + [self._binary] +
+ self._base_args() + self._args)
+ LOG.debug('VM launch command: %r', ' '.join(self._qemu_full_args))
+ self._popen = subprocess.Popen(self._qemu_full_args,
+ stdin=devnull,
+ stdout=self._qemu_log_file,
+ stderr=subprocess.STDOUT,
+ shell=False,
+ close_fds=False)
+ self._post_launch()
+
+ def wait(self):
+ """
+ Wait for the VM to power off
+ """
+ self._popen.wait()
+ self._qmp.close()
+ self._load_io_log()
+ self._post_shutdown()
+
+ def shutdown(self):
+ """
+ Terminate the VM and clean up
+ """
+ if self.is_running():
+ try:
+ self._qmp.cmd('quit')
+ self._qmp.close()
+ except:
+ self._popen.kill()
+ self._popen.wait()
+
+ self._load_io_log()
+ self._post_shutdown()
+
+ exitcode = self.exitcode()
+ if exitcode is not None and exitcode < 0:
+ msg = 'qemu received signal %i: %s'
+ if self._qemu_full_args:
+ command = ' '.join(self._qemu_full_args)
+ else:
+ command = ''
+ LOG.warning(msg, -exitcode, command)
+
+ self._launched = False
+
+ def qmp(self, cmd, conv_keys=True, **args):
+ """
+ Invoke a QMP command and return the response dict
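+
+ Illustrative example: qmp('device_del', id='net0') sends the QMP
+ command "device_del" with arguments {"id": "net0"}. With conv_keys=True
+ (the default), underscores in keyword names are converted to dashes,
+ so a hypothetical node_name keyword would be sent as "node-name".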
+ """
+ qmp_args = dict()
+ for key, value in args.items():
+ if conv_keys:
+ qmp_args[key.replace('_', '-')] = value
+ else:
+ qmp_args[key] = value
+
+ return self._qmp.cmd(cmd, args=qmp_args)
+
+ def command(self, cmd, conv_keys=True, **args):
+ """
+ Invoke a QMP command.
+ On success return the response dict.
+ On failure raise an exception.
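+
+ Example, as used elsewhere in this patch:
+ vm.handle.command('query-status') # e.g. {'status': 'running', ...}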
+ """
+ reply = self.qmp(cmd, conv_keys, **args)
+ if reply is None:
+ raise qmp.QMPError("Monitor is closed")
+ if "error" in reply:
+ raise MonitorResponseError(reply)
+ return reply["return"]
+
+ def get_qmp_event(self, wait=False):
+ """
+ Poll for one queued QMP event and return it
+ """
+ if len(self._events) > 0:
+ return self._events.pop(0)
+ return self._qmp.pull_event(wait=wait)
+
+ def get_qmp_events(self, wait=False):
+ """
+ Poll for queued QMP events and return a list of dicts
+ """
+ events = self._qmp.get_events(wait=wait)
+ events.extend(self._events)
+ del self._events[:]
+ self._qmp.clear_events()
+ return events
+
+ @staticmethod
+ def event_match(event, match=None):
+ """
+ Check if an event matches optional match criteria.
+
+ The match criteria takes the form of a matching subdict. The event is
+ checked to be a superset of the subdict, recursively, with matching
+ values whenever the subdict values are not None.
+
+ This has a limitation that you cannot explicitly check for None values.
+
+ Examples, with the subdict queries on the left:
+ - None matches any object.
+ - {"foo": None} matches {"foo": {"bar": 1}}
+ - {"foo": None} matches {"foo": 5}
+ - {"foo": {"abc": None}} does not match {"foo": {"bar": 1}}
+ - {"foo": {"rab": 2}} matches {"foo": {"bar": 1, "rab": 2}}
+ """
+ if match is None:
+ return True
+
+ try:
+ for key in match:
+ if key in event:
+ if not QEMUMachine.event_match(event[key], match[key]):
+ return False
+ else:
+ return False
+ return True
+ except TypeError:
+ # either match or event wasn't iterable (not a dict)
+ return match == event
+
+ def event_wait(self, name, timeout=60.0, match=None):
+ """
+ event_wait waits for and returns a named event from QMP with a timeout.
+
+ name: The event to wait for.
+ timeout: QEMUMonitorProtocol.pull_event timeout parameter.
+ match: Optional match criteria. See event_match for details.
+ """
+ return self.events_wait([(name, match)], timeout)
+
+ def events_wait(self, events, timeout=60.0):
+ """
+ events_wait waits for and returns any one of the named events from QMP, with a timeout.
+
+ events: a sequence of (name, match_criteria) tuples.
+ The match criteria are optional and may be None.
+ See event_match for details.
+ timeout: QEMUMonitorProtocol.pull_event timeout parameter.
+ """
+
+ def _match(event):
+ for name, match in events:
+ if (event['event'] == name and
+ self.event_match(event, match)):
+ return True
+ return False
+
+ # Search cached events
+ for event in self._events:
+ if _match(event):
+ self._events.remove(event)
+ return event
+
+ # Poll for new events
+ while True:
+ event = self._qmp.pull_event(wait=timeout)
+ if _match(event):
+ return event
+ self._events.append(event)
+
+ return None
+
+ def get_log(self):
+ """
+ After self.shutdown or failed qemu execution, this returns the output
+ of the qemu process.
+ """
+ return self._iolog
+
+ def add_args(self, *args):
+ """
+ Adds to the list of extra arguments to be given to the QEMU binary
+ """
+ self._args.extend(args)
+
+ def set_machine(self, machine_type):
+ """
+ Sets the machine type
+
+ If set, the machine type will be added to the base arguments
+ of the resulting QEMU command line.
+ """
+ self._machine = machine_type
+
+ def set_console(self, device_type=None):
+ """
+ Sets the device type for a console device
+
+ If set, the console device and a backing character device will
+ be added to the base arguments of the resulting QEMU command
+ line.
+
+ This is a convenience method that will either use the provided
+ device type, or default to a "-serial chardev:console" command
+ line argument.
+
+ The actual setting of command line arguments will be done at
+ machine launch time, as it depends on the temporary directory
+ to be created.
+
+ @param device_type: the device type, such as "isa-serial". If
+ None is given (the default value) a "-serial
+ chardev:console" command line argument will
+ be used instead, resorting to the machine's
+ default device type.
+ """
+ self._console_set = True
+ self._console_device_type = device_type
+
+ @property
+ def console_socket(self):
+ """
+ Returns a socket connected to the console
+ """
+ if self._console_socket is None:
+ self._console_socket = socket.socket(socket.AF_UNIX,
+ socket.SOCK_STREAM)
+ self._console_socket.connect(self._console_address)
+ return self._console_socket
diff --git a/ucloud/host/qmp/qmp.py b/ucloud/host/qmp/qmp.py
new file mode 100755
index 0000000..bf35d71
--- /dev/null
+++ b/ucloud/host/qmp/qmp.py
@@ -0,0 +1,255 @@
+# QEMU Monitor Protocol Python class
+#
+# Copyright (C) 2009, 2010 Red Hat Inc.
+#
+# Authors:
+# Luiz Capitulino
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+
+import errno
+import json
+import logging
+import socket
+
+
+class QMPError(Exception):
+ pass
+
+
+class QMPConnectError(QMPError):
+ pass
+
+
+class QMPCapabilitiesError(QMPError):
+ pass
+
+
+class QMPTimeoutError(QMPError):
+ pass
+
+
+class QEMUMonitorProtocol(object):
+ #: Logger object for debugging messages
+ logger = logging.getLogger('QMP')
+ #: Socket's error class
+ error = socket.error
+ #: Socket's timeout
+ timeout = socket.timeout
+
+ def __init__(self, address, server=False):
+ """
+ Create a QEMUMonitorProtocol class.
+
+ @param address: QEMU address, can be either a unix socket path (string)
+ or a tuple in the form ( address, port ) for a TCP
+ connection
+ @param server: server mode listens on the socket (bool)
+ @raise socket.error on socket connection errors
+ @note No connection is established, this is done by the connect() or
+ accept() methods
+ """
+ self.__events = []
+ self.__address = address
+ self.__sock = self.__get_sock()
+ self.__sockfile = None
+ if server:
+ self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.__sock.bind(self.__address)
+ self.__sock.listen(1)
+
+ def __get_sock(self):
+ if isinstance(self.__address, tuple):
+ family = socket.AF_INET
+ else:
+ family = socket.AF_UNIX
+ return socket.socket(family, socket.SOCK_STREAM)
+
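+ # The greeting read below is the banner QEMU emits on a fresh QMP
+ # connection; abridged, it looks like:
+ # {"QMP": {"version": {...}, "capabilities": [...]}}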
+ def __negotiate_capabilities(self):
+ greeting = self.__json_read()
+ if greeting is None or "QMP" not in greeting:
+ raise QMPConnectError
+ # Greeting seems ok, negotiate capabilities
+ resp = self.cmd('qmp_capabilities')
+ if "return" in resp:
+ return greeting
+ raise QMPCapabilitiesError
+
+ def __json_read(self, only_event=False):
+ while True:
+ data = self.__sockfile.readline()
+ if not data:
+ return
+ resp = json.loads(data)
+ if 'event' in resp:
+ self.logger.debug("<<< %s", resp)
+ self.__events.append(resp)
+ if not only_event:
+ continue
+ return resp
+
+ def __get_events(self, wait=False):
+ """
+ Check for new events in the stream and cache them in __events.
+
+ @param wait (bool): block until an event is available.
+ @param wait (float): If wait is a float, treat it as a timeout value.
+
+ @raise QMPTimeoutError: If a timeout float is provided and the timeout
+ period elapses.
+ @raise QMPConnectError: If wait is True but no events could be
+ retrieved or if some other error occurred.
+ """
+
+ # Check for new events regardless and pull them into the cache:
+ self.__sock.setblocking(0)
+ try:
+ self.__json_read()
+ except socket.error as err:
+ if err.errno == errno.EAGAIN:
+ # No data available
+ pass
+ self.__sock.setblocking(1)
+
+ # Wait for new events, if needed.
+ # if wait is 0.0, this means "no wait" and is also implicitly false.
+ if not self.__events and wait:
+ if isinstance(wait, float):
+ self.__sock.settimeout(wait)
+ try:
+ ret = self.__json_read(only_event=True)
+ except socket.timeout:
+ raise QMPTimeoutError("Timeout waiting for event")
+ except:
+ raise QMPConnectError("Error while reading from socket")
+ if ret is None:
+ raise QMPConnectError("Error while reading from socket")
+ self.__sock.settimeout(None)
+
+ def connect(self, negotiate=True):
+ """
+ Connect to the QMP Monitor and perform capabilities negotiation.
+
+ @return QMP greeting dict
+ @raise socket.error on socket connection errors
+ @raise QMPConnectError if the greeting is not received
+ @raise QMPCapabilitiesError if fails to negotiate capabilities
+ """
+ self.__sock.connect(self.__address)
+ self.__sockfile = self.__sock.makefile()
+ if negotiate:
+ return self.__negotiate_capabilities()
+
+ def accept(self):
+ """
+ Await connection from QMP Monitor and perform capabilities negotiation.
+
+ @return QMP greeting dict
+ @raise socket.error on socket connection errors
+ @raise QMPConnectError if the greeting is not received
+ @raise QMPCapabilitiesError if fails to negotiate capabilities
+ """
+ self.__sock.settimeout(15)
+ self.__sock, _ = self.__sock.accept()
+ self.__sockfile = self.__sock.makefile()
+ return self.__negotiate_capabilities()
+
+ def cmd_obj(self, qmp_cmd):
+ """
+ Send a QMP command to the QMP Monitor.
+
+ @param qmp_cmd: QMP command to be sent as a Python dict
+ @return QMP response as a Python dict or None if the connection has
+ been closed
+ """
+ self.logger.debug(">>> %s", qmp_cmd)
+ try:
+ self.__sock.sendall(json.dumps(qmp_cmd).encode('utf-8'))
+ except socket.error as err:
+ if err.errno == errno.EPIPE:
+ return
+ raise socket.error(err)
+ resp = self.__json_read()
+ self.logger.debug("<<< %s", resp)
+ return resp
+
+ def cmd(self, name, args=None, cmd_id=None):
+ """
+ Build a QMP command and send it to the QMP Monitor.
+
+ @param name: command name (string)
+ @param args: command arguments (dict)
+ @param cmd_id: command id (dict, list, string or int)
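+
+ Illustrative example: cmd('query-status') sends {"execute": "query-status"},
+ while cmd('eject', {'device': 'ide1-cd0'}) additionally carries an
+ "arguments" member with the given dict.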
+ """
+ qmp_cmd = {'execute': name}
+ if args:
+ qmp_cmd['arguments'] = args
+ if cmd_id:
+ qmp_cmd['id'] = cmd_id
+ return self.cmd_obj(qmp_cmd)
+
+ def command(self, cmd, **kwds):
+ """
+ Build and send a QMP command to the monitor, report errors if any
+ """
+ ret = self.cmd(cmd, kwds)
+ if "error" in ret:
+ raise Exception(ret['error']['desc'])
+ return ret['return']
+
+ def pull_event(self, wait=False):
+ """
+ Pulls a single event.
+
+ @param wait (bool): block until an event is available.
+ @param wait (float): If wait is a float, treat it as a timeout value.
+
+ @raise QMPTimeoutError: If a timeout float is provided and the timeout
+ period elapses.
+ @raise QMPConnectError: If wait is True but no events could be
+ retrieved or if some other error occurred.
+
+ @return The first available QMP event, or None.
+ """
+ self.__get_events(wait)
+
+ if self.__events:
+ return self.__events.pop(0)
+ return None
+
+ def get_events(self, wait=False):
+ """
+ Get a list of available QMP events.
+
+ @param wait (bool): block until an event is available.
+ @param wait (float): If wait is a float, treat it as a timeout value.
+
+ @raise QMPTimeoutError: If a timeout float is provided and the timeout
+ period elapses.
+ @raise QMPConnectError: If wait is True but no events could be
+ retrieved or if some other error occurred.
+
+ @return The list of available QMP events.
+ """
+ self.__get_events(wait)
+ return self.__events
+
+ def clear_events(self):
+ """
+ Clear current list of pending events.
+ """
+ self.__events = []
+
+ def close(self):
+ self.__sock.close()
+ self.__sockfile.close()
+
+ def settimeout(self, timeout):
+ self.__sock.settimeout(timeout)
+
+ def get_sock_fd(self):
+ return self.__sock.fileno()
+
+ def is_scm_available(self):
+ return self.__sock.family == socket.AF_UNIX
diff --git a/ucloud/host/virtualmachine.py b/ucloud/host/virtualmachine.py
new file mode 100755
index 0000000..7524083
--- /dev/null
+++ b/ucloud/host/virtualmachine.py
@@ -0,0 +1,384 @@
+# QEMU Manual
+# https://qemu.weilnetz.de/doc/qemu-doc.html
+
+# For QEMU Monitor Protocol Commands Information, See
+# https://qemu.weilnetz.de/doc/qemu-doc.html#pcsys_005fmonitor
+
+import os
+import random
+import subprocess as sp
+import tempfile
+import time
+
+from functools import wraps
+from string import Template
+from typing import Union
+from os.path import join as join_path
+
+import bitmath
+import sshtunnel
+
+from ucloud.common.helpers import get_ipv6_address
+from ucloud.common.request import RequestEntry, RequestType
+from ucloud.common.vm import VMEntry, VMStatus
+from ucloud.config import (etcd_client, request_pool,
+ running_vms, vm_pool, env_vars,
+ image_storage_handler)
+from . import qmp
+from ucloud.host import logger
+
+
+class VM:
+ def __init__(self, key, handle, vnc_socket_file):
+ self.key = key # type: str
+ self.handle = handle # type: qmp.QEMUMachine
+ self.vnc_socket_file = vnc_socket_file # type: tempfile.NamedTemporaryFile
+
+ def __repr__(self):
+ return "VM({})".format(self.key)
+
+
+def delete_network_interface(iface):
+ try:
+ sp.check_output(['ip', 'link', 'del', iface])
+ except Exception:
+ pass
+
+
+def resolve_network(network_name, network_owner):
+ network = etcd_client.get(join_path(env_vars.get("NETWORK_PREFIX"),
+ network_owner,
+ network_name),
+ value_in_json=True)
+ return network
+
+
+def delete_vm_network(vm_entry):
+ try:
+ for network in vm_entry.network:
+ network_name = network[0]
+ tap_mac = network[1]
+ tap_id = network[2]
+
+ delete_network_interface('tap{}'.format(tap_id))
+
+ owners_vms = vm_pool.by_owner(vm_entry.owner)
+ owners_running_vms = vm_pool.by_status(VMStatus.running,
+ _vms=owners_vms)
+
+ networks = map(lambda n: n[0],
+ map(lambda vm: vm.network, owners_running_vms)
+ )
+ networks_in_use_by_user_vms = [vm[0] for vm in networks]
+ if network_name not in networks_in_use_by_user_vms:
+ network_entry = resolve_network(network[0], vm_entry.owner)
+ if network_entry:
+ network_type = network_entry.value["type"]
+ network_id = network_entry.value["id"]
+ if network_type == "vxlan":
+ delete_network_interface('br{}'.format(network_id))
+ delete_network_interface('vxlan{}'.format(network_id))
+ except Exception:
+ logger.exception("Exception in network interface deletion")
+
+
+def create_dev(script, _id, dev, ip=None):
+ command = [script, _id, dev]
+ if ip:
+ command.append(ip)
+ try:
+ output = sp.check_output(command, stderr=sp.PIPE)
+ except Exception as e:
+ print(e.stderr)
+ return None
+ else:
+ return output.decode("utf-8").strip()
+
+
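+# The helper below chains the network scripts create-vxlan.sh ->
+# create-bridge.sh -> create-tap.sh; each script is expected to print the
+# name of the device it created (e.g. "vxlan100" -> "br100" -> "tap5",
+# names illustrative), which is then passed as the parent device of the
+# next step. The final tap device name is returned.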
+def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
+ network_script_base = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'network')
+ vxlan = create_dev(script=os.path.join(network_script_base, 'create-vxlan.sh'),
+ _id=_id, dev=_dev)
+ if vxlan:
+ bridge = create_dev(script=os.path.join(network_script_base, 'create-bridge.sh'),
+ _id=_id, dev=vxlan, ip=ip)
+ if bridge:
+ tap = create_dev(script=os.path.join(network_script_base, 'create-tap.sh'),
+ _id=str(tap_id), dev=bridge)
+ if tap:
+ return tap
+
+
+def random_bytes(num=6):
+ return [random.randrange(256) for _ in range(num)]
+
+
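+# Illustrative usage of generate_mac (OUI value hypothetical):
+# generate_mac(oui='02:00:00') -> e.g. '02:00:00:3a:7f:19'
+# generate_mac() -> a random locally administered, unicast MAC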
+def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='%02x'):
+ mac = random_bytes()
+ if oui:
+ if isinstance(oui, str):
+ oui = [int(chunk, 16) for chunk in oui.split(separator)]
+ mac = oui + random_bytes(num=6 - len(oui))
+ else:
+ if multicast:
+ mac[0] |= 1 # set multicast bit (bit 0 of the first octet)
+ else:
+ mac[0] &= ~1 # clear multicast bit (unicast address)
+ if uaa:
+ mac[0] &= ~(1 << 1) # clear the locally administered bit (universally administered)
+ else:
+ mac[0] |= 1 << 1 # set the locally administered bit
+ return separator.join(byte_fmt % b for b in mac)
+
+
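+# The radvd configuration below is rendered from network/radvd-template.conf,
+# which is assumed to contain $bridge and $prefix placeholders; a rendered
+# entry would, for example, announce the (illustrative) prefix 2001:db8::/64
+# on interface br100.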
+def update_radvd_conf(etcd_client):
+ network_script_base = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'network')
+
+ networks = {
+ net.value['ipv6']: net.value['id']
+ for net in etcd_client.get_prefix('/v1/network/', value_in_json=True)
+ if net.value.get('ipv6')
+ }
+ radvd_template = open(os.path.join(network_script_base,
+ 'radvd-template.conf'), 'r').read()
+ radvd_template = Template(radvd_template)
+
+ content = [radvd_template.safe_substitute(bridge='br{}'.format(networks[net]),
+ prefix=net)
+ for net in networks if networks.get(net)]
+
+ with open('/etc/radvd.conf', 'w') as radvd_conf:
+ radvd_conf.writelines(content)
+ try:
+ sp.check_output(['systemctl', 'restart', 'radvd'])
+ except Exception:
+ sp.check_output(['service', 'radvd', 'restart'])
+
+
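+# The helper below assembles the QEMU arguments as one space-separated string
+# and then splits it; an illustrative result (hypothetical values) is:
+# ['-name', 'owner_vmname',
+# '-drive', 'file=...,format=raw,if=virtio,cache=none',
+# '-device', 'virtio-rng-pci', '-vnc', 'unix:/tmp/tmpXXXXXX',
+# '-m', '1024', '-smp', 'cores=1,threads=1']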
+def get_start_command_args(vm_entry, vnc_sock_filename: str, migration=False, migration_port=None):
+ threads_per_core = 1
+ vm_memory = int(bitmath.parse_string_unsafe(vm_entry.specs["ram"]).to_MB())
+ vm_cpus = int(vm_entry.specs["cpu"])
+ vm_uuid = vm_entry.uuid
+ vm_networks = vm_entry.network
+
+ command = "-name {}_{}".format(vm_entry.owner, vm_entry.name)
+
+ command += " -drive file={},format=raw,if=virtio,cache=none".format(
+ image_storage_handler.qemu_path_string(vm_uuid)
+ )
+ command += " -device virtio-rng-pci -vnc unix:{}".format(vnc_sock_filename)
+ command += " -m {} -smp cores={},threads={}".format(
+ vm_memory, vm_cpus, threads_per_core
+ )
+
+ if migration:
+ command += " -incoming tcp:[::]:{}".format(migration_port)
+
+ tap = None
+ for network_mac_and_tap in vm_networks:
+ network_name, mac, tap = network_mac_and_tap
+
+ _key = os.path.join(env_vars.get('NETWORK_PREFIX'), vm_entry.owner, network_name)
+ network = etcd_client.get(_key, value_in_json=True)
+ network_type = network.value["type"]
+ network_id = str(network.value["id"])
+ network_ipv6 = network.value["ipv6"]
+
+ if network_type == "vxlan":
+ tap = create_vxlan_br_tap(_id=network_id,
+ _dev=env_vars.get("VXLAN_PHY_DEV"),
+ tap_id=tap,
+ ip=network_ipv6)
+ update_radvd_conf(etcd_client)
+
+ command += " -netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no" \
+ " -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}" \
+ .format(tap=tap, net_id=network_id, mac=mac)
+
+ return command.split(" ")
+
+
+def create_vm_object(vm_entry, migration=False, migration_port=None):
+ # NOTE: If migration suddenly stops working, having different
+ # VNC unix socket filenames on the source and destination hosts
+ # can be a possible cause of it.
+
+ # REQUIREMENT: Use Unix Socket instead of TCP Port for VNC
+ vnc_sock_file = tempfile.NamedTemporaryFile()
+
+ qemu_args = get_start_command_args(
+ vm_entry=vm_entry,
+ vnc_sock_filename=vnc_sock_file.name,
+ migration=migration,
+ migration_port=migration_port,
+ )
+ qemu_machine = qmp.QEMUMachine("/usr/bin/qemu-system-x86_64", args=qemu_args)
+ return VM(vm_entry.key, qemu_machine, vnc_sock_file)
+
+
+def get_vm(vm_list: list, vm_key) -> Union[VM, None]:
+ return next((vm for vm in vm_list if vm.key == vm_key), None)
+
+
+def need_running_vm(func):
+ @wraps(func)
+ def wrapper(e):
+ vm = get_vm(running_vms, e.key)
+ if vm:
+ try:
+ status = vm.handle.command("query-status")
+ logger.debug("VM Status Check - %s", status)
+ except Exception as exception:
+ logger.info("%s failed - VM %s %s", func.__name__, e, exception)
+ else:
+ return func(e)
+
+ return None
+ else:
+ logger.info("%s failed because VM %s is not running", func.__name__, e.key)
+ return None
+
+ return wrapper
+
+
+def create(vm_entry: VMEntry):
+ if image_storage_handler.is_vm_image_exists(vm_entry.uuid):
+ # File already exists. No problem, continue.
+ logger.debug("Image for vm %s exists", vm_entry.uuid)
+ else:
+ vm_hdd = int(bitmath.parse_string_unsafe(vm_entry.specs["os-ssd"]).to_MB())
+ if image_storage_handler.make_vm_image(src=vm_entry.image_uuid, dest=vm_entry.uuid):
+ if not image_storage_handler.resize_vm_image(path=vm_entry.uuid, size=vm_hdd):
+ vm_entry.status = VMStatus.error
+ else:
+ logger.info("New VM Created")
+
+
+def start(vm_entry: VMEntry, destination_host_key=None, migration_port=None):
+ _vm = get_vm(running_vms, vm_entry.key)
+
+ # VM already running. No need to proceed further.
+ if _vm:
+ logger.info("VM %s already running" % vm_entry.uuid)
+ return
+ else:
+ logger.info("Trying to start %s" % vm_entry.uuid)
+ if destination_host_key:
+ launch_vm(vm_entry, migration=True, migration_port=migration_port,
+ destination_host_key=destination_host_key)
+ else:
+ create(vm_entry)
+ launch_vm(vm_entry)
+
+
+@need_running_vm
+def stop(vm_entry):
+ vm = get_vm(running_vms, vm_entry.key)
+ vm.handle.shutdown()
+ if not vm.handle.is_running():
+ vm_entry.add_log("Shutdown successfully")
+ vm_entry.declare_stopped()
+ vm_pool.put(vm_entry)
+ running_vms.remove(vm)
+ delete_vm_network(vm_entry)
+
+
+def delete(vm_entry):
+ logger.info("Deleting VM | %s", vm_entry)
+ stop(vm_entry)
+
+ if image_storage_handler.is_vm_image_exists(vm_entry.uuid):
+ r_status = image_storage_handler.delete_vm_image(vm_entry.uuid)
+ if r_status:
+ etcd_client.client.delete(vm_entry.key)
+ else:
+ etcd_client.client.delete(vm_entry.key)
+
+
+def transfer(request_event):
+ # This function runs on the source host, i.e. the host on which
+ # the VM is initially running. The source host is responsible for
+ # transferring the VM state to the destination host.
+
+ _host, _port = request_event.parameters["host"], request_event.parameters["port"]
+ _uuid = request_event.uuid
+ _destination = request_event.destination_host_key
+ vm = get_vm(running_vms, join_path(env_vars.get('VM_PREFIX'), _uuid))
+
+ if vm:
+ tunnel = sshtunnel.SSHTunnelForwarder(
+ _host,
+ ssh_username=env_vars.get("ssh_username"),
+ ssh_pkey=env_vars.get("ssh_pkey"),
+ remote_bind_address=("127.0.0.1", _port),
+ ssh_proxy_enabled=True,
+ ssh_proxy=(_host, 22)
+ )
+ try:
+ tunnel.start()
+ except sshtunnel.BaseSSHTunnelForwarderError:
+ logger.exception("Couldn't establish connection to (%s, 22)", _host)
+ else:
+ vm.handle.command(
+ "migrate", uri="tcp:0.0.0.0:{}".format(tunnel.local_bind_port)
+ )
+
+ status = vm.handle.command("query-migrate")["status"]
+ while status not in ["failed", "completed"]:
+ time.sleep(2)
+ status = vm.handle.command("query-migrate")["status"]
+
+ with vm_pool.get_put(request_event.uuid) as source_vm:
+ if status == "failed":
+ source_vm.add_log("Migration Failed")
+ elif status == "completed":
+ # If VM is successfully migrated then shutdown the VM
+ # on this host and update hostname to destination host key
+ source_vm.add_log("Successfully migrated")
+ source_vm.hostname = _destination
+ running_vms.remove(vm)
+ vm.handle.shutdown()
+ source_vm.in_migration = False # VM transfer finished
+ finally:
+ tunnel.close()
+
+
+def launch_vm(vm_entry, migration=False, migration_port=None, destination_host_key=None):
+ logger.info("Starting %s" % vm_entry.key)
+
+ vm = create_vm_object(vm_entry, migration=migration, migration_port=migration_port)
+ try:
+ vm.handle.launch()
+ except Exception:
+ logger.exception("Error Occured while starting VM")
+ vm.handle.shutdown()
+
+ if migration:
+ # We don't care whether MachineError or any other error occurred
+ pass
+ else:
+ # Error during typical launch of a vm
+ vm.handle.shutdown()
+ vm_entry.declare_killed()
+ vm_pool.put(vm_entry)
+ else:
+ vm_entry.vnc_socket = vm.vnc_socket_file.name
+ running_vms.append(vm)
+
+ if migration:
+ vm_entry.in_migration = True
+ r = RequestEntry.from_scratch(
+ type=RequestType.TransferVM,
+ hostname=vm_entry.hostname,
+ parameters={"host": get_ipv6_address(), "port": migration_port},
+ uuid=vm_entry.uuid,
+ destination_host_key=destination_host_key,
+ request_prefix=env_vars.get("REQUEST_PREFIX")
+ )
+ request_pool.put(r)
+ else:
+ # Typical launching of a vm
+ vm_entry.status = VMStatus.running
+ vm_entry.add_log("Started successfully")
+
+ vm_pool.put(vm_entry)
diff --git a/archive/uncloud_etcd_based/uncloud/imagescanner/__init__.py b/ucloud/imagescanner/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/imagescanner/__init__.py
rename to ucloud/imagescanner/__init__.py
diff --git a/ucloud/imagescanner/main.py b/ucloud/imagescanner/main.py
new file mode 100755
index 0000000..20ce9d5
--- /dev/null
+++ b/ucloud/imagescanner/main.py
@@ -0,0 +1,78 @@
+import json
+import os
+import subprocess
+
+from os.path import join as join_path
+from ucloud.config import etcd_client, env_vars, image_storage_handler
+from ucloud.imagescanner import logger
+
+
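+# "qemu-img info --output json <path>" prints a JSON document; abridged, it
+# looks like {"virtual-size": 10737418240, "format": "qcow2", "filename": "..."},
+# and only the "format" member is used below.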
+def qemu_img_type(path):
+ qemu_img_info_command = ["qemu-img", "info", "--output", "json", path]
+ try:
+ qemu_img_info = subprocess.check_output(qemu_img_info_command)
+ except Exception as e:
+ logger.exception(e)
+ return None
+ else:
+ qemu_img_info = json.loads(qemu_img_info.decode("utf-8"))
+ return qemu_img_info["format"]
+
+
+def main():
+ # We want to get image entries that request images to be created
+ images = etcd_client.get_prefix(env_vars.get('IMAGE_PREFIX'), value_in_json=True)
+ images_to_be_created = list(filter(lambda im: im.value['status'] == 'TO_BE_CREATED', images))
+
+ for image in images_to_be_created:
+ try:
+ image_uuid = image.key.split('/')[-1]
+ image_owner = image.value['owner']
+ image_filename = image.value['filename']
+ image_store_name = image.value['store_name']
+ image_full_path = join_path(env_vars.get('BASE_DIR'), image_owner, image_filename)
+
+ image_stores = etcd_client.get_prefix(env_vars.get('IMAGE_STORE_PREFIX'), value_in_json=True)
+ user_image_store = next(filter(
+ lambda s, store_name=image_store_name: s.value["name"] == store_name,
+ image_stores
+ ))
+
+ image_store_pool = user_image_store.value['attributes']['pool']
+
+ except Exception as e:
+ logger.exception(e)
+ else:
+ # At least our basic data is available
+ qemu_img_convert_command = ["qemu-img", "convert", "-f", "qcow2",
+ "-O", "raw", image_full_path, "image.raw"]
+
+ if qemu_img_type(image_full_path) == "qcow2":
+ try:
+ # Convert .qcow2 to .raw
+ subprocess.check_output(qemu_img_convert_command)
+ except Exception as e:
+ logger.exception(e)
+ else:
+ # Import and Protect
+ r_status = image_storage_handler.import_image(src="image.raw",
+ dest=image_uuid,
+ protect=True)
+ if r_status:
+ # Everything is successfully done
+ image.value["status"] = "CREATED"
+ etcd_client.put(image.key, json.dumps(image.value))
+
+ else:
+ # The user-provided image either was not found or has an invalid format
+ image.value["status"] = "INVALID_IMAGE"
+ etcd_client.put(image.key, json.dumps(image.value))
+
+ try:
+ os.remove("image.raw")
+ except Exception:
+ pass
+
+
+if __name__ == "__main__":
+ main()
diff --git a/archive/uncloud_etcd_based/uncloud/cli/__init__.py b/ucloud/metadata/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/cli/__init__.py
rename to ucloud/metadata/__init__.py
diff --git a/ucloud/metadata/main.py b/ucloud/metadata/main.py
new file mode 100644
index 0000000..e7cb33b
--- /dev/null
+++ b/ucloud/metadata/main.py
@@ -0,0 +1,91 @@
+import os
+
+from flask import Flask, request
+from flask_restful import Resource, Api
+
+from ucloud.config import etcd_client, env_vars, vm_pool
+
+app = Flask(__name__)
+api = Api(app)
+
+
+def get_vm_entry(mac_addr):
+ return next(filter(lambda vm: mac_addr in list(zip(*vm.network))[1], vm_pool.vms), None)
+
+
+# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
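+# Illustrative EUI-64 example: the link-local address fe80::0202:b3ff:fe1e:8329
+# maps back to the MAC address 00:02:b3:1e:83:29.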
+def ipv62mac(ipv6):
+ # remove subnet info if given
+ subnet_index = ipv6.find('/')
+ if subnet_index != -1:
+ ipv6 = ipv6[:subnet_index]
+
+ ipv6_parts = ipv6.split(':')
+ mac_parts = list()
+ for ipv6_part in ipv6_parts[-4:]:
+ while len(ipv6_part) < 4:
+ ipv6_part = '0' + ipv6_part
+ mac_parts.append(ipv6_part[:2])
+ mac_parts.append(ipv6_part[-2:])
+
+ # modify parts to match MAC value
+ mac_parts[0] = '%02x' % (int(mac_parts[0], 16) ^ 2)
+ del mac_parts[4]
+ del mac_parts[3]
+ return ':'.join(mac_parts)
+
+
+class Root(Resource):
+ @staticmethod
+ def get():
+ data = get_vm_entry(ipv62mac(request.remote_addr))
+
+ if not data:
+ return {'message': 'Metadata for such VM does not exist.'}, 404
+ else:
+
+ # {env_vars.get('USER_PREFIX')}/{realm}/{name}/key
+ etcd_key = os.path.join(env_vars.get('USER_PREFIX'), data.value['owner_realm'],
+ data.value['owner'], 'key')
+ etcd_entry = etcd_client.get_prefix(etcd_key, value_in_json=True)
+ user_personal_ssh_keys = [key.value for key in etcd_entry]
+ data.value['metadata']['ssh-keys'] += user_personal_ssh_keys
+ return data.value['metadata'], 200
+
+ @staticmethod
+ def post():
+ return {'message': 'Previous Implementation is deprecated.'}
+ # data = etcd_client.get("/v1/metadata/{}".format(request.remote_addr), value_in_json=True)
+ # print(data)
+ # if data:
+ # for k in request.json:
+ # if k not in data.value:
+ # data.value[k] = request.json[k]
+ # if k.endswith("-list"):
+ # data.value[k] = [request.json[k]]
+ # else:
+ # if k.endswith("-list"):
+ # data.value[k].append(request.json[k])
+ # else:
+ # data.value[k] = request.json[k]
+ # etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
+ # data.value, value_in_json=True)
+ # else:
+ # data = {}
+ # for k in request.json:
+ # data[k] = request.json[k]
+ # if k.endswith("-list"):
+ # data[k] = [request.json[k]]
+ # etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
+ # data, value_in_json=True)
+
+
+api.add_resource(Root, '/')
+
+
+def main():
+ app.run(debug=True, host="::", port="80")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/archive/uncloud_etcd_based/uncloud/network/README b/ucloud/network/README
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/network/README
rename to ucloud/network/README
diff --git a/archive/uncloud_etcd_based/uncloud/client/__init__.py b/ucloud/network/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/client/__init__.py
rename to ucloud/network/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/network/create-bridge.sh b/ucloud/network/create-bridge.sh
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/network/create-bridge.sh
rename to ucloud/network/create-bridge.sh
diff --git a/archive/uncloud_etcd_based/uncloud/network/create-tap.sh b/ucloud/network/create-tap.sh
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/network/create-tap.sh
rename to ucloud/network/create-tap.sh
diff --git a/archive/uncloud_etcd_based/uncloud/network/create-vxlan.sh b/ucloud/network/create-vxlan.sh
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/network/create-vxlan.sh
rename to ucloud/network/create-vxlan.sh
diff --git a/archive/uncloud_etcd_based/uncloud/network/radvd-template.conf b/ucloud/network/radvd-template.conf
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/network/radvd-template.conf
rename to ucloud/network/radvd-template.conf
diff --git a/ucloud/sanity_checks.py b/ucloud/sanity_checks.py
new file mode 100644
index 0000000..143f767
--- /dev/null
+++ b/ucloud/sanity_checks.py
@@ -0,0 +1,33 @@
+import sys
+import subprocess as sp
+
+from os.path import isdir
+from ucloud.config import env_vars
+
+
+def check():
+ #########################
+ # ucloud-image-scanner #
+ #########################
+ if env_vars.get('STORAGE_BACKEND') == 'filesystem' and not isdir(env_vars.get('IMAGE_DIR')):
+ print("You have set STORAGE_BACKEND to filesystem. So,"
+ "the {} must exists. But, it don't".format(env_vars.get('IMAGE_DIR')))
+ sys.exit(1)
+
+ try:
+ sp.check_output(['which', 'qemu-img'])
+ except Exception:
+ print("qemu-img missing")
+ sys.exit(1)
+
+ ###############
+ # ucloud-host #
+ ###############
+
+ if env_vars.get('STORAGE_BACKEND') == 'filesystem' and not isdir(env_vars.get('VM_DIR')):
+ print("You have set STORAGE_BACKEND to filesystem. So, the vm directory mentioned"
+ " in .env file must exists. But, it don't.")
+ sys.exit(1)
+
+if __name__ == "__main__":
+ check()
\ No newline at end of file
diff --git a/ucloud/scheduler/__init__.py b/ucloud/scheduler/__init__.py
new file mode 100644
index 0000000..95e1be0
--- /dev/null
+++ b/ucloud/scheduler/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/scheduler/helper.py b/ucloud/scheduler/helper.py
similarity index 50%
rename from archive/uncloud_etcd_based/uncloud/scheduler/helper.py
rename to ucloud/scheduler/helper.py
index 79db322..ba577d6 100755
--- a/archive/uncloud_etcd_based/uncloud/scheduler/helper.py
+++ b/ucloud/scheduler/helper.py
@@ -3,10 +3,10 @@ from functools import reduce
import bitmath
-from uncloud.common.host import HostStatus
-from uncloud.common.request import RequestEntry, RequestType
-from uncloud.common.vm import VMStatus
-from uncloud.common.shared import shared
+from ucloud.common.host import HostStatus
+from ucloud.common.request import RequestEntry, RequestType
+from ucloud.common.vm import VMStatus
+from ucloud.config import vm_pool, host_pool, request_pool, env_vars
def accumulated_specs(vms_specs):
@@ -23,35 +23,17 @@ def remaining_resources(host_specs, vms_specs):
for component in _vms_specs:
if isinstance(_vms_specs[component], str):
- _vms_specs[component] = int(
- bitmath.parse_string_unsafe(
- _vms_specs[component]
- ).to_MB()
- )
+ _vms_specs[component] = int(bitmath.parse_string_unsafe(_vms_specs[component]).to_MB())
elif isinstance(_vms_specs[component], list):
- _vms_specs[component] = map(
- lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
- _vms_specs[component],
- )
- _vms_specs[component] = reduce(
- lambda x, y: x + y, _vms_specs[component], 0
- )
+ _vms_specs[component] = map(lambda x: int(bitmath.parse_string_unsafe(x).to_MB()), _vms_specs[component])
+ _vms_specs[component] = reduce(lambda x, y: x + y, _vms_specs[component], 0)
for component in _remaining:
if isinstance(_remaining[component], str):
- _remaining[component] = int(
- bitmath.parse_string_unsafe(
- _remaining[component]
- ).to_MB()
- )
+ _remaining[component] = int(bitmath.parse_string_unsafe(_remaining[component]).to_MB())
elif isinstance(_remaining[component], list):
- _remaining[component] = map(
- lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
- _remaining[component],
- )
- _remaining[component] = reduce(
- lambda x, y: x + y, _remaining[component], 0
- )
+ _remaining[component] = map(lambda x: int(bitmath.parse_string_unsafe(x).to_MB()), _remaining[component])
+ _remaining[component] = reduce(lambda x, y: x + y, _remaining[component], 0)
_remaining.subtract(_vms_specs)
@@ -64,27 +46,23 @@ class NoSuitableHostFound(Exception):
def get_suitable_host(vm_specs, hosts=None):
if hosts is None:
- hosts = shared.host_pool.by_status(HostStatus.alive)
+ hosts = host_pool.by_status(HostStatus.alive)
for host in hosts:
# Filter them by host_name
- vms = shared.vm_pool.by_host(host.key)
+ vms = vm_pool.by_host(host.key)
# Filter them by status
- vms = shared.vm_pool.by_status(VMStatus.running, vms)
+ vms = vm_pool.by_status(VMStatus.running, vms)
running_vms_specs = [vm.specs for vm in vms]
# Accumulate all of their combined specs
- running_vms_accumulated_specs = accumulated_specs(
- running_vms_specs
- )
+ running_vms_accumulated_specs = accumulated_specs(running_vms_specs)
# Find out remaining resources after
# host_specs - already running vm_specs
- remaining = remaining_resources(
- host.specs, running_vms_accumulated_specs
- )
+ remaining = remaining_resources(host.specs, running_vms_accumulated_specs)
# Find out remaining - new_vm_specs
remaining = remaining_resources(remaining, vm_specs)
@@ -97,7 +75,7 @@ def get_suitable_host(vm_specs, hosts=None):
def dead_host_detection():
# Bring out your dead! - Monty Python and the Holy Grail
- hosts = shared.host_pool.by_status(HostStatus.alive)
+ hosts = host_pool.by_status(HostStatus.alive)
dead_hosts_keys = []
for host in hosts:
@@ -111,27 +89,25 @@ def dead_host_detection():
def dead_host_mitigation(dead_hosts_keys):
for host_key in dead_hosts_keys:
- host = shared.host_pool.get(host_key)
+ host = host_pool.get(host_key)
host.declare_dead()
- vms_hosted_on_dead_host = shared.vm_pool.by_host(host_key)
+ vms_hosted_on_dead_host = vm_pool.by_host(host_key)
for vm in vms_hosted_on_dead_host:
- vm.status = "UNKNOWN"
- shared.vm_pool.put(vm)
- shared.host_pool.put(host)
+ vm.declare_killed()
+ vm_pool.put(vm)
+ host_pool.put(host)
def assign_host(vm):
vm.hostname = get_suitable_host(vm.specs)
- shared.vm_pool.put(vm)
+ vm_pool.put(vm)
- r = RequestEntry.from_scratch(
- type=RequestType.StartVM,
- uuid=vm.uuid,
- hostname=vm.hostname,
- request_prefix=shared.settings["etcd"]["request_prefix"],
- )
- shared.request_pool.put(r)
+ r = RequestEntry.from_scratch(type=RequestType.StartVM,
+ uuid=vm.uuid,
+ hostname=vm.hostname,
+ request_prefix=env_vars.get("REQUEST_PREFIX"))
+ request_pool.put(r)
vm.log.append("VM scheduled for starting")
return vm.hostname
diff --git a/ucloud/scheduler/main.py b/ucloud/scheduler/main.py
new file mode 100755
index 0000000..e2c975a
--- /dev/null
+++ b/ucloud/scheduler/main.py
@@ -0,0 +1,93 @@
+# TODO
+# 1. Send an email to the address defined by env['admin-email']
+#    if resources are exhausted
+# 2. Introduce a status endpoint of the scheduler -
+# maybe expose a prometheus compatible output
+
+from ucloud.common.request import RequestEntry, RequestType
+from ucloud.config import etcd_client
+from ucloud.config import host_pool, request_pool, vm_pool, env_vars
+from .helper import (get_suitable_host, dead_host_mitigation, dead_host_detection,
+ assign_host, NoSuitableHostFound)
+from . import logger
+
+
+def main():
+ logger.info("%s SESSION STARTED %s", '*' * 5, '*' * 5)
+
+ pending_vms = []
+
+ for request_iterator in [
+ etcd_client.get_prefix(env_vars.get('REQUEST_PREFIX'), value_in_json=True),
+ etcd_client.watch_prefix(env_vars.get('REQUEST_PREFIX'), timeout=5, value_in_json=True),
+ ]:
+ for request_event in request_iterator:
+ request_entry = RequestEntry(request_event)
+ # Never run time-critical logic inside the timeout
+ # handling, because the timeout event only fires when
+ # no other event is happening; under heavy load there
+ # would be no timeout event at all.
+ if request_entry.type == "TIMEOUT":
+
+ # Detect hosts that are dead and set their status
+ # to "DEAD", and their VMs' status to "KILLED"
+ dead_hosts = dead_host_detection()
+ if dead_hosts:
+ logger.debug("Dead hosts: %s", dead_hosts)
+ dead_host_mitigation(dead_hosts)
+
+ # If there are VMs that were not assigned a host
+ # because no available host met their requirements,
+ # create a new ScheduleVM request for each of them
+ # on their behalf.
+ while pending_vms:
+ pending_vm_entry = pending_vms.pop()
+ r = RequestEntry.from_scratch(type="ScheduleVM",
+ uuid=pending_vm_entry.uuid,
+ hostname=pending_vm_entry.hostname,
+ request_prefix=env_vars.get("REQUEST_PREFIX"))
+ request_pool.put(r)
+
+ elif request_entry.type == RequestType.ScheduleVM:
+ logger.debug("%s, %s", request_entry.key, request_entry.value)
+
+ vm_entry = vm_pool.get(request_entry.uuid)
+ if vm_entry is None:
+ logger.info("Trying to act on {} but it is deleted".format(request_entry.uuid))
+ continue
+ etcd_client.client.delete(request_entry.key) # consume Request
+
+ # If the Request is about a VM which is labelled as "migration"
+ # and has a destination
+ if hasattr(request_entry, "migration") and request_entry.migration \
+ and hasattr(request_entry, "destination") and request_entry.destination:
+ try:
+ get_suitable_host(vm_specs=vm_entry.specs,
+ hosts=[host_pool.get(request_entry.destination)])
+ except NoSuitableHostFound:
+ logger.info("Requested destination host doesn't have enough capacity"
+ "to hold %s" % vm_entry.uuid)
+ else:
+ r = RequestEntry.from_scratch(type=RequestType.InitVMMigration,
+ uuid=request_entry.uuid,
+ destination=request_entry.destination,
+ request_prefix=env_vars.get("REQUEST_PREFIX"))
+ request_pool.put(r)
+
+ # If the Request is about a VM that just wants to be started/created
+ else:
+ # assign_host raises NoSuitableHostFound when no host can
+ # accommodate the VM because of resource constraints
+ try:
+ assign_host(vm_entry)
+ except NoSuitableHostFound:
+ vm_entry.add_log("Can't schedule VM. No Resource Left.")
+ vm_pool.put(vm_entry)
+
+ pending_vms.append(vm_entry)
+ logger.info("No Resource Left. Emailing admin....")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/archive/uncloud_etcd_based/uncloud/configure/__init__.py b/ucloud/scheduler/tests/__init__.py
similarity index 100%
rename from archive/uncloud_etcd_based/uncloud/configure/__init__.py
rename to ucloud/scheduler/tests/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_basics.py b/ucloud/scheduler/tests/test_basics.py
similarity index 83%
rename from archive/uncloud_etcd_based/uncloud/scheduler/tests/test_basics.py
rename to ucloud/scheduler/tests/test_basics.py
index defeb23..92b3a83 100755
--- a/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_basics.py
+++ b/ucloud/scheduler/tests/test_basics.py
@@ -15,7 +15,7 @@ from main import (
main,
)
-from uncloud.config import etcd_client
+from ucloud.config import etcd_client
class TestFunctions(unittest.TestCase):
@@ -70,15 +70,9 @@ class TestFunctions(unittest.TestCase):
"last_heartbeat": datetime.utcnow().isoformat(),
}
with self.client.client.lock("lock"):
- self.client.put(
- f"{self.host_prefix}/1", host1, value_in_json=True
- )
- self.client.put(
- f"{self.host_prefix}/2", host2, value_in_json=True
- )
- self.client.put(
- f"{self.host_prefix}/3", host3, value_in_json=True
- )
+ self.client.put(f"{self.host_prefix}/1", host1, value_in_json=True)
+ self.client.put(f"{self.host_prefix}/2", host2, value_in_json=True)
+ self.client.put(f"{self.host_prefix}/3", host3, value_in_json=True)
def create_vms(self):
vm1 = json.dumps(
@@ -152,17 +146,15 @@ class TestFunctions(unittest.TestCase):
{"cpu": 8, "ram": 32},
]
self.assertEqual(
- accumulated_specs(vms),
- {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10},
+ accumulated_specs(vms), {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
)
def test_remaining_resources(self):
host_specs = {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
vms_specs = {"ssd": 10, "cpu": 32, "ram": 12, "hdd": 0}
resultant_specs = {"ssd": 0, "cpu": -16, "ram": 36, "hdd": 10}
- self.assertEqual(
- remaining_resources(host_specs, vms_specs), resultant_specs
- )
+ self.assertEqual(remaining_resources(host_specs, vms_specs),
+ resultant_specs)
def test_vmpool(self):
self.p.join(1)
@@ -175,12 +167,7 @@ class TestFunctions(unittest.TestCase):
f"{self.vm_prefix}/1",
{
"owner": "meow",
- "specs": {
- "cpu": 4,
- "ram": 8,
- "hdd": 100,
- "sdd": 256,
- },
+ "specs": {"cpu": 4, "ram": 8, "hdd": 100, "sdd": 256},
"hostname": f"{self.host_prefix}/3",
"status": "SCHEDULED_DEPLOY",
},
@@ -195,12 +182,7 @@ class TestFunctions(unittest.TestCase):
f"{self.vm_prefix}/7",
{
"owner": "meow",
- "specs": {
- "cpu": 10,
- "ram": 22,
- "hdd": 146,
- "sdd": 0,
- },
+ "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
"hostname": "",
"status": "REQUESTED_NEW",
},
@@ -215,12 +197,7 @@ class TestFunctions(unittest.TestCase):
f"{self.vm_prefix}/7",
{
"owner": "meow",
- "specs": {
- "cpu": 10,
- "ram": 22,
- "hdd": 146,
- "sdd": 0,
- },
+ "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
"hostname": "",
"status": "REQUESTED_NEW",
},
diff --git a/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_dead_host_mechanism.py b/ucloud/scheduler/tests/test_dead_host_mechanism.py
similarity index 70%
rename from archive/uncloud_etcd_based/uncloud/scheduler/tests/test_dead_host_mechanism.py
rename to ucloud/scheduler/tests/test_dead_host_mechanism.py
index 466b9ee..0b403ef 100755
--- a/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_dead_host_mechanism.py
+++ b/ucloud/scheduler/tests/test_dead_host_mechanism.py
@@ -6,7 +6,11 @@ from os.path import dirname
BASE_DIR = dirname(dirname(__file__))
sys.path.insert(0, BASE_DIR)
-from main import dead_host_detection, dead_host_mitigation, config
+from main import (
+ dead_host_detection,
+ dead_host_mitigation,
+ config
+)
class TestDeadHostMechanism(unittest.TestCase):
@@ -48,23 +52,13 @@ class TestDeadHostMechanism(unittest.TestCase):
"last_heartbeat": datetime(2011, 1, 1).isoformat(),
}
with self.client.client.lock("lock"):
- self.client.put(
- f"{self.host_prefix}/1", host1, value_in_json=True
- )
- self.client.put(
- f"{self.host_prefix}/2", host2, value_in_json=True
- )
- self.client.put(
- f"{self.host_prefix}/3", host3, value_in_json=True
- )
- self.client.put(
- f"{self.host_prefix}/4", host4, value_in_json=True
- )
+ self.client.put(f"{self.host_prefix}/1", host1, value_in_json=True)
+ self.client.put(f"{self.host_prefix}/2", host2, value_in_json=True)
+ self.client.put(f"{self.host_prefix}/3", host3, value_in_json=True)
+ self.client.put(f"{self.host_prefix}/4", host4, value_in_json=True)
def test_dead_host_detection(self):
- hosts = self.client.get_prefix(
- self.host_prefix, value_in_json=True
- )
+ hosts = self.client.get_prefix(self.host_prefix, value_in_json=True)
deads = dead_host_detection(hosts)
self.assertEqual(deads, ["/test/host/2", "/test/host/3"])
return deads
@@ -72,9 +66,7 @@ class TestDeadHostMechanism(unittest.TestCase):
def test_dead_host_mitigation(self):
deads = self.test_dead_host_detection()
dead_host_mitigation(self.client, deads)
- hosts = self.client.get_prefix(
- self.host_prefix, value_in_json=True
- )
+ hosts = self.client.get_prefix(self.host_prefix, value_in_json=True)
deads = dead_host_detection(hosts)
self.assertEqual(deads, [])
diff --git a/uncloud/.gitignore b/uncloud/.gitignore
deleted file mode 100644
index b03e0a5..0000000
--- a/uncloud/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-local_settings.py
-ldap_max_uid_file
\ No newline at end of file
diff --git a/uncloud/__init__.py b/uncloud/__init__.py
deleted file mode 100644
index 3ce2d95..0000000
--- a/uncloud/__init__.py
+++ /dev/null
@@ -1,253 +0,0 @@
-from django.utils.translation import gettext_lazy as _
-import decimal
-
-# Define DecimalField properties, used to represent amounts of money.
-AMOUNT_MAX_DIGITS=10
-AMOUNT_DECIMALS=2
-
-decimal.getcontext().prec = AMOUNT_DECIMALS
-
-# http://xml.coverpages.org/country3166.html
-COUNTRIES = (
- ('AD', _('Andorra')),
- ('AE', _('United Arab Emirates')),
- ('AF', _('Afghanistan')),
- ('AG', _('Antigua & Barbuda')),
- ('AI', _('Anguilla')),
- ('AL', _('Albania')),
- ('AM', _('Armenia')),
- ('AN', _('Netherlands Antilles')),
- ('AO', _('Angola')),
- ('AQ', _('Antarctica')),
- ('AR', _('Argentina')),
- ('AS', _('American Samoa')),
- ('AT', _('Austria')),
- ('AU', _('Australia')),
- ('AW', _('Aruba')),
- ('AZ', _('Azerbaijan')),
- ('BA', _('Bosnia and Herzegovina')),
- ('BB', _('Barbados')),
- ('BD', _('Bangladesh')),
- ('BE', _('Belgium')),
- ('BF', _('Burkina Faso')),
- ('BG', _('Bulgaria')),
- ('BH', _('Bahrain')),
- ('BI', _('Burundi')),
- ('BJ', _('Benin')),
- ('BM', _('Bermuda')),
- ('BN', _('Brunei Darussalam')),
- ('BO', _('Bolivia')),
- ('BR', _('Brazil')),
- ('BS', _('Bahama')),
- ('BT', _('Bhutan')),
- ('BV', _('Bouvet Island')),
- ('BW', _('Botswana')),
- ('BY', _('Belarus')),
- ('BZ', _('Belize')),
- ('CA', _('Canada')),
- ('CC', _('Cocos (Keeling) Islands')),
- ('CF', _('Central African Republic')),
- ('CG', _('Congo')),
- ('CH', _('Switzerland')),
- ('CI', _('Ivory Coast')),
- ('CK', _('Cook Iislands')),
- ('CL', _('Chile')),
- ('CM', _('Cameroon')),
- ('CN', _('China')),
- ('CO', _('Colombia')),
- ('CR', _('Costa Rica')),
- ('CU', _('Cuba')),
- ('CV', _('Cape Verde')),
- ('CX', _('Christmas Island')),
- ('CY', _('Cyprus')),
- ('CZ', _('Czech Republic')),
- ('DE', _('Germany')),
- ('DJ', _('Djibouti')),
- ('DK', _('Denmark')),
- ('DM', _('Dominica')),
- ('DO', _('Dominican Republic')),
- ('DZ', _('Algeria')),
- ('EC', _('Ecuador')),
- ('EE', _('Estonia')),
- ('EG', _('Egypt')),
- ('EH', _('Western Sahara')),
- ('ER', _('Eritrea')),
- ('ES', _('Spain')),
- ('ET', _('Ethiopia')),
- ('FI', _('Finland')),
- ('FJ', _('Fiji')),
- ('FK', _('Falkland Islands (Malvinas)')),
- ('FM', _('Micronesia')),
- ('FO', _('Faroe Islands')),
- ('FR', _('France')),
- ('FX', _('France, Metropolitan')),
- ('GA', _('Gabon')),
- ('GB', _('United Kingdom (Great Britain)')),
- ('GD', _('Grenada')),
- ('GE', _('Georgia')),
- ('GF', _('French Guiana')),
- ('GH', _('Ghana')),
- ('GI', _('Gibraltar')),
- ('GL', _('Greenland')),
- ('GM', _('Gambia')),
- ('GN', _('Guinea')),
- ('GP', _('Guadeloupe')),
- ('GQ', _('Equatorial Guinea')),
- ('GR', _('Greece')),
- ('GS', _('South Georgia and the South Sandwich Islands')),
- ('GT', _('Guatemala')),
- ('GU', _('Guam')),
- ('GW', _('Guinea-Bissau')),
- ('GY', _('Guyana')),
- ('HK', _('Hong Kong')),
- ('HM', _('Heard & McDonald Islands')),
- ('HN', _('Honduras')),
- ('HR', _('Croatia')),
- ('HT', _('Haiti')),
- ('HU', _('Hungary')),
- ('ID', _('Indonesia')),
- ('IE', _('Ireland')),
- ('IL', _('Israel')),
- ('IN', _('India')),
- ('IO', _('British Indian Ocean Territory')),
- ('IQ', _('Iraq')),
- ('IR', _('Islamic Republic of Iran')),
- ('IS', _('Iceland')),
- ('IT', _('Italy')),
- ('JM', _('Jamaica')),
- ('JO', _('Jordan')),
- ('JP', _('Japan')),
- ('KE', _('Kenya')),
- ('KG', _('Kyrgyzstan')),
- ('KH', _('Cambodia')),
- ('KI', _('Kiribati')),
- ('KM', _('Comoros')),
- ('KN', _('St. Kitts and Nevis')),
- ('KP', _('Korea, Democratic People\'s Republic of')),
- ('KR', _('Korea, Republic of')),
- ('KW', _('Kuwait')),
- ('KY', _('Cayman Islands')),
- ('KZ', _('Kazakhstan')),
- ('LA', _('Lao People\'s Democratic Republic')),
- ('LB', _('Lebanon')),
- ('LC', _('Saint Lucia')),
- ('LI', _('Liechtenstein')),
- ('LK', _('Sri Lanka')),
- ('LR', _('Liberia')),
- ('LS', _('Lesotho')),
- ('LT', _('Lithuania')),
- ('LU', _('Luxembourg')),
- ('LV', _('Latvia')),
- ('LY', _('Libyan Arab Jamahiriya')),
- ('MA', _('Morocco')),
- ('MC', _('Monaco')),
- ('MD', _('Moldova, Republic of')),
- ('MG', _('Madagascar')),
- ('MH', _('Marshall Islands')),
- ('ML', _('Mali')),
- ('MN', _('Mongolia')),
- ('MM', _('Myanmar')),
- ('MO', _('Macau')),
- ('MP', _('Northern Mariana Islands')),
- ('MQ', _('Martinique')),
- ('MR', _('Mauritania')),
- ('MS', _('Monserrat')),
- ('MT', _('Malta')),
- ('MU', _('Mauritius')),
- ('MV', _('Maldives')),
- ('MW', _('Malawi')),
- ('MX', _('Mexico')),
- ('MY', _('Malaysia')),
- ('MZ', _('Mozambique')),
- ('NA', _('Namibia')),
- ('NC', _('New Caledonia')),
- ('NE', _('Niger')),
- ('NF', _('Norfolk Island')),
- ('NG', _('Nigeria')),
- ('NI', _('Nicaragua')),
- ('NL', _('Netherlands')),
- ('NO', _('Norway')),
- ('NP', _('Nepal')),
- ('NR', _('Nauru')),
- ('NU', _('Niue')),
- ('NZ', _('New Zealand')),
- ('OM', _('Oman')),
- ('PA', _('Panama')),
- ('PE', _('Peru')),
- ('PF', _('French Polynesia')),
- ('PG', _('Papua New Guinea')),
- ('PH', _('Philippines')),
- ('PK', _('Pakistan')),
- ('PL', _('Poland')),
- ('PM', _('St. Pierre & Miquelon')),
- ('PN', _('Pitcairn')),
- ('PR', _('Puerto Rico')),
- ('PT', _('Portugal')),
- ('PW', _('Palau')),
- ('PY', _('Paraguay')),
- ('QA', _('Qatar')),
- ('RE', _('Reunion')),
- ('RO', _('Romania')),
- ('RU', _('Russian Federation')),
- ('RW', _('Rwanda')),
- ('SA', _('Saudi Arabia')),
- ('SB', _('Solomon Islands')),
- ('SC', _('Seychelles')),
- ('SD', _('Sudan')),
- ('SE', _('Sweden')),
- ('SG', _('Singapore')),
- ('SH', _('St. Helena')),
- ('SI', _('Slovenia')),
- ('SJ', _('Svalbard & Jan Mayen Islands')),
- ('SK', _('Slovakia')),
- ('SL', _('Sierra Leone')),
- ('SM', _('San Marino')),
- ('SN', _('Senegal')),
- ('SO', _('Somalia')),
- ('SR', _('Suriname')),
- ('ST', _('Sao Tome & Principe')),
- ('SV', _('El Salvador')),
- ('SY', _('Syrian Arab Republic')),
- ('SZ', _('Swaziland')),
- ('TC', _('Turks & Caicos Islands')),
- ('TD', _('Chad')),
- ('TF', _('French Southern Territories')),
- ('TG', _('Togo')),
- ('TH', _('Thailand')),
- ('TJ', _('Tajikistan')),
- ('TK', _('Tokelau')),
- ('TM', _('Turkmenistan')),
- ('TN', _('Tunisia')),
- ('TO', _('Tonga')),
- ('TP', _('East Timor')),
- ('TR', _('Turkey')),
- ('TT', _('Trinidad & Tobago')),
- ('TV', _('Tuvalu')),
- ('TW', _('Taiwan, Province of China')),
- ('TZ', _('Tanzania, United Republic of')),
- ('UA', _('Ukraine')),
- ('UG', _('Uganda')),
- ('UM', _('United States Minor Outlying Islands')),
- ('US', _('United States of America')),
- ('UY', _('Uruguay')),
- ('UZ', _('Uzbekistan')),
- ('VA', _('Vatican City State (Holy See)')),
- ('VC', _('St. Vincent & the Grenadines')),
- ('VE', _('Venezuela')),
- ('VG', _('British Virgin Islands')),
- ('VI', _('United States Virgin Islands')),
- ('VN', _('Viet Nam')),
- ('VU', _('Vanuatu')),
- ('WF', _('Wallis & Futuna Islands')),
- ('WS', _('Samoa')),
- ('YE', _('Yemen')),
- ('YT', _('Mayotte')),
- ('YU', _('Yugoslavia')),
- ('ZA', _('South Africa')),
- ('ZM', _('Zambia')),
- ('ZR', _('Zaire')),
- ('ZW', _('Zimbabwe')),
-)
-
-
-__all__ = ()
diff --git a/uncloud/admin.py b/uncloud/admin.py
deleted file mode 100644
index 38f8cce..0000000
--- a/uncloud/admin.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from django.contrib import admin
-
-from .models import *
-
-for m in [ UncloudProvider, UncloudNetwork ]:
- admin.site.register(m)
diff --git a/uncloud/asgi.py b/uncloud/asgi.py
deleted file mode 100644
index 2b5a7a3..0000000
--- a/uncloud/asgi.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""
-ASGI config for uncloud project.
-
-It exposes the ASGI callable as a module-level variable named ``application``.
-
-For more information on this file, see
-https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
-"""
-
-import os
-
-from django.core.asgi import get_asgi_application
-
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uncloud.settings')
-
-application = get_asgi_application()
diff --git a/uncloud/forms.py b/uncloud/forms.py
deleted file mode 100644
index 153a49a..0000000
--- a/uncloud/forms.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from django import forms
-from django.contrib.auth.models import User
-
-
-class UserDeleteForm(forms.ModelForm):
- class Meta:
- model = User
- fields = []
diff --git a/uncloud/management/commands/db-add-defaults.py b/uncloud/management/commands/db-add-defaults.py
deleted file mode 100644
index 605c8f5..0000000
--- a/uncloud/management/commands/db-add-defaults.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import random
-import string
-
-from django.core.management.base import BaseCommand
-from django.core.exceptions import ObjectDoesNotExist
-from django.contrib.auth import get_user_model
-from django.conf import settings
-
-from uncloud_pay.models import BillingAddress, RecurringPeriod, Product
-from uncloud.models import UncloudProvider, UncloudNetwork
-
-
-class Command(BaseCommand):
- help = 'Add standard uncloud values'
-
- def add_arguments(self, parser):
- pass
-
- def handle(self, *args, **options):
- # Order matters, objects can be dependent on each other
-
- admin_username="uncloud-admin"
- pw_length = 32
-
- # Only set password if the user did not exist before
- try:
- admin_user = get_user_model().objects.get(username=settings.UNCLOUD_ADMIN_NAME)
- except ObjectDoesNotExist:
- random_password = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(pw_length))
-
- admin_user = get_user_model().objects.create_user(username=settings.UNCLOUD_ADMIN_NAME, password=random_password)
- admin_user.is_superuser=True
- admin_user.is_staff=True
- admin_user.save()
-
- print(f"Created admin user '{admin_username}' with password '{random_password}'")
-
- BillingAddress.populate_db_defaults()
- RecurringPeriod.populate_db_defaults()
- Product.populate_db_defaults()
-
- UncloudNetwork.populate_db_defaults()
- UncloudProvider.populate_db_defaults()
diff --git a/uncloud/management/commands/uncloud.py b/uncloud/management/commands/uncloud.py
deleted file mode 100644
index bd47c6b..0000000
--- a/uncloud/management/commands/uncloud.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import sys
-from datetime import datetime
-
-from django.core.management.base import BaseCommand
-
-from django.contrib.auth import get_user_model
-
-from opennebula.models import VM as VMModel
-from uncloud_vm.models import VMHost, VMProduct, VMNetworkCard, VMDiskImageProduct, VMDiskProduct, VMCluster
-
-import logging
-log = logging.getLogger(__name__)
-
-
-class Command(BaseCommand):
- help = 'General uncloud commands'
-
- def add_arguments(self, parser):
- parser.add_argument('--bootstrap', action='store_true', help='Bootstrap a typical uncloud installation')
-
- def handle(self, *args, **options):
-
- if options['bootstrap']:
- self.bootstrap()
-
- def bootstrap(self):
-        default_cluster, _ = VMCluster.objects.get_or_create(name="default")
-# local_host =
diff --git a/uncloud/migrations/0001_initial.py b/uncloud/migrations/0001_initial.py
deleted file mode 100644
index 10d1144..0000000
--- a/uncloud/migrations/0001_initial.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Generated by Django 3.1 on 2020-12-13 10:38
-
-import django.core.validators
-from django.db import migrations, models
-import django.db.models.deletion
-import uncloud.models
-
-
-class Migration(migrations.Migration):
-
- initial = True
-
- dependencies = [
- ]
-
- operations = [
- migrations.CreateModel(
- name='UncloudNetwork',
- fields=[
- ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
- ('network_address', models.GenericIPAddressField(unique=True)),
- ('network_mask', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(128)])),
- ('description', models.CharField(max_length=256)),
- ],
- ),
- migrations.CreateModel(
- name='UncloudProvider',
- fields=[
- ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
- ('full_name', models.CharField(max_length=256)),
- ('organization', models.CharField(blank=True, max_length=256, null=True)),
- ('street', models.CharField(max_length=256)),
- ('city', models.CharField(max_length=256)),
- ('postal_code', models.CharField(max_length=64)),
- ('country', uncloud.models.CountryField(blank=True, choices=[('AD', 'Andorra'), ('AE', 'United Arab Emirates'), ('AF', 'Afghanistan'), ('AG', 'Antigua & Barbuda'), ('AI', 'Anguilla'), ('AL', 'Albania'), ('AM', 'Armenia'), ('AN', 'Netherlands Antilles'), ('AO', 'Angola'), ('AQ', 'Antarctica'), ('AR', 'Argentina'), ('AS', 'American Samoa'), ('AT', 'Austria'), ('AU', 'Australia'), ('AW', 'Aruba'), ('AZ', 'Azerbaijan'), ('BA', 'Bosnia and Herzegovina'), ('BB', 'Barbados'), ('BD', 'Bangladesh'), ('BE', 'Belgium'), ('BF', 'Burkina Faso'), ('BG', 'Bulgaria'), ('BH', 'Bahrain'), ('BI', 'Burundi'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BN', 'Brunei Darussalam'), ('BO', 'Bolivia'), ('BR', 'Brazil'), ('BS', 'Bahama'), ('BT', 'Bhutan'), ('BV', 'Bouvet Island'), ('BW', 'Botswana'), ('BY', 'Belarus'), ('BZ', 'Belize'), ('CA', 'Canada'), ('CC', 'Cocos (Keeling) Islands'), ('CF', 'Central African Republic'), ('CG', 'Congo'), ('CH', 'Switzerland'), ('CI', 'Ivory Coast'), ('CK', 'Cook Iislands'), ('CL', 'Chile'), ('CM', 'Cameroon'), ('CN', 'China'), ('CO', 'Colombia'), ('CR', 'Costa Rica'), ('CU', 'Cuba'), ('CV', 'Cape Verde'), ('CX', 'Christmas Island'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DE', 'Germany'), ('DJ', 'Djibouti'), ('DK', 'Denmark'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('DZ', 'Algeria'), ('EC', 'Ecuador'), ('EE', 'Estonia'), ('EG', 'Egypt'), ('EH', 'Western Sahara'), ('ER', 'Eritrea'), ('ES', 'Spain'), ('ET', 'Ethiopia'), ('FI', 'Finland'), ('FJ', 'Fiji'), ('FK', 'Falkland Islands (Malvinas)'), ('FM', 'Micronesia'), ('FO', 'Faroe Islands'), ('FR', 'France'), ('FX', 'France, Metropolitan'), ('GA', 'Gabon'), ('GB', 'United Kingdom (Great Britain)'), ('GD', 'Grenada'), ('GE', 'Georgia'), ('GF', 'French Guiana'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GL', 'Greenland'), ('GM', 'Gambia'), ('GN', 'Guinea'), ('GP', 'Guadeloupe'), ('GQ', 'Equatorial Guinea'), ('GR', 'Greece'), ('GS', 'South Georgia and the South Sandwich Islands'), ('GT', 'Guatemala'), ('GU', 'Guam'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HK', 'Hong Kong'), ('HM', 'Heard & McDonald Islands'), ('HN', 'Honduras'), ('HR', 'Croatia'), ('HT', 'Haiti'), ('HU', 'Hungary'), ('ID', 'Indonesia'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IN', 'India'), ('IO', 'British Indian Ocean Territory'), ('IQ', 'Iraq'), ('IR', 'Islamic Republic of Iran'), ('IS', 'Iceland'), ('IT', 'Italy'), ('JM', 'Jamaica'), ('JO', 'Jordan'), ('JP', 'Japan'), ('KE', 'Kenya'), ('KG', 'Kyrgyzstan'), ('KH', 'Cambodia'), ('KI', 'Kiribati'), ('KM', 'Comoros'), ('KN', 'St. 
Kitts and Nevis'), ('KP', "Korea, Democratic People's Republic of"), ('KR', 'Korea, Republic of'), ('KW', 'Kuwait'), ('KY', 'Cayman Islands'), ('KZ', 'Kazakhstan'), ('LA', "Lao People's Democratic Republic"), ('LB', 'Lebanon'), ('LC', 'Saint Lucia'), ('LI', 'Liechtenstein'), ('LK', 'Sri Lanka'), ('LR', 'Liberia'), ('LS', 'Lesotho'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('LV', 'Latvia'), ('LY', 'Libyan Arab Jamahiriya'), ('MA', 'Morocco'), ('MC', 'Monaco'), ('MD', 'Moldova, Republic of'), ('MG', 'Madagascar'), ('MH', 'Marshall Islands'), ('ML', 'Mali'), ('MN', 'Mongolia'), ('MM', 'Myanmar'), ('MO', 'Macau'), ('MP', 'Northern Mariana Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MS', 'Monserrat'), ('MT', 'Malta'), ('MU', 'Mauritius'), ('MV', 'Maldives'), ('MW', 'Malawi'), ('MX', 'Mexico'), ('MY', 'Malaysia'), ('MZ', 'Mozambique'), ('NA', 'Namibia'), ('NC', 'New Caledonia'), ('NE', 'Niger'), ('NF', 'Norfolk Island'), ('NG', 'Nigeria'), ('NI', 'Nicaragua'), ('NL', 'Netherlands'), ('NO', 'Norway'), ('NP', 'Nepal'), ('NR', 'Nauru'), ('NU', 'Niue'), ('NZ', 'New Zealand'), ('OM', 'Oman'), ('PA', 'Panama'), ('PE', 'Peru'), ('PF', 'French Polynesia'), ('PG', 'Papua New Guinea'), ('PH', 'Philippines'), ('PK', 'Pakistan'), ('PL', 'Poland'), ('PM', 'St. Pierre & Miquelon'), ('PN', 'Pitcairn'), ('PR', 'Puerto Rico'), ('PT', 'Portugal'), ('PW', 'Palau'), ('PY', 'Paraguay'), ('QA', 'Qatar'), ('RE', 'Reunion'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('SA', 'Saudi Arabia'), ('SB', 'Solomon Islands'), ('SC', 'Seychelles'), ('SD', 'Sudan'), ('SE', 'Sweden'), ('SG', 'Singapore'), ('SH', 'St. Helena'), ('SI', 'Slovenia'), ('SJ', 'Svalbard & Jan Mayen Islands'), ('SK', 'Slovakia'), ('SL', 'Sierra Leone'), ('SM', 'San Marino'), ('SN', 'Senegal'), ('SO', 'Somalia'), ('SR', 'Suriname'), ('ST', 'Sao Tome & Principe'), ('SV', 'El Salvador'), ('SY', 'Syrian Arab Republic'), ('SZ', 'Swaziland'), ('TC', 'Turks & Caicos Islands'), ('TD', 'Chad'), ('TF', 'French Southern Territories'), ('TG', 'Togo'), ('TH', 'Thailand'), ('TJ', 'Tajikistan'), ('TK', 'Tokelau'), ('TM', 'Turkmenistan'), ('TN', 'Tunisia'), ('TO', 'Tonga'), ('TP', 'East Timor'), ('TR', 'Turkey'), ('TT', 'Trinidad & Tobago'), ('TV', 'Tuvalu'), ('TW', 'Taiwan, Province of China'), ('TZ', 'Tanzania, United Republic of'), ('UA', 'Ukraine'), ('UG', 'Uganda'), ('UM', 'United States Minor Outlying Islands'), ('US', 'United States of America'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VA', 'Vatican City State (Holy See)'), ('VC', 'St. Vincent & the Grenadines'), ('VE', 'Venezuela'), ('VG', 'British Virgin Islands'), ('VI', 'United States Virgin Islands'), ('VN', 'Viet Nam'), ('VU', 'Vanuatu'), ('WF', 'Wallis & Futuna Islands'), ('WS', 'Samoa'), ('YE', 'Yemen'), ('YT', 'Mayotte'), ('YU', 'Yugoslavia'), ('ZA', 'South Africa'), ('ZM', 'Zambia'), ('ZR', 'Zaire'), ('ZW', 'Zimbabwe')], default='CH', max_length=2)),
- ('starting_date', models.DateField()),
- ('ending_date', models.DateField(blank=True, null=True)),
- ('billing_network', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='uncloudproviderbill', to='uncloud.uncloudnetwork')),
- ('coupon_network', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='uncloudprovidercoupon', to='uncloud.uncloudnetwork')),
- ('referral_network', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='uncloudproviderreferral', to='uncloud.uncloudnetwork')),
- ],
- options={
- 'abstract': False,
- },
- ),
- ]
diff --git a/uncloud/migrations/0002_uncloudtasks.py b/uncloud/migrations/0002_uncloudtasks.py
deleted file mode 100644
index 9c69606..0000000
--- a/uncloud/migrations/0002_uncloudtasks.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Generated by Django 3.1 on 2020-12-20 17:16
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('uncloud', '0001_initial'),
- ]
-
- operations = [
- migrations.CreateModel(
- name='UncloudTasks',
- fields=[
- ('task_id', models.UUIDField(primary_key=True, serialize=False)),
- ],
- ),
- ]
diff --git a/uncloud/migrations/0003_auto_20201220_1728.py b/uncloud/migrations/0003_auto_20201220_1728.py
deleted file mode 100644
index 2ec0eec..0000000
--- a/uncloud/migrations/0003_auto_20201220_1728.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Generated by Django 3.1 on 2020-12-20 17:28
-
-from django.db import migrations
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('uncloud', '0002_uncloudtasks'),
- ]
-
- operations = [
- migrations.RenameModel(
- old_name='UncloudTasks',
- new_name='UncloudTask',
- ),
- ]
diff --git a/uncloud/migrations/0004_auto_20210101_1308.py b/uncloud/migrations/0004_auto_20210101_1308.py
deleted file mode 100644
index 8385b16..0000000
--- a/uncloud/migrations/0004_auto_20210101_1308.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Generated by Django 3.1 on 2021-01-01 13:08
-
-from django.db import migrations
-import uncloud.models
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('uncloud', '0003_auto_20201220_1728'),
- ]
-
- operations = [
- migrations.AlterField(
- model_name='uncloudprovider',
- name='country',
- field=uncloud.models.CountryField(choices=[('AD', 'Andorra'), ('AE', 'United Arab Emirates'), ('AF', 'Afghanistan'), ('AG', 'Antigua & Barbuda'), ('AI', 'Anguilla'), ('AL', 'Albania'), ('AM', 'Armenia'), ('AN', 'Netherlands Antilles'), ('AO', 'Angola'), ('AQ', 'Antarctica'), ('AR', 'Argentina'), ('AS', 'American Samoa'), ('AT', 'Austria'), ('AU', 'Australia'), ('AW', 'Aruba'), ('AZ', 'Azerbaijan'), ('BA', 'Bosnia and Herzegovina'), ('BB', 'Barbados'), ('BD', 'Bangladesh'), ('BE', 'Belgium'), ('BF', 'Burkina Faso'), ('BG', 'Bulgaria'), ('BH', 'Bahrain'), ('BI', 'Burundi'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BN', 'Brunei Darussalam'), ('BO', 'Bolivia'), ('BR', 'Brazil'), ('BS', 'Bahama'), ('BT', 'Bhutan'), ('BV', 'Bouvet Island'), ('BW', 'Botswana'), ('BY', 'Belarus'), ('BZ', 'Belize'), ('CA', 'Canada'), ('CC', 'Cocos (Keeling) Islands'), ('CF', 'Central African Republic'), ('CG', 'Congo'), ('CH', 'Switzerland'), ('CI', 'Ivory Coast'), ('CK', 'Cook Iislands'), ('CL', 'Chile'), ('CM', 'Cameroon'), ('CN', 'China'), ('CO', 'Colombia'), ('CR', 'Costa Rica'), ('CU', 'Cuba'), ('CV', 'Cape Verde'), ('CX', 'Christmas Island'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DE', 'Germany'), ('DJ', 'Djibouti'), ('DK', 'Denmark'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('DZ', 'Algeria'), ('EC', 'Ecuador'), ('EE', 'Estonia'), ('EG', 'Egypt'), ('EH', 'Western Sahara'), ('ER', 'Eritrea'), ('ES', 'Spain'), ('ET', 'Ethiopia'), ('FI', 'Finland'), ('FJ', 'Fiji'), ('FK', 'Falkland Islands (Malvinas)'), ('FM', 'Micronesia'), ('FO', 'Faroe Islands'), ('FR', 'France'), ('FX', 'France, Metropolitan'), ('GA', 'Gabon'), ('GB', 'United Kingdom (Great Britain)'), ('GD', 'Grenada'), ('GE', 'Georgia'), ('GF', 'French Guiana'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GL', 'Greenland'), ('GM', 'Gambia'), ('GN', 'Guinea'), ('GP', 'Guadeloupe'), ('GQ', 'Equatorial Guinea'), ('GR', 'Greece'), ('GS', 'South Georgia and the South Sandwich Islands'), ('GT', 'Guatemala'), ('GU', 'Guam'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HK', 'Hong Kong'), ('HM', 'Heard & McDonald Islands'), ('HN', 'Honduras'), ('HR', 'Croatia'), ('HT', 'Haiti'), ('HU', 'Hungary'), ('ID', 'Indonesia'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IN', 'India'), ('IO', 'British Indian Ocean Territory'), ('IQ', 'Iraq'), ('IR', 'Islamic Republic of Iran'), ('IS', 'Iceland'), ('IT', 'Italy'), ('JM', 'Jamaica'), ('JO', 'Jordan'), ('JP', 'Japan'), ('KE', 'Kenya'), ('KG', 'Kyrgyzstan'), ('KH', 'Cambodia'), ('KI', 'Kiribati'), ('KM', 'Comoros'), ('KN', 'St. 
Kitts and Nevis'), ('KP', "Korea, Democratic People's Republic of"), ('KR', 'Korea, Republic of'), ('KW', 'Kuwait'), ('KY', 'Cayman Islands'), ('KZ', 'Kazakhstan'), ('LA', "Lao People's Democratic Republic"), ('LB', 'Lebanon'), ('LC', 'Saint Lucia'), ('LI', 'Liechtenstein'), ('LK', 'Sri Lanka'), ('LR', 'Liberia'), ('LS', 'Lesotho'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('LV', 'Latvia'), ('LY', 'Libyan Arab Jamahiriya'), ('MA', 'Morocco'), ('MC', 'Monaco'), ('MD', 'Moldova, Republic of'), ('MG', 'Madagascar'), ('MH', 'Marshall Islands'), ('ML', 'Mali'), ('MN', 'Mongolia'), ('MM', 'Myanmar'), ('MO', 'Macau'), ('MP', 'Northern Mariana Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MS', 'Monserrat'), ('MT', 'Malta'), ('MU', 'Mauritius'), ('MV', 'Maldives'), ('MW', 'Malawi'), ('MX', 'Mexico'), ('MY', 'Malaysia'), ('MZ', 'Mozambique'), ('NA', 'Namibia'), ('NC', 'New Caledonia'), ('NE', 'Niger'), ('NF', 'Norfolk Island'), ('NG', 'Nigeria'), ('NI', 'Nicaragua'), ('NL', 'Netherlands'), ('NO', 'Norway'), ('NP', 'Nepal'), ('NR', 'Nauru'), ('NU', 'Niue'), ('NZ', 'New Zealand'), ('OM', 'Oman'), ('PA', 'Panama'), ('PE', 'Peru'), ('PF', 'French Polynesia'), ('PG', 'Papua New Guinea'), ('PH', 'Philippines'), ('PK', 'Pakistan'), ('PL', 'Poland'), ('PM', 'St. Pierre & Miquelon'), ('PN', 'Pitcairn'), ('PR', 'Puerto Rico'), ('PT', 'Portugal'), ('PW', 'Palau'), ('PY', 'Paraguay'), ('QA', 'Qatar'), ('RE', 'Reunion'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('SA', 'Saudi Arabia'), ('SB', 'Solomon Islands'), ('SC', 'Seychelles'), ('SD', 'Sudan'), ('SE', 'Sweden'), ('SG', 'Singapore'), ('SH', 'St. Helena'), ('SI', 'Slovenia'), ('SJ', 'Svalbard & Jan Mayen Islands'), ('SK', 'Slovakia'), ('SL', 'Sierra Leone'), ('SM', 'San Marino'), ('SN', 'Senegal'), ('SO', 'Somalia'), ('SR', 'Suriname'), ('ST', 'Sao Tome & Principe'), ('SV', 'El Salvador'), ('SY', 'Syrian Arab Republic'), ('SZ', 'Swaziland'), ('TC', 'Turks & Caicos Islands'), ('TD', 'Chad'), ('TF', 'French Southern Territories'), ('TG', 'Togo'), ('TH', 'Thailand'), ('TJ', 'Tajikistan'), ('TK', 'Tokelau'), ('TM', 'Turkmenistan'), ('TN', 'Tunisia'), ('TO', 'Tonga'), ('TP', 'East Timor'), ('TR', 'Turkey'), ('TT', 'Trinidad & Tobago'), ('TV', 'Tuvalu'), ('TW', 'Taiwan, Province of China'), ('TZ', 'Tanzania, United Republic of'), ('UA', 'Ukraine'), ('UG', 'Uganda'), ('UM', 'United States Minor Outlying Islands'), ('US', 'United States of America'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VA', 'Vatican City State (Holy See)'), ('VC', 'St. Vincent & the Grenadines'), ('VE', 'Venezuela'), ('VG', 'British Virgin Islands'), ('VI', 'United States Virgin Islands'), ('VN', 'Viet Nam'), ('VU', 'Vanuatu'), ('WF', 'Wallis & Futuna Islands'), ('WS', 'Samoa'), ('YE', 'Yemen'), ('YT', 'Mayotte'), ('YU', 'Yugoslavia'), ('ZA', 'South Africa'), ('ZM', 'Zambia'), ('ZR', 'Zaire'), ('ZW', 'Zimbabwe')], default='CH', max_length=2),
- ),
- ]
diff --git a/uncloud/migrations/0005_delete_uncloudtask.py b/uncloud/migrations/0005_delete_uncloudtask.py
deleted file mode 100644
index 6d9b095..0000000
--- a/uncloud/migrations/0005_delete_uncloudtask.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Generated by Django 3.2.4 on 2021-07-07 15:11
-
-from django.db import migrations
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('uncloud', '0004_auto_20210101_1308'),
- ]
-
- operations = [
- migrations.DeleteModel(
- name='UncloudTask',
- ),
- ]
diff --git a/uncloud/migrations/__init__.py b/uncloud/migrations/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/uncloud/models.py b/uncloud/models.py
deleted file mode 100644
index c2b3cf9..0000000
--- a/uncloud/models.py
+++ /dev/null
@@ -1,209 +0,0 @@
-from django.db import models
-from django.db.models import JSONField, Q
-from django.utils import timezone
-from django.utils.translation import gettext_lazy as _
-from django.core.validators import MinValueValidator, MaxValueValidator
-from django.core.exceptions import FieldError
-
-from uncloud import COUNTRIES
-from .selectors import filter_for_when
-
-class UncloudModel(models.Model):
- """
- This class extends the standard model with an
- extra_data field that can be used to include public,
- but internal information.
-
- For instance if you migrate from an existing virtualisation
- framework to uncloud.
-
- The extra_data attribute should be considered a hack and whenever
- data is necessary for running uncloud, it should **not** be stored
- in there.
-
- """
-
- extra_data = JSONField(editable=False, blank=True, null=True)
-
- class Meta:
- abstract = True
-
-# See https://docs.djangoproject.com/en/dev/ref/models/fields/#field-choices-enum-types
-class UncloudStatus(models.TextChoices):
- PENDING = 'PENDING', _('Pending')
- AWAITING_PAYMENT = 'AWAITING_PAYMENT', _('Awaiting payment')
- BEING_CREATED = 'BEING_CREATED', _('Being created')
- SCHEDULED = 'SCHEDULED', _('Scheduled') # resource selected, waiting for dispatching
- ACTIVE = 'ACTIVE', _('Active')
- MODIFYING = 'MODIFYING', _('Modifying') # Resource is being changed
- DELETED = 'DELETED', _('Deleted') # Resource has been deleted
- DISABLED = 'DISABLED', _('Disabled') # Is usable, but cannot be used for new things
- UNUSABLE = 'UNUSABLE', _('Unusable'), # Has some kind of error
-
-
-
-###
-# General address handling
-class CountryField(models.CharField):
- def __init__(self, *args, **kwargs):
- kwargs.setdefault('choices', COUNTRIES)
- kwargs.setdefault('default', 'CH')
- kwargs.setdefault('max_length', 2)
-
- super().__init__(*args, **kwargs)
-
- def get_internal_type(self):
- return "CharField"
-
-
-class UncloudAddress(models.Model):
- full_name = models.CharField(max_length=256, null=False)
- organization = models.CharField(max_length=256, blank=True, null=True)
- street = models.CharField(max_length=256, null=False)
- city = models.CharField(max_length=256, null=False)
- postal_code = models.CharField(max_length=64)
- country = CountryField(blank=False, null=False)
-
- class Meta:
- abstract = True
-
-
-class UncloudValidTimeFrame(models.Model):
-    """
-    A model that limits the validity of something to a certain
-    time frame. Used basically for versioning.
-
-    Logic: an entry is current if "now" lies within valid_from/valid_to;
-    a missing bound is treated as open-ended.
-    """
-
- class Meta:
- abstract = True
-
- constraints = [
- models.UniqueConstraint(fields=['owner'],
- condition=models.Q(active=True),
- name='one_active_card_per_user')
- ]
-
-
- valid_from = models.DateTimeField(default=timezone.now, null=True, blank=True)
- valid_to = models.DateTimeField(null=True, blank=True)
-
-    @classmethod
-    def get_current(cls, *args, **kwargs):
-        now = timezone.now()
-
-        # Current if the time frame has started (or has no start) and
-        # has not yet ended (or has no end).
-        return cls.objects.filter(
-            Q(valid_from__lte=now) | Q(valid_from__isnull=True),
-            Q(valid_to__gte=now) | Q(valid_to__isnull=True)
-        )
-
-
-
-
-
-###
-# UncloudNetworks are used as identifiers - as such they are a basic building block of uncloud
-
-class UncloudNetwork(models.Model):
- """
- Storing IP networks
- """
-
- network_address = models.GenericIPAddressField(null=False, unique=True)
- network_mask = models.IntegerField(null=False,
- validators=[MinValueValidator(0),
- MaxValueValidator(128)]
- )
-
- description = models.CharField(max_length=256)
-
- @classmethod
- def populate_db_defaults(cls):
- for net, desc in [
- ( "2a0a:e5c0:11::", "uncloud Billing" ),
- ( "2a0a:e5c0:11:1::", "uncloud Referral" ),
- ( "2a0a:e5c0:11:2::", "uncloud Coupon" )
- ]:
- obj, created = cls.objects.get_or_create(network_address=net,
- defaults= {
- 'network_mask': 64,
- 'description': desc
- }
- )
-
-
- def save(self, *args, **kwargs):
- if not ':' in self.network_address and self.network_mask > 32:
- raise FieldError("Mask cannot exceed 32 for IPv4")
-
- super().save(*args, **kwargs)
-
-
- def __str__(self):
- return f"{self.network_address}/{self.network_mask} {self.description}"
-
-###
-# Who is running / providing this instance of uncloud?
-
-class UncloudProvider(UncloudAddress):
- """
-    A class representing who is running this uncloud instance.
- This might change over time so we allow starting/ending dates
-
- This also defines the taxation rules.
-
- starting/ending date define from when to when this is valid. This way
- we can model address changes and have it correct in the bills.
- """
-
- # Meta:
- # FIXMe: only allow non overlapping time frames -- how to define this as a constraint?
- starting_date = models.DateField()
- ending_date = models.DateField(blank=True, null=True)
-
- billing_network = models.ForeignKey(UncloudNetwork, related_name="uncloudproviderbill", on_delete=models.CASCADE)
- referral_network = models.ForeignKey(UncloudNetwork, related_name="uncloudproviderreferral", on_delete=models.CASCADE)
- coupon_network = models.ForeignKey(UncloudNetwork, related_name="uncloudprovidercoupon", on_delete=models.CASCADE)
-
-
-    @classmethod
-    def get_provider(cls, when=None):
-        """
-        Find the active provider at a certain time - if there was any
-        """
-        if when is None:
-            when = timezone.now()
-
-        return cls.objects.get(Q(starting_date__lte=when, ending_date__gte=when) |
-                               Q(starting_date__lte=when, ending_date__isnull=True))
-
-
- @classmethod
- def populate_db_defaults(cls):
- obj, created = cls.objects.get_or_create(full_name="ungleich glarus ag",
- street="Bahnhofstrasse 1",
- postal_code="8783",
- city="Linthal",
- country="CH",
- starting_date=timezone.now(),
- billing_network=UncloudNetwork.objects.get(description="uncloud Billing"),
- referral_network=UncloudNetwork.objects.get(description="uncloud Referral"),
- coupon_network=UncloudNetwork.objects.get(description="uncloud Coupon")
- )
-
-
- def __str__(self):
- return f"{self.full_name} {self.country}"
-
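To illustrate how the pieces above fit together, here is a hedged usage sketch: seeding the default networks and provider, creating an extra network, and triggering the IPv4 mask check in UncloudNetwork.save(). The shell session and the 2001:db8:: / 192.0.2.0 example addresses are illustrative only, not part of the original code.

# Illustrative use of the models defined above; run e.g. in `manage.py shell`.
from django.core.exceptions import FieldError

from uncloud.models import UncloudNetwork, UncloudProvider

# Seed the default billing/referral/coupon networks, then the provider record.
UncloudNetwork.populate_db_defaults()
UncloudProvider.populate_db_defaults()

# IPv6 networks accept masks up to 128 ...
UncloudNetwork.objects.get_or_create(
    network_address="2001:db8::",
    defaults={"network_mask": 64, "description": "example network"},
)

# ... while an IPv4 address with a mask > 32 is rejected by save().
try:
    UncloudNetwork(network_address="192.0.2.0", network_mask=48,
                   description="broken").save()
except FieldError:
    pass  # expected: "Mask cannot exceed 32 for IPv4"

# Look up the provider that is valid right now.
provider = UncloudProvider.get_provider()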
diff --git a/uncloud/selectors.py b/uncloud/selectors.py
deleted file mode 100644
index 52b8548..0000000
--- a/uncloud/selectors.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from django.db.models import Q
-from django.utils import timezone
-
-def filter_for_when(queryset, when=None):
- """
- Return a filtered queryset which is valid for the given date
-
- Logic:
-
- Look for entries that have a starting date before when
- and either
- - No ending date
- - Ending date after "when"
-
-    Returns a queryset, you'll need to apply .first() or similar on it
-
- """
-
- if not when:
- when = timezone.now()
-
- return queryset.filter(starting_date__lte=when).filter(Q(ending_date__gte=when) |
- Q(ending_date__isnull=True))
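filter_for_when only narrows a queryset; callers still pick a concrete row, for example with .first(). A minimal sketch against a model with starting_date/ending_date fields such as UncloudProvider; the concrete date is illustrative.

# Illustrative use of filter_for_when; the chosen date is an example value.
from datetime import datetime, timezone as dt_timezone

from uncloud.models import UncloudProvider
from uncloud.selectors import filter_for_when

# Provider that was valid at the start of 2021; when=None would mean "now".
when = datetime(2021, 1, 1, tzinfo=dt_timezone.utc)
provider = filter_for_when(UncloudProvider.objects.all(), when=when).first()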
diff --git a/uncloud/settings.py b/uncloud/settings.py
deleted file mode 100644
index be6cc11..0000000
--- a/uncloud/settings.py
+++ /dev/null
@@ -1,265 +0,0 @@
-"""
-Django settings for uncloud project.
-
-Generated by 'django-admin startproject' using Django 3.0.3.
-
-For more information on this file, see
-https://docs.djangoproject.com/en/3.0/topics/settings/
-
-For the full list of settings and their values, see
-https://docs.djangoproject.com/en/3.0/ref/settings/
-"""
-
-import os
-import re
-import ldap
-import sys
-
-from django.core.management.utils import get_random_secret_key
-from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion
-
-
-LOGGING = {}
-
-# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
-BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
-sys.modules['fontawesome_free'] = __import__('fontawesome-free')
-
-# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
-DATABASES = {
- 'default': {
- 'ENGINE': 'django.db.backends.sqlite3',
- 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
- }
-}
-
-
-# Quick-start development settings - unsuitable for production
-# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
-
-# SECURITY WARNING: don't run with debug turned on in production!
-DEBUG = True
-
-SITE_ID = 1
-
-# Application definition
-
-INSTALLED_APPS = [
- 'django.contrib.admin',
- 'django.contrib.auth',
- 'django.contrib.contenttypes',
- 'django.contrib.sessions',
- 'django.contrib.messages',
- 'django.contrib.sites',
- 'allauth',
- 'allauth.account',
- 'allauth.socialaccount',
- 'django.contrib.staticfiles',
- 'django_extensions',
- 'rest_framework',
- 'bootstrap5',
- 'django_q',
- 'fontawesome_free',
- 'uncloud',
- 'uncloud_pay',
- 'uncloud_auth',
- 'uncloud_net',
- 'uncloud_storage',
- 'uncloud_vm',
- 'uncloud_service',
- 'opennebula',
- 'matrixhosting',
-]
-
-MIDDLEWARE = [
- 'django.middleware.security.SecurityMiddleware',
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.middleware.common.CommonMiddleware',
- 'django.middleware.csrf.CsrfViewMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.contrib.messages.middleware.MessageMiddleware',
- 'django.middleware.clickjacking.XFrameOptionsMiddleware',
-]
-
-ROOT_URLCONF = 'uncloud.urls'
-
-TEMPLATES = [
- {
- 'BACKEND': 'django.template.backends.django.DjangoTemplates',
- 'DIRS': [],
- 'APP_DIRS': True,
- 'OPTIONS': {
- 'context_processors': [
- 'django.template.context_processors.debug',
- 'django.template.context_processors.request',
- 'django.contrib.auth.context_processors.auth',
- 'django.contrib.messages.context_processors.messages',
- ],
- },
- },
-]
-
-WSGI_APPLICATION = 'uncloud.wsgi.application'
-
-
-# Password validation
-# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
-
-AUTH_PASSWORD_VALIDATORS = [
- {
- 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
- },
- {
- 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
- },
- {
- 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
- },
- {
- 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
- },
-]
-###############################################################################
-# Allauth Settings
-ACCOUNT_AUTHENTICATION_METHOD = "username"
-ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 1
-ACCOUNT_EMAIL_REQUIRED = False
-ACCOUNT_EMAIL_VERIFICATION = "optional"
-ACCOUNT_UNIQUE_EMAIL = False
-################################################################################
-# AUTH/LDAP
-
-AUTH_LDAP_SERVER_URI = ""
-AUTH_LDAP_BIND_DN = ""
-AUTH_LDAP_BIND_PASSWORD = ""
-AUTH_LDAP_USER_SEARCH = LDAPSearch("dc=example,dc=com",
- ldap.SCOPE_SUBTREE,
- "(uid=%(user)s)")
-
-AUTH_LDAP_USER_ATTR_MAP = {
- "first_name": "givenName",
- "last_name": "sn",
- "email": "mail"
-}
-
-################################################################################
-# AUTH/Django
-AUTHENTICATION_BACKENDS = [
- "django_auth_ldap.backend.LDAPBackend",
- "django.contrib.auth.backends.ModelBackend",
- 'allauth.account.auth_backends.AuthenticationBackend',
-]
-
-AUTH_USER_MODEL = 'uncloud_auth.User'
-
-
-################################################################################
-# AUTH/REST
-REST_FRAMEWORK = {
- 'DEFAULT_AUTHENTICATION_CLASSES': [
- 'rest_framework.authentication.BasicAuthentication',
- 'rest_framework.authentication.SessionAuthentication',
- ]
-}
-
-
-# Internationalization
-# https://docs.djangoproject.com/en/3.0/topics/i18n/
-
-LANGUAGE_CODE = 'en-us'
-
-TIME_ZONE = 'UTC'
-
-USE_I18N = True
-
-USE_L10N = True
-
-USE_TZ = True
-
-
-# Static files (CSS, JavaScript, Images)
-# https://docs.djangoproject.com/en/3.0/howto/static-files/
-STATIC_URL = '/static/'
-STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static") ]
-STATICFILES_FINDERS = [
- 'django.contrib.staticfiles.finders.FileSystemFinder',
- 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
-]
-
-#VM Deployment TEMPLATE
-GITLAB_SERVER = 'https://code.ungleich.ch'
-GITLAB_OAUTH_TOKEN = ''
-GITLAB_PROJECT_ID = 388
-GITLAB_AUTHOR_EMAIL = ''
-GITLAB_AUTHOR_NAME = ''
-GITLAB_YAML_DIR = ''
-
-# XML-RPC interface of opennebula
-OPENNEBULA_URL = 'https://opennebula.example.com:2634/RPC2'
-
-# user:pass for accessing opennebula
-OPENNEBULA_USER_PASS = 'user:password'
-
-# Stripe (Credit Card payments)
-STRIPE_KEY=""
-STRIPE_PUBLIC_KEY=""
-BILL_PAYMENT_DELAY = 0
-# The django secret key
-SECRET_KEY=get_random_secret_key()
-
-ALLOWED_HOSTS = []
-
-# required for hardcopy / pdf rendering: https://github.com/loftylabs/django-hardcopy
-CHROME_PATH = '/usr/bin/chromium-browser'
-
-# Username that is created by default and owns the configuration objects
-UNCLOUD_ADMIN_NAME = "uncloud-admin"
-
-LOGIN_REDIRECT_URL = '/'
-LOGOUT_REDIRECT_URL = '/'
-
-# replace these in local_settings.py
-AUTH_LDAP_SERVER_URI = "ldaps://ldap1.example.com,ldaps://ldap2.example.com"
-AUTH_LDAP_BIND_DN="uid=django,ou=system,dc=example,dc=com"
-AUTH_LDAP_BIND_PASSWORD="a very secure ldap password"
-AUTH_LDAP_USER_SEARCH = LDAPSearch("dc=example,dc=com",
- ldap.SCOPE_SUBTREE,
- "(uid=%(user)s)")
-
-# where to create customers
-LDAP_CUSTOMER_DN="ou=customer,dc=example,dc=com"
-
-EMAIL_USE_TLS = True
-EMAIL_HOST = ''
-EMAIL_PORT = 465
-EMAIL_HOST_USER = ''
-EMAIL_HOST_PASSWORD = ''
-DEFAULT_FROM_EMAIL = ''
-RENEWAL_FROM_EMAIL = 'test@example.com'
-# Should be removed in production
-EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
-
-##############
-# Jobs
-Q_CLUSTER = {
- 'name': 'matrixhosting',
- 'workers': 1,
- 'recycle': 500,
- 'timeout': 60,
- 'compress': True,
- 'cpu_affinity': 1,
- 'save_limit': 250,
- 'queue_limit': 500,
- 'label': 'Django Q',
- 'redis': {
- 'host': '127.0.0.1',
- 'port': 6379,
- 'db': 0, }
-}
-
-# Overwrite settings with local settings, if existing
-try:
- from uncloud.local_settings import *
-except (ModuleNotFoundError, ImportError):
- pass
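Since settings.py ends by importing uncloud.local_settings, deployment-specific values are meant to live there. A hedged sketch of such a file follows; every value below is a placeholder, only the setting names come from the file above.

# uncloud/local_settings.py -- illustrative overrides, all values are placeholders.
SECRET_KEY = 'replace-with-a-persistent-secret-key'
DEBUG = False
ALLOWED_HOSTS = ['uncloud.example.com']

# Stripe credentials (see STRIPE_KEY / STRIPE_PUBLIC_KEY above)
STRIPE_KEY = 'sk_test_placeholder'
STRIPE_PUBLIC_KEY = 'pk_test_placeholder'

# LDAP connection actually used in production
AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.com'
AUTH_LDAP_BIND_DN = 'uid=django,ou=system,dc=example,dc=com'
AUTH_LDAP_BIND_PASSWORD = 'change-me'

# OpenNebula XML-RPC endpoint and credentials
OPENNEBULA_URL = 'https://opennebula.example.com:2634/RPC2'
OPENNEBULA_USER_PASS = 'user:password'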
diff --git a/uncloud/static/uncloud/uncloud.css b/uncloud/static/uncloud/uncloud.css
deleted file mode 100644
index 51d93ef..0000000
--- a/uncloud/static/uncloud/uncloud.css
+++ /dev/null
@@ -1,4 +0,0 @@
-#content {
- width: 400px;
- margin: auto;
-}
diff --git a/uncloud/templates/uncloud/base.html b/uncloud/templates/uncloud/base.html
deleted file mode 100644
index cbf0686..0000000
--- a/uncloud/templates/uncloud/base.html
+++ /dev/null
@@ -1,29 +0,0 @@
-{% extends 'bootstrap5/bootstrap5.html' %}
-{% block bootstrap5_before_content %}
-
-
-{% endblock %}
diff --git a/uncloud/templates/uncloud/index.html b/uncloud/templates/uncloud/index.html
deleted file mode 100644
index b8b5828..0000000
--- a/uncloud/templates/uncloud/index.html
+++ /dev/null
@@ -1,170 +0,0 @@
-{% extends 'uncloud/base.html' %}
-{% block title %}Welcome to uncloud [beta]{% endblock %}
-
-{% block bootstrap5_content %}
-
-
-
-
-
-      Welcome to uncloud [beta]
-
-
-
-
-
-      About uncloud
-
-
- Welcome to uncloud, the Open Source cloud management
- system by ungleich.
- It is an API driven system with
-	  some convenience views provided by
- the Django Rest
- Framework. You can
- freely access
- the source code of uncloud.
- This is a BETA service. As such, some
- functionality might not be very sophisticated.
-
-
-
-
-
-      Getting started
-
-
-      uncloud is designed to be as easy as possible to use. However,
- there are some "real world" requirements that need to be met to
- start using uncloud:
-
-
-      If you have forgotten your password or have other issues with
- logging in, you can contact the ungleich support
- via support at ungleich.ch.
-
-
-      Secondly you will need to
- create a billing
- address. This is required for determining the correct
- tax.
-
-      Next you will need to
- register a credit card
- from which payments can be made. Your credit card will not
- be charged without your consent.
-
-
-
-
-
-      Introduction to uncloud concepts
-
-
-      We plan to offer many services on uncloud, ranging from
-      free offerings to one-off and recurring charges. As transfer
-      fees are a major challenge for our business, we based uncloud
-      on the pre-paid account model, which means
-      that you can top up your account and then use your balance to
-      pay for product usage.
-
-
-
-
-
-      Credit cards
-
-
-	  Credit cards are registered with Stripe. We only save the
-	  last 4 digits and the expiry date of the card to make
-	  identification easier for you.
-
-
-
-      Register a credit card
-	(this has to be done via JavaScript so that we never see
-	your credit card details; they are sent directly to Stripe)
-
-      You can list your
-	    credit cards.
- By default the first credit card is used for charging
- ("active: true") and later added cards will not be
- used. To change this, first disable the active flag and
- then set it on another credit card.
-
-
-
-
-      Billing Address, Payments and Balance
-
-
-      Billing addresses behave similarly to credit cards: you can
- have many of them, but only one can be active. The active
- billing address is taken for creating new orders.
-
-
-      In uncloud we use the pre-paid model: you can add money to
- your account via payments. You can always check your
- balance. The products you use will automatically be charged from
- your existing balance.
-
-
-
-      In the future you will be able to opt in to automatically
-      recharging your account at a certain interval or whenever it
-      is below a certain amount.
- By submitting I authorise to send instructions to
- the financial institution that issued my card to take
- payments from my card account in accordance with the
- terms of my agreement with you.
-