`_ tool.
+
+Network configuration
+~~~~~~~~~~~~~~~~~~~~~
+All VMs in ucloud are required to support IPv6. The primary network
+configuration is always done using SLAAC. A VM thus only needs to be
+configured to (a minimal Linux sketch follows the list)
+
+* accept router advertisements on all network interfaces
+* use the router advertisements to configure the network interfaces
+* accept the DNS entries from the router advertisements
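+
+A minimal sketch of matching sysctl settings for a Linux guest is shown
+below. The exact mechanism is distribution-specific, and applying the DNS
+servers announced in router advertisements is assumed to be handled by an
+RDNSS-aware client such as *rdnssd* or a network manager.
+
+.. code-block:: sh
+
+    # Accept router advertisements even if IPv6 forwarding is enabled
+    sysctl -w net.ipv6.conf.all.accept_ra=2
+    # Autoconfigure addresses from the advertised prefix (SLAAC)
+    sysctl -w net.ipv6.conf.all.autoconf=1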
+
+
+Configuring SSH keys
+~~~~~~~~~~~~~~~~~~~~
+
+To allow access to the VM, ucloud supports provisioning SSH keys.
+
+To accept SSH keys in your VM, request the URL
+*http://metadata/ssh_keys* and add the content to the appropriate user's
+**authorized_keys** file. Below you will find sample code that accomplishes
+this task:
+
+.. code-block:: sh
+
+    tmp=$(mktemp)
+    curl -s http://metadata/ssh_keys > "$tmp"
+    mkdir -p ~/.ssh                # ensure the directory exists
+    touch ~/.ssh/authorized_keys   # ensure the file exists
+    cat ~/.ssh/authorized_keys >> "$tmp"
+    sort "$tmp" | uniq > ~/.ssh/authorized_keys
+
+
+Disk resize
+~~~~~~~~~~~
+In virtualised environments, disk sizes might grow. The operating
+system should detect disks that are bigger than the existing partition
+table and resize the partition and filesystem accordingly. This task is
+OS-specific.
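+
+As a sketch, on a Linux guest with an ext4 root filesystem on */dev/vda1*
+and the *cloud-utils* growpart tool installed, the partition and filesystem
+could be grown as follows (device names and tools are assumptions and vary
+per operating system):
+
+.. code-block:: sh
+
+    # Grow partition 1 of /dev/vda to fill the enlarged disk
+    growpart /dev/vda 1
+    # Grow the ext4 filesystem to the new partition size
+    resize2fs /dev/vda1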
+
+ucloud does not support shrinking disks due to the complexity and the
+intra-OS dependencies involved.
diff --git a/archive/uncloud_etcd_based/scripts/uncloud b/archive/uncloud_etcd_based/scripts/uncloud
new file mode 100755
index 0000000..9517b01
--- /dev/null
+++ b/archive/uncloud_etcd_based/scripts/uncloud
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+import logging
+import sys
+import importlib
+import argparse
+import os
+
+from etcd3.exceptions import ConnectionFailedError
+
+from uncloud.common import settings
+from uncloud import UncloudException
+from uncloud.common.cli import resolve_otp_credentials
+
+# Components that use etcd
+ETCD_COMPONENTS = ['api', 'scheduler', 'host', 'filescanner',
+ 'imagescanner', 'metadata', 'configure', 'hack']
+
+ALL_COMPONENTS = ETCD_COMPONENTS.copy()
+ALL_COMPONENTS.append('oneshot')
+#ALL_COMPONENTS.append('cli')
+
+
+if __name__ == '__main__':
+ arg_parser = argparse.ArgumentParser()
+ subparsers = arg_parser.add_subparsers(dest='command')
+
+ parent_parser = argparse.ArgumentParser(add_help=False)
+ parent_parser.add_argument('--debug', '-d', action='store_true', default=False,
+ help='More verbose logging')
+ parent_parser.add_argument('--conf-dir', '-c', help='Configuration directory',
+ default=os.path.expanduser('~/uncloud'))
+
+ etcd_parser = argparse.ArgumentParser(add_help=False)
+ etcd_parser.add_argument('--etcd-host')
+ etcd_parser.add_argument('--etcd-port')
+ etcd_parser.add_argument('--etcd-ca-cert', help='CA that signed the etcd certificate')
+ etcd_parser.add_argument('--etcd-cert-cert', help='Path to client certificate')
+ etcd_parser.add_argument('--etcd-cert-key', help='Path to client certificate key')
+
+ for component in ALL_COMPONENTS:
+ mod = importlib.import_module('uncloud.{}.main'.format(component))
+ parser = getattr(mod, 'arg_parser')
+
+ if component in ETCD_COMPONENTS:
+ subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser, etcd_parser])
+ else:
+ subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser])
+
+ arguments = vars(arg_parser.parse_args())
+ etcd_arguments = [key for key, value in arguments.items() if key.startswith('etcd_') and value]
+ etcd_arguments = {
+ 'etcd': {
+ key.replace('etcd_', ''): arguments[key]
+ for key in etcd_arguments
+ }
+ }
+ if not arguments['command']:
+ arg_parser.print_help()
+ else:
+        # Initializing Settings and resolving otp_credentials
+        # It is necessary to call resolve_otp_credentials after argument parsing is
+        # done: previously the config file was fixed to ~/uncloud/uncloud.conf and
+        # provided the default values for the --name, --realm and --seed arguments.
+        # Now the user is asked where the config file lives, so those defaults can
+        # only be filled in after the arguments have been parsed.
+# settings.settings = settings.Settings(arguments['conf_dir'], seed_value=etcd_arguments)
+# resolve_otp_credentials(arguments)
+
+ name = arguments.pop('command')
+ mod = importlib.import_module('uncloud.{}.main'.format(name))
+ main = getattr(mod, 'main')
+
+ if arguments['debug']:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ log = logging.getLogger()
+
+ try:
+ main(arguments)
+ except UncloudException as err:
+ log.error(err)
+ sys.exit(1)
+# except ConnectionFailedError as err:
+# log.error('Cannot connect to etcd: {}'.format(err))
+ except Exception as err:
+ log.exception(err)
diff --git a/archive/uncloud_etcd_based/setup.py b/archive/uncloud_etcd_based/setup.py
new file mode 100644
index 0000000..f5e0718
--- /dev/null
+++ b/archive/uncloud_etcd_based/setup.py
@@ -0,0 +1,51 @@
+import os
+
+from setuptools import setup, find_packages
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
+try:
+ import uncloud.version
+
+ version = uncloud.version.VERSION
+except (ImportError, AttributeError):
+ import subprocess
+
+ c = subprocess.check_output(["git", "describe"])
+ version = c.decode("utf-8").strip()
+
+
+setup(
+ name="uncloud",
+ version=version,
+ description="uncloud cloud management",
+ url="https://code.ungleich.ch/uncloud/uncloud",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ classifiers=[
+ "Development Status :: 3 - Alpha",
+ "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
+ "Programming Language :: Python :: 3",
+ ],
+ author="ungleich",
+ author_email="technik@ungleich.ch",
+ packages=find_packages(),
+ install_requires=[
+ "requests",
+ "Flask>=1.1.1",
+ "flask-restful",
+ "bitmath",
+ "pyotp",
+ "pynetbox",
+ "colorama",
+ "etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3",
+ "marshmallow",
+ "ldap3"
+ ],
+ scripts=["scripts/uncloud"],
+ data_files=[
+ (os.path.expanduser("~/uncloud/"), ["conf/uncloud.conf"])
+ ],
+ zip_safe=False,
+)
diff --git a/uncloud/uncloud_auth/__init__.py b/archive/uncloud_etcd_based/test/__init__.py
similarity index 100%
rename from uncloud/uncloud_auth/__init__.py
rename to archive/uncloud_etcd_based/test/__init__.py
diff --git a/archive/uncloud_etcd_based/test/test_mac_local.py b/archive/uncloud_etcd_based/test/test_mac_local.py
new file mode 100644
index 0000000..3a4ac3a
--- /dev/null
+++ b/archive/uncloud_etcd_based/test/test_mac_local.py
@@ -0,0 +1,37 @@
+import unittest
+from unittest.mock import Mock
+
+from uncloud.hack.mac import MAC
+from uncloud import UncloudException
+
+class TestMacLocal(unittest.TestCase):
+ def setUp(self):
+ self.config = Mock()
+ self.config.arguments = {"no_db":True}
+ self.mac = MAC(self.config)
+ self.mac.create()
+
+ def testMacInt(self):
+ self.assertEqual(self.mac.__int__(), int("0x420000000001",0), "wrong first MAC index")
+
+ def testMacRepr(self):
+ self.assertEqual(self.mac.__repr__(), '420000000001', "wrong first MAC index")
+
+ def testMacStr(self):
+ self.assertEqual(self.mac.__str__(), '42:00:00:00:00:01', "wrong first MAC index")
+
+ def testValidationRaise(self):
+ with self.assertRaises(UncloudException):
+ self.mac.validate_mac("2")
+
+ def testValidation(self):
+ self.assertTrue(self.mac.validate_mac("42:00:00:00:00:01"), "Validation of a given MAC not working properly")
+
+ def testNextMAC(self):
+ self.mac.create()
+ self.assertEqual(self.mac.__repr__(), '420000000001', "wrong second MAC index")
+ self.assertEqual(self.mac.__int__(), int("0x420000000001",0), "wrong second MAC index")
+ self.assertEqual(self.mac.__str__(), '42:00:00:00:00:01', "wrong second MAC index")
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/archive/uncloud_etcd_based/uncloud/__init__.py b/archive/uncloud_etcd_based/uncloud/__init__.py
new file mode 100644
index 0000000..2920f47
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/__init__.py
@@ -0,0 +1,2 @@
+class UncloudException(Exception):
+ pass
diff --git a/archive/uncloud_etcd_based/uncloud/api/README.md b/archive/uncloud_etcd_based/uncloud/api/README.md
new file mode 100755
index 0000000..e28d676
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/api/README.md
@@ -0,0 +1,12 @@
+# ucloud-api
+[](https://www.repostatus.org/#wip)
+
+## Installation
+
+**Make sure you have Python >= 3.5 and Pipenv installed.**
+
+1. Clone the repository and `cd` into it.
+2. Run the following commands
+ - `pipenv install`
+ - `pipenv shell`
+ - `python main.py`
diff --git a/archive/uncloud_etcd_based/uncloud/api/__init__.py b/archive/uncloud_etcd_based/uncloud/api/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/api/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/archive/uncloud_etcd_based/uncloud/api/common_fields.py b/archive/uncloud_etcd_based/uncloud/api/common_fields.py
new file mode 100755
index 0000000..ba9fb37
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/api/common_fields.py
@@ -0,0 +1,59 @@
+import os
+
+from uncloud.common.shared import shared
+
+
+class Optional:
+ pass
+
+
+class Field:
+ def __init__(self, _name, _type, _value=None):
+ self.name = _name
+ self.value = _value
+ self.type = _type
+ self.__errors = []
+
+ def validation(self):
+ return True
+
+ def is_valid(self):
+        if self.value is KeyError:
+ self.add_error(
+ "'{}' field is a required field".format(self.name)
+ )
+ else:
+ if isinstance(self.value, Optional):
+ pass
+ elif not isinstance(self.value, self.type):
+ self.add_error(
+ "Incorrect Type for '{}' field".format(self.name)
+ )
+ else:
+ self.validation()
+
+ if self.__errors:
+ return False
+ return True
+
+ def get_errors(self):
+ return self.__errors
+
+ def add_error(self, error):
+ self.__errors.append(error)
+
+
+class VmUUIDField(Field):
+ def __init__(self, data):
+ self.uuid = data.get("uuid", KeyError)
+
+ super().__init__("uuid", str, self.uuid)
+
+ self.validation = self.vm_uuid_validation
+
+ def vm_uuid_validation(self):
+ r = shared.etcd_client.get(
+ os.path.join(shared.settings["etcd"]["vm_prefix"], self.uuid)
+ )
+ if not r:
+            self.add_error("VM with uuid {} does not exist".format(self.uuid))
diff --git a/archive/uncloud_etcd_based/uncloud/api/create_image_store.py b/archive/uncloud_etcd_based/uncloud/api/create_image_store.py
new file mode 100755
index 0000000..90e0f92
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/api/create_image_store.py
@@ -0,0 +1,19 @@
+import json
+import os
+
+from uuid import uuid4
+
+from uncloud.common.shared import shared
+
+data = {
+ 'is_public': True,
+ 'type': 'ceph',
+ 'name': 'images',
+ 'description': 'first ever public image-store',
+ 'attributes': {'list': [], 'key': [], 'pool': 'images'},
+}
+
+shared.etcd_client.put(
+ os.path.join(shared.settings['etcd']['image_store_prefix'], uuid4().hex),
+ json.dumps(data),
+)
diff --git a/archive/uncloud_etcd_based/uncloud/api/helper.py b/archive/uncloud_etcd_based/uncloud/api/helper.py
new file mode 100755
index 0000000..8ceb3a6
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/api/helper.py
@@ -0,0 +1,148 @@
+import binascii
+import ipaddress
+import random
+import logging
+import requests
+
+from pyotp import TOTP
+
+from uncloud.common.shared import shared
+
+logger = logging.getLogger(__name__)
+
+
+def check_otp(name, realm, token):
+ try:
+ data = {
+ "auth_name": shared.settings["otp"]["auth_name"],
+ "auth_token": TOTP(shared.settings["otp"]["auth_seed"]).now(),
+ "auth_realm": shared.settings["otp"]["auth_realm"],
+ "name": name,
+ "realm": realm,
+ "token": token,
+ }
+ except binascii.Error as err:
+ logger.error(
+ "Cannot compute OTP for seed: {}".format(
+ shared.settings["otp"]["auth_seed"]
+ )
+ )
+ return 400
+
+ response = requests.post(
+ shared.settings["otp"]["verification_controller_url"], json=data
+ )
+ return response.status_code
+
+
+def resolve_vm_name(name, owner):
+ """Return UUID of Virtual Machine of name == name and owner == owner
+
+ Input: name of vm, owner of vm.
+ Output: uuid of vm if found otherwise None
+ """
+ result = next(
+ filter(
+ lambda vm: vm.value["owner"] == owner
+ and vm.value["name"] == name,
+ shared.vm_pool.vms,
+ ),
+ None,
+ )
+ if result:
+ return result.key.split("/")[-1]
+
+ return None
+
+
+def resolve_image_name(name, etcd_client):
+ """Return image uuid given its name and its store
+
+    * If the provided name is not in the correct format,
+      i.e. {store_name}:{image_name}, a ValueError is raised
+    * If no such image is found, a KeyError is raised
+
+ """
+
+    separator = ":"
+
+    # Ensure the user/program passed a valid name of type string
+    try:
+        store_name_and_image_name = name.split(separator)
+
+        """
+        Examples of where it works and where it raises an exception:
+        "images:alpine" --> ["images", "alpine"]
+
+        "images" --> ["images"]; raises an exception (not enough values to unpack)
+
+        "images:alpine:meow" --> ["images", "alpine", "meow"]; raises an exception
+        (too many values to unpack)
+        """
+ store_name, image_name = store_name_and_image_name
+ except Exception:
+ raise ValueError(
+ "Image name not in correct format i.e {store_name}:{image_name}"
+ )
+
+ images = etcd_client.get_prefix(
+ shared.settings["etcd"]["image_prefix"], value_in_json=True
+ )
+
+ # Try to find image with name == image_name and store_name == store_name
+ try:
+ image = next(
+ filter(
+ lambda im: im.value["name"] == image_name
+ and im.value["store_name"] == store_name,
+ images,
+ )
+ )
+ except StopIteration:
+ raise KeyError("No image with name {} found.".format(name))
+ else:
+ image_uuid = image.key.split("/")[-1]
+
+ return image_uuid
+
+
+def random_bytes(num=6):
+ return [random.randrange(256) for _ in range(num)]
+
+
+def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"):
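+    """Generate a random MAC address.
+
+    With the defaults (no oui, uaa=False, multicast=False) the result is a
+    locally administered unicast address: bit 0 of the first byte is cleared
+    (unicast) and bit 1 is set (locally administered).
+    """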
+ mac = random_bytes()
+ if oui:
+ if type(oui) == str:
+ oui = [int(chunk) for chunk in oui.split(separator)]
+ mac = oui + random_bytes(num=6 - len(oui))
+ else:
+ if multicast:
+ mac[0] |= 1 # set bit 0
+ else:
+ mac[0] &= ~1 # clear bit 0
+ if uaa:
+ mac[0] &= ~(1 << 1) # clear bit 1
+ else:
+ mac[0] |= 1 << 1 # set bit 1
+ return separator.join(byte_fmt % b for b in mac)
+
+
+def mac2ipv6(mac, prefix):
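+    """Derive an EUI-64 style IPv6 address from a MAC address and an IPv6 prefix.
+
+    Example: mac2ipv6("42:00:00:00:00:01", "fd00::") -> "fd00::4000:ff:fe00:1"
+    """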
+ # only accept MACs separated by a colon
+ parts = mac.split(":")
+
+ # modify parts to match IPv6 value
+ parts.insert(3, "ff")
+ parts.insert(4, "fe")
+ parts[0] = "%x" % (int(parts[0], 16) ^ 2)
+
+ # format output
+ ipv6_parts = [str(0)] * 4
+ for i in range(0, len(parts), 2):
+ ipv6_parts.append("".join(parts[i : i + 2]))
+
+ lower_part = ipaddress.IPv6Address(":".join(ipv6_parts))
+ prefix = ipaddress.IPv6Address(prefix)
+ return str(prefix + int(lower_part))
+
diff --git a/archive/uncloud_etcd_based/uncloud/api/main.py b/archive/uncloud_etcd_based/uncloud/api/main.py
new file mode 100644
index 0000000..73e8e21
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/api/main.py
@@ -0,0 +1,600 @@
+import json
+import pynetbox
+import logging
+import argparse
+
+from uuid import uuid4
+from os.path import join as join_path
+
+from flask import Flask, request
+from flask_restful import Resource, Api
+from werkzeug.exceptions import HTTPException
+
+from uncloud.common.shared import shared
+
+from uncloud.common import counters
+from uncloud.common.vm import VMStatus
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.api import schemas
+from uncloud.api.helper import generate_mac, mac2ipv6
+from uncloud import UncloudException
+
+logger = logging.getLogger(__name__)
+
+app = Flask(__name__)
+api = Api(app)
+app.logger.handlers.clear()
+
+arg_parser = argparse.ArgumentParser('api', add_help=False)
+arg_parser.add_argument('--port', '-p')
+
+
+@app.errorhandler(Exception)
+def handle_exception(e):
+ app.logger.error(e)
+ # pass through HTTP errors
+ if isinstance(e, HTTPException):
+ return e
+
+ # now you're handling non-HTTP exceptions only
+ return {'message': 'Server Error'}, 500
+
+
+class CreateVM(Resource):
+ """API Request to Handle Creation of VM"""
+
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateVMSchema(data)
+ if validator.is_valid():
+ vm_uuid = uuid4().hex
+ vm_key = join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
+ specs = {
+ 'cpu': validator.specs['cpu'],
+ 'ram': validator.specs['ram'],
+ 'os-ssd': validator.specs['os-ssd'],
+ 'hdd': validator.specs['hdd'],
+ }
+ macs = [generate_mac() for _ in range(len(data['network']))]
+ tap_ids = [
+ counters.increment_etcd_counter(
+ shared.etcd_client, shared.settings['etcd']['tap_counter']
+ )
+ for _ in range(len(data['network']))
+ ]
+ vm_entry = {
+ 'name': data['vm_name'],
+ 'owner': data['name'],
+ 'owner_realm': data['realm'],
+ 'specs': specs,
+ 'hostname': '',
+ 'status': VMStatus.stopped,
+ 'image_uuid': validator.image_uuid,
+ 'log': [],
+ 'vnc_socket': '',
+ 'network': list(zip(data['network'], macs, tap_ids)),
+ 'metadata': {'ssh-keys': []},
+ 'in_migration': False,
+ }
+ shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
+
+ # Create ScheduleVM Request
+ r = RequestEntry.from_scratch(
+ type=RequestType.ScheduleVM,
+ uuid=vm_uuid,
+ request_prefix=shared.settings['etcd']['request_prefix'],
+ )
+ shared.request_pool.put(r)
+
+ return {'message': 'VM Creation Queued'}, 200
+ return validator.get_errors(), 400
+
+
+class VmStatus(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.VMStatusSchema(data)
+ if validator.is_valid():
+ vm = shared.vm_pool.get(
+ join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
+ )
+ vm_value = vm.value.copy()
+ vm_value['ip'] = []
+ for network_mac_and_tap in vm.network:
+ network_name, mac, tap = network_mac_and_tap
+ network = shared.etcd_client.get(
+ join_path(
+ shared.settings['etcd']['network_prefix'],
+ data['name'],
+ network_name,
+ ),
+ value_in_json=True,
+ )
+ ipv6_addr = (
+ network.value.get('ipv6').split('::')[0] + '::'
+ )
+ vm_value['ip'].append(mac2ipv6(mac, ipv6_addr))
+ vm.value = vm_value
+ return vm.value
+ else:
+ return validator.get_errors(), 400
+
+
+class CreateImage(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateImageSchema(data)
+ if validator.is_valid():
+ file_entry = shared.etcd_client.get(
+ join_path(shared.settings['etcd']['file_prefix'], data['uuid'])
+ )
+ file_entry_value = json.loads(file_entry.value)
+
+ image_entry_json = {
+ 'status': 'TO_BE_CREATED',
+ 'owner': file_entry_value['owner'],
+ 'filename': file_entry_value['filename'],
+ 'name': data['name'],
+ 'store_name': data['image_store'],
+ 'visibility': 'public',
+ }
+ shared.etcd_client.put(
+ join_path(
+ shared.settings['etcd']['image_prefix'], data['uuid']
+ ),
+ json.dumps(image_entry_json),
+ )
+
+ return {'message': 'Image queued for creation.'}
+ return validator.get_errors(), 400
+
+
+class ListPublicImages(Resource):
+ @staticmethod
+ def get():
+ images = shared.etcd_client.get_prefix(
+ shared.settings['etcd']['image_prefix'], value_in_json=True
+ )
+ r = {'images': []}
+ for image in images:
+ image_key = '{}:{}'.format(
+ image.value['store_name'], image.value['name']
+ )
+ r['images'].append(
+ {'name': image_key, 'status': image.value['status']}
+ )
+ return r, 200
+
+
+class VMAction(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.VmActionSchema(data)
+
+ if validator.is_valid():
+ vm_entry = shared.vm_pool.get(
+ join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
+ )
+ action = data['action']
+
+ if action == 'start':
+ action = 'schedule'
+
+ if action == 'delete' and vm_entry.hostname == '':
+ if shared.storage_handler.is_vm_image_exists(
+ vm_entry.uuid
+ ):
+ r_status = shared.storage_handler.delete_vm_image(
+ vm_entry.uuid
+ )
+ if r_status:
+ shared.etcd_client.client.delete(vm_entry.key)
+ return {'message': 'VM successfully deleted'}
+ else:
+ logger.error(
+ 'Some Error Occurred while deleting VM'
+ )
+                        return {'message': 'VM deletion unsuccessful'}
+ else:
+ shared.etcd_client.client.delete(vm_entry.key)
+ return {'message': 'VM successfully deleted'}
+
+ r = RequestEntry.from_scratch(
+ type='{}VM'.format(action.title()),
+ uuid=data['uuid'],
+ hostname=vm_entry.hostname,
+ request_prefix=shared.settings['etcd']['request_prefix'],
+ )
+ shared.request_pool.put(r)
+ return (
+ {'message': 'VM {} Queued'.format(action.title())},
+ 200,
+ )
+ else:
+ return validator.get_errors(), 400
+
+
+class VMMigration(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.VmMigrationSchema(data)
+
+ if validator.is_valid():
+ vm = shared.vm_pool.get(data['uuid'])
+ r = RequestEntry.from_scratch(
+ type=RequestType.InitVMMigration,
+ uuid=vm.uuid,
+ hostname=join_path(
+ shared.settings['etcd']['host_prefix'],
+ validator.destination.value,
+ ),
+ request_prefix=shared.settings['etcd']['request_prefix'],
+ )
+
+ shared.request_pool.put(r)
+ return (
+ {'message': 'VM Migration Initialization Queued'},
+ 200,
+ )
+ else:
+ return validator.get_errors(), 400
+
+
+class ListUserVM(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.OTPSchema(data)
+
+ if validator.is_valid():
+ vms = shared.etcd_client.get_prefix(
+ shared.settings['etcd']['vm_prefix'], value_in_json=True
+ )
+ return_vms = []
+ user_vms = filter(
+ lambda v: v.value['owner'] == data['name'], vms
+ )
+ for vm in user_vms:
+ return_vms.append(
+ {
+ 'name': vm.value['name'],
+ 'vm_uuid': vm.key.split('/')[-1],
+ 'specs': vm.value['specs'],
+ 'status': vm.value['status'],
+ 'hostname': vm.value['hostname'],
+ 'vnc_socket': vm.value.get('vnc_socket', None),
+ }
+ )
+ if return_vms:
+ return {'message': return_vms}, 200
+ return {'message': 'No VM found'}, 404
+
+ else:
+ return validator.get_errors(), 400
+
+
+class ListUserFiles(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.OTPSchema(data)
+
+ if validator.is_valid():
+ files = shared.etcd_client.get_prefix(
+ shared.settings['etcd']['file_prefix'], value_in_json=True
+ )
+ return_files = []
+ user_files = [f for f in files if f.value['owner'] == data['name']]
+ for file in user_files:
+ file_uuid = file.key.split('/')[-1]
+ file = file.value
+ file['uuid'] = file_uuid
+
+ file.pop('sha512sum', None)
+ file.pop('owner', None)
+
+ return_files.append(file)
+ return {'message': return_files}, 200
+ else:
+ return validator.get_errors(), 400
+
+
+class CreateHost(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateHostSchema(data)
+ if validator.is_valid():
+ host_key = join_path(
+ shared.settings['etcd']['host_prefix'], uuid4().hex
+ )
+ host_entry = {
+ 'specs': data['specs'],
+ 'hostname': data['hostname'],
+ 'status': 'DEAD',
+ 'last_heartbeat': '',
+ }
+ shared.etcd_client.put(
+ host_key, host_entry, value_in_json=True
+ )
+
+ return {'message': 'Host Created'}, 200
+
+ return validator.get_errors(), 400
+
+
+class ListHost(Resource):
+ @staticmethod
+ def get():
+ hosts = shared.host_pool.hosts
+ r = {
+ host.key: {
+ 'status': host.status,
+ 'specs': host.specs,
+ 'hostname': host.hostname,
+ }
+ for host in hosts
+ }
+ return r, 200
+
+
+class GetSSHKeys(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.GetSSHSchema(data)
+ if validator.is_valid():
+ if not validator.key_name.value:
+
+ # {user_prefix}/{realm}/{name}/key/
+ etcd_key = join_path(
+ shared.settings['etcd']['user_prefix'],
+ data['realm'],
+ data['name'],
+ 'key',
+ )
+ etcd_entry = shared.etcd_client.get_prefix(
+ etcd_key, value_in_json=True
+ )
+
+ keys = {
+ key.key.split('/')[-1]: key.value
+ for key in etcd_entry
+ }
+ return {'keys': keys}
+ else:
+
+ # {user_prefix}/{realm}/{name}/key/{key_name}
+ etcd_key = join_path(
+ shared.settings['etcd']['user_prefix'],
+ data['realm'],
+ data['name'],
+ 'key',
+ data['key_name'],
+ )
+ etcd_entry = shared.etcd_client.get(
+ etcd_key, value_in_json=True
+ )
+
+ if etcd_entry:
+ return {
+ 'keys': {
+ etcd_entry.key.split('/')[
+ -1
+ ]: etcd_entry.value
+ }
+ }
+ else:
+ return {'keys': {}}
+ else:
+ return validator.get_errors(), 400
+
+
+class AddSSHKey(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.AddSSHSchema(data)
+ if validator.is_valid():
+
+ # {user_prefix}/{realm}/{name}/key/{key_name}
+ etcd_key = join_path(
+ shared.settings['etcd']['user_prefix'],
+ data['realm'],
+ data['name'],
+ 'key',
+ data['key_name'],
+ )
+ etcd_entry = shared.etcd_client.get(
+ etcd_key, value_in_json=True
+ )
+ if etcd_entry:
+ return {
+ 'message': 'Key with name "{}" already exists'.format(
+ data['key_name']
+ )
+ }
+ else:
+                # Key not found, which implies the user hasn't added a key with this name yet.
+ shared.etcd_client.put(
+ etcd_key, data['key'], value_in_json=True
+ )
+ return {'message': 'Key added successfully'}
+ else:
+ return validator.get_errors(), 400
+
+
+class RemoveSSHKey(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.RemoveSSHSchema(data)
+ if validator.is_valid():
+
+ # {user_prefix}/{realm}/{name}/key/{key_name}
+ etcd_key = join_path(
+ shared.settings['etcd']['user_prefix'],
+ data['realm'],
+ data['name'],
+ 'key',
+ data['key_name'],
+ )
+ etcd_entry = shared.etcd_client.get(
+ etcd_key, value_in_json=True
+ )
+ if etcd_entry:
+ shared.etcd_client.client.delete(etcd_key)
+ return {'message': 'Key successfully removed.'}
+ else:
+ return {
+                    'message': 'No key with name "{}" exists.'.format(
+ data['key_name']
+ )
+ }
+ else:
+ return validator.get_errors(), 400
+
+
+class CreateNetwork(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.CreateNetwork(data)
+
+ if validator.is_valid():
+
+ network_entry = {
+ 'id': counters.increment_etcd_counter(
+ shared.etcd_client, shared.settings['etcd']['vxlan_counter']
+ ),
+ 'type': data['type'],
+ }
+ if validator.user.value:
+ try:
+ nb = pynetbox.api(
+ url=shared.settings['netbox']['url'],
+ token=shared.settings['netbox']['token'],
+ )
+ nb_prefix = nb.ipam.prefixes.get(
+ prefix=shared.settings['network']['prefix']
+ )
+ prefix = nb_prefix.available_prefixes.create(
+ data={
+ 'prefix_length': int(
+ shared.settings['network']['prefix_length']
+ ),
+ 'description': '{}\'s network "{}"'.format(
+ data['name'], data['network_name']
+ ),
+ 'is_pool': True,
+ }
+ )
+ except Exception as err:
+ app.logger.error(err)
+ return {
+                        'message': 'Error occurred while creating network.'
+ }
+ else:
+ network_entry['ipv6'] = prefix['prefix']
+ else:
+ network_entry['ipv6'] = 'fd00::/64'
+
+ network_key = join_path(
+ shared.settings['etcd']['network_prefix'],
+ data['name'],
+ data['network_name'],
+ )
+ shared.etcd_client.put(
+ network_key, network_entry, value_in_json=True
+ )
+ return {'message': 'Network successfully added.'}
+ else:
+ return validator.get_errors(), 400
+
+
+class ListUserNetwork(Resource):
+ @staticmethod
+ def post():
+ data = request.json
+ validator = schemas.OTPSchema(data)
+
+ if validator.is_valid():
+ prefix = join_path(
+ shared.settings['etcd']['network_prefix'], data['name']
+ )
+ networks = shared.etcd_client.get_prefix(
+ prefix, value_in_json=True
+ )
+ user_networks = []
+ for net in networks:
+ net.value['name'] = net.key.split('/')[-1]
+ user_networks.append(net.value)
+ return {'networks': user_networks}, 200
+ else:
+ return validator.get_errors(), 400
+
+
+api.add_resource(CreateVM, '/vm/create')
+api.add_resource(VmStatus, '/vm/status')
+
+api.add_resource(VMAction, '/vm/action')
+api.add_resource(VMMigration, '/vm/migrate')
+
+api.add_resource(CreateImage, '/image/create')
+api.add_resource(ListPublicImages, '/image/list-public')
+
+api.add_resource(ListUserVM, '/user/vms')
+api.add_resource(ListUserFiles, '/user/files')
+api.add_resource(ListUserNetwork, '/user/networks')
+
+api.add_resource(AddSSHKey, '/user/add-ssh')
+api.add_resource(RemoveSSHKey, '/user/remove-ssh')
+api.add_resource(GetSSHKeys, '/user/get-ssh')
+
+api.add_resource(CreateHost, '/host/create')
+api.add_resource(ListHost, '/host/list')
+
+api.add_resource(CreateNetwork, '/network/create')
+
+
+def main(arguments):
+ debug = arguments['debug']
+ port = arguments['port']
+
+ try:
+ image_stores = list(
+ shared.etcd_client.get_prefix(
+ shared.settings['etcd']['image_store_prefix'], value_in_json=True
+ )
+ )
+ except KeyError:
+ image_stores = False
+
+ # Do not inject default values that might be very wrong
+ # fail when required, not before
+ #
+ # if not image_stores:
+ # data = {
+ # 'is_public': True,
+ # 'type': 'ceph',
+ # 'name': 'images',
+ # 'description': 'first ever public image-store',
+ # 'attributes': {'list': [], 'key': [], 'pool': 'images'},
+ # }
+
+ # shared.etcd_client.put(
+ # join_path(
+ # shared.settings['etcd']['image_store_prefix'], uuid4().hex
+ # ),
+ # json.dumps(data),
+ # )
+
+ try:
+ app.run(host='::', port=port, debug=debug)
+ except OSError as e:
+ raise UncloudException('Failed to start Flask: {}'.format(e))
diff --git a/archive/uncloud_etcd_based/uncloud/api/schemas.py b/archive/uncloud_etcd_based/uncloud/api/schemas.py
new file mode 100755
index 0000000..87f20c9
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/api/schemas.py
@@ -0,0 +1,557 @@
+"""
+This module contain classes thats validates and intercept/modify
+data coming from uncloud-cli (user)
+
+It was primarily developed as an alternative to argument parser
+of Flask_Restful which is going to be deprecated. I also tried
+marshmallow for that purpose but it was an overkill (because it
+do validation + serialization + deserialization) and little
+inflexible for our purpose.
+"""
+
+# TODO: Fix error message when user's mentioned VM (referred by name)
+# does not exists.
+#
+# Currently, it says uuid is a required field.
+
+import json
+import os
+
+import bitmath
+
+from uncloud.common.host import HostStatus
+from uncloud.common.vm import VMStatus
+from uncloud.common.shared import shared
+from . import helper, logger
+from .common_fields import Field, VmUUIDField
+from .helper import check_otp, resolve_vm_name
+
+
+class BaseSchema:
+ def __init__(self, data, fields=None):
+ _ = data # suppress linter warning
+ self.__errors = []
+ if fields is None:
+ self.fields = []
+ else:
+ self.fields = fields
+
+ def validation(self):
+ # custom validation is optional
+ return True
+
+ def is_valid(self):
+ for field in self.fields:
+ field.is_valid()
+ self.add_field_errors(field)
+
+ for parent in self.__class__.__bases__:
+ try:
+ parent.validation(self)
+ except AttributeError:
+ pass
+ if not self.__errors:
+ self.validation()
+
+ if self.__errors:
+ return False
+ return True
+
+ def get_errors(self):
+ return {"message": self.__errors}
+
+ def add_field_errors(self, field: Field):
+ self.__errors += field.get_errors()
+
+ def add_error(self, error):
+ self.__errors.append(error)
+
+
+class OTPSchema(BaseSchema):
+ def __init__(self, data: dict, fields=None):
+ self.name = Field("name", str, data.get("name", KeyError))
+ self.realm = Field("realm", str, data.get("realm", KeyError))
+ self.token = Field("token", str, data.get("token", KeyError))
+
+ _fields = [self.name, self.realm, self.token]
+ if fields:
+ _fields += fields
+ super().__init__(data=data, fields=_fields)
+
+ def validation(self):
+ if (
+ check_otp(
+ self.name.value, self.realm.value, self.token.value
+ )
+ != 200
+ ):
+ self.add_error("Wrong Credentials")
+
+
+########################## Image Operations ###############################################
+
+
+class CreateImageSchema(BaseSchema):
+ def __init__(self, data):
+ # Fields
+ self.uuid = Field("uuid", str, data.get("uuid", KeyError))
+ self.name = Field("name", str, data.get("name", KeyError))
+ self.image_store = Field(
+ "image_store", str, data.get("image_store", KeyError)
+ )
+
+ # Validations
+ self.uuid.validation = self.file_uuid_validation
+ self.image_store.validation = self.image_store_name_validation
+
+ # All Fields
+ fields = [self.uuid, self.name, self.image_store]
+ super().__init__(data, fields)
+
+ def file_uuid_validation(self):
+ file_entry = shared.etcd_client.get(
+ os.path.join(
+                shared.settings["etcd"]["file_prefix"], self.uuid.value
+ )
+ )
+ if file_entry is None:
+ self.add_error(
+ "Image File with uuid '{}' Not Found".format(
+ self.uuid.value
+ )
+ )
+
+ def image_store_name_validation(self):
+ image_stores = list(
+ shared.etcd_client.get_prefix(
+                shared.settings["etcd"]["image_store_prefix"]
+ )
+ )
+
+ image_store = next(
+ filter(
+ lambda s: json.loads(s.value)["name"]
+ == self.image_store.value,
+ image_stores,
+ ),
+ None,
+ )
+ if not image_store:
+ self.add_error(
+                "Store '{}' does not exist".format(
+ self.image_store.value
+ )
+ )
+
+
+# Host Operations
+
+
+class CreateHostSchema(OTPSchema):
+ def __init__(self, data):
+ # Fields
+ self.specs = Field("specs", dict, data.get("specs", KeyError))
+ self.hostname = Field(
+ "hostname", str, data.get("hostname", KeyError)
+ )
+
+ # Validation
+ self.specs.validation = self.specs_validation
+
+ fields = [self.hostname, self.specs]
+
+ super().__init__(data=data, fields=fields)
+
+ def specs_validation(self):
+ ALLOWED_BASE = 10
+
+ _cpu = self.specs.value.get("cpu", KeyError)
+ _ram = self.specs.value.get("ram", KeyError)
+ _os_ssd = self.specs.value.get("os-ssd", KeyError)
+ _hdd = self.specs.value.get("hdd", KeyError)
+
+ if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
+ self.add_error(
+ "You must specify CPU, RAM and OS-SSD in your specs"
+ )
+ return None
+ try:
+ parsed_ram = bitmath.parse_string_unsafe(_ram)
+ parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
+
+ if parsed_ram.base != ALLOWED_BASE:
+ self.add_error(
+ "Your specified RAM is not in correct units"
+ )
+ if parsed_os_ssd.base != ALLOWED_BASE:
+ self.add_error(
+ "Your specified OS-SSD is not in correct units"
+ )
+
+            if int(_cpu) < 1:
+                self.add_error("CPU must be at least 1")
+
+            if parsed_ram < bitmath.GB(1):
+                self.add_error("RAM must be at least 1 GB")
+
+            if parsed_os_ssd < bitmath.GB(10):
+                self.add_error("OS-SSD must be at least 10 GB")
+
+ parsed_hdd = []
+ for hdd in _hdd:
+ _parsed_hdd = bitmath.parse_string_unsafe(hdd)
+ if _parsed_hdd.base != ALLOWED_BASE:
+ self.add_error(
+ "Your specified HDD is not in correct units"
+ )
+ break
+ else:
+ parsed_hdd.append(str(_parsed_hdd))
+
+ except ValueError:
+ # TODO: Find some good error message
+ self.add_error("Specs are not correct.")
+ else:
+ if self.get_errors():
+ self.specs = {
+ "cpu": _cpu,
+ "ram": str(parsed_ram),
+ "os-ssd": str(parsed_os_ssd),
+ "hdd": parsed_hdd,
+ }
+
+ def validation(self):
+ if self.realm.value != "ungleich-admin":
+ self.add_error(
+ "Invalid Credentials/Insufficient Permission"
+ )
+
+
+# VM Operations
+
+
+class CreateVMSchema(OTPSchema):
+ def __init__(self, data):
+ # Fields
+ self.specs = Field("specs", dict, data.get("specs", KeyError))
+ self.vm_name = Field(
+ "vm_name", str, data.get("vm_name", KeyError)
+ )
+ self.image = Field("image", str, data.get("image", KeyError))
+ self.network = Field(
+ "network", list, data.get("network", KeyError)
+ )
+
+ # Validation
+ self.image.validation = self.image_validation
+ self.vm_name.validation = self.vm_name_validation
+ self.specs.validation = self.specs_validation
+ self.network.validation = self.network_validation
+
+ fields = [self.vm_name, self.image, self.specs, self.network]
+
+ super().__init__(data=data, fields=fields)
+
+ def image_validation(self):
+ try:
+ image_uuid = helper.resolve_image_name(
+ self.image.value, shared.etcd_client
+ )
+ except Exception as e:
+ logger.exception(
+ "Cannot resolve image name = %s", self.image.value
+ )
+ self.add_error(str(e))
+ else:
+ self.image_uuid = image_uuid
+
+ def vm_name_validation(self):
+ if resolve_vm_name(
+ name=self.vm_name.value, owner=self.name.value
+ ):
+ self.add_error(
+ 'VM with same name "{}" already exists'.format(
+ self.vm_name.value
+ )
+ )
+
+ def network_validation(self):
+ _network = self.network.value
+
+ if _network:
+ for net in _network:
+ network = shared.etcd_client.get(
+ os.path.join(
+                        shared.settings["etcd"]["network_prefix"],
+ self.name.value,
+ net,
+ ),
+ value_in_json=True,
+ )
+ if not network:
+ self.add_error(
+                        "Network with name {} does not exist".format(
+ net
+ )
+ )
+
+ def specs_validation(self):
+ ALLOWED_BASE = 10
+
+ _cpu = self.specs.value.get("cpu", KeyError)
+ _ram = self.specs.value.get("ram", KeyError)
+ _os_ssd = self.specs.value.get("os-ssd", KeyError)
+ _hdd = self.specs.value.get("hdd", KeyError)
+
+ if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
+ self.add_error(
+ "You must specify CPU, RAM and OS-SSD in your specs"
+ )
+ return None
+ try:
+ parsed_ram = bitmath.parse_string_unsafe(_ram)
+ parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
+
+ if parsed_ram.base != ALLOWED_BASE:
+ self.add_error(
+ "Your specified RAM is not in correct units"
+ )
+ if parsed_os_ssd.base != ALLOWED_BASE:
+ self.add_error(
+ "Your specified OS-SSD is not in correct units"
+ )
+
+            if int(_cpu) < 1:
+                self.add_error("CPU must be at least 1")
+
+            if parsed_ram < bitmath.GB(1):
+                self.add_error("RAM must be at least 1 GB")
+
+            if parsed_os_ssd < bitmath.GB(1):
+                self.add_error("OS-SSD must be at least 1 GB")
+
+ parsed_hdd = []
+ for hdd in _hdd:
+ _parsed_hdd = bitmath.parse_string_unsafe(hdd)
+ if _parsed_hdd.base != ALLOWED_BASE:
+ self.add_error(
+ "Your specified HDD is not in correct units"
+ )
+ break
+ else:
+ parsed_hdd.append(str(_parsed_hdd))
+
+ except ValueError:
+ # TODO: Find some good error message
+ self.add_error("Specs are not correct.")
+ else:
+ if self.get_errors():
+ self.specs = {
+ "cpu": _cpu,
+ "ram": str(parsed_ram),
+ "os-ssd": str(parsed_os_ssd),
+ "hdd": parsed_hdd,
+ }
+
+
+class VMStatusSchema(OTPSchema):
+ def __init__(self, data):
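+        # Resolve the user-supplied VM name to its UUID. If no such VM exists,
+        # fall back to KeyError so that VmUUIDField reports the field as missing
+        # (see the TODO at the top of this module).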
+ data["uuid"] = (
+ resolve_vm_name(
+ name=data.get("vm_name", None),
+ owner=(
+ data.get("in_support_of", None)
+ or data.get("name", None)
+ ),
+ )
+ or KeyError
+ )
+ self.uuid = VmUUIDField(data)
+
+ fields = [self.uuid]
+
+ super().__init__(data, fields)
+
+ def validation(self):
+ vm = shared.vm_pool.get(self.uuid.value)
+ if not (
+ vm.value["owner"] == self.name.value
+ or self.realm.value == "ungleich-admin"
+ ):
+ self.add_error("Invalid User")
+
+
+class VmActionSchema(OTPSchema):
+ def __init__(self, data):
+ data["uuid"] = (
+ resolve_vm_name(
+ name=data.get("vm_name", None),
+ owner=(
+ data.get("in_support_of", None)
+ or data.get("name", None)
+ ),
+ )
+ or KeyError
+ )
+ self.uuid = VmUUIDField(data)
+ self.action = Field("action", str, data.get("action", KeyError))
+
+ self.action.validation = self.action_validation
+
+ _fields = [self.uuid, self.action]
+
+ super().__init__(data=data, fields=_fields)
+
+ def action_validation(self):
+ allowed_actions = ["start", "stop", "delete"]
+ if self.action.value not in allowed_actions:
+ self.add_error(
+ "Invalid Action. Allowed Actions are {}".format(
+ allowed_actions
+ )
+ )
+
+ def validation(self):
+ vm = shared.vm_pool.get(self.uuid.value)
+ if not (
+ vm.value["owner"] == self.name.value
+ or self.realm.value == "ungleich-admin"
+ ):
+ self.add_error("Invalid User")
+
+ if (
+ self.action.value == "start"
+ and vm.status == VMStatus.running
+ and vm.hostname != ""
+ ):
+ self.add_error("VM Already Running")
+
+ if self.action.value == "stop":
+ if vm.status == VMStatus.stopped:
+ self.add_error("VM Already Stopped")
+ elif vm.status != VMStatus.running:
+ self.add_error("Cannot stop non-running VM")
+
+
+class VmMigrationSchema(OTPSchema):
+ def __init__(self, data):
+ data["uuid"] = (
+ resolve_vm_name(
+ name=data.get("vm_name", None),
+ owner=(
+ data.get("in_support_of", None)
+ or data.get("name", None)
+ ),
+ )
+ or KeyError
+ )
+
+ self.uuid = VmUUIDField(data)
+ self.destination = Field(
+ "destination", str, data.get("destination", KeyError)
+ )
+
+ self.destination.validation = self.destination_validation
+
+        fields = [self.uuid, self.destination]
+ super().__init__(data=data, fields=fields)
+
+ def destination_validation(self):
+ hostname = self.destination.value
+ host = next(
+ filter(
+ lambda h: h.hostname == hostname, shared.host_pool.hosts
+ ),
+ None,
+ )
+ if not host:
+ self.add_error(
+ "No Such Host ({}) exists".format(
+ self.destination.value
+ )
+ )
+ elif host.status != HostStatus.alive:
+ self.add_error("Destination Host is dead")
+ else:
+ self.destination.value = host.key
+
+ def validation(self):
+ vm = shared.vm_pool.get(self.uuid.value)
+ if not (
+ vm.value["owner"] == self.name.value
+ or self.realm.value == "ungleich-admin"
+ ):
+ self.add_error("Invalid User")
+
+ if vm.status != VMStatus.running:
+ self.add_error("Can't migrate non-running VM")
+
+ if vm.hostname == os.path.join(
+            shared.settings["etcd"]["host_prefix"], self.destination.value
+ ):
+ self.add_error(
+                "Destination host cannot be the same as the source host"
+ )
+
+
+class AddSSHSchema(OTPSchema):
+ def __init__(self, data):
+ self.key_name = Field(
+ "key_name", str, data.get("key_name", KeyError)
+ )
+        self.key = Field("key", str, data.get("key", KeyError))
+
+ fields = [self.key_name, self.key]
+ super().__init__(data=data, fields=fields)
+
+
+class RemoveSSHSchema(OTPSchema):
+ def __init__(self, data):
+ self.key_name = Field(
+ "key_name", str, data.get("key_name", KeyError)
+ )
+
+ fields = [self.key_name]
+ super().__init__(data=data, fields=fields)
+
+
+class GetSSHSchema(OTPSchema):
+ def __init__(self, data):
+ self.key_name = Field(
+ "key_name", str, data.get("key_name", None)
+ )
+
+ fields = [self.key_name]
+ super().__init__(data=data, fields=fields)
+
+
+class CreateNetwork(OTPSchema):
+ def __init__(self, data):
+ self.network_name = Field("network_name", str, data.get("network_name", KeyError))
+ self.type = Field("type", str, data.get("type", KeyError))
+ self.user = Field("user", bool, bool(data.get("user", False)))
+
+ self.network_name.validation = self.network_name_validation
+ self.type.validation = self.network_type_validation
+
+ fields = [self.network_name, self.type, self.user]
+ super().__init__(data, fields=fields)
+
+ def network_name_validation(self):
+        key = os.path.join(shared.settings["etcd"]["network_prefix"], self.name.value, self.network_name.value)
+ network = shared.etcd_client.get(key, value_in_json=True)
+ if network:
+ self.add_error(
+ "Network with name {} already exists".format(
+ self.network_name.value
+ )
+ )
+
+ def network_type_validation(self):
+ supported_network_types = ["vxlan"]
+ if self.type.value not in supported_network_types:
+ self.add_error(
+ "Unsupported Network Type. Supported network types are {}".format(
+ supported_network_types
+ )
+ )
diff --git a/uncloud/uncloud_auth/migrations/__init__.py b/archive/uncloud_etcd_based/uncloud/cli/__init__.py
similarity index 100%
rename from uncloud/uncloud_auth/migrations/__init__.py
rename to archive/uncloud_etcd_based/uncloud/cli/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/cli/helper.py b/archive/uncloud_etcd_based/uncloud/cli/helper.py
new file mode 100644
index 0000000..51a4355
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/cli/helper.py
@@ -0,0 +1,46 @@
+import requests
+import json
+import argparse
+import binascii
+
+from pyotp import TOTP
+from os.path import join as join_path
+from uncloud.common.shared import shared
+
+
+def get_otp_parser():
+ otp_parser = argparse.ArgumentParser('otp')
+ otp_parser.add_argument('--name')
+ otp_parser.add_argument('--realm')
+ otp_parser.add_argument('--seed', type=get_token, dest='token', metavar='SEED')
+
+ return otp_parser
+
+
+def load_dump_pretty(content):
+ if isinstance(content, bytes):
+ content = content.decode('utf-8')
+ parsed = json.loads(content)
+ return json.dumps(parsed, indent=4, sort_keys=True)
+
+
+def make_request(*args, data=None, request_method=requests.post):
+ try:
+ r = request_method(join_path(shared.settings['client']['api_server'], *args), json=data)
+ except requests.exceptions.RequestException:
+ print('Error occurred while connecting to API server.')
+ else:
+ try:
+ print(load_dump_pretty(r.content))
+ except Exception:
+ print('Error occurred while getting output from api server.')
+
+
+def get_token(seed):
+ if seed is not None:
+ try:
+ token = TOTP(seed).now()
+ except binascii.Error:
+ raise argparse.ArgumentTypeError('Invalid seed')
+ else:
+ return token
diff --git a/archive/uncloud_etcd_based/uncloud/cli/host.py b/archive/uncloud_etcd_based/uncloud/cli/host.py
new file mode 100644
index 0000000..e912567
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/cli/host.py
@@ -0,0 +1,45 @@
+import requests
+
+from uncloud.cli.helper import make_request, get_otp_parser
+from uncloud.common.parser import BaseParser
+
+
+class HostParser(BaseParser):
+ def __init__(self):
+ super().__init__('host')
+
+ def create(self, **kwargs):
+ p = self.subparser.add_parser('create', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--hostname', required=True)
+ p.add_argument('--cpu', required=True, type=int)
+ p.add_argument('--ram', required=True)
+ p.add_argument('--os-ssd', required=True)
+ p.add_argument('--hdd', default=list())
+
+ def list(self, **kwargs):
+ self.subparser.add_parser('list', **kwargs)
+
+
+parser = HostParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('host_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ request_method = requests.post
+ data = None
+ if subcommand == 'create':
+ kwargs['specs'] = {
+ 'cpu': kwargs.pop('cpu'),
+ 'ram': kwargs.pop('ram'),
+ 'os-ssd': kwargs.pop('os_ssd'),
+ 'hdd': kwargs.pop('hdd')
+ }
+ data = kwargs
+ elif subcommand == 'list':
+ request_method = requests.get
+
+ make_request('host', subcommand, data=data, request_method=request_method)
diff --git a/archive/uncloud_etcd_based/uncloud/cli/image.py b/archive/uncloud_etcd_based/uncloud/cli/image.py
new file mode 100644
index 0000000..2f59c32
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/cli/image.py
@@ -0,0 +1,38 @@
+import requests
+
+from uncloud.cli.helper import make_request
+from uncloud.common.parser import BaseParser
+
+
+class ImageParser(BaseParser):
+ def __init__(self):
+ super().__init__('image')
+
+ def create(self, **kwargs):
+ p = self.subparser.add_parser('create', **kwargs)
+ p.add_argument('--name', required=True)
+ p.add_argument('--uuid', required=True)
+ p.add_argument('--image-store', required=True, dest='image_store')
+
+ def list(self, **kwargs):
+ self.subparser.add_parser('list', **kwargs)
+
+
+parser = ImageParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('image_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ data = None
+ request_method = requests.post
+ if subcommand == 'list':
+ subcommand = 'list-public'
+ request_method = requests.get
+ elif subcommand == 'create':
+ data = kwargs
+
+ make_request('image', subcommand, data=data, request_method=request_method)
diff --git a/archive/uncloud_etcd_based/uncloud/cli/main.py b/archive/uncloud_etcd_based/uncloud/cli/main.py
new file mode 100644
index 0000000..9a42497
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/cli/main.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+
+import argparse
+import importlib
+
+arg_parser = argparse.ArgumentParser('cli', add_help=False)
+subparser = arg_parser.add_subparsers(dest='subcommand')
+
+for component in ['user', 'host', 'image', 'network', 'vm']:
+ module = importlib.import_module('uncloud.cli.{}'.format(component))
+ parser = getattr(module, 'arg_parser')
+ subparser.add_parser(name=parser.prog, parents=[parser])
+
+
+def main(arguments):
+ if not arguments['subcommand']:
+ arg_parser.print_help()
+ else:
+ name = arguments.pop('subcommand')
+ arguments.pop('debug')
+ mod = importlib.import_module('uncloud.cli.{}'.format(name))
+ _main = getattr(mod, 'main')
+ _main(**arguments)
diff --git a/archive/uncloud_etcd_based/uncloud/cli/network.py b/archive/uncloud_etcd_based/uncloud/cli/network.py
new file mode 100644
index 0000000..55798bf
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/cli/network.py
@@ -0,0 +1,32 @@
+import requests
+
+from uncloud.cli.helper import make_request, get_otp_parser
+from uncloud.common.parser import BaseParser
+
+
+class NetworkParser(BaseParser):
+ def __init__(self):
+ super().__init__('network')
+
+ def create(self, **kwargs):
+ p = self.subparser.add_parser('create', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--network-name', required=True)
+ p.add_argument('--network-type', required=True, dest='type')
+ p.add_argument('--user', action='store_true')
+
+
+parser = NetworkParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('network_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ data = None
+ request_method = requests.post
+ if subcommand == 'create':
+ data = kwargs
+
+ make_request('network', subcommand, data=data, request_method=request_method)
diff --git a/archive/uncloud_etcd_based/uncloud/cli/user.py b/archive/uncloud_etcd_based/uncloud/cli/user.py
new file mode 100755
index 0000000..3a4cc4e
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/cli/user.py
@@ -0,0 +1,41 @@
+from uncloud.cli.helper import make_request, get_otp_parser
+from uncloud.common.parser import BaseParser
+
+
+class UserParser(BaseParser):
+ def __init__(self):
+ super().__init__('user')
+
+ def files(self, **kwargs):
+ self.subparser.add_parser('files', parents=[get_otp_parser()], **kwargs)
+
+ def vms(self, **kwargs):
+ self.subparser.add_parser('vms', parents=[get_otp_parser()], **kwargs)
+
+ def networks(self, **kwargs):
+ self.subparser.add_parser('networks', parents=[get_otp_parser()], **kwargs)
+
+ def add_ssh(self, **kwargs):
+ p = self.subparser.add_parser('add-ssh', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--key-name', required=True)
+ p.add_argument('--key', required=True)
+
+ def get_ssh(self, **kwargs):
+ p = self.subparser.add_parser('get-ssh', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--key-name', default='')
+
+ def remove_ssh(self, **kwargs):
+ p = self.subparser.add_parser('remove-ssh', parents=[get_otp_parser()], **kwargs)
+ p.add_argument('--key-name', required=True)
+
+
+parser = UserParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('user_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ make_request('user', subcommand, data=kwargs)
diff --git a/archive/uncloud_etcd_based/uncloud/cli/vm.py b/archive/uncloud_etcd_based/uncloud/cli/vm.py
new file mode 100644
index 0000000..396530e
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/cli/vm.py
@@ -0,0 +1,62 @@
+from uncloud.common.parser import BaseParser
+from uncloud.cli.helper import make_request, get_otp_parser
+
+
+class VMParser(BaseParser):
+ def __init__(self):
+ super().__init__('vm')
+
+ def start(self, **args):
+ p = self.subparser.add_parser('start', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+
+ def stop(self, **args):
+ p = self.subparser.add_parser('stop', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+
+ def status(self, **args):
+ p = self.subparser.add_parser('status', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+
+ def delete(self, **args):
+ p = self.subparser.add_parser('delete', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+
+ def migrate(self, **args):
+ p = self.subparser.add_parser('migrate', parents=[get_otp_parser()], **args)
+ p.add_argument('--vm-name', required=True)
+ p.add_argument('--destination', required=True)
+
+ def create(self, **args):
+ p = self.subparser.add_parser('create', parents=[get_otp_parser()], **args)
+ p.add_argument('--cpu', required=True)
+ p.add_argument('--ram', required=True)
+ p.add_argument('--os-ssd', required=True)
+ p.add_argument('--hdd', action='append', default=list())
+ p.add_argument('--image', required=True)
+ p.add_argument('--network', action='append', default=[])
+ p.add_argument('--vm-name', required=True)
+
+
+parser = VMParser()
+arg_parser = parser.arg_parser
+
+
+def main(**kwargs):
+ subcommand = kwargs.pop('vm_subcommand')
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ data = kwargs
+ endpoint = subcommand
+ if subcommand in ['start', 'stop', 'delete']:
+ endpoint = 'action'
+ data['action'] = subcommand
+ elif subcommand == 'create':
+ kwargs['specs'] = {
+ 'cpu': kwargs.pop('cpu'),
+ 'ram': kwargs.pop('ram'),
+ 'os-ssd': kwargs.pop('os_ssd'),
+ 'hdd': kwargs.pop('hdd')
+ }
+ make_request('vm', endpoint, data=data)
diff --git a/uncloud/uncloud_net/__init__.py b/archive/uncloud_etcd_based/uncloud/client/__init__.py
similarity index 100%
rename from uncloud/uncloud_net/__init__.py
rename to archive/uncloud_etcd_based/uncloud/client/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/client/main.py b/archive/uncloud_etcd_based/uncloud/client/main.py
new file mode 100644
index 0000000..062308c
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/client/main.py
@@ -0,0 +1,23 @@
+import argparse
+import etcd3
+from uncloud.common.etcd_wrapper import Etcd3Wrapper
+
+arg_parser = argparse.ArgumentParser('client', add_help=False)
+arg_parser.add_argument('--dump-etcd-contents-prefix', help="Dump contents below the given prefix")
+
+def dump_etcd_contents(prefix):
+ etcd = Etcd3Wrapper()
+ for k,v in etcd.get_prefix_raw(prefix):
+ k = k.decode('utf-8')
+ v = v.decode('utf-8')
+ print("{} = {}".format(k,v))
+# print("{} = {}".format(k,v))
+
+# for k,v in etcd.get_prefix(prefix):
+#
+ print("done")
+
+
+def main(arguments):
+    if arguments.get('dump_etcd_contents_prefix'):
+ dump_etcd_contents(prefix=arguments['dump_etcd_contents_prefix'])
diff --git a/archive/uncloud_etcd_based/uncloud/common/__init__.py b/archive/uncloud_etcd_based/uncloud/common/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/archive/uncloud_etcd_based/uncloud/common/classes.py b/archive/uncloud_etcd_based/uncloud/common/classes.py
new file mode 100644
index 0000000..29dffd4
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/classes.py
@@ -0,0 +1,26 @@
+from .etcd_wrapper import EtcdEntry
+
+
+class SpecificEtcdEntryBase:
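+    """Base class exposing the keys of an etcd entry's JSON value as attributes."""
+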
+ def __init__(self, e: EtcdEntry):
+ self.key = e.key
+
+ for k in e.value.keys():
+ self.__setattr__(k, e.value[k])
+
+ def original_keys(self):
+ r = dict(self.__dict__)
+ if "key" in r:
+ del r["key"]
+ return r
+
+ @property
+ def value(self):
+ return self.original_keys()
+
+ @value.setter
+ def value(self, v):
+ self.__dict__ = v
+
+ def __repr__(self):
+ return str(dict(self.__dict__))
diff --git a/archive/uncloud_etcd_based/uncloud/common/cli.py b/archive/uncloud_etcd_based/uncloud/common/cli.py
new file mode 100644
index 0000000..3d3c248
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/cli.py
@@ -0,0 +1,26 @@
+from uncloud.common.shared import shared
+from pyotp import TOTP
+
+
+def get_token(seed):
+ if seed is not None:
+ try:
+ token = TOTP(seed).now()
+ except Exception:
+ raise Exception('Invalid seed')
+ else:
+ return token
+
+
+def resolve_otp_credentials(kwargs):
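+    """Fill name/realm/token values that are None in kwargs from the 'client' settings."""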
+ d = {
+ 'name': shared.settings['client']['name'],
+ 'realm': shared.settings['client']['realm'],
+ 'token': get_token(shared.settings['client']['seed'])
+ }
+
+ for k, v in d.items():
+ if k in kwargs and kwargs[k] is None:
+ kwargs.update({k: v})
+
+ return d
diff --git a/archive/uncloud_etcd_based/uncloud/common/counters.py b/archive/uncloud_etcd_based/uncloud/common/counters.py
new file mode 100644
index 0000000..2d4a8e9
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/counters.py
@@ -0,0 +1,21 @@
+from .etcd_wrapper import Etcd3Wrapper
+
+
+def increment_etcd_counter(etcd_client: Etcd3Wrapper, key):
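+    # Read the current counter value, increment it and write it back.
+    # Note: this is a plain read-modify-write and is not atomic across concurrent writers.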
+ kv = etcd_client.get(key)
+
+ if kv:
+ counter = int(kv.value)
+ counter = counter + 1
+ else:
+ counter = 1
+
+ etcd_client.put(key, str(counter))
+ return counter
+
+
+def get_etcd_counter(etcd_client: Etcd3Wrapper, key):
+ kv = etcd_client.get(key)
+ if kv:
+ return int(kv.value)
+ return None
diff --git a/archive/uncloud_etcd_based/uncloud/common/etcd_wrapper.py b/archive/uncloud_etcd_based/uncloud/common/etcd_wrapper.py
new file mode 100644
index 0000000..38471ab
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/etcd_wrapper.py
@@ -0,0 +1,75 @@
+import etcd3
+import json
+
+from functools import wraps
+
+from uncloud import UncloudException
+from uncloud.common import logger
+
+
+class EtcdEntry:
+ def __init__(self, meta_or_key, value, value_in_json=False):
+ if hasattr(meta_or_key, 'key'):
+ # if meta has attr 'key' then get it
+ self.key = meta_or_key.key.decode('utf-8')
+ else:
+ # otherwise meta is the 'key'
+ self.key = meta_or_key
+ self.value = value.decode('utf-8')
+
+ if value_in_json:
+ self.value = json.loads(self.value)
+
+
+def readable_errors(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except etcd3.exceptions.ConnectionFailedError:
+ raise UncloudException('Cannot connect to etcd: is etcd running as configured in uncloud.conf?')
+ except etcd3.exceptions.ConnectionTimeoutError as err:
+ raise etcd3.exceptions.ConnectionTimeoutError('etcd connection timeout.') from err
+ except Exception:
+            logger.exception('An etcd error occurred. See syslog for details.')
+
+ return wrapper
+
+
+class Etcd3Wrapper:
+ @readable_errors
+ def __init__(self, *args, **kwargs):
+ self.client = etcd3.client(*args, **kwargs)
+
+ @readable_errors
+ def get(self, *args, value_in_json=False, **kwargs):
+ _value, _key = self.client.get(*args, **kwargs)
+ if _key is None or _value is None:
+ return None
+ return EtcdEntry(_key, _value, value_in_json=value_in_json)
+
+ @readable_errors
+ def put(self, *args, value_in_json=False, **kwargs):
+ _key, _value = args
+ if value_in_json:
+ _value = json.dumps(_value)
+
+ if not isinstance(_key, str):
+ _key = _key.decode('utf-8')
+
+ return self.client.put(_key, _value, **kwargs)
+
+ @readable_errors
+ def get_prefix(self, *args, value_in_json=False, raise_exception=True, **kwargs):
+ event_iterator = self.client.get_prefix(*args, **kwargs)
+ for e in event_iterator:
+ yield EtcdEntry(*e[::-1], value_in_json=value_in_json)
+
+ @readable_errors
+ def watch_prefix(self, key, raise_exception=True, value_in_json=False):
+ event_iterator, cancel = self.client.watch_prefix(key)
+ for e in event_iterator:
+ if hasattr(e, '_event'):
+ e = e._event
+ if e.type == e.PUT:
+ yield EtcdEntry(e.kv.key, e.kv.value, value_in_json=value_in_json)
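+
+
+# Illustrative usage, not part of the original code; assumes a plain local
+# etcd reachable without TLS:
+#
+#   etcd = Etcd3Wrapper(host='localhost', port=2379)
+#   etcd.put('/test/greeting', {'hello': 'world'}, value_in_json=True)
+#   entry = etcd.get('/test/greeting', value_in_json=True)
+#   entry.key, entry.value   # '/test/greeting', {'hello': 'world'}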
diff --git a/archive/uncloud_etcd_based/uncloud/common/host.py b/archive/uncloud_etcd_based/uncloud/common/host.py
new file mode 100644
index 0000000..f7bb7d5
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/host.py
@@ -0,0 +1,69 @@
+import time
+from datetime import datetime
+from os.path import join
+from typing import List
+
+from .classes import SpecificEtcdEntryBase
+
+
+class HostStatus:
+ """Possible Statuses of uncloud host."""
+
+ alive = "ALIVE"
+ dead = "DEAD"
+
+
+class HostEntry(SpecificEtcdEntryBase):
+ """Represents Host Entry Structure and its supporting methods."""
+
+ def __init__(self, e):
+ self.specs = None # type: dict
+ self.hostname = None # type: str
+ self.status = None # type: str
+ self.last_heartbeat = None # type: str
+
+ super().__init__(e)
+
+ def update_heartbeat(self):
+ self.status = HostStatus.alive
+ self.last_heartbeat = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
+
+ def is_alive(self):
+ last_heartbeat = datetime.strptime(
+ self.last_heartbeat, "%Y-%m-%d %H:%M:%S"
+ )
+ delta = datetime.utcnow() - last_heartbeat
+ if delta.total_seconds() > 60:
+ return False
+ return True
+
+ def declare_dead(self):
+ self.status = HostStatus.dead
+        self.last_heartbeat = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
+
+
+class HostPool:
+ def __init__(self, etcd_client, host_prefix):
+ self.client = etcd_client
+ self.prefix = host_prefix
+
+ @property
+ def hosts(self) -> List[HostEntry]:
+ _hosts = self.client.get_prefix(self.prefix, value_in_json=True)
+ return [HostEntry(host) for host in _hosts]
+
+ def get(self, key):
+ if not key.startswith(self.prefix):
+ key = join(self.prefix, key)
+ v = self.client.get(key, value_in_json=True)
+ if v:
+ return HostEntry(v)
+ return None
+
+ def put(self, obj: HostEntry):
+ self.client.put(obj.key, obj.value, value_in_json=True)
+
+ def by_status(self, status, _hosts=None):
+ if _hosts is None:
+ _hosts = self.hosts
+ return list(filter(lambda x: x.status == status, _hosts))
diff --git a/archive/uncloud_etcd_based/uncloud/common/network.py b/archive/uncloud_etcd_based/uncloud/common/network.py
new file mode 100644
index 0000000..32f6951
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/network.py
@@ -0,0 +1,70 @@
+import subprocess as sp
+import random
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def random_bytes(num=6):
+ return [random.randrange(256) for _ in range(num)]
+
+
+def generate_mac(
+ uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
+):
+ mac = random_bytes()
+ if oui:
+        if isinstance(oui, str):
+            oui = [int(chunk, 16) for chunk in oui.split(separator)]
+ mac = oui + random_bytes(num=6 - len(oui))
+ else:
+ if multicast:
+ mac[0] |= 1 # set bit 0
+ else:
+ mac[0] &= ~1 # clear bit 0
+ if uaa:
+ mac[0] &= ~(1 << 1) # clear bit 1
+ else:
+ mac[0] |= 1 << 1 # set bit 1
+ return separator.join(byte_fmt % b for b in mac)
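+
+
+# Illustrative examples, not part of the original code:
+#
+#   generate_mac()             # random unicast, locally administered MAC
+#   generate_mac(oui='02:00')  # MAC starting with 02:00, four random bytes follow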
+
+
+def create_dev(script, _id, dev, ip=None):
+ command = [
+ "sudo",
+ "-p",
+ "Enter password to create network devices for vm: ",
+ script,
+ str(_id),
+ dev,
+ ]
+ if ip:
+ command.append(ip)
+ try:
+ output = sp.check_output(command, stderr=sp.PIPE)
+ except Exception:
+ logger.exception("Creation of interface %s failed.", dev)
+ return None
+ else:
+ return output.decode("utf-8").strip()
+
+
+def delete_network_interface(iface):
+ try:
+ sp.check_output(
+ [
+ "sudo",
+ "-p",
+ "Enter password to remove {} network device: ".format(
+ iface
+ ),
+ "ip",
+ "link",
+ "del",
+ iface,
+ ],
+ stderr=sp.PIPE,
+ )
+ except Exception:
+        logger.exception("Deletion of interface %s failed", iface)
+
diff --git a/archive/uncloud_etcd_based/uncloud/common/parser.py b/archive/uncloud_etcd_based/uncloud/common/parser.py
new file mode 100644
index 0000000..576f0e7
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/parser.py
@@ -0,0 +1,13 @@
+import argparse
+
+
+class BaseParser:
+ def __init__(self, command):
+ self.arg_parser = argparse.ArgumentParser(command, add_help=False)
+ self.subparser = self.arg_parser.add_subparsers(dest='{}_subcommand'.format(command))
+ self.common_args = {'add_help': False}
+
+ methods = [attr for attr in dir(self) if not attr.startswith('__')
+ and type(getattr(self, attr)).__name__ == 'method']
+ for method in methods:
+ getattr(self, method)(**self.common_args)
diff --git a/archive/uncloud_etcd_based/uncloud/common/request.py b/archive/uncloud_etcd_based/uncloud/common/request.py
new file mode 100644
index 0000000..cb0add5
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/request.py
@@ -0,0 +1,46 @@
+import json
+from os.path import join
+from uuid import uuid4
+
+from uncloud.common.etcd_wrapper import EtcdEntry
+from uncloud.common.classes import SpecificEtcdEntryBase
+
+
+class RequestType:
+ CreateVM = "CreateVM"
+ ScheduleVM = "ScheduleVM"
+ StartVM = "StartVM"
+ StopVM = "StopVM"
+ InitVMMigration = "InitVMMigration"
+ TransferVM = "TransferVM"
+ DeleteVM = "DeleteVM"
+
+
+class RequestEntry(SpecificEtcdEntryBase):
+ def __init__(self, e):
+ self.destination_sock_path = None
+ self.destination_host_key = None
+ self.type = None # type: str
+ self.migration = None # type: bool
+ self.destination = None # type: str
+ self.uuid = None # type: str
+ self.hostname = None # type: str
+ super().__init__(e)
+
+ @classmethod
+ def from_scratch(cls, request_prefix, **kwargs):
+ e = EtcdEntry(meta_or_key=join(request_prefix, uuid4().hex),
+ value=json.dumps(kwargs).encode('utf-8'), value_in_json=True)
+ return cls(e)
+
+
+class RequestPool:
+ def __init__(self, etcd_client, request_prefix):
+ self.client = etcd_client
+ self.prefix = request_prefix
+
+ def put(self, obj: RequestEntry):
+ if not obj.key.startswith(self.prefix):
+ obj.key = join(self.prefix, obj.key)
+
+ self.client.put(obj.key, obj.value, value_in_json=True)
diff --git a/archive/uncloud_etcd_based/uncloud/common/schemas.py b/archive/uncloud_etcd_based/uncloud/common/schemas.py
new file mode 100644
index 0000000..04978a5
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/schemas.py
@@ -0,0 +1,41 @@
+import bitmath
+
+from marshmallow import fields, Schema
+
+
+class StorageUnit(fields.Field):
+ def _serialize(self, value, attr, obj, **kwargs):
+ return str(value)
+
+ def _deserialize(self, value, attr, data, **kwargs):
+ return bitmath.parse_string_unsafe(value)
+
+
+class SpecsSchema(Schema):
+ cpu = fields.Int()
+ ram = StorageUnit()
+ os_ssd = StorageUnit(data_key="os-ssd", attribute="os-ssd")
+ hdd = fields.List(StorageUnit())
+
+
+class VMSchema(Schema):
+ name = fields.Str()
+ owner = fields.Str()
+ owner_realm = fields.Str()
+ specs = fields.Nested(SpecsSchema)
+ status = fields.Str()
+ log = fields.List(fields.Str())
+ vnc_socket = fields.Str()
+ image_uuid = fields.Str()
+ hostname = fields.Str()
+ metadata = fields.Dict()
+ network = fields.List(
+ fields.Tuple((fields.Str(), fields.Str(), fields.Int()))
+ )
+ in_migration = fields.Bool()
+
+
+class NetworkSchema(Schema):
+ _id = fields.Int(data_key="id", attribute="id")
+ _type = fields.Str(data_key="type", attribute="type")
+ ipv6 = fields.Str()
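+
+
+# Illustrative sketch, not part of the original code: deserialising a specs
+# dict via SpecsSchema turns the size strings into bitmath objects through the
+# StorageUnit field, roughly:
+#
+#   SpecsSchema().load({'cpu': 2, 'ram': '2 GB', 'os-ssd': '10 GB', 'hdd': ['100 GB']})
+#   # -> {'cpu': 2, 'ram': GB(2.0), 'os-ssd': GB(10.0), 'hdd': [GB(100.0)]}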
diff --git a/archive/uncloud_etcd_based/uncloud/common/settings.py b/archive/uncloud_etcd_based/uncloud/common/settings.py
new file mode 100644
index 0000000..8503f42
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/settings.py
@@ -0,0 +1,136 @@
+import configparser
+import logging
+import sys
+import os
+
+from datetime import datetime
+from uncloud.common.etcd_wrapper import Etcd3Wrapper
+from os.path import join as join_path
+
+logger = logging.getLogger(__name__)
+settings = None
+
+
+class CustomConfigParser(configparser.RawConfigParser):
+ def __getitem__(self, key):
+ try:
+ result = super().__getitem__(key)
+ except KeyError as err:
+ raise KeyError(
+ 'Key \'{}\' not found in configuration. Make sure you configure uncloud.'.format(
+ key
+ )
+ ) from err
+ else:
+ return result
+
+
+class Settings(object):
+ def __init__(self, conf_dir, seed_value=None):
+ conf_name = 'uncloud.conf'
+ self.config_file = join_path(conf_dir, conf_name)
+
+        # this is used to cache the config from etcd for 1 minute. Without this we
+        # would make a lot of requests to etcd, which slows down everything.
+ self.last_config_update = datetime.fromtimestamp(0)
+
+ self.config_parser = CustomConfigParser(allow_no_value=True)
+ self.config_parser.add_section('etcd')
+ self.config_parser.set('etcd', 'base_prefix', '/')
+
+ if os.access(self.config_file, os.R_OK):
+ self.config_parser.read(self.config_file)
+ else:
+            raise FileNotFoundError('Config file {} not found!'.format(self.config_file))
+        self.config_key = join_path(self['etcd']['base_prefix'], 'uncloud/config/')
+
+ self.read_internal_values()
+
+ if seed_value is None:
+ seed_value = dict()
+
+ self.config_parser.read_dict(seed_value)
+
+ def get_etcd_client(self):
+ args = tuple()
+ try:
+ kwargs = {
+ 'host': self.config_parser.get('etcd', 'url'),
+ 'port': self.config_parser.get('etcd', 'port'),
+ 'ca_cert': self.config_parser.get('etcd', 'ca_cert'),
+ 'cert_cert': self.config_parser.get('etcd', 'cert_cert'),
+ 'cert_key': self.config_parser.get('etcd', 'cert_key'),
+ }
+ except configparser.Error as err:
+ raise configparser.Error(
+ '{} in config file {}'.format(
+ err.message, self.config_file
+ )
+ ) from err
+ else:
+ try:
+ wrapper = Etcd3Wrapper(*args, **kwargs)
+ except Exception as err:
+ logger.error(
+                    'etcd connection not successful. Please check your config file.'
+ '\nDetails: %s\netcd connection parameters: %s',
+ err,
+ kwargs,
+ )
+ sys.exit(1)
+ else:
+ return wrapper
+
+ def read_internal_values(self):
+ base_prefix = self['etcd']['base_prefix']
+ self.config_parser.read_dict(
+ {
+ 'etcd': {
+ 'file_prefix': join_path(base_prefix, 'files/'),
+ 'host_prefix': join_path(base_prefix, 'hosts/'),
+ 'image_prefix': join_path(base_prefix, 'images/'),
+ 'image_store_prefix': join_path(base_prefix, 'imagestore/'),
+ 'network_prefix': join_path(base_prefix, 'networks/'),
+ 'request_prefix': join_path(base_prefix, 'requests/'),
+ 'user_prefix': join_path(base_prefix, 'users/'),
+ 'vm_prefix': join_path(base_prefix, 'vms/'),
+ 'vxlan_counter': join_path(base_prefix, 'counters/vxlan'),
+ 'tap_counter': join_path(base_prefix, 'counters/tap')
+ }
+ }
+ )
+
+ def read_config_file_values(self, config_file):
+ try:
+ # Trying to read configuration file
+ with open(config_file) as config_file_handle:
+ self.config_parser.read_file(config_file_handle)
+ except FileNotFoundError:
+ sys.exit('Configuration file {} not found!'.format(config_file))
+ except Exception as err:
+ logger.exception(err)
+ sys.exit('Error occurred while reading configuration file')
+
+ def read_values_from_etcd(self):
+ etcd_client = self.get_etcd_client()
+ if (datetime.utcnow() - self.last_config_update).total_seconds() > 60:
+ config_from_etcd = etcd_client.get(self.config_key, value_in_json=True)
+ if config_from_etcd:
+ self.config_parser.read_dict(config_from_etcd.value)
+ self.last_config_update = datetime.utcnow()
+ else:
+ raise KeyError('Key \'{}\' not found in etcd. Please configure uncloud.'.format(self.config_key))
+
+ def __getitem__(self, key):
+ # Allow failing to read from etcd if we have
+ # it locally
+ if key not in self.config_parser.sections():
+ try:
+ self.read_values_from_etcd()
+ except KeyError:
+ pass
+ return self.config_parser[key]
+
+
+def get_settings():
+ return settings
diff --git a/archive/uncloud_etcd_based/uncloud/common/shared.py b/archive/uncloud_etcd_based/uncloud/common/shared.py
new file mode 100644
index 0000000..aea7cbc
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/shared.py
@@ -0,0 +1,34 @@
+from uncloud.common.settings import get_settings
+from uncloud.common.vm import VmPool
+from uncloud.common.host import HostPool
+from uncloud.common.request import RequestPool
+import uncloud.common.storage_handlers as storage_handlers
+
+
+class Shared:
+ @property
+ def settings(self):
+ return get_settings()
+
+ @property
+ def etcd_client(self):
+ return self.settings.get_etcd_client()
+
+ @property
+ def host_pool(self):
+ return HostPool(self.etcd_client, self.settings["etcd"]["host_prefix"])
+
+ @property
+ def vm_pool(self):
+ return VmPool(self.etcd_client, self.settings["etcd"]["vm_prefix"])
+
+ @property
+ def request_pool(self):
+ return RequestPool(self.etcd_client, self.settings["etcd"]["request_prefix"])
+
+ @property
+ def storage_handler(self):
+ return storage_handlers.get_storage_handler()
+
+
+shared = Shared()
diff --git a/archive/uncloud_etcd_based/uncloud/common/storage_handlers.py b/archive/uncloud_etcd_based/uncloud/common/storage_handlers.py
new file mode 100644
index 0000000..58c2dc2
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/storage_handlers.py
@@ -0,0 +1,207 @@
+import shutil
+import subprocess as sp
+import os
+import stat
+
+from abc import ABC
+from . import logger
+from os.path import join as join_path
+import uncloud.common.shared as shared
+
+
+class ImageStorageHandler(ABC):
+ handler_name = "base"
+
+ def __init__(self, image_base, vm_base):
+ self.image_base = image_base
+ self.vm_base = vm_base
+
+ def import_image(self, image_src, image_dest, protect=False):
+ """Put an image at the destination
+ :param image_src: An Image file
+ :param image_dest: A path where :param src: is to be put.
+        :param protect: If protect is true then the dest is protected (read-only etc.)
+        The source must exist on the filesystem.
+ """
+
+ raise NotImplementedError()
+
+ def make_vm_image(self, image_path, path):
+ """Copy image from src to dest
+
+ :param image_path: A path
+ :param path: A path
+
+        Source and destination must be on the same storage system, i.e. both on the filesystem or both on CEPH.
+ """
+ raise NotImplementedError()
+
+ def resize_vm_image(self, path, size):
+ """Resize image located at :param path:
+ :param path: The file which is to be resized
+ :param size: Size must be in Megabytes
+ """
+ raise NotImplementedError()
+
+ def delete_vm_image(self, path):
+ raise NotImplementedError()
+
+ def execute_command(self, command, report=True, error_origin=None):
+ if not error_origin:
+ error_origin = self.handler_name
+
+ command = list(map(str, command))
+ try:
+ sp.check_output(command, stderr=sp.PIPE)
+ except sp.CalledProcessError as e:
+ _stderr = e.stderr.decode("utf-8").strip()
+ if report:
+ logger.exception("%s:- %s", error_origin, _stderr)
+ return False
+ return True
+
+ def vm_path_string(self, path):
+ raise NotImplementedError()
+
+ def qemu_path_string(self, path):
+ raise NotImplementedError()
+
+ def is_vm_image_exists(self, path):
+ raise NotImplementedError()
+
+
+class FileSystemBasedImageStorageHandler(ImageStorageHandler):
+ handler_name = "Filesystem"
+
+ def import_image(self, src, dest, protect=False):
+ dest = join_path(self.image_base, dest)
+ try:
+ shutil.copy(src, dest)
+ if protect:
+ os.chmod(
+ dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
+ )
+ except Exception as e:
+ logger.exception(e)
+ return False
+ return True
+
+ def make_vm_image(self, src, dest):
+ src = join_path(self.image_base, src)
+ dest = join_path(self.vm_base, dest)
+ try:
+ shutil.copyfile(src, dest)
+ except Exception as e:
+ logger.exception(e)
+ return False
+ return True
+
+ def resize_vm_image(self, path, size):
+ path = join_path(self.vm_base, path)
+ command = [
+ "qemu-img",
+ "resize",
+ "-f",
+ "raw",
+ path,
+ "{}M".format(size),
+ ]
+ if self.execute_command(command):
+ return True
+ else:
+ self.delete_vm_image(path)
+ return False
+
+ def delete_vm_image(self, path):
+ path = join_path(self.vm_base, path)
+ try:
+ os.remove(path)
+ except Exception as e:
+ logger.exception(e)
+ return False
+ return True
+
+ def vm_path_string(self, path):
+ return join_path(self.vm_base, path)
+
+ def qemu_path_string(self, path):
+ return self.vm_path_string(path)
+
+ def is_vm_image_exists(self, path):
+ path = join_path(self.vm_base, path)
+ command = ["ls", path]
+ return self.execute_command(command, report=False)
+
+
+class CEPHBasedImageStorageHandler(ImageStorageHandler):
+ handler_name = "Ceph"
+
+ def import_image(self, src, dest, protect=False):
+ dest = join_path(self.image_base, dest)
+ import_command = ["rbd", "import", src, dest]
+ commands = [import_command]
+ if protect:
+ snap_create_command = [
+ "rbd",
+ "snap",
+ "create",
+ "{}@protected".format(dest),
+ ]
+ snap_protect_command = [
+ "rbd",
+ "snap",
+ "protect",
+ "{}@protected".format(dest),
+ ]
+ commands.append(snap_create_command)
+ commands.append(snap_protect_command)
+
+ result = True
+ for command in commands:
+ result = result and self.execute_command(command)
+
+ return result
+
+ def make_vm_image(self, src, dest):
+ src = join_path(self.image_base, src)
+ dest = join_path(self.vm_base, dest)
+
+ command = ["rbd", "clone", "{}@protected".format(src), dest]
+ return self.execute_command(command)
+
+ def resize_vm_image(self, path, size):
+ path = join_path(self.vm_base, path)
+ command = ["rbd", "resize", path, "--size", size]
+ return self.execute_command(command)
+
+ def delete_vm_image(self, path):
+ path = join_path(self.vm_base, path)
+ command = ["rbd", "rm", path]
+ return self.execute_command(command)
+
+ def vm_path_string(self, path):
+ return join_path(self.vm_base, path)
+
+ def qemu_path_string(self, path):
+ return "rbd:{}".format(self.vm_path_string(path))
+
+ def is_vm_image_exists(self, path):
+ path = join_path(self.vm_base, path)
+ command = ["rbd", "info", path]
+ return self.execute_command(command, report=False)
+
+
+def get_storage_handler():
+ __storage_backend = shared.shared.settings["storage"]["storage_backend"]
+ if __storage_backend == "filesystem":
+ return FileSystemBasedImageStorageHandler(
+ vm_base=shared.shared.settings["storage"]["vm_dir"],
+ image_base=shared.shared.settings["storage"]["image_dir"],
+ )
+ elif __storage_backend == "ceph":
+ return CEPHBasedImageStorageHandler(
+ vm_base=shared.shared.settings["storage"]["ceph_vm_pool"],
+ image_base=shared.shared.settings["storage"]["ceph_image_pool"],
+ )
+ else:
+ raise Exception("Unknown Image Storage Handler")
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/common/vm.py b/archive/uncloud_etcd_based/uncloud/common/vm.py
new file mode 100644
index 0000000..d11046d
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/common/vm.py
@@ -0,0 +1,102 @@
+from contextlib import contextmanager
+from datetime import datetime
+from os.path import join
+
+from .classes import SpecificEtcdEntryBase
+
+
+class VMStatus:
+ stopped = "STOPPED" # After requested_shutdown
+ killed = "KILLED" # either host died or vm died itself
+ running = "RUNNING"
+ error = "ERROR" # An error occurred that cannot be resolved automatically
+
+
+def declare_stopped(vm):
+ vm["hostname"] = ""
+ vm["in_migration"] = False
+ vm["status"] = VMStatus.stopped
+
+
+class VMEntry(SpecificEtcdEntryBase):
+ def __init__(self, e):
+ self.owner = None # type: str
+ self.specs = None # type: dict
+ self.hostname = None # type: str
+ self.status = None # type: str
+ self.image_uuid = None # type: str
+ self.log = None # type: list
+ self.in_migration = None # type: bool
+
+ super().__init__(e)
+
+ @property
+ def uuid(self):
+ return self.key.split("/")[-1]
+
+ def declare_killed(self):
+ self.hostname = ""
+ self.in_migration = False
+ if self.status == VMStatus.running:
+ self.status = VMStatus.killed
+
+ def declare_stopped(self):
+ self.hostname = ""
+ self.in_migration = False
+ self.status = VMStatus.stopped
+
+ def add_log(self, msg):
+ self.log = self.log[:5]
+ self.log.append(
+ "{} - {}".format(datetime.now().isoformat(), msg)
+ )
+
+
+class VmPool:
+ def __init__(self, etcd_client, vm_prefix):
+ self.client = etcd_client
+ self.prefix = vm_prefix
+
+ @property
+ def vms(self):
+ _vms = self.client.get_prefix(self.prefix, value_in_json=True)
+ return [VMEntry(vm) for vm in _vms]
+
+ def by_host(self, host, _vms=None):
+ if _vms is None:
+ _vms = self.vms
+ return list(filter(lambda x: x.hostname == host, _vms))
+
+ def by_status(self, status, _vms=None):
+ if _vms is None:
+ _vms = self.vms
+ return list(filter(lambda x: x.status == status, _vms))
+
+ def by_owner(self, owner, _vms=None):
+ if _vms is None:
+ _vms = self.vms
+ return list(filter(lambda x: x.owner == owner, _vms))
+
+ def except_status(self, status, _vms=None):
+ if _vms is None:
+ _vms = self.vms
+ return list(filter(lambda x: x.status != status, _vms))
+
+ def get(self, key):
+ if not key.startswith(self.prefix):
+ key = join(self.prefix, key)
+ v = self.client.get(key, value_in_json=True)
+ if v:
+ return VMEntry(v)
+ return None
+
+ def put(self, obj: VMEntry):
+ self.client.put(obj.key, obj.value, value_in_json=True)
+
+ @contextmanager
+ def get_put(self, key) -> VMEntry:
+ # Updates object at key on exit
+ obj = self.get(key)
+ yield obj
+ if obj:
+ self.put(obj)
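+
+
+# Illustrative usage of get_put, not part of the original code: mutate a VM
+# entry and have it written back to etcd when the context exits:
+#
+#   with vm_pool.get_put('some-vm-uuid') as vm_entry:
+#       if vm_entry:
+#           vm_entry.declare_stopped()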
diff --git a/uncloud/uncloud_pay/__init__.py b/archive/uncloud_etcd_based/uncloud/configure/__init__.py
similarity index 100%
rename from uncloud/uncloud_pay/__init__.py
rename to archive/uncloud_etcd_based/uncloud/configure/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/configure/main.py b/archive/uncloud_etcd_based/uncloud/configure/main.py
new file mode 100644
index 0000000..87f5752
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/configure/main.py
@@ -0,0 +1,57 @@
+import os
+import argparse
+
+from uncloud.common.shared import shared
+
+arg_parser = argparse.ArgumentParser('configure', add_help=False)
+configure_subparsers = arg_parser.add_subparsers(dest='subcommand')
+
+otp_parser = configure_subparsers.add_parser('otp')
+otp_parser.add_argument('--verification-controller-url', required=True, metavar='URL')
+otp_parser.add_argument('--auth-name', required=True, metavar='OTP-NAME')
+otp_parser.add_argument('--auth-realm', required=True, metavar='OTP-REALM')
+otp_parser.add_argument('--auth-seed', required=True, metavar='OTP-SEED')
+
+network_parser = configure_subparsers.add_parser('network')
+network_parser.add_argument('--prefix-length', required=True, type=int)
+network_parser.add_argument('--prefix', required=True)
+network_parser.add_argument('--vxlan-phy-dev', required=True)
+
+netbox_parser = configure_subparsers.add_parser('netbox')
+netbox_parser.add_argument('--url', required=True)
+netbox_parser.add_argument('--token', required=True)
+
+ssh_parser = configure_subparsers.add_parser('ssh')
+ssh_parser.add_argument('--username', default='root')
+ssh_parser.add_argument('--private-key-path', default=os.path.expanduser('~/.ssh/id_rsa'),)
+
+storage_parser = configure_subparsers.add_parser('storage')
+storage_parser.add_argument('--file-dir', required=True)
+storage_parser_subparsers = storage_parser.add_subparsers(dest='storage_backend')
+
+filesystem_storage_parser = storage_parser_subparsers.add_parser('filesystem')
+filesystem_storage_parser.add_argument('--vm-dir', required=True)
+filesystem_storage_parser.add_argument('--image-dir', required=True)
+
+ceph_storage_parser = storage_parser_subparsers.add_parser('ceph')
+ceph_storage_parser.add_argument('--ceph-vm-pool', required=True)
+ceph_storage_parser.add_argument('--ceph-image-pool', required=True)
+
+
+def update_config(section, kwargs):
+ uncloud_config = shared.etcd_client.get(shared.settings.config_key, value_in_json=True)
+ if not uncloud_config:
+ uncloud_config = {}
+ else:
+ uncloud_config = uncloud_config.value
+
+ uncloud_config[section] = kwargs
+ shared.etcd_client.put(shared.settings.config_key, uncloud_config, value_in_json=True)
+
+
+def main(arguments):
+ subcommand = arguments['subcommand']
+ if not subcommand:
+ arg_parser.print_help()
+ else:
+ update_config(subcommand, arguments)
diff --git a/archive/uncloud_etcd_based/uncloud/filescanner/__init__.py b/archive/uncloud_etcd_based/uncloud/filescanner/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/filescanner/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/archive/uncloud_etcd_based/uncloud/filescanner/main.py b/archive/uncloud_etcd_based/uncloud/filescanner/main.py
new file mode 100755
index 0000000..046f915
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/filescanner/main.py
@@ -0,0 +1,85 @@
+import glob
+import os
+import pathlib
+import subprocess as sp
+import time
+import argparse
+import bitmath
+
+from uuid import uuid4
+
+from . import logger
+from uncloud.common.shared import shared
+
+arg_parser = argparse.ArgumentParser('filescanner', add_help=False)
+arg_parser.add_argument('--hostname', required=True)
+
+
+def sha512sum(file: str):
+    """Use the sha512sum utility to compute the sha512 sum of arg:file
+
+    IF arg:file does not exist:
+        raise FileNotFoundError exception
+    ELSE IF the sum is successfully computed:
+        return the computed sha512 sum
+    ELSE:
+        return None
+ """
+ if not isinstance(file, str):
+ raise TypeError
+ try:
+ output = sp.check_output(['sha512sum', file], stderr=sp.PIPE)
+ except sp.CalledProcessError as e:
+ error = e.stderr.decode('utf-8')
+ if 'No such file or directory' in error:
+ raise FileNotFoundError from None
+ else:
+ output = output.decode('utf-8').strip()
+ output = output.split(' ')
+ return output[0]
+ return None
+
+
+def track_file(file, base_dir, host):
+ file_path = file.relative_to(base_dir)
+ file_str = str(file)
+ # Get Username
+ try:
+ owner = file_path.parts[0]
+ except IndexError:
+ pass
+ else:
+ file_path = file_path.relative_to(owner)
+ creation_date = time.ctime(os.stat(file_str).st_ctime)
+
+ entry_key = os.path.join(shared.settings['etcd']['file_prefix'], str(uuid4()))
+ entry_value = {
+ 'filename': str(file_path),
+ 'owner': owner,
+ 'sha512sum': sha512sum(file_str),
+ 'creation_date': creation_date,
+ 'size': str(bitmath.Byte(os.path.getsize(file_str)).to_MB()),
+ 'host': host
+ }
+
+ logger.info('Tracking %s', file_str)
+
+ shared.etcd_client.put(entry_key, entry_value, value_in_json=True)
+
+
+def main(arguments):
+ hostname = arguments['hostname']
+ base_dir = shared.settings['storage']['file_dir']
+ # Recursively Get All Files and Folder below BASE_DIR
+ files = glob.glob('{}/**'.format(base_dir), recursive=True)
+ files = [pathlib.Path(f) for f in files if pathlib.Path(f).is_file()]
+
+ # Files that are already tracked
+ tracked_files = [
+ pathlib.Path(os.path.join(base_dir, f.value['owner'], f.value['filename']))
+ for f in shared.etcd_client.get_prefix(shared.settings['etcd']['file_prefix'], value_in_json=True)
+ if f.value['host'] == hostname
+ ]
+ untracked_files = set(files) - set(tracked_files)
+ for file in untracked_files:
+ track_file(file, base_dir, hostname)
diff --git a/archive/uncloud_etcd_based/uncloud/hack/README.org b/archive/uncloud_etcd_based/uncloud/hack/README.org
new file mode 100644
index 0000000..7529263
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/README.org
@@ -0,0 +1,13 @@
+This directory contains unfinished hacks / inspirations
+* firewalling / networking in ucloud
+** automatically route a network per VM - /64?
+** nft: one chain per VM on each vm host (?)
+*** might have scaling issues?
+** firewall rules on each VM host
+ - mac filtering:
+* To add / block
+** TODO arp poisoning
+** TODO ndp "poisoning"
+** TODO ipv4 dhcp server
+*** drop dhcpv4 requests
+*** drop dhcpv4 answers
diff --git a/archive/uncloud_etcd_based/uncloud/hack/__init__.py b/archive/uncloud_etcd_based/uncloud/hack/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/__init__.py
@@ -0,0 +1 @@
+
diff --git a/archive/uncloud_etcd_based/uncloud/hack/conf.d/ucloud-host b/archive/uncloud_etcd_based/uncloud/hack/conf.d/ucloud-host
new file mode 100644
index 0000000..d1dd8d1
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/conf.d/ucloud-host
@@ -0,0 +1 @@
+HOSTNAME=server1.place10
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/hack/config.py b/archive/uncloud_etcd_based/uncloud/hack/config.py
new file mode 100644
index 0000000..7e2655d
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/config.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+class Config(object):
+ def __init__(self, arguments):
+        """ Read the arguments dict as a base """
+
+ self.arguments = arguments
+
+ # Split them so *etcd_args can be used and we can
+ # iterate over etcd_hosts
+ self.etcd_hosts = [ arguments['etcd_host'] ]
+ self.etcd_args = {
+ 'ca_cert': arguments['etcd_ca_cert'],
+ 'cert_cert': arguments['etcd_cert_cert'],
+ 'cert_key': arguments['etcd_cert_key'],
+# 'user': None,
+# 'password': None
+ }
+ self.etcd_prefix = '/nicohack/'
diff --git a/archive/uncloud_etcd_based/uncloud/hack/db.py b/archive/uncloud_etcd_based/uncloud/hack/db.py
new file mode 100644
index 0000000..3d5582e
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/db.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+import etcd3
+import json
+import logging
+import datetime
+import re
+
+from functools import wraps
+from uncloud import UncloudException
+
+log = logging.getLogger(__name__)
+
+def db_logentry(message):
+ timestamp = datetime.datetime.now()
+ return {
+ "timestamp": str(timestamp),
+ "message": message
+ }
+
+
+def readable_errors(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except etcd3.exceptions.ConnectionFailedError as e:
+ raise UncloudException('Cannot connect to etcd: is etcd running and reachable? {}'.format(e))
+ except etcd3.exceptions.ConnectionTimeoutError as e:
+ raise UncloudException('etcd connection timeout. {}'.format(e))
+
+ return wrapper
+
+
+class DB(object):
+ def __init__(self, config, prefix="/"):
+ self.config = config
+
+ # Root for everything
+        self.base_prefix = '/nicohack'
+
+ # Can be set from outside
+ self.prefix = prefix
+
+ try:
+ self.connect()
+ except FileNotFoundError as e:
+ raise UncloudException("Is the path to the etcd certs correct? {}".format(e))
+
+ @readable_errors
+ def connect(self):
+ self._db_clients = []
+ for endpoint in self.config.etcd_hosts:
+ client = etcd3.client(host=endpoint, **self.config.etcd_args)
+ self._db_clients.append(client)
+
+ def realkey(self, key):
+ return "{}{}/{}".format(self.base_prefix,
+ self.prefix,
+ key)
+
+ @readable_errors
+ def get(self, key, as_json=False, **kwargs):
+ value, _ = self._db_clients[0].get(self.realkey(key), **kwargs)
+
+ if as_json:
+ value = json.loads(value)
+
+ return value
+
+ @readable_errors
+ def get_prefix(self, key, as_json=False, **kwargs):
+ for value, meta in self._db_clients[0].get_prefix(self.realkey(key), **kwargs):
+ k = meta.key.decode("utf-8")
+ value = value.decode("utf-8")
+ if as_json:
+ value = json.loads(value)
+
+ yield (k, value)
+
+
+ @readable_errors
+ def set(self, key, value, as_json=False, **kwargs):
+ if as_json:
+ value = json.dumps(value)
+
+ log.debug("Setting {} = {}".format(self.realkey(key), value))
+ # FIXME: iterate over clients in case of failure ?
+ return self._db_clients[0].put(self.realkey(key), value, **kwargs)
+
+
+ @readable_errors
+ def list_and_filter(self, key, filter_key=None, filter_regexp=None):
+ for k,v in self.get_prefix(key, as_json=True):
+
+ if filter_key and filter_regexp:
+ if filter_key in v:
+ if re.match(filter_regexp, v[filter_key]):
+ yield v
+ else:
+ yield v
+
+
+ @readable_errors
+ def increment(self, key, **kwargs):
+ print(self.realkey(key))
+
+
+ print("prelock")
+ lock = self._db_clients[0].lock('/nicohack/foo')
+ print("prelockacq")
+ lock.acquire()
+ print("prelockrelease")
+ lock.release()
+
+ with self._db_clients[0].lock("/nicohack/mac/last_used_index") as lock:
+ print("in lock")
+ pass
+
+# with self._db_clients[0].lock(self.realkey(key)) as lock:# value = int(self.get(self.realkey(key), **kwargs))
+# self.set(self.realkey(key), str(value + 1), **kwargs)
+
+
+if __name__ == '__main__':
+ endpoints = [ "https://etcd1.ungleich.ch:2379",
+ "https://etcd2.ungleich.ch:2379",
+ "https://etcd3.ungleich.ch:2379" ]
+
+ db = DB(url=endpoints)
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/.gitignore b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/.gitignore
new file mode 100644
index 0000000..0ad647b
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/.gitignore
@@ -0,0 +1,3 @@
+*.iso
+radvdpid
+foo
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/__init__.py b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/__init__.py
@@ -0,0 +1 @@
+
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/etcd-client.sh b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/etcd-client.sh
new file mode 100644
index 0000000..ab102a5
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/etcd-client.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+etcdctl --cert=$HOME/vcs/ungleich-dot-cdist/files/etcd/nico.pem \
+ --key=/home/nico/vcs/ungleich-dot-cdist/files/etcd/nico-key.pem \
+ --cacert=$HOME/vcs/ungleich-dot-cdist/files/etcd/ca.pem \
+ --endpoints https://etcd1.ungleich.ch:2379,https://etcd2.ungleich.ch:2379,https://etcd3.ungleich.ch:2379 "$@"
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/ifdown.sh b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/ifdown.sh
new file mode 100755
index 0000000..5753099
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/ifdown.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+echo $@
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/ifup.sh b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/ifup.sh
new file mode 100755
index 0000000..e0a3ca0
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/ifup.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+dev=$1; shift
+
+# bridge is setup from outside
+ip link set dev "$dev" master ${bridge}
+ip link set dev "$dev" up
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/mac-last b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/mac-last
new file mode 100644
index 0000000..8c5f254
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/mac-last
@@ -0,0 +1 @@
+000000000252
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/mac-prefix b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/mac-prefix
new file mode 100644
index 0000000..5084a2f
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/mac-prefix
@@ -0,0 +1 @@
+02:00
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/net.sh b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/net.sh
new file mode 100755
index 0000000..4e2bfa1
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/net.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+set -x
+
+netid=100
+dev=wlp2s0
+dev=wlp0s20f3
+#dev=wlan0
+
+ip=2a0a:e5c1:111:888::48/64
+vxlandev=vxlan${netid}
+bridgedev=br${netid}
+
+ip -6 link add ${vxlandev} type vxlan \
+ id ${netid} \
+ dstport 4789 \
+ group ff05::${netid} \
+ dev ${dev} \
+ ttl 5
+
+ip link set ${vxlandev} up
+
+
+ip link add ${bridgedev} type bridge
+ip link set ${bridgedev} up
+
+ip link set ${vxlandev} master ${bridgedev} up
+
+ip addr add ${ip} dev ${bridgedev}
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/nftrules b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/nftrules
new file mode 100644
index 0000000..636c63d
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/nftrules
@@ -0,0 +1,31 @@
+flush ruleset
+
+table bridge filter {
+ chain prerouting {
+ type filter hook prerouting priority 0;
+ policy accept;
+
+ ibrname br100 jump br100
+ }
+
+ chain br100 {
+ # Allow all incoming traffic from outside
+ iifname vxlan100 accept
+
+ # Default blocks: router advertisements, dhcpv6, dhcpv4
+ icmpv6 type nd-router-advert drop
+ ip6 version 6 udp sport 547 drop
+ ip version 4 udp sport 67 drop
+
+ jump br100_vmlist
+ drop
+ }
+ chain br100_vmlist {
+ # VM1
+ iifname tap1 ether saddr 02:00:f0:a9:c4:4e ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44e accept
+
+ # VM2
+ iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44f accept
+ iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:1234::/64 accept
+ }
+}
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/nftrules-v2 b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/nftrules-v2
new file mode 100644
index 0000000..b6d4cf3
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/nftrules-v2
@@ -0,0 +1,64 @@
+flush ruleset
+
+table bridge filter {
+ chain prerouting {
+ type filter hook prerouting priority 0;
+ policy accept;
+
+ ibrname br100 jump netpublic
+ }
+
+ chain netpublic {
+ iifname vxlan100 jump from_uncloud
+
+ # Default blocks: router advertisements, dhcpv6, dhcpv4
+ icmpv6 type nd-router-advert drop
+ ip6 version 6 udp sport 547 drop
+ ip version 4 udp sport 67 drop
+
+ # Individual blocks
+# iifname tap1 jump vm1
+ }
+
+ chain vm1 {
+ ether saddr != 02:00:f0:a9:c4:4e drop
+ ip6 saddr != 2a0a:e5c1:111:888:0:f0ff:fea9:c44e drop
+ }
+
+ chain from_uncloud {
+ accept
+ }
+}
+
+# table ip6 filter {
+# chain forward {
+# type filter hook forward priority 0;
+
+# # policy drop;
+
+# ct state established,related accept;
+
+# }
+
+# }
+
+# table ip filter {
+# chain input {
+# type filter hook input priority filter; policy drop;
+# iif "lo" accept
+# icmp type { echo-reply, destination-unreachable, source-quench, redirect, echo-request, router-advertisement, router-solicitation, time-exceeded, parameter-problem, timestamp-request, timestamp-reply, info-request, info-reply, address-mask-request, address-mask-reply } accept
+# ct state established,related accept
+# tcp dport { 22 } accept
+# log prefix "firewall-ipv4: "
+# udp sport 67 drop
+# }
+
+# chain forward {
+# type filter hook forward priority filter; policy drop;
+# log prefix "firewall-ipv4: "
+# }
+
+# chain output {
+# type filter hook output priority filter; policy accept;
+# }
+# }
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/radvd.conf b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/radvd.conf
new file mode 100644
index 0000000..3d8ce4d
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/radvd.conf
@@ -0,0 +1,13 @@
+interface br100
+{
+ AdvSendAdvert on;
+ MinRtrAdvInterval 3;
+ MaxRtrAdvInterval 5;
+ AdvDefaultLifetime 3600;
+
+ prefix 2a0a:e5c1:111:888::/64 {
+ };
+
+ RDNSS 2a0a:e5c0::3 2a0a:e5c0::4 { AdvRDNSSLifetime 6000; };
+ DNSSL place7.ungleich.ch { AdvDNSSLLifetime 6000; } ;
+};
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/radvd.sh b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/radvd.sh
new file mode 100644
index 0000000..9d0e7d1
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/radvd.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+radvd -C ./radvd.conf -n -p ./radvdpid
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/vm-2.sh b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/vm-2.sh
new file mode 100755
index 0000000..af9dec7
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/vm-2.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+vmid=$1; shift
+
+qemu=/usr/bin/qemu-system-x86_64
+
+accel=kvm
+#accel=tcg
+
+memory=1024
+cores=2
+uuid=732e08c7-84f8-4d43-9571-263db4f80080
+
+export bridge=br100
+
+$qemu -name uc${vmid} \
+ -machine pc,accel=${accel} \
+ -m ${memory} \
+ -smp ${cores} \
+ -uuid ${uuid} \
+ -drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
+ -drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
+ -netdev tap,id=netmain,script=./ifup.sh \
+ -device virtio-net-pci,netdev=netmain,id=net0,mac=02:00:f0:a9:c4:4e
diff --git a/archive/uncloud_etcd_based/uncloud/hack/hackcloud/vm.sh b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/vm.sh
new file mode 100755
index 0000000..dd9be84
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/hackcloud/vm.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+# if [ $# -ne 1 ]; then
+# echo "$0: owner"
+# exit 1
+# fi
+
+qemu=/usr/bin/qemu-system-x86_64
+
+accel=kvm
+#accel=tcg
+
+memory=1024
+cores=2
+uuid=$(uuidgen)
+mac=$(./mac-gen.py)
+owner=nico
+
+export bridge=br100
+
+set -x
+$qemu -name "uncloud-${uuid}" \
+ -machine pc,accel=${accel} \
+ -m ${memory} \
+ -smp ${cores} \
+ -uuid ${uuid} \
+ -drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
+ -netdev tap,id=netmain,script=./ifup.sh,downscript=./ifdown.sh \
+ -device virtio-net-pci,netdev=netmain,id=net0,mac=${mac}
diff --git a/archive/uncloud_etcd_based/uncloud/hack/host.py b/archive/uncloud_etcd_based/uncloud/hack/host.py
new file mode 100644
index 0000000..06ccf98
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/host.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+
+import uuid
+
+from uncloud.hack.db import DB
+from uncloud import UncloudException
+
+class Host(object):
+ def __init__(self, config, db_entry=None):
+ self.config = config
+ self.db = DB(self.config, prefix="/hosts")
+
+ if db_entry:
+ self.db_entry = db_entry
+
+
+ def list_hosts(self, filter_key=None, filter_regexp=None):
+ """ Return list of all hosts """
+ for entry in self.db.list_and_filter("", filter_key, filter_regexp):
+ yield self.__class__(self.config, db_entry=entry)
+
+ def cmdline_add_host(self):
+ """ FIXME: make this a bit smarter and less redundant """
+
+ for required_arg in [
+ 'add_vm_host',
+ 'max_cores_per_vm',
+ 'max_cores_total',
+ 'max_memory_in_gb' ]:
+ if not required_arg in self.config.arguments:
+ raise UncloudException("Missing argument: {}".format(required_arg))
+
+ return self.add_host(
+ self.config.arguments['add_vm_host'],
+ self.config.arguments['max_cores_per_vm'],
+ self.config.arguments['max_cores_total'],
+ self.config.arguments['max_memory_in_gb'])
+
+
+ def add_host(self,
+ hostname,
+ max_cores_per_vm,
+ max_cores_total,
+ max_memory_in_gb):
+
+ db_entry = {}
+ db_entry['uuid'] = str(uuid.uuid4())
+ db_entry['hostname'] = hostname
+ db_entry['max_cores_per_vm'] = max_cores_per_vm
+ db_entry['max_cores_total'] = max_cores_total
+ db_entry['max_memory_in_gb'] = max_memory_in_gb
+ db_entry["db_version"] = 1
+ db_entry["log"] = []
+
+ self.db.set(db_entry['uuid'], db_entry, as_json=True)
+
+ return self.__class__(self.config, db_entry)
diff --git a/archive/uncloud_etcd_based/uncloud/hack/mac.py b/archive/uncloud_etcd_based/uncloud/hack/mac.py
new file mode 100755
index 0000000..e35cd9f
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/mac.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2012 Nico Schottelius (nico-cinv at schottelius.org)
+#
+# This file is part of cinv.
+#
+# cinv is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# cinv is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with cinv. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+
+import argparse
+import logging
+import os.path
+import os
+import re
+import json
+
+from uncloud import UncloudException
+from uncloud.hack.db import DB
+
+log = logging.getLogger(__name__)
+
+
+class MAC(object):
+ def __init__(self, config):
+ self.config = config
+ self.no_db = self.config.arguments['no_db']
+ if not self.no_db:
+ self.db = DB(config, prefix="/mac")
+
+ self.prefix = 0x420000000000
+ self._number = 0 # Not set by default
+
+ @staticmethod
+ def validate_mac(mac):
+ if not re.match(r'([0-9A-F]{2}[-:]){5}[0-9A-F]{2}$', mac, re.I):
+ raise UncloudException("Not a valid mac address: %s" % mac)
+ else:
+ return True
+
+ def last_used_index(self):
+ if not self.no_db:
+ value = self.db.get("last_used_index")
+ if not value:
+ self.db.set("last_used_index", "0")
+ value = self.db.get("last_used_index")
+
+ else:
+ value = "0"
+
+ return int(value)
+
+    def last_used_mac(self):
+        b = (self.prefix + self.last_used_index()).to_bytes(6, byteorder="big")
+        return ':'.join(format(s, '02x') for s in b)
+
+ def to_colon_format(self):
+ b = self._number.to_bytes(6, byteorder="big")
+ return ':'.join(format(s, '02x') for s in b)
+
+ def to_str_format(self):
+ b = self._number.to_bytes(6, byteorder="big")
+ return ''.join(format(s, '02x') for s in b)
+
+ def create(self):
+ last_number = self.last_used_index()
+
+ if last_number == int('0xffffffff', 16):
+ raise UncloudException("Exhausted all possible mac addresses - try to free some")
+
+ next_number = last_number + 1
+ self._number = self.prefix + next_number
+
+ #next_number_string = "{:012x}".format(next_number)
+ #next_mac = self.int_to_mac(next_mac_number)
+ # db_entry = {}
+ # db_entry['vm_uuid'] = vmuuid
+ # db_entry['index'] = next_number
+ # db_entry['mac_address'] = next_mac
+
+ # should be one transaction
+ # self.db.increment("last_used_index")
+ # self.db.set("used/{}".format(next_mac),
+ # db_entry, as_json=True)
+
+ def __int__(self):
+ return self._number
+
+ def __repr__(self):
+ return self.to_str_format()
+
+ def __str__(self):
+ return self.to_colon_format()
diff --git a/archive/uncloud_etcd_based/uncloud/hack/main.py b/archive/uncloud_etcd_based/uncloud/hack/main.py
new file mode 100644
index 0000000..0ddd8fb
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/main.py
@@ -0,0 +1,186 @@
+import argparse
+import logging
+import re
+
+import ldap3
+
+
+from uncloud.hack.vm import VM
+from uncloud.hack.host import Host
+from uncloud.hack.config import Config
+from uncloud.hack.mac import MAC
+from uncloud.hack.net import VXLANBridge, DNSRA
+
+from uncloud import UncloudException
+from uncloud.hack.product import ProductOrder
+
+arg_parser = argparse.ArgumentParser('hack', add_help=False)
+ #description="Commands that are unfinished - use at own risk")
+arg_parser.add_argument('--last-used-mac', action='store_true')
+arg_parser.add_argument('--get-new-mac', action='store_true')
+
+arg_parser.add_argument('--init-network', help="Initialise networking", action='store_true')
+arg_parser.add_argument('--create-vxlan', help="Initialise networking", action='store_true')
+arg_parser.add_argument('--network', help="/64 IPv6 network")
+arg_parser.add_argument('--vxlan-uplink-device', help="The VXLAN underlay device, i.e. eth0")
+arg_parser.add_argument('--vni', help="VXLAN ID (decimal)", type=int)
+arg_parser.add_argument('--run-dns-ra', action='store_true',
+ help="Provide router advertisements and DNS resolution via dnsmasq")
+arg_parser.add_argument('--use-sudo', help="Use sudo for command requiring root!", action='store_true')
+
+arg_parser.add_argument('--create-vm', action='store_true')
+arg_parser.add_argument('--destroy-vm', action='store_true')
+arg_parser.add_argument('--get-vm-status', action='store_true')
+arg_parser.add_argument('--get-vm-vnc', action='store_true')
+arg_parser.add_argument('--list-vms', action='store_true')
+arg_parser.add_argument('--memory', help="Size of memory (GB)", type=int, default=2)
+arg_parser.add_argument('--cores', help="Amount of CPU cores", type=int, default=1)
+arg_parser.add_argument('--image', help="Path (under hackprefix) to OS image")
+
+arg_parser.add_argument('--image-format', help="Image format: qcow2 or raw", choices=['raw', 'qcow2'])
+arg_parser.add_argument('--uuid', help="VM UUID")
+
+arg_parser.add_argument('--no-db', help="Disable connection to etcd. For local testing only!", action='store_true')
+arg_parser.add_argument('--hackprefix', help="hackprefix, if you need it you know it (it's where the iso is located and ifup/down.sh)")
+
+# order based commands => later to be shifted below "order"
+arg_parser.add_argument('--order', action='store_true')
+arg_parser.add_argument('--list-orders', help="List all orders", action='store_true')
+arg_parser.add_argument('--filter-order-key', help="Which key to filter on")
+arg_parser.add_argument('--filter-order-regexp', help="Which regexp the value should match")
+
+arg_parser.add_argument('--process-orders', help="Process all (pending) orders", action='store_true')
+
+arg_parser.add_argument('--product', choices=["dualstack-vm"])
+arg_parser.add_argument('--os-image-name', help="Name of OS image (successor to --image)")
+arg_parser.add_argument('--os-image-size', help="Size of OS image in GB", type=int, default=10)
+
+arg_parser.add_argument('--username')
+arg_parser.add_argument('--password')
+
+arg_parser.add_argument('--api', help="Run the API")
+arg_parser.add_argument('--mode',
+ choices=["direct", "api", "client"],
+ default="client",
+ help="Directly manipulate etcd, spawn the API server or behave as a client")
+
+
+arg_parser.add_argument('--add-vm-host', help="Add a host that can run VMs")
+arg_parser.add_argument('--list-vm-hosts', action='store_true')
+
+arg_parser.add_argument('--max-cores-per-vm')
+arg_parser.add_argument('--max-cores-total')
+arg_parser.add_argument('--max-memory-in-gb')
+
+
+log = logging.getLogger(__name__)
+
+def authenticate(username, password, totp_token=None):
+ server = ldap3.Server("ldaps://ldap1.ungleich.ch")
+ dn = "uid={},ou=customer,dc=ungleich,dc=ch".format(username)
+
+ log.debug("LDAP: connecting to {} as {}".format(server, dn))
+
+ try:
+ conn = ldap3.Connection(server, dn, password, auto_bind=True)
+ except ldap3.core.exceptions.LDAPBindError as e:
+ raise UncloudException("Credentials not verified by LDAP server: {}".format(e))
+
+
+
+def order(config):
+ for required_arg in [ 'product', 'username', 'password' ]:
+ if not config.arguments[required_arg]:
+ raise UncloudException("Missing required argument: {}".format(required_arg))
+
+ if config.arguments['product'] == 'dualstack-vm':
+ for required_arg in [ 'cores', 'memory', 'os_image_name', 'os_image_size' ]:
+ if not config.arguments[required_arg]:
+ raise UncloudException("Missing required argument: {}".format(required_arg))
+
+ log.debug(config.arguments)
+ authenticate(config.arguments['username'], config.arguments['password'])
+
+ # create DB entry for VM
+ vm = VM(config)
+ return vm.product.place_order(owner=config.arguments['username'])
+
+
+
+
+
+def main(arguments):
+ config = Config(arguments)
+
+ if arguments['add_vm_host']:
+ h = Host(config)
+ h.cmdline_add_host()
+
+ if arguments['list_vm_hosts']:
+ h = Host(config)
+
+ for host in h.list_hosts(filter_key=arguments['filter_order_key'],
+ filter_regexp=arguments['filter_order_regexp']):
+ print("Host {}: {}".format(host.db_entry['uuid'], host.db_entry))
+
+ if arguments['order']:
+ print("Created order: {}".format(order(config)))
+
+ if arguments['list_orders']:
+ p = ProductOrder(config)
+ for product_order in p.list_orders(filter_key=arguments['filter_order_key'],
+ filter_regexp=arguments['filter_order_regexp']):
+ print("Order {}: {}".format(product_order.db_entry['uuid'], product_order.db_entry))
+
+ if arguments['process_orders']:
+ p = ProductOrder(config)
+ p.process_orders()
+
+ if arguments['create_vm']:
+ vm = VM(config)
+ vm.create()
+
+ if arguments['destroy_vm']:
+ vm = VM(config)
+ vm.stop()
+
+ if arguments['get_vm_status']:
+ vm = VM(config)
+ vm.status()
+
+ if arguments['get_vm_vnc']:
+ vm = VM(config)
+ vm.vnc_addr()
+
+ if arguments['list_vms']:
+ vm = VM(config)
+ vm.list()
+
+ if arguments['last_used_mac']:
+ m = MAC(config)
+ print(m.last_used_mac())
+
+    if arguments['get_new_mac']:
+        m = MAC(config)
+        m.create()
+        print(m)
+
+ #if arguments['init_network']:
+ if arguments['create_vxlan']:
+ if not arguments['network'] or not arguments['vni'] or not arguments['vxlan_uplink_device']:
+ raise UncloudException("Initialising the network requires an IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
+ vb = VXLANBridge(vni=arguments['vni'],
+ route=arguments['network'],
+ uplinkdev=arguments['vxlan_uplink_device'],
+ use_sudo=arguments['use_sudo'])
+ vb._setup_vxlan()
+ vb._setup_bridge()
+ vb._add_vxlan_to_bridge()
+ vb._route_network()
+
+ if arguments['run_dns_ra']:
+ if not arguments['network'] or not arguments['vni']:
+ raise UncloudException("Providing DNS/RAs requires a /64 IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
+
+ dnsra = DNSRA(route=arguments['network'],
+ vni=arguments['vni'],
+ use_sudo=arguments['use_sudo'])
+ dnsra._setup_dnsmasq()
diff --git a/archive/uncloud_etcd_based/uncloud/hack/net.py b/archive/uncloud_etcd_based/uncloud/hack/net.py
new file mode 100644
index 0000000..4887e04
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/net.py
@@ -0,0 +1,116 @@
+import subprocess
+import ipaddress
+import logging
+
+
+from uncloud import UncloudException
+
+log = logging.getLogger(__name__)
+
+
+class VXLANBridge(object):
+ cmd_create_vxlan = "{sudo}ip -6 link add {vxlandev} type vxlan id {vni_dec} dstport 4789 group {multicast_address} dev {uplinkdev} ttl 5"
+ cmd_up_dev = "{sudo}ip link set {dev} up"
+ cmd_create_bridge="{sudo}ip link add {bridgedev} type bridge"
+ cmd_add_to_bridge="{sudo}ip link set {vxlandev} master {bridgedev} up"
+ cmd_add_addr="{sudo}ip addr add {ip} dev {bridgedev}"
+ cmd_add_route_dev="{sudo}ip route add {route} dev {bridgedev}"
+
+ # VXLAN ids are at maximum 24 bit - use a /104
+ multicast_network = ipaddress.IPv6Network("ff05::/104")
+ max_vni = (2**24)-1
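+    # e.g. vni=256 -> multicast group ff05::100, devices vx100 / br100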
+
+ def __init__(self,
+ vni,
+ uplinkdev,
+ route=None,
+ use_sudo=False):
+ self.config = {}
+
+ if vni > self.max_vni:
+ raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))
+
+ if use_sudo:
+ self.config['sudo'] = 'sudo '
+ else:
+ self.config['sudo'] = ''
+
+ self.config['vni_dec'] = vni
+ self.config['vni_hex'] = "{:x}".format(vni)
+ self.config['multicast_address'] = self.multicast_network[vni]
+
+ self.config['route_network'] = ipaddress.IPv6Network(route)
+ self.config['route'] = route
+
+ self.config['uplinkdev'] = uplinkdev
+ self.config['vxlandev'] = "vx{}".format(self.config['vni_hex'])
+ self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])
+
+
+ def setup_networking(self):
+ pass
+
+ def _setup_vxlan(self):
+ self._execute_cmd(self.cmd_create_vxlan)
+ self._execute_cmd(self.cmd_up_dev, dev=self.config['vxlandev'])
+
+ def _setup_bridge(self):
+ self._execute_cmd(self.cmd_create_bridge)
+ self._execute_cmd(self.cmd_up_dev, dev=self.config['bridgedev'])
+
+ def _route_network(self):
+ self._execute_cmd(self.cmd_add_route_dev)
+
+ def _add_vxlan_to_bridge(self):
+ self._execute_cmd(self.cmd_add_to_bridge)
+
+ def _execute_cmd(self, cmd_string, **kwargs):
+ cmd = cmd_string.format(**self.config, **kwargs)
+ log.info("Executing: {}".format(cmd))
+ subprocess.run(cmd.split())
+
+class ManagementBridge(VXLANBridge):
+ pass
+
+
+class DNSRA(object):
+ # VXLAN ids are at maximum 24 bit
+ max_vni = (2**24)-1
+
+
+ # Command to start dnsmasq
+ cmd_start_dnsmasq="{sudo}dnsmasq --interface={bridgedev} --bind-interfaces --dhcp-range={route},ra-only,infinite --enable-ra --no-daemon"
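+    # e.g. vni=1, route=fd00::/64 (and no sudo) expands to:
+    #   dnsmasq --interface=br1 --bind-interfaces --dhcp-range=fd00::,ra-only,infinite --enable-ra --no-daemon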
+
+ def __init__(self,
+ vni,
+ route=None,
+ use_sudo=False):
+ self.config = {}
+
+ if vni > self.max_vni:
+ raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))
+
+ if use_sudo:
+ self.config['sudo'] = 'sudo '
+ else:
+ self.config['sudo'] = ''
+
+ #TODO: remove if not needed
+ #self.config['vni_dec'] = vni
+ self.config['vni_hex'] = "{:x}".format(vni)
+
+        # dnsmasq only wants the network address without the prefix length, so cut it off
+ self.config['route'] = ipaddress.IPv6Network(route).network_address
+ self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])
+
+ def _setup_dnsmasq(self):
+ self._execute_cmd(self.cmd_start_dnsmasq)
+
+ def _execute_cmd(self, cmd_string, **kwargs):
+ cmd = cmd_string.format(**self.config, **kwargs)
+ log.info("Executing: {}".format(cmd))
+ print("Executing: {}".format(cmd))
+ subprocess.run(cmd.split())
+
+class Firewall(object):
+ pass
diff --git a/archive/uncloud_etcd_based/uncloud/hack/nftables.conf b/archive/uncloud_etcd_based/uncloud/hack/nftables.conf
new file mode 100644
index 0000000..7d1742e
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/nftables.conf
@@ -0,0 +1,94 @@
+flush ruleset
+
+table bridge filter {
+ chain prerouting {
+ type filter hook prerouting priority 0;
+ policy accept;
+ ibrname br100 jump netpublic
+ }
+ chain netpublic {
+ icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
+ }
+}
+
+table ip6 filter {
+ chain forward {
+ type filter hook forward priority 0;
+
+ # this would be nice...
+ policy drop;
+
+ ct state established,related accept;
+
+ }
+
+ chain prerouting {
+ type filter hook prerouting priority 0;
+ policy accept;
+
+ # not supporting in here!
+
+
+ iifname vmXXXX jump vmXXXX
+ iifname vmYYYY jump vmYYYY
+
+ iifname brXX jump brXX
+
+ iifname vxlan100 jump vxlan100
+ iifname br100 jump br100
+ }
+
+    # 1. Rules per VM (names: vmXXXXX?)
+ # 2. Rules per network (names: vxlanXXXX, what about non vxlan?)
+ # 3. Rules per bridge:
+ # vxlanXX is inside brXX
+ # This is effectively a network filter
+    # 4. Kill all malicious traffic:
+ # - router advertisements from VMs in which they should not announce RAs
+
+
+
+ chain vxlan100 {
+ icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
+ }
+ chain br100 {
+ icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
+ }
+
+ chain netpublic {
+ # drop router advertisements that don't come from us
+ iifname != vxlanpublic icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
+ # icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
+
+ }
+
+ # This vlan
+ chain brXX {
+ ip6 saddr != 2001:db8:1::/64 drop;
+ }
+
+ chain vmXXXX {
+ ether saddr != 00:0f:54:0c:11:04 drop;
+ ip6 saddr != 2001:db8:1:000f::540c:11ff:fe04 drop;
+ jump drop_from_vm_without_ipam
+ }
+
+ chain net_2a0ae5c05something {
+
+
+ }
+
+ chain drop_from_vm_without_ipam {
+
+ }
+
+ chain vmYYYY {
+ ether saddr != 00:0f:54:0c:11:05 drop;
+ jump drop_from_vm_with_ipam
+ }
+
+ # Drop stuff from every VM
+ chain drop_from_vm_with_ipam {
+ icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
+ }
+}
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/hack/product.py b/archive/uncloud_etcd_based/uncloud/hack/product.py
new file mode 100755
index 0000000..f979268
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/product.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import uuid
+import logging
+import re
+import importlib
+
+from uncloud import UncloudException
+from uncloud.hack.db import DB, db_logentry
+
+log = logging.getLogger(__name__)
+
+class ProductOrder(object):
+ def __init__(self, config, product_entry=None, db_entry=None):
+ self.config = config
+ self.db = DB(self.config, prefix="/orders")
+ self.db_entry = {}
+ self.db_entry["product"] = product_entry
+
+ # Overwrite if we are loading an existing product order
+ if db_entry:
+ self.db_entry = db_entry
+
+ # FIXME: this should return a list of our class!
+ def list_orders(self, filter_key=None, filter_regexp=None):
+ for entry in self.db.list_and_filter("", filter_key, filter_regexp):
+ yield self.__class__(self.config, db_entry=entry)
+
+
+ def set_required_values(self):
+ """Set values that are required to make the db entry valid"""
+ if not "uuid" in self.db_entry:
+ self.db_entry["uuid"] = str(uuid.uuid4())
+ if not "status" in self.db_entry:
+ self.db_entry["status"] = "NEW"
+ if not "owner" in self.db_entry:
+ self.db_entry["owner"] = "UNKNOWN"
+ if not "log" in self.db_entry:
+ self.db_entry["log"] = []
+ if not "db_version" in self.db_entry:
+ self.db_entry["db_version"] = 1
+
+ def validate_status(self):
+ if "status" in self.db_entry:
+ if self.db_entry["status"] in [ "NEW",
+ "SCHEDULED",
+ "CREATED_ACTIVE",
+ "CANCELLED",
+ "REJECTED" ]:
+ return False
+ return True
+
+ def order(self):
+ self.set_required_values()
+ if not self.db_entry["status"] == "NEW":
+ raise UncloudException("Cannot re-order same order. Status: {}".format(self.db_entry["status"]))
+ self.db.set(self.db_entry["uuid"], self.db_entry, as_json=True)
+
+ return self.db_entry["uuid"]
+
+ def process_orders(self):
+ """processing orders can be done stand alone on server side"""
+ for order in self.list_orders():
+ if order.db_entry["status"] == "NEW":
+ log.info("Handling new order: {}".format(order))
+
+                # FIXME: these should all be transactions -> fix concurrent access!
+ if not "log" in order.db_entry:
+ order.db_entry['log'] = []
+
+ is_valid = True
+ # Verify the order entry
+ for must_attribute in [ "owner", "product" ]:
+ if not must_attribute in order.db_entry:
+ message = "Missing {} entry in order, rejecting order".format(must_attribute)
+ log.info("Rejecting order {}: {}".format(order.db_entry["uuid"], message))
+
+ order.db_entry['log'].append(db_logentry(message))
+ order.db_entry['status'] = "REJECTED"
+ self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)
+
+ is_valid = False
+
+ # Rejected the order
+ if not is_valid:
+ continue
+
+ # Verify the product entry
+ for must_attribute in [ "python_product_class", "python_product_module" ]:
+ if not must_attribute in order.db_entry['product']:
+ message = "Missing {} entry in product of order, rejecting order".format(must_attribute)
+ log.info("Rejecting order {}: {}".format(order.db_entry["uuid"], message))
+
+ order.db_entry['log'].append(db_logentry(message))
+ order.db_entry['status'] = "REJECTED"
+ self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)
+
+ is_valid = False
+
+ # Rejected the order
+ if not is_valid:
+ continue
+
+ print(order.db_entry["product"]["python_product_class"])
+
+ # Create the product
+ m = importlib.import_module(order.db_entry["product"]["python_product_module"])
+ c = getattr(m, order.db_entry["product"]["python_product_class"])
+
+                product = c(self.config, db_entry=order.db_entry["product"])
+
+ # STOPPED
+ product.create_product()
+
+ order.db_entry['status'] = "SCHEDULED"
+ self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)
+
+
+
+ def __str__(self):
+ return str(self.db_entry)
+
+class Product(object):
+ def __init__(self,
+ config,
+ product_name,
+ product_class,
+ db_entry=None):
+ self.config = config
+ self.db = DB(self.config, prefix="/orders")
+
+ self.db_entry = {}
+ self.db_entry["product_name"] = product_name
+ self.db_entry["python_product_class"] = product_class.__qualname__
+ self.db_entry["python_product_module"] = product_class.__module__
+ self.db_entry["db_version"] = 1
+ self.db_entry["log"] = []
+ self.db_entry["features"] = {}
+
+ # Existing product? Read in db_entry
+ if db_entry:
+ self.db_entry = db_entry
+
+ self.valid_periods = [ "per_year", "per_month", "per_week",
+ "per_day", "per_hour",
+ "per_minute", "per_second" ]
+
+ def define_feature(self,
+ name,
+ one_time_price,
+ recurring_price,
+ recurring_period,
+ minimum_period):
+
+ self.db_entry['features'][name] = {}
+ self.db_entry['features'][name]['one_time_price'] = one_time_price
+ self.db_entry['features'][name]['recurring_price'] = recurring_price
+
+ if not recurring_period in self.valid_periods:
+ raise UncloudException("Invalid recurring period: {}".format(recurring_period))
+
+ self.db_entry['features'][name]['recurring_period'] = recurring_period
+
+ if not minimum_period in self.valid_periods:
+ raise UncloudException("Invalid recurring period: {}".format(recurring_period))
+
+ recurring_index = self.valid_periods.index(recurring_period)
+ minimum_index = self.valid_periods.index(minimum_period)
+
+ if minimum_index < recurring_index:
+ raise UncloudException("Minimum period for product '{}' feature '{}' must be shorter or equal than/as recurring period: {} > {}".format(self.db_entry['product_name'], name, minimum_period, recurring_period))
+
+ self.db_entry['features'][name]['minimum_period'] = minimum_period
+
+
+ def validate_product(self):
+ for feature in self.db_entry['features']:
+ pass
+
+ def place_order(self, owner):
+ """ Schedule creating the product in etcd """
+ order = ProductOrder(self.config, product_entry=self.db_entry)
+ order.db_entry["owner"] = owner
+ return order.order()
+
+ def __str__(self):
+ return json.dumps(self.db_entry)
diff --git a/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-api b/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-api
new file mode 100644
index 0000000..eb7f83e
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-api
@@ -0,0 +1,8 @@
+#!/sbin/openrc-run
+
+name="$RC_SVCNAME"
+pidfile="/var/run/${name}.pid"
+command="$(which pipenv)"
+command_args="run python ucloud.py api"
+command_background="true"
+directory="/root/ucloud"
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-host b/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-host
new file mode 100644
index 0000000..0aa375f
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-host
@@ -0,0 +1,8 @@
+#!/sbin/openrc-run
+
+name="$RC_SVCNAME"
+pidfile="/var/run/${name}.pid"
+command="$(which pipenv)"
+command_args="run python ucloud.py host ${HOSTNAME}"
+command_background="true"
+directory="/root/ucloud"
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-metadata b/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-metadata
new file mode 100644
index 0000000..d41807f
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-metadata
@@ -0,0 +1,8 @@
+#!/sbin/openrc-run
+
+name="$RC_SVCNAME"
+pidfile="/var/run/${name}.pid"
+command="$(which pipenv)"
+command_args="run python ucloud.py metadata"
+command_background="true"
+directory="/root/ucloud"
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-scheduler b/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-scheduler
new file mode 100644
index 0000000..00c0a36
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/rc-scripts/ucloud-scheduler
@@ -0,0 +1,8 @@
+#!/sbin/openrc-run
+
+name="$RC_SVCNAME"
+pidfile="/var/run/${name}.pid"
+command="$(which pipenv)"
+command_args="run python ucloud.py scheduler"
+command_background="true"
+directory="/root/ucloud"
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/hack/uncloud-hack-init-host b/archive/uncloud_etcd_based/uncloud/hack/uncloud-hack-init-host
new file mode 100644
index 0000000..787ff80
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/uncloud-hack-init-host
@@ -0,0 +1,26 @@
+id=100
+rawdev=eth0
+
+# create vxlan
+ip -6 link add vxlan${id} type vxlan \
+ id ${id} \
+ dstport 4789 \
+ group ff05::${id} \
+ dev ${rawdev} \
+ ttl 5
+
+ip link set vxlan${id} up
+
+# create bridge
+ip link add br${id} type bridge
+ip link set br${id} up
+
+# Add vxlan into bridge
+ip link set vxlan${id} master br${id}
+
+
+# useradd -m uncloud
+# [18:05] tablett.place10:~# id uncloud
+# uid=1000(uncloud) gid=1000(uncloud) groups=1000(uncloud),34(kvm),36(qemu)
+# apk add qemu-system-x86_64
+# also needs group netdev
diff --git a/archive/uncloud_etcd_based/uncloud/hack/uncloud-run-vm b/archive/uncloud_etcd_based/uncloud/hack/uncloud-run-vm
new file mode 100644
index 0000000..33e5860
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/uncloud-run-vm
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+if [ $# -ne 1 ]; then
+ echo $0 vmid
+ exit 1
+fi
+
+id=$1; shift
+
+memory=512
+macaddress=02:00:b9:cb:70:${id}
+netname=net${id}-1
+
+qemu-system-x86_64 \
+ -name uncloud-${id} \
+ -accel kvm \
+ -m ${memory} \
+ -smp 2,sockets=2,cores=1,threads=1 \
+ -device virtio-net-pci,netdev=net0,mac=$macaddress \
+ -netdev tap,id=net0,ifname=${netname},script=no,downscript=no \
+ -vnc [::]:0
+
+# To be changed:
+# -vnc to unix path
+# or -spice
diff --git a/archive/uncloud_etcd_based/uncloud/hack/vm.py b/archive/uncloud_etcd_based/uncloud/hack/vm.py
new file mode 100755
index 0000000..4b0ca14
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/hack/vm.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
+#
+# This file is part of uncloud.
+#
+# uncloud is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# uncloud is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+
+# This module is directly called from the hack module, and can be used as follows:
+#
+# Create a new VM with default CPU/Memory. The path of the image file is relative to $hackprefix.
+# `uncloud hack --hackprefix /tmp/hackcloud --create-vm --image mysuperimage.qcow2`
+#
+# List running VMs (returns a list of UUIDs).
+# `uncloud hack --hackprefix /tmp/hackcloud --list-vms`
+#
+# Get VM status:
+# `uncloud hack --hackprefix /tmp/hackcloud --get-vm-status --uuid my-vm-uuid`
+#
+# Stop a VM:
+# `uncloud hack --hackprefix /tmp/hackcloud --destroy-vm --uuid my-vm-uuid`
+
+import subprocess
+import uuid
+import os
+import logging
+
+from uncloud.hack.db import DB
+from uncloud.hack.mac import MAC
+from uncloud.vmm import VMM
+from uncloud.hack.product import Product
+
+log = logging.getLogger(__name__)
+log.setLevel(logging.DEBUG)
+
+class VM(object):
+ def __init__(self, config, db_entry=None):
+ self.config = config
+
+ #TODO: Enable etcd lookup
+ self.no_db = self.config.arguments['no_db']
+ if not self.no_db:
+ self.db = DB(self.config, prefix="/vm")
+
+ if db_entry:
+ self.db_entry = db_entry
+
+ # General CLI arguments.
+ self.hackprefix = self.config.arguments['hackprefix']
+ self.uuid = self.config.arguments['uuid']
+ self.memory = self.config.arguments['memory'] or '1024M'
+ self.cores = self.config.arguments['cores'] or 1
+
+ if self.config.arguments['image']:
+ self.image = os.path.join(self.hackprefix, self.config.arguments['image'])
+ else:
+ self.image = None
+
+ if self.config.arguments['image_format']:
+ self.image_format=self.config.arguments['image_format']
+ else:
+ self.image_format='qcow2'
+
+ # External components.
+
+ # This one is broken:
+ # TypeError: expected str, bytes or os.PathLike object, not NoneType
+ # Fix before re-enabling
+ # self.vmm = VMM(vmm_backend=self.hackprefix)
+ self.mac = MAC(self.config)
+
+        # Hardcoded & generated values.
+ self.owner = 'uncloud'
+ self.accel = 'kvm'
+ self.threads = 1
+ self.ifup = os.path.join(self.hackprefix, "ifup.sh")
+ self.ifdown = os.path.join(self.hackprefix, "ifdown.sh")
+ self.ifname = "uc{}".format(self.mac.to_str_format())
+
+ self.vm = {}
+
+ self.product = Product(config, product_name="dualstack-vm",
+ product_class=self.__class__)
+ self.product.define_feature(name="base",
+ one_time_price=0,
+ recurring_price=9,
+ recurring_period="per_month",
+ minimum_period="per_hour")
+
+
+ self.features = []
+
+
+ def get_qemu_args(self):
+ command = (
+ "-name {owner}-{name}"
+ " -machine pc,accel={accel}"
+ " -drive file={image},format={image_format},if=virtio"
+ " -device virtio-rng-pci"
+ " -m {memory} -smp cores={cores},threads={threads}"
+ " -netdev tap,id=netmain,script={ifup},downscript={ifdown},ifname={ifname}"
+ " -device virtio-net-pci,netdev=netmain,id=net0,mac={mac}"
+ ).format(
+ owner=self.owner, name=self.uuid,
+ accel=self.accel,
+ image=self.image, image_format=self.image_format,
+ memory=self.memory, cores=self.cores, threads=self.threads,
+ ifup=self.ifup, ifdown=self.ifdown, ifname=self.ifname,
+ mac=self.mac
+ )
+
+ return command.split(" ")
+
+ def create_product(self):
+ """Find a VM host and schedule on it"""
+ pass
+
+ def create(self):
+ # New VM: new UUID, new MAC.
+ self.uuid = str(uuid.uuid4())
+ self.mac=MAC(self.config)
+ self.mac.create()
+
+ qemu_args = self.get_qemu_args()
+ log.debug("QEMU args passed to VMM: {}".format(qemu_args))
+ self.vmm.start(
+ uuid=self.uuid,
+ migration=False,
+ *qemu_args
+ )
+
+
+ self.mac.create()
+ self.vm['mac'] = self.mac
+ self.vm['ifname'] = "uc{}".format(self.mac.__repr__())
+
+ # FIXME: TODO: turn this into a string and THEN
+ # .split() it later -- easier for using .format()
+ #self.vm['commandline'] = [ "{}".format(self.sudo),
+ self.vm['commandline'] = "{sudo}{qemu} -name uncloud-{uuid} -machine pc,accel={accel} -m {memory} -smp {cores} -uuid {uuid} -drive file={os_image},media=cdrom -netdev tap,id=netmain,script={ifup},downscript={ifdown},ifname={ifname} -device virtio-net-pci,netdev=netmain,id=net0,mac={mac}"
+# self.vm['commandline'] = [ "{}".format(self.sudo),
+# "{}".format(self.qemu),
+# "-name", "uncloud-{}".format(self.vm['uuid']),
+# "-machine", "pc,accel={}".format(self.accel),
+# "-m", "{}".format(self.vm['memory']),
+# "-smp", "{}".format(self.vm['cores']),
+# "-uuid", "{}".format(self.vm['uuid']),
+# "-drive", "file={},media=cdrom".format(self.vm['os_image']),
+# "-netdev", "tap,id=netmain,script={},downscript={},ifname={}".format(self.ifup, self.ifdown, self.vm['ifname']),
+# "-device", "virtio-net-pci,netdev=netmain,id=net0,mac={}".format(self.vm['mac'])
+# ]
+
+ def _execute_cmd(self, cmd_string, **kwargs):
+ cmd = cmd_string.format(**self.vm, **kwargs)
+ log.info("Executing: {}".format(cmd))
+ subprocess.run(cmd.split())
+
+ def stop(self):
+ if not self.uuid:
+ print("Please specific an UUID with the --uuid flag.")
+ exit(1)
+
+ self.vmm.stop(self.uuid)
+
+ def status(self):
+ if not self.uuid:
+ print("Please specific an UUID with the --uuid flag.")
+ exit(1)
+
+ print(self.vmm.get_status(self.uuid))
+
+ def vnc_addr(self):
+ if not self.uuid:
+ print("Please specific an UUID with the --uuid flag.")
+ exit(1)
+
+ print(self.vmm.get_vnc(self.uuid))
+
+ def list(self):
+ print(self.vmm.discover())
diff --git a/archive/uncloud_etcd_based/uncloud/host/__init__.py b/archive/uncloud_etcd_based/uncloud/host/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/host/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/archive/uncloud_etcd_based/uncloud/host/main.py b/archive/uncloud_etcd_based/uncloud/host/main.py
new file mode 100755
index 0000000..f680991
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/host/main.py
@@ -0,0 +1,123 @@
+import argparse
+import multiprocessing as mp
+import time
+
+from uuid import uuid4
+
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.common.shared import shared
+from uncloud.common.vm import VMStatus
+from uncloud.vmm import VMM
+from os.path import join as join_path
+
+from . import virtualmachine, logger
+
+arg_parser = argparse.ArgumentParser('host', add_help=False)
+arg_parser.add_argument('--hostname', required=True)
+
+
+def update_heartbeat(hostname):
+ """Update Last HeartBeat Time for :param hostname: in etcd"""
+ host_pool = shared.host_pool
+ this_host = next(
+ filter(lambda h: h.hostname == hostname, host_pool.hosts), None
+ )
+ while True:
+ this_host.update_heartbeat()
+ host_pool.put(this_host)
+ time.sleep(10)
+
+
+def maintenance(host):
+ vmm = VMM()
+ running_vms = vmm.discover()
+ for vm_uuid in running_vms:
+ if vmm.is_running(vm_uuid) and vmm.get_status(vm_uuid) == 'running':
+ logger.debug('VM {} is running on {}'.format(vm_uuid, host))
+ vm = shared.vm_pool.get(
+ join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
+ )
+ vm.status = VMStatus.running
+ vm.vnc_socket = vmm.get_vnc(vm_uuid)
+ vm.hostname = host
+ shared.vm_pool.put(vm)
+
+
+def main(arguments):
+ hostname = arguments['hostname']
+ host_pool = shared.host_pool
+ host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
+
+ # Does not yet exist, create it
+ if not host:
+ host_key = join_path(
+ shared.settings['etcd']['host_prefix'], uuid4().hex
+ )
+ host_entry = {
+ 'specs': '',
+ 'hostname': hostname,
+ 'status': 'DEAD',
+ 'last_heartbeat': '',
+ }
+ shared.etcd_client.put(
+ host_key, host_entry, value_in_json=True
+ )
+
+ # update, get ourselves now for sure
+ host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)
+
+ try:
+ heartbeat_updating_process = mp.Process(target=update_heartbeat, args=(hostname,))
+ heartbeat_updating_process.start()
+ except Exception as e:
+ raise Exception('uncloud-host heartbeat updating mechanism is not working') from e
+
+    # The while True below is necessary for gracefully handling leadership transfer and temporary
+    # unavailability in etcd. It works because get_prefix/watch_prefix return iter([]) -- an iterator
+    # over an empty list -- on exceptions caused by the above conditions, which ends the inner loops
+    # immediately. Keeping them inside an infinite loop means we retry again and again until we
+    # either succeed or the daemon dies.
+ while True:
+ for events_iterator in [
+ shared.etcd_client.get_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
+ raise_exception=False),
+ shared.etcd_client.watch_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
+ raise_exception=False)
+ ]:
+ for request_event in events_iterator:
+ request_event = RequestEntry(request_event)
+
+ maintenance(host.key)
+
+ if request_event.hostname == host.key:
+ logger.debug('VM Request: %s on Host %s', request_event, host.hostname)
+
+ shared.request_pool.client.client.delete(request_event.key)
+ vm_entry = shared.etcd_client.get(
+ join_path(shared.settings['etcd']['vm_prefix'], request_event.uuid)
+ )
+
+ logger.debug('VM hostname: {}'.format(vm_entry.value))
+
+ vm = virtualmachine.VM(vm_entry)
+ if request_event.type == RequestType.StartVM:
+ vm.start()
+
+ elif request_event.type == RequestType.StopVM:
+ vm.stop()
+
+ elif request_event.type == RequestType.DeleteVM:
+ vm.delete()
+
+ elif request_event.type == RequestType.InitVMMigration:
+ vm.start(destination_host_key=host.key)
+
+ elif request_event.type == RequestType.TransferVM:
+ destination_host = host_pool.get(request_event.destination_host_key)
+ if destination_host:
+ vm.migrate(
+ destination_host=destination_host.hostname,
+ destination_sock_path=request_event.destination_sock_path,
+ )
+ else:
+ logger.error('Host %s not found!', request_event.destination_host_key)
diff --git a/archive/uncloud_etcd_based/uncloud/host/virtualmachine.py b/archive/uncloud_etcd_based/uncloud/host/virtualmachine.py
new file mode 100755
index 0000000..a592efc
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/host/virtualmachine.py
@@ -0,0 +1,303 @@
+# QEMU Manual
+# https://qemu.weilnetz.de/doc/qemu-doc.html
+
+# For QEMU Monitor Protocol Commands Information, See
+# https://qemu.weilnetz.de/doc/qemu-doc.html#pcsys_005fmonitor
+
+import os
+import subprocess as sp
+import ipaddress
+
+from string import Template
+from os.path import join as join_path
+
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.common.vm import VMStatus, declare_stopped
+from uncloud.common.network import create_dev, delete_network_interface
+from uncloud.common.schemas import VMSchema, NetworkSchema
+from uncloud.host import logger
+from uncloud.common.shared import shared
+from uncloud.vmm import VMM
+
+from marshmallow import ValidationError
+
+
+class VM:
+ def __init__(self, vm_entry):
+ self.schema = VMSchema()
+ self.vmm = VMM()
+ self.key = vm_entry.key
+ try:
+ self.vm = self.schema.loads(vm_entry.value)
+ except ValidationError:
+ logger.exception(
+ "Couldn't validate VM Entry", vm_entry.value
+ )
+ self.vm = None
+ else:
+ self.uuid = vm_entry.key.split("/")[-1]
+ self.host_key = self.vm["hostname"]
+ logger.debug('VM Hostname {}'.format(self.host_key))
+
+ def get_qemu_args(self):
+ command = (
+ "-drive file={file},format=raw,if=virtio"
+ " -device virtio-rng-pci"
+ " -m {memory} -smp cores={cores},threads={threads}"
+ " -name {owner}_{name}"
+ ).format(
+ owner=self.vm["owner"],
+ name=self.vm["name"],
+ memory=int(self.vm["specs"]["ram"].to_MB()),
+ cores=self.vm["specs"]["cpu"],
+ threads=1,
+ file=shared.storage_handler.qemu_path_string(self.uuid),
+ )
+
+ return command.split(" ")
+
+ def start(self, destination_host_key=None):
+ migration = False
+ if destination_host_key:
+ migration = True
+
+ self.create()
+ try:
+ network_args = self.create_network_dev()
+ except Exception as err:
+ declare_stopped(self.vm)
+ self.vm["log"].append("Cannot Setup Network Properly")
+ logger.error("Cannot Setup Network Properly for vm %s", self.uuid, exc_info=err)
+ else:
+ self.vmm.start(
+ uuid=self.uuid,
+ migration=migration,
+ *self.get_qemu_args(),
+ *network_args
+ )
+
+ status = self.vmm.get_status(self.uuid)
+ logger.debug('VM {} status is {}'.format(self.uuid, status))
+ if status == "running":
+ self.vm["status"] = VMStatus.running
+ self.vm["vnc_socket"] = self.vmm.get_vnc(self.uuid)
+ elif status == "inmigrate":
+ r = RequestEntry.from_scratch(
+ type=RequestType.TransferVM, # Transfer VM
+ hostname=self.host_key, # Which VM should get this request. It is source host
+ uuid=self.uuid, # uuid of VM
+ destination_sock_path=join_path(
+ self.vmm.socket_dir, self.uuid
+ ),
+ destination_host_key=destination_host_key, # Where source host transfer VM
+ request_prefix=shared.settings["etcd"]["request_prefix"],
+ )
+ shared.request_pool.put(r)
+ else:
+ self.stop()
+ declare_stopped(self.vm)
+ logger.debug('VM {} has hostname {}'.format(self.uuid, self.vm['hostname']))
+ self.sync()
+
+ def stop(self):
+ self.vmm.stop(self.uuid)
+ self.delete_network_dev()
+ declare_stopped(self.vm)
+ self.sync()
+
+ def migrate(self, destination_host, destination_sock_path):
+ self.vmm.transfer(
+ src_uuid=self.uuid,
+ destination_sock_path=destination_sock_path,
+ host=destination_host,
+ )
+
+ def create_network_dev(self):
+ command = ""
+ for network_mac_and_tap in self.vm["network"]:
+ network_name, mac, tap = network_mac_and_tap
+
+ _key = os.path.join(
+ shared.settings["etcd"]["network_prefix"],
+ self.vm["owner"],
+ network_name,
+ )
+ network = shared.etcd_client.get(_key, value_in_json=True)
+ network_schema = NetworkSchema()
+ try:
+ network = network_schema.load(network.value)
+ except ValidationError:
+ continue
+
+ if network["type"] == "vxlan":
+ tap = create_vxlan_br_tap(
+ _id=network["id"],
+ _dev=shared.settings["network"]["vxlan_phy_dev"],
+ tap_id=tap,
+ ip=network["ipv6"],
+ )
+
+ all_networks = shared.etcd_client.get_prefix(
+ shared.settings["etcd"]["network_prefix"],
+ value_in_json=True,
+ )
+
+ if ipaddress.ip_network(network["ipv6"]).is_global:
+ update_radvd_conf(all_networks)
+
+ command += (
+ "-netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
+ " -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}".format(
+ tap=tap, net_id=network["id"], mac=mac
+ )
+ )
+
+ if command:
+ command = command.split(' ')
+
+ return command
+
+ def delete_network_dev(self):
+ try:
+ for network in self.vm["network"]:
+ network_name = network[0]
+ _ = network[1] # tap_mac
+ tap_id = network[2]
+
+ delete_network_interface("tap{}".format(tap_id))
+
+ owners_vms = shared.vm_pool.by_owner(self.vm["owner"])
+ owners_running_vms = shared.vm_pool.by_status(
+ VMStatus.running, _vms=owners_vms
+ )
+
+ networks = map(
+ lambda n: n[0],
+ map(lambda vm: vm.network, owners_running_vms),
+ )
+ networks_in_use_by_user_vms = [vm[0] for vm in networks]
+ if network_name not in networks_in_use_by_user_vms:
+ network_entry = resolve_network(
+ network[0], self.vm["owner"]
+ )
+ if network_entry:
+ network_type = network_entry.value["type"]
+ network_id = network_entry.value["id"]
+ if network_type == "vxlan":
+ delete_network_interface(
+ "br{}".format(network_id)
+ )
+ delete_network_interface(
+ "vxlan{}".format(network_id)
+ )
+ except Exception:
+ logger.exception("Exception in network interface deletion")
+
+ def create(self):
+ if shared.storage_handler.is_vm_image_exists(self.uuid):
+            # File already exists. No problem, continue.
+ logger.debug("Image for vm %s exists", self.uuid)
+ else:
+ if shared.storage_handler.make_vm_image(
+ src=self.vm["image_uuid"], dest=self.uuid
+ ):
+ if not shared.storage_handler.resize_vm_image(
+ path=self.uuid,
+ size=int(self.vm["specs"]["os-ssd"].to_MB()),
+ ):
+ self.vm["status"] = VMStatus.error
+ else:
+ logger.info("New VM Created")
+
+ def sync(self):
+ shared.etcd_client.put(
+ self.key, self.schema.dump(self.vm), value_in_json=True
+ )
+
+ def delete(self):
+ self.stop()
+
+ if shared.storage_handler.is_vm_image_exists(self.uuid):
+ r_status = shared.storage_handler.delete_vm_image(self.uuid)
+ if r_status:
+ shared.etcd_client.client.delete(self.key)
+ else:
+ shared.etcd_client.client.delete(self.key)
+
+
+def resolve_network(network_name, network_owner):
+ network = shared.etcd_client.get(
+ join_path(
+ shared.settings["etcd"]["network_prefix"],
+ network_owner,
+ network_name,
+ ),
+ value_in_json=True,
+ )
+ return network
+
+
+def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
+ network_script_base = os.path.join(
+ os.path.dirname(os.path.dirname(__file__)), "network"
+ )
+ vxlan = create_dev(
+ script=os.path.join(network_script_base, "create-vxlan.sh"),
+ _id=_id,
+ dev=_dev,
+ )
+ if vxlan:
+ bridge = create_dev(
+ script=os.path.join(
+ network_script_base, "create-bridge.sh"
+ ),
+ _id=_id,
+ dev=vxlan,
+ ip=ip,
+ )
+ if bridge:
+ tap = create_dev(
+ script=os.path.join(
+ network_script_base, "create-tap.sh"
+ ),
+ _id=str(tap_id),
+ dev=bridge,
+ )
+ if tap:
+ return tap
+
+
+def update_radvd_conf(all_networks):
+ network_script_base = os.path.join(
+ os.path.dirname(os.path.dirname(__file__)), "network"
+ )
+
+ networks = {
+ net.value["ipv6"]: net.value["id"]
+ for net in all_networks
+ if net.value.get("ipv6")
+ and ipaddress.ip_network(net.value.get("ipv6")).is_global
+ }
+ radvd_template = open(
+ os.path.join(network_script_base, "radvd-template.conf"), "r"
+ ).read()
+ radvd_template = Template(radvd_template)
+
+ content = [
+ radvd_template.safe_substitute(
+ bridge="br{}".format(networks[net]), prefix=net
+ )
+ for net in networks
+ if networks.get(net)
+ ]
+ with open("/etc/radvd.conf", "w") as radvd_conf:
+ radvd_conf.writelines(content)
+ try:
+ sp.check_output(["systemctl", "restart", "radvd"])
+ except sp.CalledProcessError:
+ try:
+ sp.check_output(["service", "radvd", "restart"])
+ except sp.CalledProcessError as err:
+ raise err.__class__(
+ "Cannot start/restart radvd service", err.cmd
+ ) from err
diff --git a/archive/uncloud_etcd_based/uncloud/imagescanner/__init__.py b/archive/uncloud_etcd_based/uncloud/imagescanner/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/imagescanner/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/archive/uncloud_etcd_based/uncloud/imagescanner/main.py b/archive/uncloud_etcd_based/uncloud/imagescanner/main.py
new file mode 100755
index 0000000..ee9da2e
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/imagescanner/main.py
@@ -0,0 +1,121 @@
+import json
+import os
+import argparse
+import subprocess as sp
+
+from os.path import join as join_path
+from uncloud.common.shared import shared
+from uncloud.imagescanner import logger
+
+
+arg_parser = argparse.ArgumentParser('imagescanner', add_help=False)
+
+
+def qemu_img_type(path):
+ qemu_img_info_command = [
+ "qemu-img",
+ "info",
+ "--output",
+ "json",
+ path,
+ ]
+ try:
+ qemu_img_info = sp.check_output(qemu_img_info_command)
+ except Exception as e:
+ logger.exception(e)
+ return None
+ else:
+ qemu_img_info = json.loads(qemu_img_info.decode("utf-8"))
+ return qemu_img_info["format"]
+
+
+def main(arguments):
+ # We want to get images entries that requests images to be created
+ images = shared.etcd_client.get_prefix(
+ shared.settings["etcd"]["image_prefix"], value_in_json=True
+ )
+ images_to_be_created = list(
+ filter(lambda im: im.value["status"] == "TO_BE_CREATED", images)
+ )
+
+ for image in images_to_be_created:
+ try:
+ image_uuid = image.key.split("/")[-1]
+ image_owner = image.value["owner"]
+ image_filename = image.value["filename"]
+ image_store_name = image.value["store_name"]
+ image_full_path = join_path(
+ shared.settings["storage"]["file_dir"],
+ image_owner,
+ image_filename,
+ )
+
+ image_stores = shared.etcd_client.get_prefix(
+ shared.settings["etcd"]["image_store_prefix"],
+ value_in_json=True,
+ )
+ user_image_store = next(
+ filter(
+ lambda s, store_name=image_store_name: s.value[
+ "name"
+ ]
+ == store_name,
+ image_stores,
+ )
+ )
+
+ image_store_pool = user_image_store.value["attributes"][
+ "pool"
+ ]
+
+ except Exception as e:
+ logger.exception(e)
+ else:
+ # At least our basic data is available
+ qemu_img_convert_command = [
+ "qemu-img",
+ "convert",
+ "-f",
+ "qcow2",
+ "-O",
+ "raw",
+ image_full_path,
+ "image.raw",
+ ]
+
+ if qemu_img_type(image_full_path) == "qcow2":
+ try:
+ # Convert .qcow2 to .raw
+ sp.check_output(qemu_img_convert_command,)
+
+ except sp.CalledProcessError:
+ logger.exception(
+ "Image convertion from .qcow2 to .raw failed."
+ )
+ else:
+ # Import and Protect
+ r_status = shared.storage_handler.import_image(
+ src="image.raw", dest=image_uuid, protect=True
+ )
+ if r_status:
+ # Everything is successfully done
+ image.value["status"] = "CREATED"
+ shared.etcd_client.put(
+ image.key, json.dumps(image.value)
+ )
+ finally:
+ try:
+ os.remove("image.raw")
+ except Exception:
+ pass
+
+ else:
+ # The user provided image is either not found or of invalid format
+ image.value["status"] = "INVALID_IMAGE"
+ shared.etcd_client.put(
+ image.key, json.dumps(image.value)
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/archive/uncloud_etcd_based/uncloud/metadata/__init__.py b/archive/uncloud_etcd_based/uncloud/metadata/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/metadata/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/archive/uncloud_etcd_based/uncloud/metadata/main.py b/archive/uncloud_etcd_based/uncloud/metadata/main.py
new file mode 100644
index 0000000..374260e
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/metadata/main.py
@@ -0,0 +1,95 @@
+import os
+import argparse
+
+from flask import Flask, request
+from flask_restful import Resource, Api
+from werkzeug.exceptions import HTTPException
+
+from uncloud.common.shared import shared
+
+app = Flask(__name__)
+api = Api(app)
+
+app.logger.handlers.clear()
+
+DEFAULT_PORT=1234
+
+arg_parser = argparse.ArgumentParser('metadata', add_help=False)
+arg_parser.add_argument('--port', '-p', default=DEFAULT_PORT, help='By default bind to port {}'.format(DEFAULT_PORT))
+
+
+@app.errorhandler(Exception)
+def handle_exception(e):
+ app.logger.error(e)
+ # pass through HTTP errors
+ if isinstance(e, HTTPException):
+ return e
+
+ # now you're handling non-HTTP exceptions only
+ return {"message": "Server Error"}, 500
+
+
+def get_vm_entry(mac_addr):
+ return next(
+ filter(
+ lambda vm: mac_addr in list(zip(*vm.network))[1],
+ shared.vm_pool.vms,
+ ),
+ None,
+ )
+
+
+# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
+def ipv62mac(ipv6):
+ # remove subnet info if given
+ subnet_index = ipv6.find("/")
+ if subnet_index != -1:
+ ipv6 = ipv6[:subnet_index]
+
+ ipv6_parts = ipv6.split(":")
+ mac_parts = list()
+ for ipv6_part in ipv6_parts[-4:]:
+ while len(ipv6_part) < 4:
+ ipv6_part = "0" + ipv6_part
+ mac_parts.append(ipv6_part[:2])
+ mac_parts.append(ipv6_part[-2:])
+
+ # modify parts to match MAC value
+ mac_parts[0] = "%02x" % (int(mac_parts[0], 16) ^ 2)
+ del mac_parts[4]
+ del mac_parts[3]
+ return ":".join(mac_parts)
+
+
+class Root(Resource):
+ @staticmethod
+ def get():
+ data = get_vm_entry(ipv62mac(request.remote_addr))
+
+ if not data:
+ return (
+ {"message": "Metadata for such VM does not exists."},
+ 404,
+ )
+ else:
+ etcd_key = os.path.join(
+ shared.settings["etcd"]["user_prefix"],
+ data.value["owner_realm"],
+ data.value["owner"],
+ "key",
+ )
+ etcd_entry = shared.etcd_client.get_prefix(
+ etcd_key, value_in_json=True
+ )
+ user_personal_ssh_keys = [key.value for key in etcd_entry]
+ data.value["metadata"]["ssh-keys"] += user_personal_ssh_keys
+ return data.value["metadata"], 200
+
+
+api.add_resource(Root, "/")
+
+
+def main(arguments):
+ port = arguments['port']
+ debug = arguments['debug']
+ app.run(debug=debug, host="::", port=port)
diff --git a/archive/uncloud_etcd_based/uncloud/network/README b/archive/uncloud_etcd_based/uncloud/network/README
new file mode 100644
index 0000000..dca25d1
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/network/README
@@ -0,0 +1,195 @@
+The network base - experimental
+
+
+We want to have 1 "main" network for convience.
+
+We want to be able to create networks automatically, once a new
+customer is created -> need hooks!
+
+
+Mapping:
+
+- each network is a "virtual" network. We use vxlan by default, but
+ could be any technology!
+- we need a counter for vxlan mappings / network IDs -> cannot use
+
+Model in etcd:
+
+/v1/networks/
+
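+A minimal sketch of the network-ID counter mentioned above, assuming
+python-etcd3 and a hypothetical /v1/networks/next_vni key (not the
+actual implementation):
+
+import etcd3
+
+def reserve_next_vni(client, key='/v1/networks/next_vni'):
+    # compare-and-swap loop: two concurrent callers never get the same VNI
+    while True:
+        current, _ = client.get(key)
+        if current is None:
+            created, _ = client.transaction(
+                compare=[client.transactions.version(key) == 0],
+                success=[client.transactions.put(key, '1')],
+                failure=[])
+            if created:
+                return 0
+            continue
+        swapped, _ = client.transaction(
+            compare=[client.transactions.value(key) == current],
+            success=[client.transactions.put(key, str(int(current) + 1))],
+            failure=[])
+        if swapped:
+            return int(current)
+
+# client = etcd3.client()
+# vni = reserve_next_vni(client)   # 0, 1, 2, ... across all processes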
+
+Tests
+see
+https://vincent.bernat.ch/en/blog/2017-vxlan-linux
+
+
+# local 2001:db8:1::1 \
+
+
+netid=100
+dev=wlp2s0
+dev=wlp0s20f3
+ip -6 link add vxlan${netid} type vxlan \
+ id ${netid} \
+ dstport 4789 \
+ group ff05::${netid} \
+ dev ${dev} \
+ ttl 5
+
+[root@diamond ~]# ip addr add 2a0a:e5c0:5::1/48 dev vxlan100
+root@manager:~/.ssh# ip addr add 2a0a:e5c0:5::2/48 dev vxlan100
+root@manager:~/.ssh# ping -c3 2a0a:e5c0:5::1
+PING 2a0a:e5c0:5::1(2a0a:e5c0:5::1) 56 data bytes
+64 bytes from 2a0a:e5c0:5::1: icmp_seq=1 ttl=64 time=15.6 ms
+64 bytes from 2a0a:e5c0:5::1: icmp_seq=2 ttl=64 time=30.3 ms
+64 bytes from 2a0a:e5c0:5::1: icmp_seq=3 ttl=64 time=84.4 ms
+
+--- 2a0a:e5c0:5::1 ping statistics ---
+3 packets transmitted, 3 received, 0% packet loss, time 2003ms
+rtt min/avg/max/mdev = 15.580/43.437/84.417/29.594 ms
+
+--> work even via wifi
+
+
+--------------------------------------------------------------------------------
+
+Creating a network:
+
+1) part of the initialisation / demo data (?)
+
+We should probably provide some demo sets that can easily be used.
+
+2) manual/hook based request
+
+- hosts might have different network interfaces (?)
+ -> this will make things very tricky -> don't support it
+- endpoint needs only support
+
+--------------------------------------------------------------------------------
+
+IPAM
+
+IP address management (IPAM) is related to networks, but needs to be
+decoupled to allow pure L2 networks.
+
+From a customer point of view, we probably want to do something like:
+
+- ORDERING an IPv6 network can include creating a virtual network and
+ an IPAM service
+
+Maybe "orders" should always be the first class citizen and ucloud
+internally "hooks" or binds things together.
+
+--------------------------------------------------------------------------------
+
+testing / hacking:
+
+- starting etcd as storage
+
+
+[18:07] diamond:~% etcdctl put /v1/network/200 "{ some_network }"
+OK
+[18:08] diamond:~% etcdctl watch -w=json --prefix /v1/network
+{"Header":{"cluster_id":14841639068965178418,"member_id":10276657743932975437,"revision":6,"raft_term":2},"Events":[{"kv":{"key":"L3YxL25ldHdvcmsvMjAw","create_revision":5,"mod_revision":6,"version":2,"value":"eyBzb21lX25ldHdvcmsgfQ=="}}],"CompactRevision":0,"Canceled":false,"Created":false}
+
+
+--------------------------------------------------------------------------------
+
+Flow for using and creating networks:
+
+- a network is created -> entry in etcd is created
+ -> we need to keep a counter/lock so that 2 processes don't create
+ the same network [Ahmed]
+ -> nothing to be done on the hosts
+- a VM using a network is created
+- a VM using a network is scheduled to some host
+- the local "spawn a VM" process needs to check whether there is a
+ vxlan interface existing -> if no, create it before creating the VM.
+ -> if no, also create the bridge
+ -> possibly adjusting the MTU (??)
+   -> both names should be in hexadecimal (e.g. brff01 or vxlanff01)
+ --> this way they are consistent with the multicast ipv6 address
+ --> attention, ip -6 link ... id XXX expects DECIMAL input
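+
+Compact Python equivalent of that check (the same flow the create-vxlan.sh /
+create-bridge.sh helpers implement; illustrative only):
+
+import ipaddress, subprocess
+
+def ensure_vxlan_bridge(vni, uplink='eth0'):
+    vxlan = 'vxlan{:x}'.format(vni)                 # interface names in hex
+    bridge = 'br{:x}'.format(vni)
+    group = ipaddress.IPv6Network('ff05::/104')[vni]
+    if subprocess.run(['ip', 'link', 'show', vxlan],
+                      capture_output=True).returncode != 0:
+        subprocess.run(['ip', '-6', 'link', 'add', vxlan, 'type', 'vxlan',
+                        'id', str(vni),             # ip expects the id in decimal
+                        'dstport', '4789', 'group', str(group),
+                        'dev', uplink, 'ttl', '5'], check=True)
+        subprocess.run(['ip', 'link', 'add', bridge, 'type', 'bridge'], check=True)
+        subprocess.run(['ip', 'link', 'set', vxlan, 'master', bridge], check=True)
+        for dev in (vxlan, bridge):
+            subprocess.run(['ip', 'link', 'set', dev, 'up'], check=True)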
+
+--------------------------------------------------------------------------------
+If we also supply IPAM:
+
+- ipam needs to be created *after* the network is created
+- ipam is likely to be coupled to netbox (?)
+ --> we need a "get next /64 prefix" function
+- when an ipam service is created in etcd, we need to create a new
+ radvd instance on all routers (this will be a different service on
+ BSDs)
+- we will need to create a new vxlan device on the routers
+- we need to create a new / modify radvd.conf
+- only after all of the routers reloaded radvd the ipam service is
+ available!
+
+
+--------------------------------------------------------------------------------
+If the user requests an IPv4 VM:
+
+- we need to get the next free IPv4 address (again, netbox?)
+- we need to create a mapping entry on the routers for NAT64
+ --> this requires the VM to be in a network with IPAM
+ --> we always assume that the VM embeds itself using EUI64
+
+
+
+--------------------------------------------------------------------------------
+mac address handling!
+
+Example
+
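+A sketch of the EUI-64 derivation assumed above (helper name is made up;
+compare ipv62mac() in uncloud/metadata/main.py for the reverse direction):
+
+import ipaddress
+
+def mac_to_eui64(mac, prefix='fe80::/64'):
+    parts = [int(p, 16) for p in mac.split(':')]
+    parts[0] ^= 2                                   # flip the universal/local bit
+    eui64 = parts[:3] + [0xff, 0xfe] + parts[3:]    # insert ff:fe in the middle
+    host = sum(b << (8 * (7 - i)) for i, b in enumerate(eui64))
+    net = ipaddress.IPv6Network(prefix)
+    return (net.network_address + host).compressed
+
+# mac_to_eui64('02:00:b9:cb:70:10')                    -> 'fe80::b9ff:fecb:7010'
+# mac_to_eui64('02:00:b9:cb:70:10', '2001:db8:1::/64') -> '2001:db8:1::b9ff:fecb:7010'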
+--------------------------------------------------------------------------------
+
+TODOs
+
+- create-vxlan-on-dev.sh -> the multicast group
+ needs to be ff05:: +int(vxlan_id)
+
+--------------------------------------------------------------------------------
+
+Python hints:
+
+>>> vxlan_id = 3400
+>>> b = ipaddress.IPv6Network("ff05::/16")
+>>> b[vxlan_id]
+IPv6Address('ff05::d48')
+
+we need / should assign hex values for vxlan ids in etcd!
+--> easier to read
+
+>>> b[0x3400]
+IPv6Address('ff05::3400')
+
+
+--------------------------------------------------------------------------------
+
+Bridge names are limited to 15 characters
+
+
+Maximum/highest number of vxlan:
+
+>>> 2**24
+16777216
+>>> (2**25)-1
+33554431
+
+>>> b[33554431]
+IPv6Address('ff05::1ff:ffff')
+
+Last interface:
+br1ffffff
+vxlan1ffffff
+
+root@manager:~/ucloud/network# ip -6 link add vxlan1ffffff type vxlan id 33554431 dstport 4789 group ff05::1ff:ffff dev wlp2s0 ttl 5
+Error: argument "33554431" is wrong: invalid id
+
+root@manager:~/ucloud/network# ip -6 link add vxlanffffff type vxlan id 16777215 dstport 4789 group ff05::ff:ffff dev wlp2s0 ttl 5
+
+
+# id needs to be decimal
+root@manager:~# ip -6 link add vxlanff01 type vxlan id ff01 dstport 4789 group ff05::ff01 dev ttl 5
+Error: argument "ff01" is wrong: invalid id
+root@manager:~# ip -6 link add vxlanff01 type vxlan id 65281 dstport 4789 group ff05::ff01 dev wlp2s0 ttl 5
diff --git a/uncloud/uncloud_pay/migrations/__init__.py b/archive/uncloud_etcd_based/uncloud/network/__init__.py
similarity index 100%
rename from uncloud/uncloud_pay/migrations/__init__.py
rename to archive/uncloud_etcd_based/uncloud/network/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/network/create-bridge.sh b/archive/uncloud_etcd_based/uncloud/network/create-bridge.sh
new file mode 100755
index 0000000..bdd8f75
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/network/create-bridge.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+if [ $# -ne 3 ]; then
+ echo "$0 brid dev ip"
+ echo "f.g. $0 100 vxlan100 fd00:/64"
+ echo "Missing arguments" >&2
+ exit 1
+fi
+
+brid=$1; shift
+dev=$1; shift
+ip=$1; shift
+bridge=br${brid}
+
+sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+if ! ip link show $bridge > /dev/null 2> /dev/null; then
+ ip link add name $bridge type bridge
+ ip link set $bridge up
+ ip link set $dev master $bridge
+ ip address add $ip dev $bridge
+fi
+
+echo $bridge
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/network/create-tap.sh b/archive/uncloud_etcd_based/uncloud/network/create-tap.sh
new file mode 100755
index 0000000..4a5e470
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/network/create-tap.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+if [ $# -ne 2 ]; then
+ echo "$0 tapid dev"
+ echo "f.g. $0 100 br100"
+ echo "Missing arguments" >&2
+ exit 1
+fi
+
+tapid=$1; shift
+bridge=$1; shift
+vxlan=vxlan${tapid}
+tap=tap${tapid}
+
+if ! ip link show $tap > /dev/null 2> /dev/null; then
+ ip tuntap add $tap mode tap user `whoami`
+ ip link set $tap up
+ sleep 0.5s
+ ip link set $tap master $bridge
+fi
+
+echo $tap
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/network/create-vxlan.sh b/archive/uncloud_etcd_based/uncloud/network/create-vxlan.sh
new file mode 100755
index 0000000..1a730f6
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/network/create-vxlan.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+if [ $# -ne 2 ]; then
+ echo "$0 vxlanid dev"
+ echo "f.i. $0 100 eno1"
+ echo "Missing arguments" >&2
+ exit 1
+fi
+
+netid=$1; shift
+dev=$1; shift
+vxlan=vxlan${netid}
+
+if ! ip link show $vxlan > /dev/null 2> /dev/null; then
+ ip -6 link add $vxlan type vxlan \
+ id $netid \
+ dstport 4789 \
+ group ff05::$netid \
+ dev $dev \
+ ttl 5
+
+ ip link set $dev up
+ ip link set $vxlan up
+fi
+
+echo $vxlan
\ No newline at end of file
diff --git a/archive/uncloud_etcd_based/uncloud/network/radvd-template.conf b/archive/uncloud_etcd_based/uncloud/network/radvd-template.conf
new file mode 100644
index 0000000..8afc9bd
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/network/radvd-template.conf
@@ -0,0 +1,13 @@
+interface $bridge
+{
+ AdvSendAdvert on;
+ MinRtrAdvInterval 3;
+ MaxRtrAdvInterval 5;
+ AdvDefaultLifetime 10;
+
+ prefix $prefix { };
+
+ RDNSS 2a0a:e5c0:2:1::5 2a0a:e5c0:2:1::6 { AdvRDNSSLifetime 6000; };
+ DNSSL place6.ungleich.ch { AdvDNSSLLifetime 6000; } ;
+};
+
diff --git a/archive/uncloud_etcd_based/uncloud/oneshot/__init__.py b/archive/uncloud_etcd_based/uncloud/oneshot/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/oneshot/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/archive/uncloud_etcd_based/uncloud/oneshot/main.py b/archive/uncloud_etcd_based/uncloud/oneshot/main.py
new file mode 100644
index 0000000..dbb3b32
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/oneshot/main.py
@@ -0,0 +1,123 @@
+import argparse
+import os
+
+
+from pathlib import Path
+from uncloud.vmm import VMM
+from uncloud.host.virtualmachine import update_radvd_conf, create_vxlan_br_tap
+
+from . import virtualmachine, logger
+
+###
+# Argument parser loaded by scripts/uncloud.
+arg_parser = argparse.ArgumentParser('oneshot', add_help=False)
+
+# Actions.
+arg_parser.add_argument('--list', action='store_true',
+ help='list UUID and name of running VMs')
+arg_parser.add_argument('--start', nargs=4,
+ metavar=('NAME', 'IMAGE', 'UPSTREAM_INTERFACE', 'NETWORK'),
+ help='start a VM using the OS IMAGE (full path), configuring networking on NETWORK IPv6 prefix')
+arg_parser.add_argument('--stop', metavar='UUID',
+ help='stop a VM')
+arg_parser.add_argument('--get-status', metavar='UUID',
+ help='return the status of the VM')
+arg_parser.add_argument('--get-vnc', metavar='UUID',
+ help='return the path of the VNC socket of the VM')
+arg_parser.add_argument('--reconfigure-radvd', metavar='NETWORK',
+ help='regenerate and reload RADVD configuration for NETWORK IPv6 prefix')
+
+# Arguments.
+arg_parser.add_argument('--workdir', default=Path.home(),
+ help='Working directory, defaulting to $HOME')
+arg_parser.add_argument('--mac',
+ help='MAC address of the VM to create (--start)')
+arg_parser.add_argument('--memory', type=int,
+ help='Memory (MB) to allocate (--start)')
+arg_parser.add_argument('--cores', type=int,
+ help='Number of cores to allocate (--start)')
+arg_parser.add_argument('--threads', type=int,
+ help='Number of threads to allocate (--start)')
+arg_parser.add_argument('--image-format', choices=['raw', 'qcow2'],
+ help='Format of OS image (--start)')
+arg_parser.add_argument('--accel', choices=['kvm', 'tcg'], default='kvm',
+ help='QEMU acceleration to use (--start)')
+arg_parser.add_argument('--upstream-interface', default='eth0',
+ help='Name of upstream interface (--start)')
+
+###
+# Helpers.
+
+# XXX: check if it is possible to use the type returned by ETCD queries.
+class UncloudEntryWrapper:
+ def __init__(self, value):
+ self.value = value
+
+def status_line(vm):
+ return "VM: {} {} {}".format(vm.get_uuid(), vm.get_name(), vm.get_status())
+
+###
+# Entrypoint.
+
+def main(arguments):
+ # Initialize VMM.
+ workdir = arguments['workdir']
+ vmm = VMM(vmm_backend=workdir)
+
+    # Hardcoded debug values.
+ net_id = 0
+
+ # Build VM configuration.
+ vm_config = {}
+ vm_options = [
+ 'mac', 'memory', 'cores', 'threads', 'image', 'image_format',
+        'upstream_interface', 'network', 'accel'
+ ]
+ for option in vm_options:
+ if arguments.get(option):
+ vm_config[option] = arguments[option]
+
+ vm_config['net_id'] = net_id
+
+ # Execute requested VM action.
+ if arguments['reconfigure_radvd']:
+ # TODO: check that RADVD is available.
+ prefix = arguments['reconfigure_radvd']
+ network = UncloudEntryWrapper({
+ 'id': net_id,
+ 'ipv6': prefix
+ })
+
+ # Make use of uncloud.host.virtualmachine for network configuration.
+ update_radvd_conf([network])
+ elif arguments['start']:
+ # Extract from --start positional arguments. Quite fragile.
+ vm_config['name'] = arguments['start'][0]
+ vm_config['image'] = arguments['start'][1]
+        vm_config['upstream_interface'] = arguments['start'][2]
+        vm_config['network'] = arguments['start'][3]
+
+ vm_config['tap_interface'] = "uc{}".format(len(vmm.discover()))
+ vm = virtualmachine.VM(vmm, vm_config)
+ vm.start()
+ elif arguments['stop']:
+ vm = virtualmachine.VM(vmm, {'uuid': arguments['stop']})
+ vm.stop()
+ elif arguments['get_status']:
+ vm = virtualmachine.VM(vmm, {'uuid': arguments['get_status']})
+ print(status_line(vm))
+ elif arguments['get_vnc']:
+ vm = virtualmachine.VM(vmm, {'uuid': arguments['get_vnc']})
+ print(vm.get_vnc_addr())
+ elif arguments['list']:
+ vms = vmm.discover()
+ print("Found {} VMs.".format(len(vms)))
+ for uuid in vms:
+ vm = virtualmachine.VM(vmm, {'uuid': uuid})
+ print(status_line(vm))
+ else:
+        print('Please specify an action: --start, --stop, --list, \
+--get-status, --get-vnc, --reconfigure-radvd')
diff --git a/archive/uncloud_etcd_based/uncloud/oneshot/virtualmachine.py b/archive/uncloud_etcd_based/uncloud/oneshot/virtualmachine.py
new file mode 100644
index 0000000..5749bee
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/oneshot/virtualmachine.py
@@ -0,0 +1,81 @@
+import uuid
+import os
+
+from uncloud.host.virtualmachine import create_vxlan_br_tap
+from uncloud.oneshot import logger
+
+class VM(object):
+ def __init__(self, vmm, config):
+ self.config = config
+ self.vmm = vmm
+
+ # Extract VM specs/metadata from configuration.
+ self.name = config.get('name', 'no-name')
+ self.memory = config.get('memory', 1024)
+ self.cores = config.get('cores', 1)
+ self.threads = config.get('threads', 1)
+ self.image_format = config.get('image_format', 'qcow2')
+ self.image = config.get('image')
+ self.uuid = config.get('uuid', str(uuid.uuid4()))
+ self.mac = config.get('mac')
+ self.accel = config.get('accel', 'kvm')
+
+ self.net_id = config.get('net_id', 0)
+ self.upstream_interface = config.get('upstream_interface', 'eth0')
+ self.tap_interface = config.get('tap_interface', 'uc0')
+ self.network = config.get('network')
+
+ def get_qemu_args(self):
+ command = (
+ "-uuid {uuid} -name {name} -machine pc,accel={accel}"
+ " -drive file={image},format={image_format},if=virtio"
+ " -device virtio-rng-pci"
+ " -m {memory} -smp cores={cores},threads={threads}"
+ " -netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
+ " -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}"
+ ).format(
+ uuid=self.uuid, name=self.name, accel=self.accel,
+ image=self.image, image_format=self.image_format,
+ memory=self.memory, cores=self.cores, threads=self.threads,
+ net_id=self.net_id, tap=self.tap_interface, mac=self.mac
+ )
+
+ return command.split(" ")
+
+ def start(self):
+ # Check that VM image is available.
+ if not os.path.isfile(self.image):
+     logger.error("Image {} does not exist. Aborting.".format(self.image))
+     return
+
+ # Create Bridge, VXLAN and tap interface for VM.
+ create_vxlan_br_tap(
+ self.net_id, self.upstream_interface, self.tap_interface, self.network
+ )
+
+ # Generate config for and run QEMU.
+ qemu_args = self.get_qemu_args()
+ logger.debug("QEMU args for VM {}: {}".format(self.uuid, qemu_args))
+ self.vmm.start(
+ uuid=self.uuid,
+ migration=False,
+ *qemu_args
+ )
+
+ def stop(self):
+ self.vmm.stop(self.uuid)
+
+ def get_status(self):
+ return self.vmm.get_status(self.uuid)
+
+ def get_vnc_addr(self):
+ return self.vmm.get_vnc(self.uuid)
+
+ def get_uuid(self):
+ return self.uuid
+
+ def get_name(self):
+ success, json = self.vmm.execute_command(self.uuid, 'query-name')
+ if success:
+ return json['return']['name']
+
+ return None
diff --git a/archive/uncloud_etcd_based/uncloud/scheduler/__init__.py b/archive/uncloud_etcd_based/uncloud/scheduler/__init__.py
new file mode 100644
index 0000000..eea436a
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/scheduler/__init__.py
@@ -0,0 +1,3 @@
+import logging
+
+logger = logging.getLogger(__name__)
diff --git a/archive/uncloud_etcd_based/uncloud/scheduler/helper.py b/archive/uncloud_etcd_based/uncloud/scheduler/helper.py
new file mode 100755
index 0000000..79db322
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/scheduler/helper.py
@@ -0,0 +1,137 @@
+from collections import Counter
+from functools import reduce
+
+import bitmath
+
+from uncloud.common.host import HostStatus
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.common.vm import VMStatus
+from uncloud.common.shared import shared
+
+
+def accumulated_specs(vms_specs):
+ if not vms_specs:
+ return {}
+ return reduce((lambda x, y: Counter(x) + Counter(y)), vms_specs)
+
+
+def remaining_resources(host_specs, vms_specs):
+ # Return the remaining resources, i.e. host_specs - vms_specs
+
+ _vms_specs = Counter(vms_specs)
+ _remaining = Counter(host_specs)
+
+ for component in _vms_specs:
+ if isinstance(_vms_specs[component], str):
+ _vms_specs[component] = int(
+ bitmath.parse_string_unsafe(
+ _vms_specs[component]
+ ).to_MB()
+ )
+ elif isinstance(_vms_specs[component], list):
+ _vms_specs[component] = map(
+ lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
+ _vms_specs[component],
+ )
+ _vms_specs[component] = reduce(
+ lambda x, y: x + y, _vms_specs[component], 0
+ )
+
+ for component in _remaining:
+ if isinstance(_remaining[component], str):
+ _remaining[component] = int(
+ bitmath.parse_string_unsafe(
+ _remaining[component]
+ ).to_MB()
+ )
+ elif isinstance(_remaining[component], list):
+ _remaining[component] = map(
+ lambda x: int(bitmath.parse_string_unsafe(x).to_MB()),
+ _remaining[component],
+ )
+ _remaining[component] = reduce(
+ lambda x, y: x + y, _remaining[component], 0
+ )
+
+ _remaining.subtract(_vms_specs)
+
+ return _remaining
+
+
+class NoSuitableHostFound(Exception):
+ """Exception when no host found that can host a VM."""
+
+
+def get_suitable_host(vm_specs, hosts=None):
+ if hosts is None:
+ hosts = shared.host_pool.by_status(HostStatus.alive)
+
+ for host in hosts:
+ # Filter them by host_name
+ vms = shared.vm_pool.by_host(host.key)
+
+ # Filter them by status
+ vms = shared.vm_pool.by_status(VMStatus.running, vms)
+
+ running_vms_specs = [vm.specs for vm in vms]
+
+ # Accumulate all of their combined specs
+ running_vms_accumulated_specs = accumulated_specs(
+ running_vms_specs
+ )
+
+ # Find out remaining resources after
+ # host_specs - already running vm_specs
+ remaining = remaining_resources(
+ host.specs, running_vms_accumulated_specs
+ )
+
+ # Find out remaining - new_vm_specs
+ remaining = remaining_resources(remaining, vm_specs)
+
+ if all(map(lambda x: x >= 0, remaining.values())):
+ return host.key
+
+ raise NoSuitableHostFound
+
+
+def dead_host_detection():
+ # Bring out your dead! - Monty Python and the Holy Grail
+ hosts = shared.host_pool.by_status(HostStatus.alive)
+ dead_hosts_keys = []
+
+ for host in hosts:
+ # Only check hosts that claim to be alive
+ if host.status == HostStatus.alive:
+ if not host.is_alive():
+ dead_hosts_keys.append(host.key)
+
+ return dead_hosts_keys
+
+
+def dead_host_mitigation(dead_hosts_keys):
+ for host_key in dead_hosts_keys:
+ host = shared.host_pool.get(host_key)
+ host.declare_dead()
+
+ vms_hosted_on_dead_host = shared.vm_pool.by_host(host_key)
+ for vm in vms_hosted_on_dead_host:
+ vm.status = "UNKNOWN"
+ shared.vm_pool.put(vm)
+ shared.host_pool.put(host)
+
+
+def assign_host(vm):
+ vm.hostname = get_suitable_host(vm.specs)
+ shared.vm_pool.put(vm)
+
+ r = RequestEntry.from_scratch(
+ type=RequestType.StartVM,
+ uuid=vm.uuid,
+ hostname=vm.hostname,
+ request_prefix=shared.settings["etcd"]["request_prefix"],
+ )
+ shared.request_pool.put(r)
+
+ vm.log.append("VM scheduled for starting")
+ return vm.hostname
diff --git a/archive/uncloud_etcd_based/uncloud/scheduler/main.py b/archive/uncloud_etcd_based/uncloud/scheduler/main.py
new file mode 100755
index 0000000..38c07bf
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/scheduler/main.py
@@ -0,0 +1,51 @@
+# TODO
+# 1. send an email to an email address defined by env['admin-email']
+# if resources are finished
+# 2. Introduce a status endpoint of the scheduler -
+# maybe expose a prometheus compatible output
+
+import argparse
+
+from uncloud.common.request import RequestEntry, RequestType
+from uncloud.common.shared import shared
+from uncloud.scheduler import logger
+from uncloud.scheduler.helper import (dead_host_mitigation, dead_host_detection,
+ assign_host, NoSuitableHostFound)
+
+arg_parser = argparse.ArgumentParser('scheduler', add_help=False)
+
+
+def main(arguments):
+ # The while True below is necessary to gracefully handle leadership transfers and temporary
+ # unavailability in etcd. It works because get_prefix/watch_prefix return iter([]) - an
+ # iterator over an empty list - on exception (caused by the reasons mentioned above), which
+ # ends the inner loops immediately. Wrapping everything in an infinite loop therefore means
+ # we retry getting the prefix again and again until we either succeed or the daemon dies.
+ while True:
+ for request_iterator in [
+ shared.etcd_client.get_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
+ raise_exception=False),
+ shared.etcd_client.watch_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
+ raise_exception=False),
+ ]:
+ for request_event in request_iterator:
+ dead_host_mitigation(dead_host_detection())
+ request_entry = RequestEntry(request_event)
+
+ if request_entry.type == RequestType.ScheduleVM:
+ logger.debug('%s, %s', request_entry.key, request_entry.value)
+
+ vm_entry = shared.vm_pool.get(request_entry.uuid)
+ if vm_entry is None:
+ logger.info('Trying to act on {} but it is deleted'.format(request_entry.uuid))
+ continue
+
+ shared.etcd_client.client.delete(request_entry.key) # consume Request
+
+ try:
+ assign_host(vm_entry)
+ except NoSuitableHostFound:
+ vm_entry.add_log('Can\'t schedule VM. No Resource Left.')
+ shared.vm_pool.put(vm_entry)
+
+ logger.info('No Resource Left. Emailing admin....')
diff --git a/uncloud/uncloud_storage/__init__.py b/archive/uncloud_etcd_based/uncloud/scheduler/tests/__init__.py
similarity index 100%
rename from uncloud/uncloud_storage/__init__.py
rename to archive/uncloud_etcd_based/uncloud/scheduler/tests/__init__.py
diff --git a/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_basics.py b/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_basics.py
new file mode 100755
index 0000000..defeb23
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_basics.py
@@ -0,0 +1,233 @@
+import json
+import multiprocessing
+import sys
+import unittest
+from datetime import datetime
+from os.path import dirname
+
+BASE_DIR = dirname(dirname(__file__))
+sys.path.insert(0, BASE_DIR)
+
+from main import (
+ accumulated_specs,
+ remaining_resources,
+ VmPool,
+ main,
+)
+
+from uncloud.config import etcd_client
+
+
+class TestFunctions(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.client = etcd_client
+ cls.host_prefix = "/test/host"
+ cls.vm_prefix = "/test/vm"
+
+ # These deletions could also be done in
+ # tearDown(), but it is more appropriate here
+ # as it enables us to inspect the etcd store
+ # even after the test has run
+ cls.client.client.delete_prefix(cls.host_prefix)
+ cls.client.client.delete_prefix(cls.vm_prefix)
+ cls.create_hosts(cls)
+ cls.create_vms(cls)
+
+ cls.p = multiprocessing.Process(
+ target=main, args=[cls.vm_prefix, cls.host_prefix]
+ )
+ cls.p.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.p.terminate()
+
+ def create_hosts(self):
+ host1 = {
+ "cpu": 32,
+ "ram": 128,
+ "hdd": 1024,
+ "sdd": 0,
+ "status": "ALIVE",
+ "last_heartbeat": datetime.utcnow().isoformat(),
+ }
+ host2 = {
+ "cpu": 16,
+ "ram": 64,
+ "hdd": 512,
+ "sdd": 0,
+ "status": "ALIVE",
+ "last_heartbeat": datetime.utcnow().isoformat(),
+ }
+
+ host3 = {
+ "cpu": 16,
+ "ram": 32,
+ "hdd": 256,
+ "sdd": 256,
+ "status": "ALIVE",
+ "last_heartbeat": datetime.utcnow().isoformat(),
+ }
+ with self.client.client.lock("lock"):
+ self.client.put(
+ f"{self.host_prefix}/1", host1, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/2", host2, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/3", host3, value_in_json=True
+ )
+
+ def create_vms(self):
+ vm1 = json.dumps(
+ {
+ "owner": "meow",
+ "specs": {"cpu": 4, "ram": 8, "hdd": 100, "sdd": 256},
+ "hostname": "",
+ "status": "REQUESTED_NEW",
+ }
+ )
+ vm2 = json.dumps(
+ {
+ "owner": "meow",
+ "specs": {"cpu": 16, "ram": 64, "hdd": 512, "sdd": 0},
+ "hostname": "",
+ "status": "REQUESTED_NEW",
+ }
+ )
+ vm3 = json.dumps(
+ {
+ "owner": "meow",
+ "specs": {"cpu": 16, "ram": 32, "hdd": 128, "sdd": 0},
+ "hostname": "",
+ "status": "REQUESTED_NEW",
+ }
+ )
+ vm4 = json.dumps(
+ {
+ "owner": "meow",
+ "specs": {"cpu": 16, "ram": 64, "hdd": 512, "sdd": 0},
+ "hostname": "",
+ "status": "REQUESTED_NEW",
+ }
+ )
+ vm5 = json.dumps(
+ {
+ "owner": "meow",
+ "specs": {"cpu": 2, "ram": 2, "hdd": 10, "sdd": 0},
+ "hostname": "",
+ "status": "REQUESTED_NEW",
+ }
+ )
+ vm6 = json.dumps(
+ {
+ "owner": "meow",
+ "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
+ "hostname": "",
+ "status": "REQUESTED_NEW",
+ }
+ )
+ vm7 = json.dumps(
+ {
+ "owner": "meow",
+ "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
+ "hostname": "",
+ "status": "REQUESTED_NEW",
+ }
+ )
+ self.client.put(f"{self.vm_prefix}/1", vm1)
+ self.client.put(f"{self.vm_prefix}/2", vm2)
+ self.client.put(f"{self.vm_prefix}/3", vm3)
+ self.client.put(f"{self.vm_prefix}/4", vm4)
+ self.client.put(f"{self.vm_prefix}/5", vm5)
+ self.client.put(f"{self.vm_prefix}/6", vm6)
+ self.client.put(f"{self.vm_prefix}/7", vm7)
+
+ def test_accumulated_specs(self):
+ vms = [
+ {"ssd": 10, "cpu": 4, "ram": 8},
+ {"hdd": 10, "cpu": 4, "ram": 8},
+ {"cpu": 8, "ram": 32},
+ ]
+ self.assertEqual(
+ accumulated_specs(vms),
+ {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10},
+ )
+
+ def test_remaining_resources(self):
+ host_specs = {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
+ vms_specs = {"ssd": 10, "cpu": 32, "ram": 12, "hdd": 0}
+ resultant_specs = {"ssd": 0, "cpu": -16, "ram": 36, "hdd": 10}
+ self.assertEqual(
+ remaining_resources(host_specs, vms_specs), resultant_specs
+ )
+
+ def test_vmpool(self):
+ self.p.join(1)
+ vm_pool = VmPool(self.client, self.vm_prefix)
+
+ # vm_pool by host
+ actual = vm_pool.by_host(vm_pool.vms, f"{self.host_prefix}/3")
+ ground_truth = [
+ (
+ f"{self.vm_prefix}/1",
+ {
+ "owner": "meow",
+ "specs": {
+ "cpu": 4,
+ "ram": 8,
+ "hdd": 100,
+ "sdd": 256,
+ },
+ "hostname": f"{self.host_prefix}/3",
+ "status": "SCHEDULED_DEPLOY",
+ },
+ )
+ ]
+ self.assertEqual(actual[0], ground_truth[0])
+
+ # vm_pool by status
+ actual = vm_pool.by_status(vm_pool.vms, "REQUESTED_NEW")
+ ground_truth = [
+ (
+ f"{self.vm_prefix}/7",
+ {
+ "owner": "meow",
+ "specs": {
+ "cpu": 10,
+ "ram": 22,
+ "hdd": 146,
+ "sdd": 0,
+ },
+ "hostname": "",
+ "status": "REQUESTED_NEW",
+ },
+ )
+ ]
+ self.assertEqual(actual[0], ground_truth[0])
+
+ # vm_pool by except status
+ actual = vm_pool.except_status(vm_pool.vms, "SCHEDULED_DEPLOY")
+ ground_truth = [
+ (
+ f"{self.vm_prefix}/7",
+ {
+ "owner": "meow",
+ "specs": {
+ "cpu": 10,
+ "ram": 22,
+ "hdd": 146,
+ "sdd": 0,
+ },
+ "hostname": "",
+ "status": "REQUESTED_NEW",
+ },
+ )
+ ]
+ self.assertEqual(actual[0], ground_truth[0])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_dead_host_mechanism.py b/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_dead_host_mechanism.py
new file mode 100755
index 0000000..466b9ee
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/scheduler/tests/test_dead_host_mechanism.py
@@ -0,0 +1,83 @@
+import sys
+import unittest
+from datetime import datetime
+from os.path import dirname
+
+BASE_DIR = dirname(dirname(__file__))
+sys.path.insert(0, BASE_DIR)
+
+from main import dead_host_detection, dead_host_mitigation, config
+
+
+class TestDeadHostMechanism(unittest.TestCase):
+ def setUp(self):
+ self.client = config.etcd_client
+ self.host_prefix = "/test/host"
+ self.vm_prefix = "/test/vm"
+
+ self.client.client.delete_prefix(self.host_prefix)
+ self.client.client.delete_prefix(self.vm_prefix)
+
+ self.create_hosts()
+
+ def create_hosts(self):
+ host1 = {
+ "cpu": 32,
+ "ram": 128,
+ "hdd": 1024,
+ "sdd": 0,
+ "status": "ALIVE",
+ "last_heartbeat": datetime.utcnow().isoformat(),
+ }
+ host2 = {
+ "cpu": 16,
+ "ram": 64,
+ "hdd": 512,
+ "sdd": 0,
+ "status": "ALIVE",
+ "last_heartbeat": datetime(2011, 1, 1).isoformat(),
+ }
+
+ host3 = {"cpu": 16, "ram": 32, "hdd": 256, "sdd": 256}
+ host4 = {
+ "cpu": 16,
+ "ram": 32,
+ "hdd": 256,
+ "sdd": 256,
+ "status": "DEAD",
+ "last_heartbeat": datetime(2011, 1, 1).isoformat(),
+ }
+ with self.client.client.lock("lock"):
+ self.client.put(
+ f"{self.host_prefix}/1", host1, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/2", host2, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/3", host3, value_in_json=True
+ )
+ self.client.put(
+ f"{self.host_prefix}/4", host4, value_in_json=True
+ )
+
+ def test_dead_host_detection(self):
+ hosts = self.client.get_prefix(
+ self.host_prefix, value_in_json=True
+ )
+ deads = dead_host_detection(hosts)
+ self.assertEqual(deads, ["/test/host/2", "/test/host/3"])
+ return deads
+
+ def test_dead_host_mitigation(self):
+ deads = self.test_dead_host_detection()
+ dead_host_mitigation(self.client, deads)
+ hosts = self.client.get_prefix(
+ self.host_prefix, value_in_json=True
+ )
+ deads = dead_host_detection(hosts)
+ self.assertEqual(deads, [])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/archive/uncloud_etcd_based/uncloud/version.py b/archive/uncloud_etcd_based/uncloud/version.py
new file mode 100644
index 0000000..ccf3980
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/version.py
@@ -0,0 +1 @@
+VERSION = "0.0.5-30-ge91fd9e"
diff --git a/archive/uncloud_etcd_based/uncloud/vmm/__init__.py b/archive/uncloud_etcd_based/uncloud/vmm/__init__.py
new file mode 100644
index 0000000..6db61eb
--- /dev/null
+++ b/archive/uncloud_etcd_based/uncloud/vmm/__init__.py
@@ -0,0 +1,284 @@
+import os
+import subprocess as sp
+import logging
+import socket
+import json
+import tempfile
+import time
+
+from contextlib import suppress
+from multiprocessing import Process
+from os.path import join as join_path
+from os.path import isdir
+
+logger = logging.getLogger(__name__)
+
+
+class VMQMPHandles:
+ def __init__(self, path):
+ self.path = path
+ self.sock = socket.socket(socket.AF_UNIX)
+ self.file = self.sock.makefile()
+
+ def __enter__(self):
+ self.sock.connect(self.path)
+
+ # eat qmp greetings
+ self.file.readline()
+
+ # init qmp
+ self.sock.sendall(b'{ "execute": "qmp_capabilities" }')
+ self.file.readline()
+
+ return self.sock, self.file
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.file.close()
+ self.sock.close()
+
+ if exc_type:
+     logger.error("Couldn't get handle for VM: %s", exc_val)
+     raise exc_type("Couldn't get handle for VM.") from exc_val
+
+
+class TransferVM(Process):
+ def __init__(self, src_uuid, dest_sock_path, host, socket_dir):
+ self.src_uuid = src_uuid
+ self.host = host
+ self.src_sock_path = os.path.join(socket_dir, self.src_uuid)
+ self.dest_sock_path = dest_sock_path
+
+ super().__init__()
+
+ def run(self):
+ with suppress(FileNotFoundError):
+ os.remove(self.src_sock_path)
+
+ command = [
+ "ssh",
+ "-nNT",
+ "-L",
+ "{}:{}".format(self.src_sock_path, self.dest_sock_path),
+ "root@{}".format(self.host),
+ ]
+
+ try:
+ p = sp.Popen(command)
+ except Exception as e:
+ logger.error(
+ "Couldn' forward unix socks over ssh.", exc_info=e
+ )
+ else:
+ time.sleep(2)
+ vmm = VMM()
+ logger.debug("Executing: ssh forwarding command: %s", command)
+ vmm.execute_command(
+ self.src_uuid,
+ command="migrate",
+ arguments={"uri": "unix:{}".format(self.src_sock_path)},
+ )
+
+ while p.poll() is None:
+ success, output = vmm.execute_command(self.src_uuid, command="query-migrate")
+ if success:
+ status = output["return"]["status"]
+ logger.info('Migration Status: {}'.format(status))
+ if status == "completed":
+ vmm.stop(self.src_uuid)
+ return
+ elif status in ['failed', 'cancelled']:
+ return
+ else:
+ logger.error("Couldn't be able to query VM {} that was in migration".format(self.src_uuid))
+ return
+
+ time.sleep(2)
+
+
+class VMM:
+ # Virtual Machine Manager
+ def __init__(
+ self,
+ qemu_path="/usr/bin/qemu-system-x86_64",
+ vmm_backend=os.path.expanduser("~/uncloud/vmm/"),
+ ):
+ self.qemu_path = qemu_path
+ self.vmm_backend = vmm_backend
+ self.socket_dir = os.path.join(self.vmm_backend, "sock")
+
+ if not os.path.isdir(self.vmm_backend):
+ logger.info(
+ "{} does not exists. Creating it...".format(
+ self.vmm_backend
+ )
+ )
+ os.makedirs(self.vmm_backend, exist_ok=True)
+
+ if not os.path.isdir(self.socket_dir):
+ logger.info(
+ "{} does not exists. Creating it...".format(
+ self.socket_dir
+ )
+ )
+ os.makedirs(self.socket_dir, exist_ok=True)
+
+ def is_running(self, uuid):
+ sock_path = os.path.join(self.socket_dir, uuid)
+ try:
+ sock = socket.socket(socket.AF_UNIX)
+ sock.connect(sock_path)
+ recv = sock.recv(4096)
+ except Exception as err:
+ # unix socket doesn't exist or is closed
+ logger.debug(
+ "VM {} socket either doesn't exist or is closed, meaning the VM is stopped.".format(
+ uuid
+ ),
+ exc_info=err,
+ )
+ else:
+ # if we receive a greeting from QMP, the VM is running
+ if len(recv) > 0:
+ return True
+
+ with suppress(FileNotFoundError):
+ os.remove(sock_path)
+
+ return False
+
+ def start(self, *args, uuid, migration=False):
+ # start --> success?
+ migration_args = ()
+ if migration:
+ migration_args = (
+ "-incoming",
+ "unix:{}".format(os.path.join(self.socket_dir, uuid)),
+ )
+
+ if self.is_running(uuid):
+ logger.warning("Cannot start VM. It is already running.")
+ else:
+ qmp_arg = (
+ "-qmp",
+ "unix:{},server,nowait".format(
+ join_path(self.socket_dir, uuid)
+ ),
+ )
+ vnc_arg = (
+ "-vnc",
+ "unix:{}".format(tempfile.NamedTemporaryFile().name),
+ )
+
+ command = [
+ "sudo",
+ "-p",
+ "Enter password to start VM {}: ".format(uuid),
+ self.qemu_path,
+ *args,
+ *qmp_arg,
+ *migration_args,
+ *vnc_arg,
+ "-daemonize",
+ ]
+ try:
+ sp.check_output(command, stderr=sp.PIPE)
+ except sp.CalledProcessError as err:
+ logger.exception(
+ "Error occurred while starting VM.\nDetail %s",
+ err.stderr.decode("utf-8"),
+ )
+ else:
+ sp.check_output(
+ ["sudo", "-p", "Enter password to correct permission for uncloud-vmm's directory",
+ "chmod", "-R", "o=rwx,g=rwx", self.vmm_backend]
+ )
+
+ # TODO: Find a good way to check whether the virtual machine is up and
+ # running without relying on non-guaranteed mechanisms.
+ for _ in range(10):
+ time.sleep(2)
+ status = self.get_status(uuid)
+ if status in ["running", "inmigrate"]:
+ return status
+ logger.warning(
+ "Timeout on VM's status. Shutting down VM %s", uuid
+ )
+ self.stop(uuid)
+ # TODO: What should we do more. VM can still continue to run in background.
+ # If we have pid of vm we can kill it using OS.
+
+ def execute_command(self, uuid, command, **kwargs):
+ # execute_command -> success?, output
+ try:
+ with VMQMPHandles(os.path.join(self.socket_dir, uuid)) as (
+ sock_handle,
+ file_handle,
+ ):
+ command_to_execute = {"execute": command, **kwargs}
+ sock_handle.sendall(
+ json.dumps(command_to_execute).encode("utf-8")
+ )
+ output = file_handle.readline()
+ except Exception:
+ logger.exception(
+ "Error occurred while executing command and getting valid output from qmp"
+ )
+ else:
+ try:
+ output = json.loads(output)
+ except Exception:
+ logger.exception(
+ "QMP Output isn't valid JSON. %s", output
+ )
+ else:
+ return "return" in output, output
+ return False, None
+
+ def stop(self, uuid):
+ success, output = self.execute_command(
+ command="quit", uuid=uuid
+ )
+ return success
+
+ def get_status(self, uuid):
+ success, output = self.execute_command(
+ command="query-status", uuid=uuid
+ )
+ if success:
+ return output["return"]["status"]
+ else:
+ # TODO: Think about this for a little more
+ return "STOPPED"
+
+ def discover(self):
+ vms = [
+ uuid
+ for uuid in os.listdir(self.socket_dir)
+ if not isdir(join_path(self.socket_dir, uuid))
+ ]
+ return vms
+
+ def get_vnc(self, uuid):
+ success, output = self.execute_command(
+ uuid, command="query-vnc"
+ )
+ if success:
+ return output["return"]["service"]
+ return None
+
+ def transfer(self, src_uuid, destination_sock_path, host):
+ p = TransferVM(
+ src_uuid,
+ destination_sock_path,
+ socket_dir=self.socket_dir,
+ host=host,
+ )
+ p.start()
+
+ # TODO: the following method should clean up things that went wrong,
+ # e.g. if a VM migration fails or a VM doesn't start for a long time
+ # (i.e. 15 minutes), we should stop the waiting VM.
+ def maintenance(self):
+ pass
diff --git a/bin/make-migrations-from-scratch.sh b/bin/make-migrations-from-scratch.sh
new file mode 100644
index 0000000..8baccfa
--- /dev/null
+++ b/bin/make-migrations-from-scratch.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# For undoing/redoing everything
+# Needed in special cases and needs to be avoided as soon as
+# uncloud.version >= 1
+for a in */migrations; do rm ${a}/*.py; done
+for a in */migrations; do python manage.py makemigrations ${a%%/migrations}; done
diff --git a/doc/.gitignore b/doc/.gitignore
new file mode 100644
index 0000000..b51a70d
--- /dev/null
+++ b/doc/.gitignore
@@ -0,0 +1,2 @@
+*.pdf
+*.tex
diff --git a/doc/README-billing.org b/doc/README-billing.org
new file mode 100644
index 0000000..50b26fa
--- /dev/null
+++ b/doc/README-billing.org
@@ -0,0 +1,85 @@
+* How to handle billing in general
+** Manual test flow / setting up bills
+ - Needs orders
+ -
+** Orders
+ - Orders are the heart of uncloud billing
+ - Have a starting date
+ - Have an ending date
+ - Orders are immutable
+ - Can usually not be cancelled / cancellation is not a refund
+ - Customer/user commits on a certain period -> gets discount
+ based on it
+ - Can be upgraded
+ - Create a new order
+ - We link the new order to the old order and say this one
+ replaces it
+ - If the price of the new order is HIGHER than the OLD order,
+ then we charge the difference until the end of the order period
+ (see the sketch below)
+ - In the next billing run we set the OLD order to not bill anymore
+ - And only the NEW order will be billed afterwards
+ - Can be downgraded in the next period (but not for this period)
+ - We create a new order, same as for upgrade
+ - The new order starts directly after the OLD order
+ - As the amount is LOWER than the OLD order, no additional charge is done
+ during this order period
+ - We might need to have an activate datetime
+ - When to implement this
+ - Order periods can be
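+
+ A minimal sketch of the upgrade charge described above. This only
+ illustrates the pro-rata reading ("charge the difference until the end
+ of the order period"); the function and field names are made up and do
+ not reflect the actual models:
+
+ #+BEGIN_SRC python
+ from decimal import Decimal
+
+ def upgrade_charge(old_price, new_price, days_left, period_days=30):
+     """Charge the price difference for the remainder of the period.
+
+     Only an upgrade (new_price > old_price) is charged immediately;
+     a downgrade takes effect in the next period and costs nothing now.
+     """
+     difference = Decimal(new_price) - Decimal(old_price)
+     if difference <= 0:
+         return Decimal(0)
+     return difference * Decimal(days_left) / Decimal(period_days)
+
+ # Example: upgrading a 10 CHF order to 20 CHF with 15 of 30 days left -> 5 CHF
+ print(upgrade_charge(10, 20, days_left=15))
+ #+END_SRC
+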
+*** Statuses
+ - CREATING/PREPARING
+ - INACTIVE (?)
+ - TO_BILL
+ - NOT_TO_BILL: we use this to accelerate queries to the DB
+*** Updating status of orders
+ - If it has a succeeding order and the billing date is last month -> set inactive
+** Bills
+ - Are always for a month
+ - Can be preliminary
+*** Which orders to include
+ - Not the cancelled ones / not active ones
+** Flows / Approach
+*** Finding all orders for a bill
+ - Get all orders with state != NOT_TO_BILL; for each order do (see the sketch after this list):
+ - is it a one time order?
+ - has it a bill assigned?
+ - yes: set to NOT_TO_BILL
+ - no:
+ - get_or_create_bill_for_this_month
+ - assign bill to this order
+ - set to NOT_TO_BILL
+ - is it a recurring order?
+ - if it has a REPLACING order:
+ -
+ - First of month
+ - Last of month
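+
+ A rough, self-contained illustration of the one-time part of this flow,
+ using plain dicts instead of the real Django models (all names here are
+ made up for the example):
+
+ #+BEGIN_SRC python
+ def bill_one_time_orders(orders, bills_by_month, month):
+     """Attach every billable one-time order to this month's bill, then retire it."""
+     for order in orders:
+         if order['state'] == 'NOT_TO_BILL' or not order['one_time']:
+             continue
+         if order.get('bill') is None:
+             # "get_or_create_bill_for_this_month", expressed on plain dicts
+             bill = bills_by_month.setdefault((order['owner'], month),
+                                              {'owner': order['owner'], 'month': month})
+             order['bill'] = bill
+         # a one-time order is billed exactly once
+         order['state'] = 'NOT_TO_BILL'
+ #+END_SRC
+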
+*** Handling replacement of orders
+ - The OLD order will appear on the bill in the month in which it was
+ cancelled
+ - The OLD order needs to be set to NOT_TO_BILL after it was billed
+ the last time
+ - The NEW order will be added pro rata if the amount is higher in
+ the same month
+ - The NEW order will be used next month
+**** Disabling the old order
+ - On billing run
+ - If order.replacement_order (naming!) is set
+ - if the order.replacement_order starts during THIS_MONTH
+ - add order to bill
+ - if NOT:
+ - the order was already replaced in a previous billing period
+ - set the order to NOT_TO_BILL
+**** Billing the new order
+ - If order.previous_order
+*** Handling multiple times a recurring order
+ - For each recurring order check the order.period
+ - Find out when it was billed last
+ - lookup latest bill
+ - Calculate how many times it has been used until 23:59 on the last
+ day of the month
+ - For a preliminary bill: until datetime.now()
+ - Call this point in time the bill_end_datetime
+ - Getting duration: bill_end_datetime - order.last_billed
+ - Amount in seconds; duration_in_seconds
+ - Divide duration_in_seconds by order.period; amount_used:
+ - If >= 1: add amount_used * order.recurring_amount to the bill (see the sketch below)
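+
+ A minimal sketch of this calculation (floats for brevity; the real code
+ presumably works on model fields and Decimal values):
+
+ #+BEGIN_SRC python
+ from datetime import datetime
+
+ def recurring_amount_for_bill(last_billed, bill_end, period_seconds, recurring_amount):
+     """Return how much of a recurring order belongs on this bill."""
+     duration_in_seconds = (bill_end - last_billed).total_seconds()
+     amount_used = duration_in_seconds / period_seconds
+     if amount_used >= 1:
+         return amount_used * recurring_amount
+     return 0.0
+
+ # Example: a 30-day order of 10 CHF, last billed on 2020-08-01,
+ # billed until 23:59 on 2020-08-31 -> slightly more than one full period
+ print(recurring_amount_for_bill(datetime(2020, 8, 1),
+                                 datetime(2020, 8, 31, 23, 59),
+                                 30 * 24 * 3600, 10.0))
+ #+END_SRC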
diff --git a/doc/README-how-to-configure-remote-uncloud-clients.org b/doc/README-how-to-configure-remote-uncloud-clients.org
new file mode 100644
index 0000000..b48886b
--- /dev/null
+++ b/doc/README-how-to-configure-remote-uncloud-clients.org
@@ -0,0 +1,28 @@
+* What is a remote uncloud client?
+** Systems that configure themselves for the use with uncloud
+** Examples are VMHosts, VPN Servers, cdist control server, etc.
+* Which access do these clients need?
+** They need read / write access to the database
+* Possible methods
+** Overview
+| | pros | cons |
+| SSL based | Once setup, can access all django parts natively, locally | X.509 infrastructure |
+| SSH -L tunnel | All nodes can use [::1]:5432 | SSH setup can be fragile |
+| ssh djangohost manage.py | All DB ops locally | Code is only executed on django host |
+| https + token | Rest alike / consistent access | Code is only executed on django host |
+| from_django | Everything is on the django host | main host can become bottleneck |
+** remote vs. local Django code execution
+ - If manage.py is executed locally (= on the client), it can
+ check/modify local configs
+ - However local execution requires a pyvenv + packages + db access
+ - Local execution also *could* make use of postgresql notify for
+ triggering actions (which is quite neat)
+ - Remote execution (= on the primary django host) can access the db
+ via unix socket
+ - However remote execution cannot check local state
+** from_django
+ - might reuse existing methods like celery
+ - reduces the amount of things to be installed on the client to
+ almost zero
+ - follows the opennebula model
+ - has a single point of failure
diff --git a/doc/uncloud-manual-2020-08-01.org b/doc/uncloud-manual-2020-08-01.org
new file mode 100644
index 0000000..5c6a9f7
--- /dev/null
+++ b/doc/uncloud-manual-2020-08-01.org
@@ -0,0 +1,382 @@
+* Bootstrap / Installation
+** Pre-requisites by operating system
+*** General
+ To run uncloud you need:
+ - ldap development libraries
+ - libxml2-dev libxslt-dev
+ - gcc / libc headers: for compiling things
+ - python3-dev
+ - wireguard: wg (for checking keys)
+*** Alpine
+ #+BEGIN_SRC sh
+apk add openldap-dev postgresql-dev libxml2-dev libxslt-dev gcc python3-dev musl-dev wireguard-tools-wg
+#+END_SRC
+*** Debian/Devuan:
+ #+BEGIN_SRC sh
+apt install postgresql-server-dev-all
+#+END_SRC
+** Creating a virtual environment / installing python requirements
+*** Virtual env
+ To separate uncloud requirements, you can use a python virtual
+ env as follows:
+ #+BEGIN_SRC sh
+python3 -m venv venv
+. ./venv/bin/activate
+#+END_SRC
+ Then install the requirements
+ #+BEGIN_SRC sh
+pip install -r requirements.txt
+#+END_SRC
+** Setting up the database
+*** Install the database service
+ The database can run on the same host as uncloud, but it can also run
+ on a different server. Consult the usual postgresql documentation for
+ a secure configuration.
+**** Alpine
+ #+BEGIN_SRC sh
+apk add postgresql-server
+rc-update add postgresql
+rc-service postgresql start
+#+END_SRC
+
+**** Debian/Devuan:
+ #+BEGIN_SRC sh
+ apt install postgresql
+ #+END_SRC
+*** Create the database
+ Due to the use of the JSONField, postgresql is required.
+ To get started,
+ create a database and have it owned by the user that runs uncloud
+ (usually "uncloud"):
+
+ #+BEGIN_SRC sh
+bridge:~# su - postgres
+bridge:~$ psql
+postgres=# create role uncloud login;
+postgres=# create database uncloud owner uncloud;
+#+END_SRC
+*** Creating the schema
+ #+BEGIN_SRC sh
+python manage.py migrate
+#+END_SRC
+
+** Bootstrap
+ - Login via a user so that the user object gets created
+ - Run the following (replace nicocustomer with the username)
+ #+BEGIN_SRC sh
+ python manage.py bootstrap-user --username nicocustomer
+ #+END_SRC
+
+** Initialise the database
+ While it is not strictly required to add default values to the
+ database, doing so can significantly reduce the time needed to get
+ started with uncloud.
+
+ To add the default database values run:
+
+ #+BEGIN_SRC shell
+ # Add local objects
+ python manage.py db-add-defaults
+
+ # Import VAT rates
+ python manage.py import-vat-rates
+ #+END_SRC
+
+* Testing / CLI Access
+ Access via the commandline (CLI) can be done using curl or
+ httpie. In our examples we will use httpie.
+** Checking out the API
+ #+BEGIN_SRC sh
+ http localhost:8000/api/
+ #+END_SRC
+** Authenticate via ldap user in password store
+ #+BEGIN_SRC sh
+ http --auth nicocustomer:$(pass ldap/nicocustomer) localhost:8000/api/
+ #+END_SRC
+* Database
+** uncloud clients access the database from a variety of outside hosts
+** So the postgresql data base needs to be remotely accessible
+** Instead of exposing the tcp socket, we make postgresql bind to localhost via IPv6
+*** ::1, port 5432
+** Then we remotely connect to the database server with ssh tunneling
+*** ssh -L5432:localhost:5432 uncloud-database-host
+** Configuring your database for SSH based remote access
+*** host all all ::1/128 trust
+
+* URLs
+ - api/ - the rest API
+* uncloud Products
+** Product features
+ - Dependencies on other products
+ - Minimum parameters (min cpu, min ram, etc).
+ - Can also realise the dcl vm
+ - dualstack vm = VM + IPv4 + SSD
+ - Need to have a non-misleading name for the "bare VM"
+ - Should support network boot (?)
+
+** VPN
+*** How to add a new VPN Host
+**** Install wireguard to the host
+**** Install uncloud to the host
+**** Add `python manage.py vpn --hostname fqdn-of-this-host` to the crontab
+**** Use the CLI to configure one or more VPN Networks for this host
+*** Example of adding a VPN host at ungleich
+**** Create a new dual stack alpine VM
+**** Add it to DNS as vpn-XXX.ungleich.ch
+**** Route a /40 network to its IPv6 address
+**** Install wireguard on it
+**** TODO [#C] Enable wireguard on boot
+**** TODO [#C] Create a new VPNPool on uncloud with
+***** the network address (selecting from our existing pool)
+***** the network size (/...)
+***** the vpn host that provides the network (selecting the created VM)
+***** the wireguard private key of the vpn host (using wg genkey)
+***** http command
+ #+BEGIN_SRC sh
+ http -a nicoschottelius:$(pass ungleich.ch/nico.schottelius@ungleich.ch) \
+     http://localhost:8000/admin/vpnpool/ network=2a0a:e5c1:200:: \
+     network_size=40 subnetwork_size=48 \
+     vpn_hostname=vpn-2a0ae5c1200.ungleich.ch \
+     wireguard_private_key=...
+ #+END_SRC
+*** Example http commands / REST calls
+**** creating a new vpn pool
+ http -a nicoschottelius:$(pass
+ ungleich.ch/nico.schottelius@ungleich.ch)
+ http://localhost:8000/admin/vpnpool/ network_size=40
+ subnetwork_size=48 network=2a0a:e5c1:200::
+ vpn_hostname=vpn-2a0ae5c1200.ungleich.ch wireguard_private_key=$(wg
+ genkey)
+**** Creating a new vpn network
+*** Creating a VPN pool
+
+ #+BEGIN_SRC sh
+http -a uncloudadmin:$(pass uncloudadmin) https://localhost:8000/v1/admin/vpnpool/ \
+ network=2a0a:e5c1:200:: network_size=40 subnetwork_size=48 \
+ vpn_hostname=vpn-2a0ae5c1200.ungleich.ch wireguard_private_key=$(wg genkey)
+ #+END_SRC
+
+This will create the VPNPool 2a0a:e5c1:200::/40 from which /48
+networks will be used for clients.
+
+VPNPools can only be managed by staff.
+
+*** Managing VPNNetworks
+
+To request a network as a client, use the following call:
+
+ #+BEGIN_SRC sh
+ http -a user:$(pass user) https://localhost:8000/v1/net/vpn/ \
+ network_size=48 \
+ wireguard_public_key=$(wg genkey | tee privatekey | wg pubkey)
+ #+END_SRC
+
+VPNNetworks can be managed by all authenticated users.
+
+* Developer Handbook
+ The following section describe decisions / architecture of
+ uncloud. These chapters are intended to be read by developers.
+** This Documentation
+ This documentation is written in org-mode. To compile it to
+ html/pdf, just open emacs and press *C-c C-e l p*.
+** Models
+*** Bill
+ Bills summarise the usage in a specific timeframe. Bills usually
+ span one month.
+*** BillRecord
+ Bill records are used to model the usage of one order during the
+ timeframe.
+*** Order
+ Orders register the intent of a user to buy something. They might
+ refer to a product. (???)
+ Orders register the one-time price and the recurring price. These
+ fields should be treated as immutable. If they need to be modified,
+ a new order that replaces the current order should be created.
+**** Replacing orders
+ If an order is updated, a new order is created and points to the
+ old order. The old order stops one second before the new order
+ starts.
+
+ Whether an order has been replaced can be seen from its replaced_by count:
+ #+BEGIN_SRC sh
+ >>> Order.objects.get(id=1).replaced_by.count()
+ 1
+ #+END_SRC
+
+*** Product and Product Children
+ - A product describes something a user can buy
+ - A product inherits from the uncloud_pay.models.Product model to
+ get basic attributes
+** Identifiers
+*** Problem description
+ Identifiers can be integers, strings or other objects. They should
+ be unique.
+*** Approach 1: integers
+ Integers are somewhat easy to remember, but they also grow
+ predictably, which might allow identifiers to be guessed
+ (obviously proper permissions should prevent this).
+*** Approach 2: random uuids
+ UUIDs are 128 bit integers. Python supports uuid.uuid4() for random
+ uuids.
+*** Approach 3: IPv6 addresses
+ uncloud heavily depends on IPv6 in the first place. uncloud could
+ use a /48 to identify all objects. Objects that have IPv6 addresses
+ of their own don't need to draw from the system /48.
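+
+ As a small illustration of this approach (using the documentation prefix
+ from the table below; purely illustrative, since the decision further down
+ settles on integers):
+
+ #+BEGIN_SRC python
+ import ipaddress
+
+ # hypothetical "bill network" (one /64 per object type)
+ BILL_NET = ipaddress.IPv6Network('2001:db8:2::/64')
+
+ def bill_address(bill_id):
+     """Every bill gets one address inside the bill /64."""
+     return BILL_NET[bill_id]
+
+ print(bill_address(5))   # 2001:db8:2::5
+ #+END_SRC
+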
+**** Possible Subnetworks
+ Assuming uncloud uses a /48 to represent all resources.
+
+ | Network | Name | Description |
+ |-----------------+-----------------+----------------------------------------------|
+ | 2001:db8::/48 | uncloud network | All identifiers drawn from here |
+ | 2001:db8:1::/64 | VM network | Every VM has an IPv6 address in this network |
+ | 2001:db8:2::/64 | Bill network | Every bill has an IPv6 address |
+ | 2001:db8:3::/64 | Order network | Every order has an IPv6 address |
+ | 2001:db8:5::/64 | Product network | Every product (?) has an IPv6 address |
+ | 2001:db8:4::/64 | Disk network | Every disk is identified |
+
+**** Tests
+ [15:47:37] black3.place6:~# rbd create -s 10G ssd/2a0a:e5c0:1::8
+
+*** Decision
+ We use integers, because they are easy.
+
+** Distributing/Dispatching/Orchestrating
+*** Variant 1: using cdist
+ - The uncloud server can git commit things
+ - The uncloud server loads cdist and configures the server
+ - Advantages
+ - Fully integrated into normal flow
+ - Disadvantage
+ - web frontend has access to more data than it needs
+ - On compromise of the machine, more data leaks
+ - Some cdist usual delay
+*** Variant 2: via celery
+ - The uncloud server dispatches via celery
+ - Every decentral node also runs celery/connects to the broker
+ - Broker summary:
+ - If celery is only used locally -> redis is a good choice as broker
+ - If the broker is remote: probably better to use rabbitmq
+ - redis
+ - simpler
+ - rabbitmq
+ - more versatile
+ - made for remote connections
+ - quorom queues would be nice, but not clear if supported
+ - https://github.com/celery/py-amqp/issues/302
+ - https://github.com/celery/celery/issues/6067
+ - Cannot be installed on alpine Linux at the moment
+ - Advantage
+ - Very python / django integrated
+ - Rather instant
+ - Disadvantages
+ - Every decentral node needs to have the uncloud code available
+ - Decentral nodes *might* need to access the database
+ - Tasks can probably be written to work without that
+ (i.e. only strings/bytes)
+
+**** log/tests
+ (venv) [19:54] vpn-2a0ae5c1200:~/uncloud$ celery -A uncloud -b redis://bridge.place7.ungleich.ch worker -n worker1@%h --logfile ~/celery.log -Q vpn-2a0ae5c1200.ungleich.ch
+
+
+*** Variant 3: dedicated cdist instance via message broker
+ - A separate VM/machine
+ - Has Checkout of ~/.cdist
+ - Has cdist checkout
+ - Tiny API for management
+ - Not directly web accessible
+ - "cdist" queue
+
+** Milestones :uncloud:
+*** 1.1 (cleanup 1)
+**** TODO [#C] Unify ValidationError, FieldError - define proper Exception
+ - What do we use for model errors
+*** 1.0 (initial release)
+**** TODO [#C] Initial Generic product support
+ - Product
+***** TODO [#C] Recurring product support
+****** TODO [#C] Support replacing orders for updates
+****** DONE [#A] Finish split of bill creation
+ CLOSED: [2020-09-11 Fri 23:19]
+****** TODO [#C] Test the new functions in the Order class
+****** Define the correct order replacement logic
+ Assumption:
+ - recurringperiods are 30days
+******* Case 1: downgrading
+ - User commits to 10 CHF for 30 days
+ - Wants to downgrade after 15 days to 5 CHF product
+ - Expected result:
+ - order 1: 10 CHF until +30days
+ - order 2: 5 CHF starting 30days + 1s
+ - Sum of the two orders is 15 CHF
+ - Question is
+ - when is the VM shutdown?
+ - a) instantly
+ - b) at the end of the cycle
+ - best solution
+ - user can choose between a ... b any time
+******* Duration
+ - You cannot cancel the duration
+ - You can upgrade and with that cancel the duration
+ - The idea of a duration is that you commit for it
+ - If you want to commit lower (daily basis for instance) you
+ have higher per period prices
+******* Case X
+ - User has VM with 2 Core / 2 GB RAM
+ - User modifies with to 1 core / 3 GB RAM
+ - We treat it as down/upgrade independent of the modifications
+
+******* Case 2: upgrading after 1 day
+ - committed for 30 days
+ - upgrade after 1 day
+ - so first order will be charged for 1/30ths
+
+******* Case 2: upgrading
+ - User commits to 10 CHF for 30 days
+ - Wants to upgrade after 15 days to 20 CHF product
+ - Order 1 : 1 VM with 2 Core / 2 GB / 10 SSD -- 10 CHF
+ - 30days period, stopped after 15, so quantity is 0.5 = 5 CHF
+ - Order 2 : 1 VM with 2 Core / 6 GB / 10 SSD -- 20 CHF
+ - after 15 days
+ - VM is upgraded instantly
+ - Expected result:
+ - order 1: 10 CHF until +15days = 0.5 units = 5 CHF
+ - order 2: 20 CHF starting 15days + 1s ... +30 days after
+ the 15 days -> 45 days = 1 unit = 20 CHF
+ - Total on bill: 25 CHF
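+
+ A quick check of the pro-rata numbers in this variant (plain arithmetic,
+ not the billing code):
+
+ #+BEGIN_SRC python
+ period_days = 30
+
+ # order 1: 10 CHF per period, stopped after 15 of 30 days
+ order1 = 10 * (15 / period_days)   # 5.0 CHF
+
+ # order 2: 20 CHF per period, runs for a full period after the upgrade
+ order2 = 20 * (30 / period_days)   # 20.0 CHF
+
+ print(order1 + order2)             # 25.0 CHF on the bill
+ #+END_SRC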
+
+******* Case 2: upgrading
+ - User commits to 10 CHF for 30 days
+ - Wants to upgrade after 15 days to 20 CHF product
+ - Expected result:
+ - order 1: 10 CHF until +30days = 1 units = 10 CHF
+
+ - order 2: 20 CHF starting 15days + 1s = 1 unit = 20 CHF
+ - Total on bill: 30 CHF
+
+
+****** TODO [#C] Note: ending date not set if replaced by default (implicit!)
+ - Should the new order modify the old order on save()?
+****** DONE Fix totally wrong bill dates in our test case
+ CLOSED: [2020-09-09 Wed 01:00]
+ - 2020 used instead of 2019
+ - Was due to existing test data ...
+***** DONE Bill logic is still wrong
+ CLOSED: [2020-11-05 Thu 18:58]
+ - Bill starting_date is the date of the first order
+ - However first encountered order does not have to be the
+ earliest in the bill!
+ - Bills should not have a duration
+ - Bills should only have a (unique) issue date
+ - We charge based on bill_records
+ - Last time charged issue date of the bill OR earliest date
+ after that
+ - Every bill generation checks all (relevant) orders
+ - add a flag "not_for_billing" or "closed"
+ - query on that flag
+ - verify it every time
+
+
+***** TODO Generating bill for admins/staff
+ -
diff --git a/uncloud/manage.py b/manage.py
similarity index 100%
rename from uncloud/manage.py
rename to manage.py
diff --git a/meow-payv1/ldaptest.py b/meow-payv1/ldaptest.py
deleted file mode 100644
index eb5a5be..0000000
--- a/meow-payv1/ldaptest.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import ldap3
-from ldap3 import Server, Connection, ObjectDef, Reader, ALL
-import os
-import sys
-
-def is_valid_ldap_user(username, password):
- server = Server("ldaps://ldap1.ungleich.ch")
- is_valid = False
-
- try:
- conn = Connection(server, 'cn={},ou=users,dc=ungleich,dc=ch'.format(username), password, auto_bind=True)
- is_valid = True
- except Exception as e:
- print("user: {}".format(e))
-
- try:
- conn = Connection(server, 'uid={},ou=customer,dc=ungleich,dc=ch'.format(username), password, auto_bind=True)
- is_valid = True
- except Exception as e:
- print("customer: {}".format(e))
-
-
- return is_valid
-
-
-if __name__ == '__main__':
- print(is_valid_ldap_user(sys.argv[1], sys.argv[2]))
diff --git a/models.dot b/models.dot
new file mode 100644
index 0000000..0adfba8
--- /dev/null
+++ b/models.dot
@@ -0,0 +1,1482 @@
+digraph model_graph {
+ // Dotfile by Django-Extensions graph_models
+ // Created: 2020-03-17 12:30
+ // Cli Options: -a
+
+ fontname = "Roboto"
+ fontsize = 8
+ splines = true
+
+ node [
+ fontname = "Roboto"
+ fontsize = 8
+ shape = "plaintext"
+ ]
+
+ edge [
+ fontname = "Roboto"
+ fontsize = 8
+ ]
+
+ // Labels
+
+
+  django_contrib_admin_models_LogEntry [shape=record, label="{LogEntry|id : AutoField\lcontent_type : ForeignKey (id)\luser : ForeignKey (id)\laction_flag : PositiveSmallIntegerField\laction_time : DateTimeField\lchange_message : TextField\lobject_id : TextField\lobject_repr : CharField\l}"]
+
+  django_contrib_auth_models_Permission [shape=record, label="{Permission|id : AutoField\lcontent_type : ForeignKey (id)\lcodename : CharField\lname : CharField\l}"]
+
+  django_contrib_auth_models_Group [shape=record, label="{Group|id : AutoField\lname : CharField\l}"]
+
+  django_contrib_contenttypes_models_ContentType [shape=record, label="{ContentType|id : AutoField\lapp_label : CharField\lmodel : CharField\l}"]
+
+  django_contrib_sessions_base_session_AbstractBaseSession [shape=record, label="{AbstractBaseSession|expire_date : DateTimeField\lsession_data : TextField\l}"]
+
+  django_contrib_sessions_models_Session [shape=record, label="{Session \<AbstractBaseSession\>|session_key : CharField\lexpire_date : DateTimeField\lsession_data : TextField\l}"]
+
+  uncloud_pay_models_StripeCustomer [shape=record, label="{StripeCustomer|owner : OneToOneField (id)\lstripe_id : CharField\l}"]
+
+  uncloud_pay_models_Payment [shape=record, label="{Payment|uuid : UUIDField\lowner : ForeignKey (id)\lamount : DecimalField\lsource : CharField\ltimestamp : DateTimeField\l}"]
+
+  uncloud_pay_models_PaymentMethod [shape=record, label="{PaymentMethod|uuid : UUIDField\lowner : ForeignKey (id)\ldescription : TextField\lprimary : BooleanField\lsource : CharField\lstripe_card_id : CharField\l}"]
+
+  uncloud_pay_models_Bill [shape=record, label="{Bill|uuid : UUIDField\lowner : ForeignKey (id)\lcreation_date : DateTimeField\ldue_date : DateField\lending_date : DateTimeField\lstarting_date : DateTimeField\lvalid : BooleanField\l}"]
+
+  uncloud_pay_models_Order [shape=record, label="{Order|uuid : UUIDField\lowner : ForeignKey (id)\lcreation_date : DateTimeField\lending_date : DateTimeField\lrecurring_period : CharField\lstarting_date : DateTimeField\l}"]
+
+  uncloud_pay_models_OrderRecord [shape=record, label="{OrderRecord|id : AutoField\lorder : ForeignKey (uuid)\ldescription : TextField\lone_time_price : DecimalField\lrecurring_price : DecimalField\l}"]
+
+  django_contrib_auth_models_AbstractUser [shape=record, label="{AbstractUser \<AbstractBaseUser,PermissionsMixin\>|date_joined : DateTimeField\lemail : EmailField\lfirst_name : CharField\lis_active : BooleanField\lis_staff : BooleanField\lis_superuser : BooleanField\llast_login : DateTimeField\llast_name : CharField\lpassword : CharField\lusername : CharField\l}"]
+
+  uncloud_auth_models_User [shape=record, label="{User \<AbstractUser\>|id : AutoField\ldate_joined : DateTimeField\lemail : EmailField\lfirst_name : CharField\lis_active : BooleanField\lis_staff : BooleanField\lis_superuser : BooleanField\llast_login : DateTimeField\llast_name : CharField\lpassword : CharField\lusername : CharField\l}"]
+
+  uncloud_pay_models_Product [shape=record, label="{Product|order : ForeignKey (uuid)\lowner : ForeignKey (id)\lstatus : CharField\l}"]
+
+  uncloud_vm_models_VMHost [shape=record, label="{VMHost|uuid : UUIDField\lhostname : CharField\lphysical_cores : IntegerField\lstatus : CharField\lusable_cores : IntegerField\lusable_ram_in_gb : FloatField\l}"]
+
+  uncloud_vm_models_VMProduct [shape=record, label="{VMProduct \<Product\>|uuid : UUIDField\lorder : ForeignKey (uuid)\lowner : ForeignKey (id)\lvmhost : ForeignKey (uuid)\lcores : IntegerField\lname : CharField\lram_in_gb : FloatField\lstatus : CharField\lvmid : IntegerField\l}"]
+
+  uncloud_vm_models_VMWithOSProduct [shape=record, label="{VMWithOSProduct|vmproduct_ptr : OneToOneField (uuid)\l}"]
+
+  uncloud_vm_models_VMDiskImageProduct [shape=record, label="{VMDiskImageProduct|uuid : UUIDField\lowner : ForeignKey (id)\limage_source : CharField\limage_source_type : CharField\limport_url : URLField\lis_os_image : BooleanField\lis_public : BooleanField\lname : CharField\lsize_in_gb : FloatField\lstatus : CharField\lstorage_class : CharField\l}"]
+
+  uncloud_vm_models_VMDiskProduct [shape=record, label="{VMDiskProduct|uuid : UUIDField\limage : ForeignKey (uuid)\lowner : ForeignKey (id)\lvm : ForeignKey (uuid)\lsize_in_gb : FloatField\l}"]
+
+  uncloud_vm_models_VMNetworkCard [shape=record, label="{VMNetworkCard|id : AutoField\lvm : ForeignKey (uuid)\lip_address : GenericIPAddressField\lmac_address : BigIntegerField\l}"]
+
+  uncloud_vm_models_VMSnapshotProduct [shape=record, label="{VMSnapshotProduct \<Product\>|uuid : UUIDField\lorder : ForeignKey (uuid)\lowner : ForeignKey (id)\lvm : ForeignKey (uuid)\lgb_hdd : FloatField\lgb_ssd : FloatField\lstatus : CharField\l}"]
+
+  ungleich_service_models_MatrixServiceProduct [shape=record, label="{MatrixServiceProduct \<Product\>|uuid : UUIDField\lorder : ForeignKey (uuid)\lowner : ForeignKey (id)\lvm : ForeignKey (uuid)\ldomain : CharField\lstatus : CharField\l}"]
+
+  opennebula_models_VM [shape=record, label="{VM|vmid : IntegerField\lowner : ForeignKey (id)\ldata : JSONField\l}"]
+
+
+
+
+ // Relations
+
+ django_contrib_admin_models_LogEntry -> uncloud_auth_models_User
+ [label=" user (logentry)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ django_contrib_admin_models_LogEntry -> django_contrib_contenttypes_models_ContentType
+ [label=" content_type (logentry)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+
+ django_contrib_auth_models_Permission -> django_contrib_contenttypes_models_ContentType
+ [label=" content_type (permission)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ django_contrib_auth_models_Group -> django_contrib_auth_models_Permission
+ [label=" permissions (group)"] [arrowhead=dot arrowtail=dot, dir=both];
+
+
+
+ django_contrib_sessions_models_Session -> django_contrib_sessions_base_session_AbstractBaseSession
+ [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
+
+
+ uncloud_pay_models_StripeCustomer -> uncloud_auth_models_User
+ [label=" owner (stripecustomer)"] [arrowhead=none, arrowtail=none, dir=both];
+
+ uncloud_pay_models_Payment -> uncloud_auth_models_User
+ [label=" owner (payment)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_pay_models_PaymentMethod -> uncloud_auth_models_User
+ [label=" owner (paymentmethod)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_pay_models_Bill -> uncloud_auth_models_User
+ [label=" owner (bill)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_pay_models_Order -> uncloud_auth_models_User
+ [label=" owner (order)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_pay_models_Order -> uncloud_pay_models_Bill
+ [label=" bill (order)"] [arrowhead=dot arrowtail=dot, dir=both];
+
+ uncloud_pay_models_OrderRecord -> uncloud_pay_models_Order
+ [label=" order (orderrecord)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+  django_contrib_auth_base_user_AbstractBaseUser [shape=record, label="{AbstractBaseUser}"]
+ django_contrib_auth_models_AbstractUser -> django_contrib_auth_base_user_AbstractBaseUser
+ [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
+  django_contrib_auth_models_PermissionsMixin [shape=record, label="{PermissionsMixin}"]
+ django_contrib_auth_models_AbstractUser -> django_contrib_auth_models_PermissionsMixin
+ [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
+
+ uncloud_auth_models_User -> django_contrib_auth_models_Group
+ [label=" groups (user)"] [arrowhead=dot arrowtail=dot, dir=both];
+
+ uncloud_auth_models_User -> django_contrib_auth_models_Permission
+ [label=" user_permissions (user)"] [arrowhead=dot arrowtail=dot, dir=both];
+
+ uncloud_auth_models_User -> django_contrib_auth_models_AbstractUser
+ [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
+
+
+ uncloud_pay_models_Product -> uncloud_auth_models_User
+ [label=" owner (product)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_pay_models_Product -> uncloud_pay_models_Order
+ [label=" order (product)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_vm_models_VMProduct -> uncloud_vm_models_VMHost
+ [label=" vmhost (vmproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_vm_models_VMProduct -> uncloud_pay_models_Product
+ [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
+
+ uncloud_vm_models_VMWithOSProduct -> uncloud_vm_models_VMProduct
+ [label=" multi-table\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
+
+ uncloud_vm_models_VMDiskImageProduct -> uncloud_auth_models_User
+ [label=" owner (vmdiskimageproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_vm_models_VMDiskProduct -> uncloud_auth_models_User
+ [label=" owner (vmdiskproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_vm_models_VMDiskProduct -> uncloud_vm_models_VMProduct
+ [label=" vm (vmdiskproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_vm_models_VMDiskProduct -> uncloud_vm_models_VMDiskImageProduct
+ [label=" image (vmdiskproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_vm_models_VMNetworkCard -> uncloud_vm_models_VMProduct
+ [label=" vm (vmnetworkcard)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_vm_models_VMSnapshotProduct -> uncloud_vm_models_VMProduct
+ [label=" vm (vmsnapshotproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_vm_models_VMSnapshotProduct -> uncloud_pay_models_Product
+ [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
+
+
+ uncloud_pay_models_Product -> uncloud_auth_models_User
+ [label=" owner (product)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ uncloud_pay_models_Product -> uncloud_pay_models_Order
+ [label=" order (product)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ ungleich_service_models_MatrixServiceProduct -> uncloud_vm_models_VMProduct
+ [label=" vm (matrixserviceproduct)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+ ungleich_service_models_MatrixServiceProduct -> uncloud_pay_models_Product
+ [label=" abstract\ninheritance"] [arrowhead=empty, arrowtail=none, dir=both];
+
+
+ opennebula_models_VM -> uncloud_auth_models_User
+ [label=" owner (vm)"] [arrowhead=none, arrowtail=dot, dir=both];
+
+
+}
diff --git a/models.png b/models.png
new file mode 100644
index 0000000..f9d0c2e
Binary files /dev/null and b/models.png differ
diff --git a/uncloud/uncloud_vm/__init__.py b/opennebula/__init__.py
similarity index 100%
rename from uncloud/uncloud_vm/__init__.py
rename to opennebula/__init__.py
diff --git a/uncloud/opennebula/admin.py b/opennebula/admin.py
similarity index 100%
rename from uncloud/opennebula/admin.py
rename to opennebula/admin.py
diff --git a/uncloud/opennebula/apps.py b/opennebula/apps.py
similarity index 100%
rename from uncloud/opennebula/apps.py
rename to opennebula/apps.py
diff --git a/uncloud/opennebula/management/commands/opennebula-synchosts.py b/opennebula/management/commands/opennebula-synchosts.py
similarity index 100%
rename from uncloud/opennebula/management/commands/opennebula-synchosts.py
rename to opennebula/management/commands/opennebula-synchosts.py
diff --git a/uncloud/opennebula/management/commands/opennebula-syncvms.py b/opennebula/management/commands/opennebula-syncvms.py
similarity index 88%
rename from uncloud/opennebula/management/commands/opennebula-syncvms.py
rename to opennebula/management/commands/opennebula-syncvms.py
index 458528b..3c12fa9 100644
--- a/uncloud/opennebula/management/commands/opennebula-syncvms.py
+++ b/opennebula/management/commands/opennebula-syncvms.py
@@ -1,12 +1,9 @@
import json
-import uncloud.secrets as secrets
-
-
from xmlrpc.client import ServerProxy as RPCClient
-
from django_auth_ldap.backend import LDAPBackend
from django.core.management.base import BaseCommand
+from django.conf import settings
from xmltodict import parse
from opennebula.models import VM as VMModel
@@ -19,9 +16,9 @@ class Command(BaseCommand):
pass
def handle(self, *args, **options):
- with RPCClient(secrets.OPENNEBULA_URL) as rpc_client:
+ with RPCClient(settings.OPENNEBULA_URL) as rpc_client:
success, response, *_ = rpc_client.one.vmpool.infoextended(
- secrets.OPENNEBULA_USER_PASS, -2, -1, -1, -1
+ settings.OPENNEBULA_USER_PASS, -2, -1, -1, -1
)
if success:
vms = json.loads(json.dumps(parse(response)))['VM_POOL']['VM']
diff --git a/uncloud/opennebula/management/commands/opennebula-to-uncloud.py b/opennebula/management/commands/opennebula-to-uncloud.py
similarity index 56%
rename from uncloud/opennebula/management/commands/opennebula-to-uncloud.py
rename to opennebula/management/commands/opennebula-to-uncloud.py
index dc7cb45..230159a 100644
--- a/uncloud/opennebula/management/commands/opennebula-to-uncloud.py
+++ b/opennebula/management/commands/opennebula-to-uncloud.py
@@ -1,13 +1,18 @@
+import sys
from datetime import datetime
from django.core.management.base import BaseCommand
from django.utils import timezone
+from django.contrib.auth import get_user_model
from opennebula.models import VM as VMModel
from uncloud_vm.models import VMHost, VMProduct, VMNetworkCard, VMDiskImageProduct, VMDiskProduct
from uncloud_pay.models import Order
+import logging
+
+log = logging.getLogger(__name__)
def convert_mac_to_int(mac_address: str):
# Remove octet connecting characters
@@ -41,24 +46,35 @@ def create_nics(one_vm, vm_product):
)
-def create_disk_and_image(one_vm, vm_product):
- for disk in one_vm.disks:
- owner = one_vm.owner
- name = disk.get('image')
+def sync_disk_and_image(one_vm, vm_product, disk_owner):
+ """
+    a) Check all opennebula disks: if one is not yet in the uncloud VM, add it
+    b) Check all uncloud disks and remove those that are not in the opennebula VM
+ """
- # TODO: Fix the following hard coded values
- is_os_image, is_public, status = True, True, 'active'
+ vmdisknum = 0
+
+ one_disks_extra_data = []
+
+ for disk in one_vm.disks:
+ vmowner = one_vm.owner
+ name = disk.get('image')
+ vmdisknum += 1
+
+ log.info("Checking disk {} for VM {}".format(name, one_vm))
+
+ is_os_image, is_public, status = True, False, 'active'
image_size_in_gb = disk.get('image_size_in_gb')
disk_size_in_gb = disk.get('size_in_gb')
- storage_class = disk.get('pool_name')
+ storage_class = disk.get('storage_class')
image_source = disk.get('source')
image_source_type = disk.get('source_type')
image, _ = VMDiskImageProduct.objects.update_or_create(
name=name,
defaults={
- 'owner': owner,
+ 'owner': disk_owner,
'is_os_image': is_os_image,
'is_public': is_public,
'size_in_gb': image_size_in_gb,
@@ -68,29 +84,59 @@ def create_disk_and_image(one_vm, vm_product):
'status': status
}
)
- VMDiskProduct.objects.update_or_create(
- owner=owner, vm=vm_product,
- defaults={
- 'image': image,
- 'size_in_gb': disk_size_in_gb
- }
- )
+ # identify vmdisk from opennebula - primary mapping key
+ extra_data = {
+ 'opennebula_vm': one_vm.vmid,
+ 'opennebula_size_in_gb': disk_size_in_gb,
+ 'opennebula_source': disk.get('opennebula_source'),
+ 'opennebula_disk_num': vmdisknum
+ }
+ # Save for comparing later
+ one_disks_extra_data.append(extra_data)
+
+ try:
+ vm_disk = VMDiskProduct.objects.get(extra_data=extra_data)
+ except VMDiskProduct.DoesNotExist:
+ vm_disk = VMDiskProduct.objects.create(
+ owner=vmowner,
+ vm=vm_product,
+ image=image,
+ size_in_gb=disk_size_in_gb,
+ extra_data=extra_data
+ )
+
+    # Now remove all disks that are not in the above extra_data list
+    for disk in VMDiskProduct.objects.filter(vm=vm_product):
+        extra_data = disk.extra_data
+        if extra_data not in one_disks_extra_data:
+ log.info("Removing disk {} from VM {}".format(disk, vm_product))
+ disk.delete()
+
+ disks = [ disk.extra_data for disk in VMDiskProduct.objects.filter(vm=vm_product) ]
+ log.info("VM {} has disks: {}".format(vm_product, disks))
class Command(BaseCommand):
help = 'Migrate Opennebula VM to regular (uncloud) vm'
+ def add_arguments(self, parser):
+        parser.add_argument('--disk-owner', required=True, help="The user who owns the opennebula disks")
+
def handle(self, *args, **options):
+ log.debug("{} {}".format(args, options))
+
+ disk_owner = get_user_model().objects.get(username=options['disk_owner'])
+
for one_vm in VMModel.objects.all():
if not one_vm.last_host:
- print("No VMHost for VM {} - VM might be on hold - skipping".format(one_vm.vmid))
+ log.warning("No VMHost for VM {} - VM might be on hold - skipping".format(one_vm.vmid))
continue
try:
vmhost = VMHost.objects.get(hostname=one_vm.last_host)
except VMHost.DoesNotExist:
- print("VMHost {} does not exist, aborting".format(one_vm.last_host))
+ log.error("VMHost {} does not exist, aborting".format(one_vm.last_host))
raise
cores = one_vm.cores
@@ -98,9 +144,6 @@ class Command(BaseCommand):
owner = one_vm.owner
status = 'active'
- # Total Amount of SSD Storage
- # TODO: What would happen if the attached storage is not SSD but HDD?
-
ssd_size = sum([ disk['size_in_gb'] for disk in one_vm.disks if disk['pool_name'] in ['ssd', 'one'] ])
hdd_size = sum([ disk['size_in_gb'] for disk in one_vm.disks if disk['pool_name'] in ['hdd'] ])
@@ -119,30 +162,32 @@ class Command(BaseCommand):
len(ipv4), len(ipv6))
try:
- vm_product = VMProduct.objects.get(name=one_vm.uncloud_name)
+ vm_product = VMProduct.objects.get(extra_data__opennebula_id=one_vm.vmid)
except VMProduct.DoesNotExist:
order = Order.objects.create(
owner=owner,
creation_date=creation_date,
starting_date=starting_date
-# one_time_price=one_time_price,
-# recurring_price=recurring_price,
-# recurring_period=recurring_period
)
- vm_product, _ = VMProduct.objects.update_or_create(
+ vm_product = VMProduct(
+ extra_data={ 'opennebula_id': one_vm.vmid },
name=one_vm.uncloud_name,
- defaults={
- 'cores': cores,
- 'ram_in_gb': ram_in_gb,
- 'owner': owner,
- 'vmhost': vmhost,
- 'order': order,
- 'status': status
- }
+ order=order
)
+ # we don't use update_or_create, as filtering by json AND setting json
+ # at the same time does not work
+
+ vm_product.vmhost = vmhost
+ vm_product.owner = owner
+ vm_product.cores = cores
+ vm_product.ram_in_gb = ram_in_gb
+ vm_product.status = status
+
+ vm_product.save()
+
# Create VMNetworkCards
create_nics(one_vm, vm_product)
# Create VMDiskImageProduct and VMDiskProduct
- create_disk_and_image(one_vm, vm_product)
+ sync_disk_and_image(one_vm, vm_product, disk_owner=disk_owner)
diff --git a/opennebula/migrations/0001_initial.py b/opennebula/migrations/0001_initial.py
new file mode 100644
index 0000000..9a135c6
--- /dev/null
+++ b/opennebula/migrations/0001_initial.py
@@ -0,0 +1,21 @@
+# Generated by Django 3.1 on 2020-12-13 10:38
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='VM',
+ fields=[
+ ('vmid', models.IntegerField(primary_key=True, serialize=False)),
+ ('data', models.JSONField()),
+ ],
+ ),
+ ]
diff --git a/uncloud/uncloud_vm/migrations/__init__.py b/opennebula/migrations/__init__.py
similarity index 100%
rename from uncloud/uncloud_vm/migrations/__init__.py
rename to opennebula/migrations/__init__.py
diff --git a/uncloud/opennebula/models.py b/opennebula/models.py
similarity index 88%
rename from uncloud/opennebula/models.py
rename to opennebula/models.py
index f5faeb5..f15b845 100644
--- a/uncloud/opennebula/models.py
+++ b/opennebula/models.py
@@ -1,13 +1,18 @@
import uuid
from django.db import models
from django.contrib.auth import get_user_model
-from django.contrib.postgres.fields import JSONField
+from uncloud_pay.models import Product
+# ungleich specific
+storage_class_mapping = {
+ 'one': 'ssd',
+ 'ssd': 'ssd',
+ 'hdd': 'hdd'
+}
class VM(models.Model):
vmid = models.IntegerField(primary_key=True)
- owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
- data = JSONField()
+ data = models.JSONField()
@property
def uncloud_name(self):
@@ -48,7 +53,8 @@ class VM(models.Model):
'pool_name': d['POOL_NAME'],
'image': d['IMAGE'],
'source': d['SOURCE'],
- 'source_type': d['TM_MAD']
+ 'source_type': d['TM_MAD'],
+ 'storage_class': storage_class_mapping[d['POOL_NAME']]
}
for d in disks
diff --git a/uncloud/opennebula/serializers.py b/opennebula/serializers.py
similarity index 55%
rename from uncloud/opennebula/serializers.py
rename to opennebula/serializers.py
index 8e0c513..cd00622 100644
--- a/uncloud/opennebula/serializers.py
+++ b/opennebula/serializers.py
@@ -5,4 +5,6 @@ from opennebula.models import VM
class OpenNebulaVMSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = VM
- fields = '__all__'
+ fields = [ 'vmid', 'owner', 'data',
+ 'uncloud_name', 'cores', 'ram_in_gb',
+ 'disks', 'nics', 'ips' ]
diff --git a/uncloud/opennebula/tests.py b/opennebula/tests.py
similarity index 100%
rename from uncloud/opennebula/tests.py
rename to opennebula/tests.py
diff --git a/opennebula/views.py b/opennebula/views.py
new file mode 100644
index 0000000..688f0b4
--- /dev/null
+++ b/opennebula/views.py
@@ -0,0 +1,16 @@
+from rest_framework import viewsets, permissions
+
+#from .models import VM
+# from .serializers import OpenNebulaVMSerializer
+
+# class VMViewSet(viewsets.ModelViewSet):
+# permission_classes = [permissions.IsAuthenticated]
+# serializer_class = OpenNebulaVMSerializer
+
+# def get_queryset(self):
+# if self.request.user.is_superuser:
+# obj = VM.objects.all()
+# else:
+# obj = VM.objects.filter(owner=self.request.user)
+
+# return obj
diff --git a/uncloud/requirements.txt b/requirements.txt
similarity index 51%
rename from uncloud/requirements.txt
rename to requirements.txt
index c7ebc65..adbda9c 100644
--- a/uncloud/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,12 @@
+# Django basics
django
djangorestframework
django-auth-ldap
-stripe
-xmltodict
+
psycopg2
+ldap3
+
+xmltodict
parsedatetime
@@ -11,3 +14,19 @@ parsedatetime
pyparsing
pydot
django-extensions
+
+# PDF creating
+django-hardcopy
+
+# schema support
+pyyaml
+uritemplate
+
+# Payment & VAT
+vat-validator
+stripe
+
+
+# Tasks
+celery
+redis
diff --git a/uncloud/ungleich_service/__init__.py b/resources/ci/.lock
similarity index 100%
rename from uncloud/ungleich_service/__init__.py
rename to resources/ci/.lock
diff --git a/resources/ci/Dockerfile b/resources/ci/Dockerfile
new file mode 100644
index 0000000..020b66e
--- /dev/null
+++ b/resources/ci/Dockerfile
@@ -0,0 +1,3 @@
+FROM fedora:latest
+
+RUN dnf install -y python3-devel python3-pip python3-coverage libpq-devel openldap-devel gcc chromium
diff --git a/vat_rates.csv b/resources/vat-rates.csv
similarity index 100%
rename from vat_rates.csv
rename to resources/vat-rates.csv
diff --git a/uncloud/.gitignore b/uncloud/.gitignore
index 71202e1..b03e0a5 100644
--- a/uncloud/.gitignore
+++ b/uncloud/.gitignore
@@ -1,4 +1,2 @@
-db.sqlite3
-uncloud/secrets.py
-debug.log
-uncloud/local_settings.py
\ No newline at end of file
+local_settings.py
+ldap_max_uid_file
\ No newline at end of file
diff --git a/uncloud/README-how-to-create-a-product.md b/uncloud/README-how-to-create-a-product.md
deleted file mode 100644
index 6ddd1fa..0000000
--- a/uncloud/README-how-to-create-a-product.md
+++ /dev/null
@@ -1,9 +0,0 @@
-## Introduction
-
-This document describes how to create a product and use it.
-
-A product (like a VMSnapshotproduct) creates an order when ordered.
-The "order" is used to combine products together.
-
-Sub-products or related products link to the same order.
-Each product has one (?) orderrecord
diff --git a/uncloud/README-object-relations.md b/uncloud/README-object-relations.md
deleted file mode 100644
index 58f2413..0000000
--- a/uncloud/README-object-relations.md
+++ /dev/null
@@ -1,82 +0,0 @@
-## Introduction
-
-This article describes how models relate to each other and what the
-design ideas are. It is meant to prevent us from double implementing
-something or changing something that is already solved.
-
-
-## Products
-
-A product is something someone can order. We might have "low level"
-products that need to be composed (= higher degree of flexibility, but
-more amount of details necessary) and "composed products" that present
-some defaults or select other products automatically (f.i. a "dual
-stack VM" can be a VM + a disk + an IPv4 address).
-
-
-## Bills
-
-Bills represent active orders of a month. Bills can be shown during a
-month but only become definitive at the end of the month.
-
-## Orders
-
-When customer X order a (set) of product, it generates an order for billing
-purposes. The ordered products point to that order and register an Order Record
-at creation.
-
-Orders and Order Records are assumed immutable => they are used to generate
-bills and should not be mutated. If a product is updated (e.g. adding RAM to
-VM), a new order should be generated.
-
-The order MUST NOT be deleted when a product is deleted, as it is used for
-billing (including past bills).
-
-### Order record
-
-Used to store billing details of a product at creation: will stay there even if
-the product change (e.g. new pricing, updated) and act as some kind of archive.
-Used to generate bills.
-
-## Payment Methods
-
-Users/customers can register payment methods.
-
-## Sample flows / products
-
-### A VM snapshot
-
-A VM snapshot creates a snapshot of all disks attached to a VM to be
-able to rollback the VM to a previous state.
-
-Creating a VM snapshot (-product) creates a related order. Deleting a
-VMSnapshotproduct sets the order to deleted.
-
-### Object Storage
-
-(tbd by Balazs)
-
-### A "raw" VM
-
-(tbd by Ahmed)
-
-### An IPv6 only VM
-
-(tbd by Ahmed)
-
-### A dual stack VM
-
-(tbd by Ahmed)
-
-### A managed service (e.g. Matrix-as-a-Service)
-
-Customer orders service with:
- * Service-specific configuration: e.g. domain name for matrix
- * VM configuration:
- - CPU
- - Memory
- - Disk (soon)
-
-It creates a new Order with two products/records:
- * Service itself (= management)
- * Underlying VM
diff --git a/uncloud/README.md b/uncloud/README.md
deleted file mode 100644
index 390a3af..0000000
--- a/uncloud/README.md
+++ /dev/null
@@ -1,95 +0,0 @@
-## Install
-
-### OS package requirements
-
-Alpine:
-
-```
-apk add openldap-dev postgresql-dev
-```
-
-Debian/Devuan:
-
-```
-apt install postgresql-server-dev-all
-```
-
-
-### Python requirements
-
-If you prefer using a venv, use:
-
-```
-python -m venv venv
-. ./venv/bin/activate
-```
-
-Then install the requirements
-
-```
-pip install -r requirements.txt
-```
-
-### Database requirements
-
-Due to the use of the JSONField, postgresql is required.
-
-First create a role to be used:
-
-```
-postgres=# create role nico login;
-```
-
-Then create the database owner by the new role:
-
-```
-postgres=# create database uncloud owner nico;
-```
-
-Installing the postgresql service is os dependent, but some hints:
-
-* Alpine: `apk add postgresql-server && rc-update add postgresql && rc-service postgresql start`
-* Debian/Devuan: `apt install postgresql`
-
-After postresql is started, apply the migrations:
-
-```
-python manage.py migrate
-```
-
-### Secrets
-
-cp `uncloud/secrets_sample.py` to `uncloud/secrets.py` and replace the
-sample values with real values.
-
-
-## Flows / Orders
-
-### Creating a VMHost
-
-
-
-### Creating a VM
-
-* Create a VMHost
-* Create a VM on a VMHost
-
-
-### Creating a VM Snapshot
-
-
-## Working Beta APIs
-
-These APIs can be used for internal testing.
-
-### URL Overview
-
-```
-http -a nicoschottelius:$(pass ungleich.ch/nico.schottelius@ungleich.ch) http://localhost:8000
-```
-
-### Snapshotting
-
-```
-http -a nicoschottelius:$(pass ungleich.ch/nico.schottelius@ungleich.ch) http://localhost:8000/vm/snapshot/ vm_uuid=$(uuidgen)
-```
diff --git a/uncloud/__init__.py b/uncloud/__init__.py
new file mode 100644
index 0000000..e073dd5
--- /dev/null
+++ b/uncloud/__init__.py
@@ -0,0 +1,254 @@
+from django.utils.translation import gettext_lazy as _
+import decimal
+from .celery import app as celery_app
+
+# Define DecimalField properties, used to represent amounts of money.
+AMOUNT_MAX_DIGITS=10
+AMOUNT_DECIMALS=2
+
+decimal.getcontext().prec = AMOUNT_DECIMALS
+
+# http://xml.coverpages.org/country3166.html
+COUNTRIES = (
+ ('AD', _('Andorra')),
+ ('AE', _('United Arab Emirates')),
+ ('AF', _('Afghanistan')),
+ ('AG', _('Antigua & Barbuda')),
+ ('AI', _('Anguilla')),
+ ('AL', _('Albania')),
+ ('AM', _('Armenia')),
+ ('AN', _('Netherlands Antilles')),
+ ('AO', _('Angola')),
+ ('AQ', _('Antarctica')),
+ ('AR', _('Argentina')),
+ ('AS', _('American Samoa')),
+ ('AT', _('Austria')),
+ ('AU', _('Australia')),
+ ('AW', _('Aruba')),
+ ('AZ', _('Azerbaijan')),
+ ('BA', _('Bosnia and Herzegovina')),
+ ('BB', _('Barbados')),
+ ('BD', _('Bangladesh')),
+ ('BE', _('Belgium')),
+ ('BF', _('Burkina Faso')),
+ ('BG', _('Bulgaria')),
+ ('BH', _('Bahrain')),
+ ('BI', _('Burundi')),
+ ('BJ', _('Benin')),
+ ('BM', _('Bermuda')),
+ ('BN', _('Brunei Darussalam')),
+ ('BO', _('Bolivia')),
+ ('BR', _('Brazil')),
+ ('BS', _('Bahama')),
+ ('BT', _('Bhutan')),
+ ('BV', _('Bouvet Island')),
+ ('BW', _('Botswana')),
+ ('BY', _('Belarus')),
+ ('BZ', _('Belize')),
+ ('CA', _('Canada')),
+ ('CC', _('Cocos (Keeling) Islands')),
+ ('CF', _('Central African Republic')),
+ ('CG', _('Congo')),
+ ('CH', _('Switzerland')),
+ ('CI', _('Ivory Coast')),
+    ('CK', _('Cook Islands')),
+ ('CL', _('Chile')),
+ ('CM', _('Cameroon')),
+ ('CN', _('China')),
+ ('CO', _('Colombia')),
+ ('CR', _('Costa Rica')),
+ ('CU', _('Cuba')),
+ ('CV', _('Cape Verde')),
+ ('CX', _('Christmas Island')),
+ ('CY', _('Cyprus')),
+ ('CZ', _('Czech Republic')),
+ ('DE', _('Germany')),
+ ('DJ', _('Djibouti')),
+ ('DK', _('Denmark')),
+ ('DM', _('Dominica')),
+ ('DO', _('Dominican Republic')),
+ ('DZ', _('Algeria')),
+ ('EC', _('Ecuador')),
+ ('EE', _('Estonia')),
+ ('EG', _('Egypt')),
+ ('EH', _('Western Sahara')),
+ ('ER', _('Eritrea')),
+ ('ES', _('Spain')),
+ ('ET', _('Ethiopia')),
+ ('FI', _('Finland')),
+ ('FJ', _('Fiji')),
+ ('FK', _('Falkland Islands (Malvinas)')),
+ ('FM', _('Micronesia')),
+ ('FO', _('Faroe Islands')),
+ ('FR', _('France')),
+ ('FX', _('France, Metropolitan')),
+ ('GA', _('Gabon')),
+ ('GB', _('United Kingdom (Great Britain)')),
+ ('GD', _('Grenada')),
+ ('GE', _('Georgia')),
+ ('GF', _('French Guiana')),
+ ('GH', _('Ghana')),
+ ('GI', _('Gibraltar')),
+ ('GL', _('Greenland')),
+ ('GM', _('Gambia')),
+ ('GN', _('Guinea')),
+ ('GP', _('Guadeloupe')),
+ ('GQ', _('Equatorial Guinea')),
+ ('GR', _('Greece')),
+ ('GS', _('South Georgia and the South Sandwich Islands')),
+ ('GT', _('Guatemala')),
+ ('GU', _('Guam')),
+ ('GW', _('Guinea-Bissau')),
+ ('GY', _('Guyana')),
+ ('HK', _('Hong Kong')),
+ ('HM', _('Heard & McDonald Islands')),
+ ('HN', _('Honduras')),
+ ('HR', _('Croatia')),
+ ('HT', _('Haiti')),
+ ('HU', _('Hungary')),
+ ('ID', _('Indonesia')),
+ ('IE', _('Ireland')),
+ ('IL', _('Israel')),
+ ('IN', _('India')),
+ ('IO', _('British Indian Ocean Territory')),
+ ('IQ', _('Iraq')),
+ ('IR', _('Islamic Republic of Iran')),
+ ('IS', _('Iceland')),
+ ('IT', _('Italy')),
+ ('JM', _('Jamaica')),
+ ('JO', _('Jordan')),
+ ('JP', _('Japan')),
+ ('KE', _('Kenya')),
+ ('KG', _('Kyrgyzstan')),
+ ('KH', _('Cambodia')),
+ ('KI', _('Kiribati')),
+ ('KM', _('Comoros')),
+ ('KN', _('St. Kitts and Nevis')),
+ ('KP', _('Korea, Democratic People\'s Republic of')),
+ ('KR', _('Korea, Republic of')),
+ ('KW', _('Kuwait')),
+ ('KY', _('Cayman Islands')),
+ ('KZ', _('Kazakhstan')),
+ ('LA', _('Lao People\'s Democratic Republic')),
+ ('LB', _('Lebanon')),
+ ('LC', _('Saint Lucia')),
+ ('LI', _('Liechtenstein')),
+ ('LK', _('Sri Lanka')),
+ ('LR', _('Liberia')),
+ ('LS', _('Lesotho')),
+ ('LT', _('Lithuania')),
+ ('LU', _('Luxembourg')),
+ ('LV', _('Latvia')),
+ ('LY', _('Libyan Arab Jamahiriya')),
+ ('MA', _('Morocco')),
+ ('MC', _('Monaco')),
+ ('MD', _('Moldova, Republic of')),
+ ('MG', _('Madagascar')),
+ ('MH', _('Marshall Islands')),
+ ('ML', _('Mali')),
+ ('MN', _('Mongolia')),
+ ('MM', _('Myanmar')),
+ ('MO', _('Macau')),
+ ('MP', _('Northern Mariana Islands')),
+ ('MQ', _('Martinique')),
+ ('MR', _('Mauritania')),
+ ('MS', _('Monserrat')),
+ ('MT', _('Malta')),
+ ('MU', _('Mauritius')),
+ ('MV', _('Maldives')),
+ ('MW', _('Malawi')),
+ ('MX', _('Mexico')),
+ ('MY', _('Malaysia')),
+ ('MZ', _('Mozambique')),
+ ('NA', _('Namibia')),
+ ('NC', _('New Caledonia')),
+ ('NE', _('Niger')),
+ ('NF', _('Norfolk Island')),
+ ('NG', _('Nigeria')),
+ ('NI', _('Nicaragua')),
+ ('NL', _('Netherlands')),
+ ('NO', _('Norway')),
+ ('NP', _('Nepal')),
+ ('NR', _('Nauru')),
+ ('NU', _('Niue')),
+ ('NZ', _('New Zealand')),
+ ('OM', _('Oman')),
+ ('PA', _('Panama')),
+ ('PE', _('Peru')),
+ ('PF', _('French Polynesia')),
+ ('PG', _('Papua New Guinea')),
+ ('PH', _('Philippines')),
+ ('PK', _('Pakistan')),
+ ('PL', _('Poland')),
+ ('PM', _('St. Pierre & Miquelon')),
+ ('PN', _('Pitcairn')),
+ ('PR', _('Puerto Rico')),
+ ('PT', _('Portugal')),
+ ('PW', _('Palau')),
+ ('PY', _('Paraguay')),
+ ('QA', _('Qatar')),
+ ('RE', _('Reunion')),
+ ('RO', _('Romania')),
+ ('RU', _('Russian Federation')),
+ ('RW', _('Rwanda')),
+ ('SA', _('Saudi Arabia')),
+ ('SB', _('Solomon Islands')),
+ ('SC', _('Seychelles')),
+ ('SD', _('Sudan')),
+ ('SE', _('Sweden')),
+ ('SG', _('Singapore')),
+ ('SH', _('St. Helena')),
+ ('SI', _('Slovenia')),
+ ('SJ', _('Svalbard & Jan Mayen Islands')),
+ ('SK', _('Slovakia')),
+ ('SL', _('Sierra Leone')),
+ ('SM', _('San Marino')),
+ ('SN', _('Senegal')),
+ ('SO', _('Somalia')),
+ ('SR', _('Suriname')),
+ ('ST', _('Sao Tome & Principe')),
+ ('SV', _('El Salvador')),
+ ('SY', _('Syrian Arab Republic')),
+ ('SZ', _('Swaziland')),
+ ('TC', _('Turks & Caicos Islands')),
+ ('TD', _('Chad')),
+ ('TF', _('French Southern Territories')),
+ ('TG', _('Togo')),
+ ('TH', _('Thailand')),
+ ('TJ', _('Tajikistan')),
+ ('TK', _('Tokelau')),
+ ('TM', _('Turkmenistan')),
+ ('TN', _('Tunisia')),
+ ('TO', _('Tonga')),
+ ('TP', _('East Timor')),
+ ('TR', _('Turkey')),
+ ('TT', _('Trinidad & Tobago')),
+ ('TV', _('Tuvalu')),
+ ('TW', _('Taiwan, Province of China')),
+ ('TZ', _('Tanzania, United Republic of')),
+ ('UA', _('Ukraine')),
+ ('UG', _('Uganda')),
+ ('UM', _('United States Minor Outlying Islands')),
+ ('US', _('United States of America')),
+ ('UY', _('Uruguay')),
+ ('UZ', _('Uzbekistan')),
+ ('VA', _('Vatican City State (Holy See)')),
+ ('VC', _('St. Vincent & the Grenadines')),
+ ('VE', _('Venezuela')),
+ ('VG', _('British Virgin Islands')),
+ ('VI', _('United States Virgin Islands')),
+ ('VN', _('Viet Nam')),
+ ('VU', _('Vanuatu')),
+ ('WF', _('Wallis & Futuna Islands')),
+ ('WS', _('Samoa')),
+ ('YE', _('Yemen')),
+ ('YT', _('Mayotte')),
+ ('YU', _('Yugoslavia')),
+ ('ZA', _('South Africa')),
+ ('ZM', _('Zambia')),
+ ('ZR', _('Zaire')),
+ ('ZW', _('Zimbabwe')),
+)
+
+
+__all__ = ('celery_app',)
diff --git a/uncloud/admin.py b/uncloud/admin.py
new file mode 100644
index 0000000..a89a574
--- /dev/null
+++ b/uncloud/admin.py
@@ -0,0 +1,6 @@
+from django.contrib import admin
+
+from .models import *
+
+for m in [ UncloudProvider, UncloudNetwork, UncloudTask ]:
+ admin.site.register(m)
diff --git a/uncloud/uncloud/asgi.py b/uncloud/asgi.py
similarity index 100%
rename from uncloud/uncloud/asgi.py
rename to uncloud/asgi.py
diff --git a/uncloud/celery.py b/uncloud/celery.py
new file mode 100644
index 0000000..3408634
--- /dev/null
+++ b/uncloud/celery.py
@@ -0,0 +1,17 @@
+import os
+
+from celery import Celery
+
+# set the default Django settings module for the 'celery' program.
+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uncloud.settings')
+
+app = Celery('uncloud')
+
+# Using a string here means the worker doesn't have to serialize
+# the configuration object to child processes.
+# - namespace='CELERY' means all celery-related configuration keys
+# should have a `CELERY_` prefix.
+app.config_from_object('django.conf:settings', namespace='CELERY')
+
+# Load task modules from all registered Django app configs.
+app.autodiscover_tasks()
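+
+# A worker for this project is typically started with:
+#   celery -A uncloud worker -l info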
diff --git a/uncloud/management/commands/db-add-defaults.py b/uncloud/management/commands/db-add-defaults.py
new file mode 100644
index 0000000..605c8f5
--- /dev/null
+++ b/uncloud/management/commands/db-add-defaults.py
@@ -0,0 +1,43 @@
+import random
+import string
+
+from django.core.management.base import BaseCommand
+from django.core.exceptions import ObjectDoesNotExist
+from django.contrib.auth import get_user_model
+from django.conf import settings
+
+from uncloud_pay.models import BillingAddress, RecurringPeriod, Product
+from uncloud.models import UncloudProvider, UncloudNetwork
+
+
+class Command(BaseCommand):
+ help = 'Add standard uncloud values'
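+    # Invoked as: python manage.py db-add-defaults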
+
+ def add_arguments(self, parser):
+ pass
+
+ def handle(self, *args, **options):
+ # Order matters, objects can be dependent on each other
+
+        admin_username = settings.UNCLOUD_ADMIN_NAME
+ pw_length = 32
+
+ # Only set password if the user did not exist before
+ try:
+ admin_user = get_user_model().objects.get(username=settings.UNCLOUD_ADMIN_NAME)
+ except ObjectDoesNotExist:
+ random_password = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(pw_length))
+
+ admin_user = get_user_model().objects.create_user(username=settings.UNCLOUD_ADMIN_NAME, password=random_password)
+ admin_user.is_superuser=True
+ admin_user.is_staff=True
+ admin_user.save()
+
+ print(f"Created admin user '{admin_username}' with password '{random_password}'")
+
+ BillingAddress.populate_db_defaults()
+ RecurringPeriod.populate_db_defaults()
+ Product.populate_db_defaults()
+
+ UncloudNetwork.populate_db_defaults()
+ UncloudProvider.populate_db_defaults()
diff --git a/uncloud/management/commands/uncloud.py b/uncloud/management/commands/uncloud.py
new file mode 100644
index 0000000..bd47c6b
--- /dev/null
+++ b/uncloud/management/commands/uncloud.py
@@ -0,0 +1,28 @@
+import sys
+from datetime import datetime
+
+from django.core.management.base import BaseCommand
+
+from django.contrib.auth import get_user_model
+
+from opennebula.models import VM as VMModel
+from uncloud_vm.models import VMHost, VMProduct, VMNetworkCard, VMDiskImageProduct, VMDiskProduct, VMCluster
+
+import logging
+log = logging.getLogger(__name__)
+
+
+class Command(BaseCommand):
+ help = 'General uncloud commands'
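+    # Invoked as: python manage.py uncloud --bootstrap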
+
+ def add_arguments(self, parser):
+ parser.add_argument('--bootstrap', action='store_true', help='Bootstrap a typical uncloud installation')
+
+ def handle(self, *args, **options):
+
+ if options['bootstrap']:
+ self.bootstrap()
+
+ def bootstrap(self):
+        default_cluster, _ = VMCluster.objects.get_or_create(name="default")
+# local_host =
diff --git a/uncloud/migrations/0001_initial.py b/uncloud/migrations/0001_initial.py
new file mode 100644
index 0000000..10d1144
--- /dev/null
+++ b/uncloud/migrations/0001_initial.py
@@ -0,0 +1,46 @@
+# Generated by Django 3.1 on 2020-12-13 10:38
+
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+import uncloud.models
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='UncloudNetwork',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('network_address', models.GenericIPAddressField(unique=True)),
+ ('network_mask', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(128)])),
+ ('description', models.CharField(max_length=256)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='UncloudProvider',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('full_name', models.CharField(max_length=256)),
+ ('organization', models.CharField(blank=True, max_length=256, null=True)),
+ ('street', models.CharField(max_length=256)),
+ ('city', models.CharField(max_length=256)),
+ ('postal_code', models.CharField(max_length=64)),
+ ('country', uncloud.models.CountryField(blank=True, choices=[('AD', 'Andorra'), ('AE', 'United Arab Emirates'), ('AF', 'Afghanistan'), ('AG', 'Antigua & Barbuda'), ('AI', 'Anguilla'), ('AL', 'Albania'), ('AM', 'Armenia'), ('AN', 'Netherlands Antilles'), ('AO', 'Angola'), ('AQ', 'Antarctica'), ('AR', 'Argentina'), ('AS', 'American Samoa'), ('AT', 'Austria'), ('AU', 'Australia'), ('AW', 'Aruba'), ('AZ', 'Azerbaijan'), ('BA', 'Bosnia and Herzegovina'), ('BB', 'Barbados'), ('BD', 'Bangladesh'), ('BE', 'Belgium'), ('BF', 'Burkina Faso'), ('BG', 'Bulgaria'), ('BH', 'Bahrain'), ('BI', 'Burundi'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BN', 'Brunei Darussalam'), ('BO', 'Bolivia'), ('BR', 'Brazil'), ('BS', 'Bahama'), ('BT', 'Bhutan'), ('BV', 'Bouvet Island'), ('BW', 'Botswana'), ('BY', 'Belarus'), ('BZ', 'Belize'), ('CA', 'Canada'), ('CC', 'Cocos (Keeling) Islands'), ('CF', 'Central African Republic'), ('CG', 'Congo'), ('CH', 'Switzerland'), ('CI', 'Ivory Coast'), ('CK', 'Cook Iislands'), ('CL', 'Chile'), ('CM', 'Cameroon'), ('CN', 'China'), ('CO', 'Colombia'), ('CR', 'Costa Rica'), ('CU', 'Cuba'), ('CV', 'Cape Verde'), ('CX', 'Christmas Island'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DE', 'Germany'), ('DJ', 'Djibouti'), ('DK', 'Denmark'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('DZ', 'Algeria'), ('EC', 'Ecuador'), ('EE', 'Estonia'), ('EG', 'Egypt'), ('EH', 'Western Sahara'), ('ER', 'Eritrea'), ('ES', 'Spain'), ('ET', 'Ethiopia'), ('FI', 'Finland'), ('FJ', 'Fiji'), ('FK', 'Falkland Islands (Malvinas)'), ('FM', 'Micronesia'), ('FO', 'Faroe Islands'), ('FR', 'France'), ('FX', 'France, Metropolitan'), ('GA', 'Gabon'), ('GB', 'United Kingdom (Great Britain)'), ('GD', 'Grenada'), ('GE', 'Georgia'), ('GF', 'French Guiana'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GL', 'Greenland'), ('GM', 'Gambia'), ('GN', 'Guinea'), ('GP', 'Guadeloupe'), ('GQ', 'Equatorial Guinea'), ('GR', 'Greece'), ('GS', 'South Georgia and the South Sandwich Islands'), ('GT', 'Guatemala'), ('GU', 'Guam'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HK', 'Hong Kong'), ('HM', 'Heard & McDonald Islands'), ('HN', 'Honduras'), ('HR', 'Croatia'), ('HT', 'Haiti'), ('HU', 'Hungary'), ('ID', 'Indonesia'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IN', 'India'), ('IO', 'British Indian Ocean Territory'), ('IQ', 'Iraq'), ('IR', 'Islamic Republic of Iran'), ('IS', 'Iceland'), ('IT', 'Italy'), ('JM', 'Jamaica'), ('JO', 'Jordan'), ('JP', 'Japan'), ('KE', 'Kenya'), ('KG', 'Kyrgyzstan'), ('KH', 'Cambodia'), ('KI', 'Kiribati'), ('KM', 'Comoros'), ('KN', 'St. 
Kitts and Nevis'), ('KP', "Korea, Democratic People's Republic of"), ('KR', 'Korea, Republic of'), ('KW', 'Kuwait'), ('KY', 'Cayman Islands'), ('KZ', 'Kazakhstan'), ('LA', "Lao People's Democratic Republic"), ('LB', 'Lebanon'), ('LC', 'Saint Lucia'), ('LI', 'Liechtenstein'), ('LK', 'Sri Lanka'), ('LR', 'Liberia'), ('LS', 'Lesotho'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('LV', 'Latvia'), ('LY', 'Libyan Arab Jamahiriya'), ('MA', 'Morocco'), ('MC', 'Monaco'), ('MD', 'Moldova, Republic of'), ('MG', 'Madagascar'), ('MH', 'Marshall Islands'), ('ML', 'Mali'), ('MN', 'Mongolia'), ('MM', 'Myanmar'), ('MO', 'Macau'), ('MP', 'Northern Mariana Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MS', 'Monserrat'), ('MT', 'Malta'), ('MU', 'Mauritius'), ('MV', 'Maldives'), ('MW', 'Malawi'), ('MX', 'Mexico'), ('MY', 'Malaysia'), ('MZ', 'Mozambique'), ('NA', 'Namibia'), ('NC', 'New Caledonia'), ('NE', 'Niger'), ('NF', 'Norfolk Island'), ('NG', 'Nigeria'), ('NI', 'Nicaragua'), ('NL', 'Netherlands'), ('NO', 'Norway'), ('NP', 'Nepal'), ('NR', 'Nauru'), ('NU', 'Niue'), ('NZ', 'New Zealand'), ('OM', 'Oman'), ('PA', 'Panama'), ('PE', 'Peru'), ('PF', 'French Polynesia'), ('PG', 'Papua New Guinea'), ('PH', 'Philippines'), ('PK', 'Pakistan'), ('PL', 'Poland'), ('PM', 'St. Pierre & Miquelon'), ('PN', 'Pitcairn'), ('PR', 'Puerto Rico'), ('PT', 'Portugal'), ('PW', 'Palau'), ('PY', 'Paraguay'), ('QA', 'Qatar'), ('RE', 'Reunion'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('SA', 'Saudi Arabia'), ('SB', 'Solomon Islands'), ('SC', 'Seychelles'), ('SD', 'Sudan'), ('SE', 'Sweden'), ('SG', 'Singapore'), ('SH', 'St. Helena'), ('SI', 'Slovenia'), ('SJ', 'Svalbard & Jan Mayen Islands'), ('SK', 'Slovakia'), ('SL', 'Sierra Leone'), ('SM', 'San Marino'), ('SN', 'Senegal'), ('SO', 'Somalia'), ('SR', 'Suriname'), ('ST', 'Sao Tome & Principe'), ('SV', 'El Salvador'), ('SY', 'Syrian Arab Republic'), ('SZ', 'Swaziland'), ('TC', 'Turks & Caicos Islands'), ('TD', 'Chad'), ('TF', 'French Southern Territories'), ('TG', 'Togo'), ('TH', 'Thailand'), ('TJ', 'Tajikistan'), ('TK', 'Tokelau'), ('TM', 'Turkmenistan'), ('TN', 'Tunisia'), ('TO', 'Tonga'), ('TP', 'East Timor'), ('TR', 'Turkey'), ('TT', 'Trinidad & Tobago'), ('TV', 'Tuvalu'), ('TW', 'Taiwan, Province of China'), ('TZ', 'Tanzania, United Republic of'), ('UA', 'Ukraine'), ('UG', 'Uganda'), ('UM', 'United States Minor Outlying Islands'), ('US', 'United States of America'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VA', 'Vatican City State (Holy See)'), ('VC', 'St. Vincent & the Grenadines'), ('VE', 'Venezuela'), ('VG', 'British Virgin Islands'), ('VI', 'United States Virgin Islands'), ('VN', 'Viet Nam'), ('VU', 'Vanuatu'), ('WF', 'Wallis & Futuna Islands'), ('WS', 'Samoa'), ('YE', 'Yemen'), ('YT', 'Mayotte'), ('YU', 'Yugoslavia'), ('ZA', 'South Africa'), ('ZM', 'Zambia'), ('ZR', 'Zaire'), ('ZW', 'Zimbabwe')], default='CH', max_length=2)),
+ ('starting_date', models.DateField()),
+ ('ending_date', models.DateField(blank=True, null=True)),
+ ('billing_network', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='uncloudproviderbill', to='uncloud.uncloudnetwork')),
+ ('coupon_network', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='uncloudprovidercoupon', to='uncloud.uncloudnetwork')),
+ ('referral_network', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='uncloudproviderreferral', to='uncloud.uncloudnetwork')),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ ]
diff --git a/uncloud/migrations/0002_uncloudtasks.py b/uncloud/migrations/0002_uncloudtasks.py
new file mode 100644
index 0000000..9c69606
--- /dev/null
+++ b/uncloud/migrations/0002_uncloudtasks.py
@@ -0,0 +1,19 @@
+# Generated by Django 3.1 on 2020-12-20 17:16
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='UncloudTasks',
+ fields=[
+ ('task_id', models.UUIDField(primary_key=True, serialize=False)),
+ ],
+ ),
+ ]
diff --git a/uncloud/migrations/0003_auto_20201220_1728.py b/uncloud/migrations/0003_auto_20201220_1728.py
new file mode 100644
index 0000000..2ec0eec
--- /dev/null
+++ b/uncloud/migrations/0003_auto_20201220_1728.py
@@ -0,0 +1,17 @@
+# Generated by Django 3.1 on 2020-12-20 17:28
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud', '0002_uncloudtasks'),
+ ]
+
+ operations = [
+ migrations.RenameModel(
+ old_name='UncloudTasks',
+ new_name='UncloudTask',
+ ),
+ ]
diff --git a/uncloud/ungleich_service/migrations/__init__.py b/uncloud/migrations/__init__.py
similarity index 100%
rename from uncloud/ungleich_service/migrations/__init__.py
rename to uncloud/migrations/__init__.py
diff --git a/uncloud/models.py b/uncloud/models.py
new file mode 100644
index 0000000..5545303
--- /dev/null
+++ b/uncloud/models.py
@@ -0,0 +1,172 @@
+from django.db import models
+from django.db.models import JSONField, Q
+from django.utils import timezone
+from django.utils.translation import gettext_lazy as _
+from django.core.validators import MinValueValidator, MaxValueValidator
+from django.core.exceptions import FieldError
+
+from uncloud import COUNTRIES
+
+class UncloudModel(models.Model):
+ """
+ This class extends the standard model with an
+ extra_data field that can be used to include public,
+ but internal information.
+
+ For instance if you migrate from an existing virtualisation
+ framework to uncloud.
+
+ The extra_data attribute should be considered a hack and whenever
+ data is necessary for running uncloud, it should **not** be stored
+ in there.
+
+ """
+
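+    # For example, the OpenNebula importer stores {'opennebula_id': <vmid>}
+    # here to map an imported VM back to its OpenNebula counterpart.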
+ extra_data = JSONField(editable=False, blank=True, null=True)
+
+ class Meta:
+ abstract = True
+
+# See https://docs.djangoproject.com/en/dev/ref/models/fields/#field-choices-enum-types
+class UncloudStatus(models.TextChoices):
+ PENDING = 'PENDING', _('Pending')
+ AWAITING_PAYMENT = 'AWAITING_PAYMENT', _('Awaiting payment')
+ BEING_CREATED = 'BEING_CREATED', _('Being created')
+ SCHEDULED = 'SCHEDULED', _('Scheduled') # resource selected, waiting for dispatching
+ ACTIVE = 'ACTIVE', _('Active')
+ MODIFYING = 'MODIFYING', _('Modifying') # Resource is being changed
+ DELETED = 'DELETED', _('Deleted') # Resource has been deleted
+ DISABLED = 'DISABLED', _('Disabled') # Is usable, but cannot be used for new things
+    UNUSABLE = 'UNUSABLE', _('Unusable') # Has some kind of error
+
+
+
+###
+# General address handling
+class CountryField(models.CharField):
+ def __init__(self, *args, **kwargs):
+ kwargs.setdefault('choices', COUNTRIES)
+ kwargs.setdefault('default', 'CH')
+ kwargs.setdefault('max_length', 2)
+
+ super().__init__(*args, **kwargs)
+
+ def get_internal_type(self):
+ return "CharField"
+
+
+class UncloudAddress(models.Model):
+ full_name = models.CharField(max_length=256)
+ organization = models.CharField(max_length=256, blank=True, null=True)
+ street = models.CharField(max_length=256)
+ city = models.CharField(max_length=256)
+ postal_code = models.CharField(max_length=64)
+ country = CountryField(blank=True)
+
+ class Meta:
+ abstract = True
+
+
+###
+# UncloudNetworks are used as identifiers; as such they are a basic building block of uncloud
+
+class UncloudNetwork(models.Model):
+ """
+ Storing IP networks
+ """
+
+ network_address = models.GenericIPAddressField(null=False, unique=True)
+ network_mask = models.IntegerField(null=False,
+ validators=[MinValueValidator(0),
+ MaxValueValidator(128)]
+ )
+
+ description = models.CharField(max_length=256)
+
+ @classmethod
+ def populate_db_defaults(cls):
+ for net, desc in [
+ ( "2a0a:e5c0:11::", "uncloud Billing" ),
+ ( "2a0a:e5c0:11:1::", "uncloud Referral" ),
+ ( "2a0a:e5c0:11:2::", "uncloud Coupon" )
+ ]:
+ obj, created = cls.objects.get_or_create(network_address=net,
+ defaults= {
+ 'network_mask': 64,
+ 'description': desc
+ }
+ )
+
+
+ def save(self, *args, **kwargs):
+        if ':' not in self.network_address and self.network_mask > 32:
+ raise FieldError("Mask cannot exceed 32 for IPv4")
+
+ super().save(*args, **kwargs)
+
+
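+    # Renders e.g. "2a0a:e5c0:11::/64 uncloud Billing" for the default billing network.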
+ def __str__(self):
+ return f"{self.network_address}/{self.network_mask} {self.description}"
+
+###
+# Who is running / providing this instance of uncloud?
+
+class UncloudProvider(UncloudAddress):
+ """
+    A class representing who is running this uncloud instance.
+    This might change over time, so we allow starting/ending dates.
+
+    This also defines the taxation rules.
+
+    starting_date/ending_date define the period during which this record is
+    valid. This way we can model address changes and keep past bills correct.
+ """
+
+ # Meta:
+ # FIXMe: only allow non overlapping time frames -- how to define this as a constraint?
+ starting_date = models.DateField()
+ ending_date = models.DateField(blank=True, null=True)
+
+ billing_network = models.ForeignKey(UncloudNetwork, related_name="uncloudproviderbill", on_delete=models.CASCADE)
+ referral_network = models.ForeignKey(UncloudNetwork, related_name="uncloudproviderreferral", on_delete=models.CASCADE)
+ coupon_network = models.ForeignKey(UncloudNetwork, related_name="uncloudprovidercoupon", on_delete=models.CASCADE)
+
+
+ @classmethod
+ def get_provider(cls, when=None):
+ """
+        Find the provider that was active at a certain time, if there was one.
+ """
+
+ if not when:
+ when = timezone.now()
+
+
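+        # Note: .get() raises DoesNotExist if no provider covers `when` and
+        # MultipleObjectsReturned if configured time frames overlap.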
+        return cls.objects.get(Q(starting_date__lte=when, ending_date__gte=when) |
+                               Q(starting_date__lte=when, ending_date__isnull=True))
+
+
+ @classmethod
+ def populate_db_defaults(cls):
+ obj, created = cls.objects.get_or_create(full_name="ungleich glarus ag",
+ street="Bahnhofstrasse 1",
+ postal_code="8783",
+ city="Linthal",
+ country="CH",
+ starting_date=timezone.now(),
+ billing_network=UncloudNetwork.objects.get(description="uncloud Billing"),
+ referral_network=UncloudNetwork.objects.get(description="uncloud Referral"),
+ coupon_network=UncloudNetwork.objects.get(description="uncloud Coupon")
+ )
+
+
+ def __str__(self):
+ return f"{self.full_name} {self.country}"
+
+
+class UncloudTask(models.Model):
+ """
+    Stores the IDs of dispatched celery tasks that still need to be handled.
+ """
+
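+    # Completed tasks are pruned by the periodic uncloud.tasks.cleanup_tasks job.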
+ task_id = models.UUIDField(primary_key=True)
diff --git a/uncloud/opennebula/migrations/0001_initial.py b/uncloud/opennebula/migrations/0001_initial.py
deleted file mode 100644
index 4c0527a..0000000
--- a/uncloud/opennebula/migrations/0001_initial.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Generated by Django 3.0.3 on 2020-02-23 17:12
-
-from django.conf import settings
-import django.contrib.postgres.fields.jsonb
-from django.db import migrations, models
-import django.db.models.deletion
-import uuid
-
-
-class Migration(migrations.Migration):
-
- initial = True
-
- dependencies = [
- migrations.swappable_dependency(settings.AUTH_USER_MODEL),
- ]
-
- operations = [
- migrations.CreateModel(
- name='VM',
- fields=[
- ('vmid', models.IntegerField(primary_key=True, serialize=False)),
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
- ('data', django.contrib.postgres.fields.jsonb.JSONField()),
- ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
- ],
- ),
- ]
diff --git a/uncloud/opennebula/migrations/0002_auto_20200225_1335.py b/uncloud/opennebula/migrations/0002_auto_20200225_1335.py
deleted file mode 100644
index 1554aa6..0000000
--- a/uncloud/opennebula/migrations/0002_auto_20200225_1335.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Generated by Django 3.0.3 on 2020-02-25 13:35
-
-from django.db import migrations, models
-import uuid
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('opennebula', '0001_initial'),
- ]
-
- operations = [
- migrations.RemoveField(
- model_name='vm',
- name='uuid',
- ),
- migrations.RemoveField(
- model_name='vm',
- name='vmid',
- ),
- migrations.AddField(
- model_name='vm',
- name='id',
- field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, unique=True),
- ),
- ]
diff --git a/uncloud/opennebula/migrations/0003_auto_20200225_1428.py b/uncloud/opennebula/migrations/0003_auto_20200225_1428.py
deleted file mode 100644
index 8bb3d8d..0000000
--- a/uncloud/opennebula/migrations/0003_auto_20200225_1428.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Generated by Django 3.0.3 on 2020-02-25 14:28
-
-from django.db import migrations, models
-import uuid
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('opennebula', '0002_auto_20200225_1335'),
- ]
-
- operations = [
- migrations.AlterField(
- model_name='vm',
- name='id',
- field=models.CharField(default=uuid.uuid4, max_length=64, primary_key=True, serialize=False, unique=True),
- ),
- ]
diff --git a/uncloud/opennebula/migrations/0004_auto_20200225_1816.py b/uncloud/opennebula/migrations/0004_auto_20200225_1816.py
deleted file mode 100644
index 5b39f26..0000000
--- a/uncloud/opennebula/migrations/0004_auto_20200225_1816.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Generated by Django 3.0.3 on 2020-02-25 18:16
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('opennebula', '0003_auto_20200225_1428'),
- ]
-
- operations = [
- migrations.RemoveField(
- model_name='vm',
- name='id',
- ),
- migrations.AddField(
- model_name='vm',
- name='vmid',
- field=models.IntegerField(default=42, primary_key=True, serialize=False),
- preserve_default=False,
- ),
- ]
diff --git a/uncloud/opennebula/views.py b/uncloud/opennebula/views.py
deleted file mode 100644
index 89b1a52..0000000
--- a/uncloud/opennebula/views.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from rest_framework import viewsets, permissions
-
-from .models import VM
-from .serializers import OpenNebulaVMSerializer
-
-class VMViewSet(viewsets.ModelViewSet):
- permission_classes = [permissions.IsAuthenticated]
- serializer_class = OpenNebulaVMSerializer
-
- def get_queryset(self):
- if self.request.user.is_superuser:
- obj = VM.objects.all()
- else:
- obj = VM.objects.filter(owner=self.request.user)
-
- return obj
diff --git a/uncloud/uncloud/settings.py b/uncloud/settings.py
similarity index 58%
rename from uncloud/uncloud/settings.py
rename to uncloud/settings.py
index 99cf7a1..ae734dc 100644
--- a/uncloud/uncloud/settings.py
+++ b/uncloud/settings.py
@@ -11,41 +11,33 @@ https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
+import re
import ldap
-# Uncommitted file with secrets
-import uncloud.secrets
-
+from django.core.management.utils import get_random_secret_key
from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion
-# Uncommitted file with local settings i.e logging
-try:
- from uncloud.local_settings import LOGGING, DATABASES
-except ModuleNotFoundError:
- LOGGING = {}
- # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
- DATABASES = {
- 'default': {
- 'ENGINE': 'django.db.backends.postgresql',
- 'NAME': uncloud.secrets.POSTGRESQL_DB_NAME,
- }
- }
+LOGGING = {}
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.sqlite3',
+ 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
+ }
+}
+
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
-# SECURITY WARNING: keep the secret key used in production secret!
-SECRET_KEY = uncloud.secrets.SECRET_KEY
-
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
-ALLOWED_HOSTS = []
# Application definition
@@ -59,11 +51,13 @@ INSTALLED_APPS = [
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
+ 'uncloud',
'uncloud_pay',
'uncloud_auth',
+ 'uncloud_net',
'uncloud_storage',
'uncloud_vm',
- 'ungleich_service',
+ 'uncloud_service',
'opennebula'
]
@@ -119,7 +113,12 @@ AUTH_PASSWORD_VALIDATORS = [
################################################################################
# AUTH/LDAP
-AUTH_LDAP_SERVER_URI = uncloud.secrets.LDAP_SERVER_URI
+AUTH_LDAP_SERVER_URI = ""
+AUTH_LDAP_BIND_DN = ""
+AUTH_LDAP_BIND_PASSWORD = ""
+AUTH_LDAP_USER_SEARCH = LDAPSearch("dc=example,dc=com",
+ ldap.SCOPE_SUBTREE,
+ "(uid=%(user)s)")
AUTH_LDAP_USER_ATTR_MAP = {
"first_name": "givenName",
@@ -127,13 +126,6 @@ AUTH_LDAP_USER_ATTR_MAP = {
"email": "mail"
}
-
-AUTH_LDAP_BIND_DN = uncloud.secrets.LDAP_ADMIN_DN
-AUTH_LDAP_BIND_PASSWORD = uncloud.secrets.LDAP_ADMIN_PASSWORD
-
-AUTH_LDAP_USER_SEARCH = LDAPSearch("dc=ungleich,dc=ch", ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
-
-
################################################################################
# AUTH/Django
AUTHENTICATION_BACKENDS = [
@@ -154,7 +146,6 @@ REST_FRAMEWORK = {
}
-
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
@@ -171,5 +162,79 @@ USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
-
STATIC_URL = '/static/'
+STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static") ]
+
+# XML-RPC interface of opennebula
+OPENNEBULA_URL = 'https://opennebula.example.com:2634/RPC2'
+
+# user:pass for accessing opennebula
+OPENNEBULA_USER_PASS = 'user:password'
+
+# Stripe (Credit Card payments)
+STRIPE_KEY=""
+STRIPE_PUBLIC_KEY=""
+
+# The django secret key
+SECRET_KEY=get_random_secret_key()
+
+ALLOWED_HOSTS = []
+
+# required for hardcopy / pdf rendering: https://github.com/loftylabs/django-hardcopy
+CHROME_PATH = '/usr/bin/chromium-browser'
+
+# Username that is created by default and owns the configuration objects
+UNCLOUD_ADMIN_NAME = "uncloud-admin"
+
+LOGIN_REDIRECT_URL = '/'
+LOGOUT_REDIRECT_URL = '/'
+
+# replace these in local_settings.py
+AUTH_LDAP_SERVER_URI = "ldaps://ldap1.example.com,ldaps://ldap2.example.com"
+AUTH_LDAP_BIND_DN="uid=django,ou=system,dc=example,dc=com"
+AUTH_LDAP_BIND_PASSWORD="a very secure ldap password"
+AUTH_LDAP_USER_SEARCH = LDAPSearch("dc=example,dc=com",
+ ldap.SCOPE_SUBTREE,
+ "(uid=%(user)s)")
+
+# where to create customers
+LDAP_CUSTOMER_DN="ou=customer,dc=example,dc=com"
+
+# def route_task(name, args, kwargs, options, task=None, **kw):
+# print(f"{name} - {args} - {kwargs}")
+# # if name == 'myapp.tasks.compress_video':
+# return {'queue': 'vpn1' }
+# # 'exchange_type': 'topic',
+# # 'routing_key': 'video.compress'}
+
+
+# CELERY_TASK_ROUTES = (route_task,)
+
+# CELERY_TASK_ROUTES = {
+# '*': {
+# 'queue': 'vpn1'
+# }
+# }
+
+
+CELERY_BROKER_URL = 'redis://uncloud.example.com:6379/0'
+CELERY_RESULT_BACKEND = 'redis://uncloud.example.com:6379/0'
+
+CELERY_TASK_ROUTES = {
+ re.compile(r'.*.tasks.cdist.*'): { 'queue': 'cdist' } # cdist tasks go into cdist queue
+}
+
+CELERY_BEAT_SCHEDULE = {
+ 'cleanup_tasks': {
+ 'task': 'uncloud.tasks.cleanup_tasks',
+ 'schedule': 10
+ }
+}
+
+# CELERY_TASK_CREATE_MISSING_QUEUES = False
+
+# Overwrite settings with local settings, if existing
+try:
+ from uncloud.local_settings import *
+except (ModuleNotFoundError, ImportError):
+ pass
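+
+# A typical local_settings.py overrides e.g. DATABASES, SECRET_KEY, ALLOWED_HOSTS
+# and the AUTH_LDAP_* values defined above.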
diff --git a/uncloud/tasks.py b/uncloud/tasks.py
new file mode 100644
index 0000000..5a13ec5
--- /dev/null
+++ b/uncloud/tasks.py
@@ -0,0 +1,19 @@
+from celery import shared_task
+from celery.result import AsyncResult
+
+from .models import UncloudTask
+
+@shared_task(bind=True)
+def cleanup_tasks(self):
+ print(f"Cleanup time from {self}: {self.request.id}")
+ for task in UncloudTask.objects.all():
+ print(f"Pruning {task}...")
+
+ if str(task.task_id) == str(self.request.id):
+ print("Skipping myself")
+ continue
+
+ res = AsyncResult(id=str(task.task_id))
+ if res.ready():
+ print(res.get())
+ task.delete()
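+
+# cleanup_tasks is scheduled every 10 seconds via CELERY_BEAT_SCHEDULE in
+# uncloud/settings.py.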
diff --git a/uncloud/templates/uncloud/base.html b/uncloud/templates/uncloud/base.html
new file mode 100644
index 0000000..034fa7c
--- /dev/null
+++ b/uncloud/templates/uncloud/base.html
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+ {% block title %}Welcome to uncloud{% endblock %}
+ {% block header %}{% endblock %}
+
+
+ {% block body %}{% endblock %}
+
+
diff --git a/uncloud/templates/uncloud/index.html b/uncloud/templates/uncloud/index.html
new file mode 100644
index 0000000..b40c3b4
--- /dev/null
+++ b/uncloud/templates/uncloud/index.html
@@ -0,0 +1,15 @@
+{% extends 'uncloud/base.html' %}
+{% block title %}{% endblock %}
+
+{% block body %}
+
+
+Welcome to uncloud
+ Welcome to uncloud, check out the following locations:
+
+
+
+
+{% endblock %}
diff --git a/uncloud/uncloud/.gitignore b/uncloud/uncloud/.gitignore
deleted file mode 100644
index ef418f5..0000000
--- a/uncloud/uncloud/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-secrets.py
diff --git a/uncloud/uncloud/__init__.py b/uncloud/uncloud/__init__.py
deleted file mode 100644
index 9e2545a..0000000
--- a/uncloud/uncloud/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Define DecimalField properties, used to represent amounts of money.
-# Used in pay and auth
-AMOUNT_MAX_DIGITS=10
-AMOUNT_DECIMALS=2
diff --git a/uncloud/uncloud/secrets_sample.py b/uncloud/uncloud/secrets_sample.py
deleted file mode 100644
index 464662f..0000000
--- a/uncloud/uncloud/secrets_sample.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Live/test key from stripe
-STRIPE_KEY = ''
-
-# XML-RPC interface of opennebula
-OPENNEBULA_URL = 'https://opennebula.ungleich.ch:2634/RPC2'
-
-# user:pass for accessing opennebula
-OPENNEBULA_USER_PASS = 'user:password'
-
-POSTGRESQL_DB_NAME="uncloud"
-
-# See https://django-auth-ldap.readthedocs.io/en/latest/authentication.html
-LDAP_ADMIN_DN=""
-LDAP_ADMIN_PASSWORD=""
-LDAP_SERVER_URI = ""
-
-# Stripe (Credit Card payments)
-STRIPE_API_key=""
-
-SECRET_KEY="dx$iqt=lc&yrp^!z5$ay^%g5lhx1y3bcu=jg(jx0yj0ogkfqvf"
diff --git a/uncloud/uncloud/urls.py b/uncloud/uncloud/urls.py
deleted file mode 100644
index 856e59c..0000000
--- a/uncloud/uncloud/urls.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""uncloud URL Configuration
-
-The `urlpatterns` list routes URLs to views. For more information please see:
- https://docs.djangoproject.com/en/3.0/topics/http/urls/
-Examples:
-Function views
- 1. Add an import: from my_app import views
- 2. Add a URL to urlpatterns: path('', views.home, name='home')
-Class-based views
- 1. Add an import: from other_app.views import Home
- 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
-Including another URLconf
- 1. Import the include() function: from django.urls import include, path
- 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
-"""
-from django.contrib import admin
-from django.urls import path, include
-
-from rest_framework import routers
-
-from uncloud_vm import views as vmviews
-from uncloud_pay import views as payviews
-from ungleich_service import views as serviceviews
-from opennebula import views as oneviews
-from uncloud_auth import views as authviews
-
-router = routers.DefaultRouter()
-
-# VM
-router.register(r'vm/snapshot', vmviews.VMSnapshotProductViewSet, basename='vmsnapshotproduct')
-router.register(r'vm/disk', vmviews.VMDiskProductViewSet, basename='vmdiskproduct')
-router.register(r'vm/image/mine', vmviews.VMDiskImageProductMineViewSet, basename='vmdiskimagemineproduct')
-router.register(r'vm/image/public', vmviews.VMDiskImageProductPublicViewSet, basename='vmdiskimagepublicproduct')
-
-# images the provider provides :-)
-# router.register(r'vm/image/official', vmviews.VMDiskImageProductPublicViewSet, basename='vmdiskimagepublicproduct')
-
-
-
-
-router.register(r'vm/vm', vmviews.VMProductViewSet, basename='vmproduct')
-
-
-# TBD
-#router.register(r'vm/disk', vmviews.VMDiskProductViewSet, basename='vmdiskproduct')
-
-# creates VM from os image
-#router.register(r'vm/ipv6onlyvm', vmviews.VMProductViewSet, basename='vmproduct')
-# ... AND adds IPv4 mapping
-#router.register(r'vm/dualstackvm', vmviews.VMProductViewSet, basename='vmproduct')
-
-# allow vm creation from own images
-
-
-# Services
-router.register(r'service/matrix', serviceviews.MatrixServiceProductViewSet, basename='matrixserviceproduct')
-
-
-# Pay
-router.register(r'payment-method', payviews.PaymentMethodViewSet, basename='payment-method')
-router.register(r'bill', payviews.BillViewSet, basename='bill')
-router.register(r'order', payviews.OrderViewSet, basename='order')
-router.register(r'payment', payviews.PaymentViewSet, basename='payment')
-router.register(r'payment-method', payviews.PaymentMethodViewSet, basename='payment-methods')
-
-# VMs
-router.register(r'vm/vm', vmviews.VMProductViewSet, basename='vm')
-
-# admin/staff urls
-router.register(r'admin/bill', payviews.AdminBillViewSet, basename='admin/bill')
-router.register(r'admin/payment', payviews.AdminPaymentViewSet, basename='admin/payment')
-router.register(r'admin/order', payviews.AdminOrderViewSet, basename='admin/order')
-router.register(r'admin/vmhost', vmviews.VMHostViewSet)
-router.register(r'admin/opennebula', oneviews.VMViewSet, basename='opennebula')
-
-# User/Account
-router.register(r'user', authviews.UserViewSet, basename='user')
-
-
-urlpatterns = [
- path('', include(router.urls)),
- path('api-auth/', include('rest_framework.urls', namespace='rest_framework')) # for login to REST API
-]
diff --git a/uncloud/uncloud_auth/migrations/0002_auto_20200318_1343.py b/uncloud/uncloud_auth/migrations/0002_auto_20200318_1343.py
deleted file mode 100644
index ad2654f..0000000
--- a/uncloud/uncloud_auth/migrations/0002_auto_20200318_1343.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Generated by Django 3.0.3 on 2020-03-18 13:43
-
-import django.core.validators
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('uncloud_auth', '0001_initial'),
- ]
-
- operations = [
- migrations.AddField(
- model_name='user',
- name='amount',
- field=models.DecimalField(decimal_places=2, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0)]),
- ),
- migrations.AddField(
- model_name='user',
- name='maximum_credit',
- field=models.FloatField(default=0),
- preserve_default=False,
- ),
- ]
diff --git a/uncloud/uncloud_auth/migrations/0003_auto_20200318_1345.py b/uncloud/uncloud_auth/migrations/0003_auto_20200318_1345.py
deleted file mode 100644
index 31b1717..0000000
--- a/uncloud/uncloud_auth/migrations/0003_auto_20200318_1345.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Generated by Django 3.0.3 on 2020-03-18 13:45
-
-import django.core.validators
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('uncloud_auth', '0002_auto_20200318_1343'),
- ]
-
- operations = [
- migrations.RemoveField(
- model_name='user',
- name='amount',
- ),
- migrations.AlterField(
- model_name='user',
- name='maximum_credit',
- field=models.DecimalField(decimal_places=2, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0)]),
- ),
- ]
diff --git a/uncloud/uncloud_auth/serializers.py b/uncloud/uncloud_auth/serializers.py
deleted file mode 100644
index 3627149..0000000
--- a/uncloud/uncloud_auth/serializers.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from django.contrib.auth import get_user_model
-from rest_framework import serializers
-
-from uncloud import AMOUNT_DECIMALS, AMOUNT_MAX_DIGITS
-
-class UserSerializer(serializers.ModelSerializer):
-
- class Meta:
- model = get_user_model()
- fields = ['username', 'email', 'balance', 'maximum_credit' ]
-
- balance = serializers.DecimalField(max_digits=AMOUNT_MAX_DIGITS,
- decimal_places=AMOUNT_DECIMALS)
diff --git a/uncloud/uncloud_auth/views.py b/uncloud/uncloud_auth/views.py
deleted file mode 100644
index 2f78e1f..0000000
--- a/uncloud/uncloud_auth/views.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from rest_framework import viewsets, permissions, status
-from .serializers import *
-
-class UserViewSet(viewsets.ReadOnlyModelViewSet):
- serializer_class = UserSerializer
- permission_classes = [permissions.IsAuthenticated]
-
- def get_queryset(self):
- if self.request.user.is_superuser:
- obj = get_user_model().objects.all()
- else:
- # This is a bit stupid: we have a user, we create a queryset by
- # matching on the username. But I don't know a "nicer" way.
- # Nico, 2020-03-18
- obj = get_user_model().objects.filter(username=self.request.user.username)
-
- return obj
diff --git a/uncloud/uncloud_net/models.py b/uncloud/uncloud_net/models.py
deleted file mode 100644
index 6d0c742..0000000
--- a/uncloud/uncloud_net/models.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from django.db import models
-
-class MACAdress(models.Model):
- prefix = 0x420000000000
diff --git a/uncloud/uncloud_pay/migrations/0001_initial.py b/uncloud/uncloud_pay/migrations/0001_initial.py
deleted file mode 100644
index 89fa586..0000000
--- a/uncloud/uncloud_pay/migrations/0001_initial.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Generated by Django 3.0.3 on 2020-03-05 10:17
-
-from django.conf import settings
-import django.core.validators
-from django.db import migrations, models
-import django.db.models.deletion
-import uuid
-
-
-class Migration(migrations.Migration):
-
- initial = True
-
- dependencies = [
- migrations.swappable_dependency(settings.AUTH_USER_MODEL),
- ('uncloud_auth', '0001_initial'),
- ]
-
- operations = [
- migrations.CreateModel(
- name='Bill',
- fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('creation_date', models.DateTimeField(auto_now_add=True)),
- ('starting_date', models.DateTimeField()),
- ('ending_date', models.DateTimeField()),
- ('due_date', models.DateField()),
- ('valid', models.BooleanField(default=True)),
- ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
- ],
- ),
- migrations.CreateModel(
- name='Order',
- fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('creation_date', models.DateTimeField(auto_now_add=True)),
- ('starting_date', models.DateTimeField(auto_now_add=True)),
- ('ending_date', models.DateTimeField(blank=True, null=True)),
- ('recurring_period', models.CharField(choices=[('ONCE', 'Onetime'), ('YEAR', 'Per Year'), ('MONTH', 'Per Month'), ('MINUTE', 'Per Minute'), ('DAY', 'Per Day'), ('HOUR', 'Per Hour'), ('SECOND', 'Per Second')], default='MONTH', max_length=32)),
- ('bill', models.ManyToManyField(blank=True, editable=False, to='uncloud_pay.Bill')),
- ('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
- ],
- ),
- migrations.CreateModel(
- name='StripeCustomer',
- fields=[
- ('owner', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
- ('stripe_id', models.CharField(max_length=32)),
- ],
- ),
- migrations.CreateModel(
- name='Payment',
- fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0)])),
- ('source', models.CharField(choices=[('wire', 'Wire Transfer'), ('stripe', 'Stripe'), ('voucher', 'Voucher'), ('referral', 'Referral'), ('unknown', 'Unknown')], default='unknown', max_length=256)),
- ('timestamp', models.DateTimeField(auto_now_add=True)),
- ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
- ],
- ),
- migrations.CreateModel(
- name='OrderRecord',
- fields=[
- ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
- ('one_time_price', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0)])),
- ('recurring_price', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0)])),
- ('description', models.TextField()),
- ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.Order')),
- ],
- ),
- migrations.CreateModel(
- name='PaymentMethod',
- fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('source', models.CharField(choices=[('stripe', 'Stripe'), ('unknown', 'Unknown')], default='stripe', max_length=256)),
- ('description', models.TextField()),
- ('primary', models.BooleanField(default=True)),
- ('stripe_card_id', models.CharField(blank=True, max_length=32, null=True)),
- ('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
- ],
- options={
- 'unique_together': {('owner', 'primary')},
- },
- ),
- ]
diff --git a/uncloud/uncloud_pay/models.py b/uncloud/uncloud_pay/models.py
deleted file mode 100644
index a11c3c1..0000000
--- a/uncloud/uncloud_pay/models.py
+++ /dev/null
@@ -1,473 +0,0 @@
-from django.db import models
-from django.db.models import Q
-from django.contrib.auth import get_user_model
-from django.core.validators import MinValueValidator
-from django.utils.translation import gettext_lazy as _
-from django.utils import timezone
-from django.dispatch import receiver
-from django.core.exceptions import ObjectDoesNotExist
-import django.db.models.signals as signals
-
-import uuid
-from functools import reduce
-from math import ceil
-from datetime import timedelta
-from calendar import monthrange
-
-from decimal import Decimal
-
-import uncloud_pay.stripe
-from uncloud_pay.helpers import beginning_of_month, end_of_month
-from uncloud import AMOUNT_DECIMALS, AMOUNT_MAX_DIGITS
-
-
-
-
-# Used to generate bill due dates.
-BILL_PAYMENT_DELAY=timedelta(days=10)
-
-# See https://docs.djangoproject.com/en/dev/ref/models/fields/#field-choices-enum-types
-class RecurringPeriod(models.TextChoices):
- ONE_TIME = 'ONCE', _('Onetime')
- PER_YEAR = 'YEAR', _('Per Year')
- PER_MONTH = 'MONTH', _('Per Month')
- PER_MINUTE = 'MINUTE', _('Per Minute')
- PER_DAY = 'DAY', _('Per Day')
- PER_HOUR = 'HOUR', _('Per Hour')
- PER_SECOND = 'SECOND', _('Per Second')
-
-# See https://docs.djangoproject.com/en/dev/ref/models/fields/#field-choices-enum-types
-class ProductStatus(models.TextChoices):
- PENDING = 'PENDING', _('Pending')
- AWAITING_PAYMENT = 'AWAITING_PAYMENT', _('Awaiting payment')
- BEING_CREATED = 'BEING_CREATED', _('Being created')
- ACTIVE = 'ACTIVE', _('Active')
- DELETED = 'DELETED', _('Deleted')
-
-
-def get_balance_for_user(user):
- bills = reduce(
- lambda acc, entry: acc + entry.total,
- Bill.objects.filter(owner=user),
- 0)
- payments = reduce(
- lambda acc, entry: acc + entry.amount,
- Payment.objects.filter(owner=user),
- 0)
- return payments - bills
-
-class StripeCustomer(models.Model):
- owner = models.OneToOneField( get_user_model(),
- primary_key=True,
- on_delete=models.CASCADE)
- stripe_id = models.CharField(max_length=32)
-
-###
-# Payments and Payment Methods.
-
-class Payment(models.Model):
- uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
-
- owner = models.ForeignKey(get_user_model(),
- on_delete=models.CASCADE)
-
- amount = models.DecimalField(
- default=0.0,
- max_digits=AMOUNT_MAX_DIGITS,
- decimal_places=AMOUNT_DECIMALS,
- validators=[MinValueValidator(0)])
-
- source = models.CharField(max_length=256,
- choices = (
- ('wire', 'Wire Transfer'),
- ('stripe', 'Stripe'),
- ('voucher', 'Voucher'),
- ('referral', 'Referral'),
- ('unknown', 'Unknown')
- ),
- default='unknown')
- timestamp = models.DateTimeField(editable=False, auto_now_add=True)
-
- # WIP prepaid and service activation logic by fnux.
-    ## We override save() in order to activate products awaiting payment.
- #def save(self, *args, **kwargs):
- # # TODO: only run activation logic on creation, not on update.
- # unpaid_bills_before_payment = Bill.get_unpaid_for(self.owner)
- # super(Payment, self).save(*args, **kwargs) # Save payment in DB.
- # unpaid_bills_after_payment = Bill.get_unpaid_for(self.owner)
-
- # newly_paid_bills = list(
- # set(unpaid_bills_before_payment) - set(unpaid_bills_after_payment))
- # for bill in newly_paid_bills:
- # bill.activate_orders()
-
-
-class PaymentMethod(models.Model):
- uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
- owner = models.ForeignKey(get_user_model(),
- on_delete=models.CASCADE,
- editable=False)
- source = models.CharField(max_length=256,
- choices = (
- ('stripe', 'Stripe'),
- ('unknown', 'Unknown'),
- ),
- default='stripe')
- description = models.TextField()
- primary = models.BooleanField(default=True)
-
- # Only used for "Stripe" source
- stripe_card_id = models.CharField(max_length=32, blank=True, null=True)
-
- @property
- def stripe_card_last4(self):
- if self.source == 'stripe':
- card_request = uncloud_pay.stripe.get_card(
- StripeCustomer.objects.get(owner=self.owner).stripe_id,
- self.stripe_card_id)
- if card_request['error'] == None:
- return card_request['response_object']['last4']
- else:
- return None
- else:
- return None
-
-
- def charge(self, amount):
-        if amount > 0: # Make sure we don't charge a negative amount by mistake.
- if self.source == 'stripe':
- stripe_customer = StripeCustomer.objects.get(owner=self.owner).stripe_id
- charge_request = uncloud_pay.stripe.charge_customer(amount, stripe_customer, self.stripe_card_id)
- if charge_request['error'] == None:
- payment = Payment(owner=self.owner, source=self.source, amount=amount)
- payment.save() # TODO: Check return status
-
- return payment
- else:
- raise Exception('Stripe error: {}'.format(charge_request['error']))
- else:
- raise Exception('This payment method is unsupported/cannot be charged.')
- else:
- raise Exception('Cannot charge negative amount.')
-
-
- def get_primary_for(user):
- methods = PaymentMethod.objects.filter(owner=user)
- for method in methods:
- # Do we want to do something with non-primary method?
- if method.primary:
- return method
-
- return None
-
- class Meta:
- unique_together = [['owner', 'primary']]
-
-###
-# Bills & Payments.
-
-class Bill(models.Model):
- uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
- owner = models.ForeignKey(get_user_model(),
- on_delete=models.CASCADE)
-
- creation_date = models.DateTimeField(auto_now_add=True)
- starting_date = models.DateTimeField()
- ending_date = models.DateTimeField()
- due_date = models.DateField()
-
- valid = models.BooleanField(default=True)
-
- @property
- def reference(self):
- return "{}-{}".format(
- self.owner.username,
- self.creation_date.strftime("%Y-%m-%d-%H%M"))
-
- @property
- def records(self):
- bill_records = []
- orders = Order.objects.filter(bill=self)
- for order in orders:
- for order_record in order.records:
- bill_record = BillRecord(self, order_record)
- bill_records.append(bill_record)
-
- return bill_records
-
- @property
- def total(self):
- return reduce(lambda acc, record: acc + record.amount, self.records, 0)
-
- @property
- def final(self):
- # A bill is final when its ending date is passed.
- return self.ending_date < timezone.now()
-
- @staticmethod
- def generate_for(year, month, user):
- # /!\ We exclusively work on the specified year and month.
-
- # Default values for next bill (if any). Only saved at the end of
- # this method, if relevant.
- next_bill = Bill(owner=user,
- starting_date=beginning_of_month(year, month),
- ending_date=end_of_month(year, month),
- creation_date=timezone.now(),
- due_date=timezone.now() + BILL_PAYMENT_DELAY)
-
-        # Select all orders active during the requested period.
- orders = Order.objects.filter(
- Q(ending_date__gt=next_bill.starting_date) | Q(ending_date__isnull=True),
- owner=user)
-
- # Check if there is already a bill covering the order and period pair:
- # * Get latest bill by ending_date: previous_bill.ending_date
- # * If previous_bill.ending_date is before next_bill.ending_date, a new
- # bill has to be generated.
- unpaid_orders = []
- for order in orders:
- try:
- previous_bill = order.bill.latest('ending_date')
- except ObjectDoesNotExist:
- previous_bill = None
-
- if previous_bill == None or previous_bill.ending_date < next_bill.ending_date:
- unpaid_orders.append(order)
-
-        # Commit next_bill if there are 'unpaid' orders.
- if len(unpaid_orders) > 0:
- next_bill.save()
-
-            # It is not possible to register a many-to-many relationship before
-            # the two end objects are saved in the database.
- for order in unpaid_orders:
- order.bill.add(next_bill)
-
- # TODO: use logger.
- print("Generated bill {} (amount: {}) for user {}."
- .format(next_bill.uuid, next_bill.total, user))
-
- return next_bill
-
- # Return None if no bill was created.
- return None
-
- @staticmethod
- def get_unpaid_for(user):
-        balance = get_balance_for_user(user)
- unpaid_bills = []
- # No unpaid bill if balance is positive.
- if balance >= 0:
- return []
- else:
- bills = Bill.objects.filter(
- owner=user,
- due_date__lt=timezone.now()
- ).order_by('-creation_date')
-
- # Amount to be paid by the customer.
- unpaid_balance = abs(balance)
- for bill in bills:
- if unpaid_balance < 0:
- break
-
-                unpaid_balance -= bill.total
- unpaid_bills.append(bill)
-
- return unpaid_bills
-
- @staticmethod
- def get_overdue_for(user):
- unpaid_bills = Bill.get_unpaid_for(user)
-        return list(filter(lambda bill: bill.due_date < timezone.now().date(), unpaid_bills))
-
-class BillRecord():
- """
- Entry of a bill, dynamically generated from order records.
- """
-
- def __init__(self, bill, order_record):
- self.bill = bill
- self.order = order_record.order
- self.recurring_price = order_record.recurring_price
- self.recurring_period = order_record.recurring_period
- self.description = order_record.description
-
- if self.order.starting_date > self.bill.starting_date:
- self.one_time_price = order_record.one_time_price
- else:
- self.one_time_price = 0
-
- @property
- def recurring_count(self):
- # Compute billing delta.
- billed_until = self.bill.ending_date
-        if self.order.ending_date != None and self.order.ending_date < self.bill.ending_date:
- billed_until = self.order.ending_date
-
- billed_from = self.bill.starting_date
- if self.order.starting_date > self.bill.starting_date:
- billed_from = self.order.starting_date
-
- if billed_from > billed_until:
-            # TODO: think about and check edge cases. This should not be
- # possible.
- raise Exception('Impossible billing delta!')
-
- billed_delta = billed_until - billed_from
-
- # TODO: refactor this thing?
- # TODO: weekly
- # TODO: yearly
- if self.recurring_period == RecurringPeriod.PER_MONTH:
- days = ceil(billed_delta / timedelta(days=1))
-
- # XXX: we assume monthly bills for now.
-            if (self.bill.starting_date.year != self.bill.ending_date.year or
- self.bill.starting_date.month != self.bill.ending_date.month):
- raise Exception('Bill {} covers more than one month. Cannot bill PER_MONTH.'.
- format(self.bill.uuid))
-
-            # XXX: minimal length of a monthly order is to be enforced somewhere else.
- (_, days_in_month) = monthrange(
- self.bill.starting_date.year,
- self.bill.starting_date.month)
- return Decimal(days / days_in_month)
- elif self.recurring_period == RecurringPeriod.PER_DAY:
- days = ceil(billed_delta / timedelta(days=1))
- return Decimal(days)
- elif self.recurring_period == RecurringPeriod.PER_HOUR:
- hours = ceil(billed_delta / timedelta(hours=1))
- return Decimal(hours)
- elif self.recurring_period == RecurringPeriod.PER_SECOND:
- seconds = ceil(billed_delta / timedelta(seconds=1))
- return Decimal(seconds)
- elif self.recurring_period == RecurringPeriod.ONE_TIME:
- return Decimal(0)
- else:
- raise Exception('Unsupported recurring period: {}.'.
-                    format(self.recurring_period))
-
- @property
- def amount(self):
- return self.recurring_price * self.recurring_count + self.one_time_price
-
-###
-# Orders.
-
-# Orders are assumed IMMUTABLE and used as the SOURCE OF TRUTH for generating
-# bills. Do **NOT** mutate them!
-class Order(models.Model):
- uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
- owner = models.ForeignKey(get_user_model(),
- on_delete=models.CASCADE,
- editable=False)
-
- # TODO: enforce ending_date - starting_date to be larger than recurring_period.
- creation_date = models.DateTimeField(auto_now_add=True)
- starting_date = models.DateTimeField(auto_now_add=True)
- ending_date = models.DateTimeField(blank=True,
- null=True)
-
- bill = models.ManyToManyField(Bill,
- editable=False,
- blank=True)
-
- recurring_period = models.CharField(max_length=32,
- choices = RecurringPeriod.choices,
- default = RecurringPeriod.PER_MONTH)
-
- @property
- def records(self):
- return OrderRecord.objects.filter(order=self)
-
- @property
- def one_time_price(self):
- return reduce(lambda acc, record: acc + record.one_time_price, self.records, 0)
-
- @property
- def recurring_price(self):
- return reduce(lambda acc, record: acc + record.recurring_price, self.records, 0)
-
- def add_record(self, one_time_price, recurring_price, description):
- OrderRecord.objects.create(order=self,
- one_time_price=one_time_price,
- recurring_price=recurring_price,
- description=description)
-
-
-class OrderRecord(models.Model):
- """
-    Order records store billing information for products: the actual product
-    might be mutated and/or moved to another order, but we do not want to lose
-    the details of old orders.
-
-    Used as the source of truth to dynamically generate bill entries.
- """
-
- order = models.ForeignKey(Order, on_delete=models.CASCADE)
- one_time_price = models.DecimalField(default=0.0,
- max_digits=AMOUNT_MAX_DIGITS,
- decimal_places=AMOUNT_DECIMALS,
- validators=[MinValueValidator(0)])
- recurring_price = models.DecimalField(default=0.0,
- max_digits=AMOUNT_MAX_DIGITS,
- decimal_places=AMOUNT_DECIMALS,
- validators=[MinValueValidator(0)])
-
- description = models.TextField()
-
- @property
- def recurring_period(self):
- return self.order.recurring_period
-
- @property
- def starting_date(self):
- return self.order.starting_date
-
- @property
- def ending_date(self):
- return self.order.ending_date
-
-
-###
-# Products
-
-# Abstract (= no database representation) class used as parent for products
-# (e.g. uncloud_vm.models.VMProduct).
-class Product(models.Model):
- uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
- owner = models.ForeignKey(get_user_model(),
- on_delete=models.CASCADE,
- editable=False)
-
- description = ""
-
- status = models.CharField(max_length=32,
- choices=ProductStatus.choices,
- default=ProductStatus.PENDING)
-
- order = models.ForeignKey(Order,
- on_delete=models.CASCADE,
- editable=False,
- null=True)
-
- @property
- def recurring_price(self, recurring_period=RecurringPeriod.PER_MONTH):
- pass # To be implemented in child.
-
- @property
- def one_time_price(self):
- return 0
-
- @property
- def recurring_period(self):
- return self.order.recurring_period
-
- @staticmethod
- def allowed_recurring_periods():
- return RecurringPeriod.choices
-
- class Meta:
- abstract = True
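
The pro-rata arithmetic in *BillRecord.recurring_count* above is the core of the
removed billing code: a PER_MONTH order is billed for the fraction of the month
it was active. Below is a minimal standalone sketch of that computation, using
plain datetimes instead of the Django model fields (the function name and the
dates are illustrative only):

.. code-block:: python

    from calendar import monthrange
    from datetime import datetime, timedelta
    from decimal import Decimal
    from math import ceil

    def monthly_recurring_count(billed_from, billed_until):
        """Fraction of the month covered by [billed_from, billed_until],
        assuming both timestamps fall within the same calendar month."""
        if billed_from > billed_until:
            raise ValueError('impossible billing delta')
        days = ceil((billed_until - billed_from) / timedelta(days=1))
        _, days_in_month = monthrange(billed_from.year, billed_from.month)
        return Decimal(days) / Decimal(days_in_month)

    # An order active from the 16th to the 30th of a 30-day month covers
    # 14 days, so 14/30 of the monthly recurring price is billed.
    print(monthly_recurring_count(datetime(2020, 4, 16), datetime(2020, 4, 30)))
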
diff --git a/uncloud/uncloud_pay/serializers.py b/uncloud/uncloud_pay/serializers.py
deleted file mode 100644
index a0a8635..0000000
--- a/uncloud/uncloud_pay/serializers.py
+++ /dev/null
@@ -1,73 +0,0 @@
-from django.contrib.auth import get_user_model
-from rest_framework import serializers
-from .models import *
-
-###
-# Payments and Payment Methods.
-
-class PaymentSerializer(serializers.ModelSerializer):
- class Meta:
- model = Payment
- fields = ['owner', 'amount', 'source', 'timestamp']
-
-class PaymentMethodSerializer(serializers.ModelSerializer):
- stripe_card_last4 = serializers.IntegerField()
-
- class Meta:
- model = PaymentMethod
- fields = ['uuid', 'source', 'description', 'primary', 'stripe_card_last4']
-
-class ChargePaymentMethodSerializer(serializers.Serializer):
- amount = serializers.DecimalField(max_digits=10, decimal_places=2)
-
-class CreditCardSerializer(serializers.Serializer):
- number = serializers.IntegerField()
- exp_month = serializers.IntegerField()
- exp_year = serializers.IntegerField()
- cvc = serializers.IntegerField()
-
-class CreatePaymentMethodSerializer(serializers.ModelSerializer):
- credit_card = CreditCardSerializer()
-
- class Meta:
- model = PaymentMethod
- fields = ['source', 'description', 'primary', 'credit_card']
-
-###
-# Orders & Products.
-
-class OrderRecordSerializer(serializers.ModelSerializer):
- class Meta:
- model = OrderRecord
- fields = ['one_time_price', 'recurring_price', 'description']
-
-
-class OrderSerializer(serializers.ModelSerializer):
- records = OrderRecordSerializer(many=True, read_only=True)
- class Meta:
- model = Order
- fields = ['uuid', 'creation_date', 'starting_date', 'ending_date',
- 'bill', 'recurring_period', 'records', 'recurring_price', 'one_time_price']
-
-
-###
-# Bills
-
-# TODO: remove magic numbers for decimal fields
-class BillRecordSerializer(serializers.Serializer):
- order = serializers.HyperlinkedRelatedField(
- view_name='order-detail',
- read_only=True)
- description = serializers.CharField()
- recurring_period = serializers.CharField()
- recurring_price = serializers.DecimalField(max_digits=10, decimal_places=2)
- recurring_count = serializers.DecimalField(max_digits=10, decimal_places=2)
- one_time_price = serializers.DecimalField(max_digits=10, decimal_places=2)
- amount = serializers.DecimalField(max_digits=10, decimal_places=2)
-
-class BillSerializer(serializers.ModelSerializer):
- records = BillRecordSerializer(many=True, read_only=True)
- class Meta:
- model = Bill
- fields = ['reference', 'owner', 'total', 'due_date', 'creation_date',
- 'starting_date', 'ending_date', 'records', 'final']
diff --git a/uncloud/uncloud_pay/stripe.py b/uncloud/uncloud_pay/stripe.py
deleted file mode 100644
index 4f28d94..0000000
--- a/uncloud/uncloud_pay/stripe.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import stripe
-import stripe.error
-import logging
-
-from django.core.exceptions import ObjectDoesNotExist
-import uncloud_pay.models
-
-import uncloud.secrets
-
-# Static stripe configuration used below.
-CURRENCY = 'chf'
-
-stripe.api_key = uncloud.secrets.STRIPE_KEY
-
-# Helper (decorator) used to catch errors raised by stripe logic.
-def handle_stripe_error(f):
- def handle_problems(*args, **kwargs):
- response = {
- 'paid': False,
- 'response_object': None,
- 'error': None
- }
-
- common_message = "Currently it is not possible to make payments."
- try:
- response_object = f(*args, **kwargs)
- response = {
- 'response_object': response_object,
- 'error': None
- }
- return response
- except stripe.error.CardError as e:
- # Since it's a decline, stripe.error.CardError will be caught
- body = e.json_body
- err = body['error']
- response.update({'error': err['message']})
- logging.error(str(e))
- return response
- except stripe.error.RateLimitError:
- response.update(
- {'error': "Too many requests made to the API too quickly"})
- return response
- except stripe.error.InvalidRequestError as e:
- logging.error(str(e))
- response.update({'error': "Invalid parameters"})
- return response
- except stripe.error.AuthenticationError as e:
- # Authentication with Stripe's API failed
- # (maybe you changed API keys recently)
- logging.error(str(e))
- response.update({'error': common_message})
- return response
- except stripe.error.APIConnectionError as e:
- logging.error(str(e))
- response.update({'error': common_message})
- return response
- except stripe.error.StripeError as e:
- # maybe send email
- logging.error(str(e))
- response.update({'error': common_message})
- return response
- except Exception as e:
- # maybe send email
- logging.error(str(e))
- response.update({'error': common_message})
- return response
-
- return handle_problems
-
-# Convenience CC container, also used for serialization.
-class CreditCard():
- number = None
- exp_year = None
- exp_month = None
- cvc = None
-
- def __init__(self, number, exp_month, exp_year, cvc):
- self.number=number
- self.exp_year = exp_year
- self.exp_month = exp_month
- self.cvc = cvc
-
-# Actual Stripe logic.
-
-def get_customer_id_for(user):
- try:
- # .get() raise if there is no matching entry.
- return uncloud_pay.models.StripeCustomer.objects.get(owner=user).stripe_id
- except ObjectDoesNotExist:
- # No entry yet - making a new one.
- customer_request = create_customer(user.username, user.email)
- if customer_request['error'] == None:
- mapping = uncloud_pay.models.StripeCustomer.objects.create(
- owner=user,
- stripe_id=customer_request['response_object']['id']
- )
- return mapping.stripe_id
- else:
- return None
-
-@handle_stripe_error
-def create_card(customer_id, credit_card):
- return stripe.Customer.create_source(
- customer_id,
- card={
- 'number': credit_card.number,
- 'exp_month': credit_card.exp_month,
- 'exp_year': credit_card.exp_year,
- 'cvc': credit_card.cvc
- })
-
-@handle_stripe_error
-def get_card(customer_id, card_id):
- return stripe.Customer.retrieve_source(customer_id, card_id)
-
-@handle_stripe_error
-def charge_customer(amount, customer_id, card_id):
-    # Amount is in CHF, but Stripe requires the smallest currency unit.
- # See https://stripe.com/docs/api/charges/create
- adjusted_amount = int(amount * 100)
- return stripe.Charge.create(
- amount=adjusted_amount,
- currency=CURRENCY,
- customer=customer_id,
- source=card_id)
-
-@handle_stripe_error
-def create_customer(name, email):
- return stripe.Customer.create(name=name, email=email)
-
-@handle_stripe_error
-def get_customer(customer_id):
- return stripe.Customer.retrieve(customer_id)
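
The *handle_stripe_error* decorator above converts every Stripe exception into a
dict with *response_object* and *error* keys, so callers branch on the returned
value instead of catching exceptions. A minimal sketch of the same pattern
without the Stripe dependency (the decorator and function names here are
illustrative only):

.. code-block:: python

    def handle_errors(f):
        """Stand-in for handle_stripe_error: never raises, always returns a
        dict with 'response_object' and 'error' keys."""
        def wrapper(*args, **kwargs):
            try:
                return {'response_object': f(*args, **kwargs), 'error': None}
            except Exception as e:  # the real decorator matches stripe.error.* classes
                return {'response_object': None, 'error': str(e)}
        return wrapper

    @handle_errors
    def risky_division(a, b):
        return a / b

    result = risky_division(1, 0)
    if result['error'] is None:
        print(result['response_object'])
    else:
        print('call failed:', result['error'])
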
diff --git a/uncloud/uncloud_pay/views.py b/uncloud/uncloud_pay/views.py
deleted file mode 100644
index e86a464..0000000
--- a/uncloud/uncloud_pay/views.py
+++ /dev/null
@@ -1,150 +0,0 @@
-from django.shortcuts import render
-from django.db import transaction
-from django.contrib.auth import get_user_model
-from rest_framework import viewsets, permissions, status
-from rest_framework.response import Response
-from rest_framework.decorators import action
-
-import json
-
-from .models import *
-from .serializers import *
-from datetime import datetime
-import uncloud_pay.stripe as uncloud_stripe
-
-###
-# Standard user views:
-
-class BalanceViewSet(viewsets.ViewSet):
- # here we return a number
- # number = sum(payments) - sum(bills)
-
- #bills = Bill.objects.filter(owner=self.request.user)
- #payments = Payment.objects.filter(owner=self.request.user)
-
-    # sum_paid = sum(payment.amount for payment in payments)
-    # sum_to_be_paid = sum(bill.total for bill in bills) # you get the picture
- pass
-
-
-class BillViewSet(viewsets.ReadOnlyModelViewSet):
- serializer_class = BillSerializer
- permission_classes = [permissions.IsAuthenticated]
-
- def get_queryset(self):
- return Bill.objects.filter(owner=self.request.user)
-
-class PaymentViewSet(viewsets.ReadOnlyModelViewSet):
- serializer_class = PaymentSerializer
- permission_classes = [permissions.IsAuthenticated]
-
- def get_queryset(self):
- return Payment.objects.filter(owner=self.request.user)
-
-class OrderViewSet(viewsets.ReadOnlyModelViewSet):
- serializer_class = OrderSerializer
- permission_classes = [permissions.IsAuthenticated]
-
- def get_queryset(self):
- return Order.objects.filter(owner=self.request.user)
-
-
-class PaymentMethodViewSet(viewsets.ModelViewSet):
- permission_classes = [permissions.IsAuthenticated]
-
- def get_serializer_class(self):
- if self.action == 'create':
- return CreatePaymentMethodSerializer
- elif self.action == 'charge':
- return ChargePaymentMethodSerializer
- else:
- return PaymentMethodSerializer
-
-
- def get_queryset(self):
- return PaymentMethod.objects.filter(owner=self.request.user)
-
- # XXX: Handling of errors is far from great down there.
- @transaction.atomic
- def create(self, request):
- serializer = self.get_serializer(data=request.data)
- serializer.is_valid(raise_exception=True)
-
- # Retrieve Stripe customer ID for user.
- customer_id = uncloud_stripe.get_customer_id_for(request.user)
- if customer_id == None:
- return Response(
- {'error': 'Could not resolve customer stripe ID.'},
- status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
- # Register card under stripe customer.
- credit_card = uncloud_stripe.CreditCard(**serializer.validated_data.pop('credit_card'))
- card_request = uncloud_stripe.create_card(customer_id, credit_card)
- if card_request['error']:
- return Response({'stripe_error': card_request['error']}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
- card_id = card_request['response_object']['id']
-
- # Save payment method locally.
- serializer.validated_data['stripe_card_id'] = card_request['response_object']['id']
- payment_method = PaymentMethod.objects.create(owner=request.user, **serializer.validated_data)
-
- # We do not want to return the credit card details sent with the POST
- # request.
- output_serializer = PaymentMethodSerializer(payment_method)
- return Response(output_serializer.data)
-
- @action(detail=True, methods=['post'])
- def charge(self, request, pk=None):
- payment_method = self.get_object()
- serializer = self.get_serializer(data=request.data)
- serializer.is_valid(raise_exception=True)
- amount = serializer.validated_data['amount']
- try:
- payment = payment_method.charge(amount)
- output_serializer = PaymentSerializer(payment)
- return Response(output_serializer.data)
- except Exception as e:
- return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
-###
-# Admin views.
-
-class AdminPaymentViewSet(viewsets.ModelViewSet):
- serializer_class = PaymentSerializer
- permission_classes = [permissions.IsAuthenticated]
-
- def get_queryset(self):
- return Payment.objects.all()
-
- def create(self, request):
- serializer = self.get_serializer(data=request.data)
- serializer.is_valid(raise_exception=True)
- serializer.save(timestamp=datetime.now())
-
- headers = self.get_success_headers(serializer.data)
- return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
-
-class AdminBillViewSet(viewsets.ModelViewSet):
- serializer_class = BillSerializer
- permission_classes = [permissions.IsAuthenticated]
-
- def get_queryset(self):
- return Bill.objects.all()
-
- def unpaid(self, request):
- return Bill.objects.filter(owner=self.request.user, paid=False)
-
- def create(self, request):
- serializer = self.get_serializer(data=request.data)
- serializer.is_valid(raise_exception=True)
- serializer.save(creation_date=datetime.now())
-
- headers = self.get_success_headers(serializer.data)
- return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
-
-class AdminOrderViewSet(viewsets.ModelViewSet):
- serializer_class = OrderSerializer
- permission_classes = [permissions.IsAuthenticated]
-
- def get_queryset(self):
- return Order.objects.all()
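
For context, the DRF router exposes the *charge* action above as
*POST payment-method/<uuid>/charge/*. A hypothetical client-side call, assuming
the *requests* library; host, credentials and the UUID are placeholders:

.. code-block:: python

    import requests

    response = requests.post(
        'https://uncloud.example.com/payment-method/11111111-2222-3333-4444-555555555555/charge/',
        auth=('username', 'password'),  # placeholder credentials
        json={'amount': '10.00'},
    )
    print(response.status_code, response.json())
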
diff --git a/uncloud/uncloud_storage/admin.py b/uncloud/uncloud_storage/admin.py
deleted file mode 100644
index 8c38f3f..0000000
--- a/uncloud/uncloud_storage/admin.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from django.contrib import admin
-
-# Register your models here.
diff --git a/uncloud/uncloud_storage/tests.py b/uncloud/uncloud_storage/tests.py
deleted file mode 100644
index 7ce503c..0000000
--- a/uncloud/uncloud_storage/tests.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from django.test import TestCase
-
-# Create your tests here.
diff --git a/uncloud/uncloud_storage/views.py b/uncloud/uncloud_storage/views.py
deleted file mode 100644
index 91ea44a..0000000
--- a/uncloud/uncloud_storage/views.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from django.shortcuts import render
-
-# Create your views here.
diff --git a/uncloud/uncloud_vm/admin.py b/uncloud/uncloud_vm/admin.py
deleted file mode 100644
index 8c38f3f..0000000
--- a/uncloud/uncloud_vm/admin.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from django.contrib import admin
-
-# Register your models here.
diff --git a/uncloud/uncloud_vm/management/commands/vm.py b/uncloud/uncloud_vm/management/commands/vm.py
deleted file mode 100644
index c0e2783..0000000
--- a/uncloud/uncloud_vm/management/commands/vm.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import json
-
-import uncloud.secrets as secrets
-
-from django.core.management.base import BaseCommand
-from django.contrib.auth import get_user_model
-
-from uncloud_vm.models import VMProduct, VMHost
-
-class Command(BaseCommand):
- help = 'Select VM Host for VMs'
-
- def add_arguments(self, parser):
- parser.add_argument('--schedule-vms', action='store_true')
- parser.add_argument('--start-vms-here', action='store_true')
- parser.add_argument('--check-health', action='store_true')
- parser.add_argument('--vmhostname')
- print(parser)
-
-
- def handle(self, *args, **options):
- print(args)
- print(options)
-
- if options['schedule_vms']:
-            self.schedule_vms(args, options)
- if options['start_vms_here']:
- if not options['vmhostname']:
- raise Exception("Argument vmhostname is required to know which vmhost we are on")
- self.start_vms(args, options)
- if options['check_health']:
-            self.check_health(args, options)
-
- def start_vms(self, *args, **options):
-        try:
-            vmhost = VMHost.objects.get(status='active',
-                                        hostname=options['vmhostname'])
-        except VMHost.DoesNotExist:
-            print("No active vmhost {} exists".format(options['vmhostname']))
-            return
-
- vms_to_start = VMProduct.objects.filter(vmhost=vmhost,
- status='creating')
- for vm in vms_to_start:
-
- """ run qemu:
- check if VM is not already active / qemu running
- prepare / create the Qemu arguments
-
-
- """
-
-    def schedule_vms(self, *args, **options):
- pending_vms = VMProduct.objects.filter(vmhost__isnull=True)
- vmhosts = VMHost.objects.filter(status='active')
-
- for vm in pending_vms:
- print(vm)
-
- found_vmhost = False
- for vmhost in vmhosts:
- if vmhost.available_cores >= vm.cores and vmhost.available_ram_in_gb >= vm.ram_in_gb:
- vm.vmhost = vmhost
- vm.status = "creating"
- vm.save()
- found_vmhost = True
- print("Scheduled VM {} on VMHOST {}".format(vm, vmhost))
- break
-
- if not found_vmhost:
- print("Error: cannot schedule VM {}, no suitable host found".format(vm))
-
- def check_health(self, *args, **options):
- pending_vms = VMProduct.objects.filter(vmhost__isnull=True)
- vmhosts = VMHost.objects.filter(status='active')
-
- # 1. Check that all active hosts reported back N seconds ago
- # 2. Check that no VM is running on a dead host
- # 3. Migrate VMs if necessary
- # 4. Check that no VMs have been pending for longer than Y seconds
-
- # If VM snapshots exist without a VM -> notify user (?)
-
-
- print("Nothing is good, you should implement me")
diff --git a/uncloud/uncloud_vm/migrations/0002_auto_20200305_1321.py b/uncloud/uncloud_vm/migrations/0002_auto_20200305_1321.py
deleted file mode 100644
index 2711b33..0000000
--- a/uncloud/uncloud_vm/migrations/0002_auto_20200305_1321.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Generated by Django 3.0.3 on 2020-03-05 13:21
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('uncloud_vm', '0001_initial'),
- ]
-
- operations = [
- migrations.AlterField(
- model_name='vmdiskimageproduct',
- name='storage_class',
- field=models.CharField(choices=[('HDD', 'HDD'), ('SSD', 'SSD')], default='SSD', max_length=32),
- ),
- migrations.AlterField(
- model_name='vmproduct',
- name='name',
- field=models.CharField(blank=True, max_length=32, null=True),
- ),
- ]
diff --git a/uncloud/uncloud_vm/migrations/0003_remove_vmhost_vms.py b/uncloud/uncloud_vm/migrations/0003_remove_vmhost_vms.py
deleted file mode 100644
index 70ee863..0000000
--- a/uncloud/uncloud_vm/migrations/0003_remove_vmhost_vms.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Generated by Django 3.0.3 on 2020-03-05 13:58
-
-from django.db import migrations
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('uncloud_vm', '0002_auto_20200305_1321'),
- ]
-
- operations = [
- migrations.RemoveField(
- model_name='vmhost',
- name='vms',
- ),
- ]
diff --git a/uncloud/uncloud_vm/migrations/0004_remove_vmproduct_vmid.py b/uncloud/uncloud_vm/migrations/0004_remove_vmproduct_vmid.py
deleted file mode 100644
index 5f44b57..0000000
--- a/uncloud/uncloud_vm/migrations/0004_remove_vmproduct_vmid.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Generated by Django 3.0.3 on 2020-03-17 14:40
-
-from django.db import migrations
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ('uncloud_vm', '0003_remove_vmhost_vms'),
- ]
-
- operations = [
- migrations.RemoveField(
- model_name='vmproduct',
- name='vmid',
- ),
- ]
diff --git a/uncloud/uncloud_vm/serializers.py b/uncloud/uncloud_vm/serializers.py
deleted file mode 100644
index f759d01..0000000
--- a/uncloud/uncloud_vm/serializers.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from django.contrib.auth import get_user_model
-
-from rest_framework import serializers
-
-from .models import VMHost, VMProduct, VMSnapshotProduct, VMDiskProduct, VMDiskImageProduct
-from uncloud_pay.models import RecurringPeriod
-
-GB_SSD_PER_DAY=0.012
-GB_HDD_PER_DAY=0.0006
-
-GB_SSD_PER_DAY=0.012
-GB_HDD_PER_DAY=0.0006
-
-
-class VMHostSerializer(serializers.ModelSerializer):
- vms = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
-
- class Meta:
- model = VMHost
- fields = '__all__'
- read_only_fields = [ 'vms' ]
-
-
-class VMDiskProductSerializer(serializers.ModelSerializer):
- class Meta:
- model = VMDiskProduct
- fields = '__all__'
-
-class VMDiskImageProductSerializer(serializers.ModelSerializer):
- class Meta:
- model = VMDiskImageProduct
- fields = '__all__'
-
-class VMProductSerializer(serializers.ModelSerializer):
- class Meta:
- model = VMProduct
- fields = ['uuid', 'order', 'owner', 'status', 'name',
- 'cores', 'ram_in_gb', 'recurring_period',
- 'snapshots' ]
- read_only_fields = ['uuid', 'order', 'owner', 'status']
-
- # Custom field used at creation (= ordering) only.
- recurring_period = serializers.ChoiceField(
- choices=VMProduct.allowed_recurring_periods())
-
- snapshots = serializers.PrimaryKeyRelatedField(many=True,
- read_only=True)
-
-
-class DCLVMProductSerializer(serializers.HyperlinkedModelSerializer):
- """
- Create an interface similar to standard DCL
- """
-
- # Custom field used at creation (= ordering) only.
- recurring_period = serializers.ChoiceField(
- choices=VMProduct.allowed_recurring_periods())
-
- os_disk_uuid = serializers.UUIDField()
- # os_disk_size =
-
- class Meta:
- model = VMProduct
-
-class ManagedVMProductSerializer(serializers.ModelSerializer):
- """
- Managed VM serializer used in ungleich_service app.
- """
- class Meta:
- model = VMProduct
- fields = [ 'cores', 'ram_in_gb']
-
-class VMSnapshotProductSerializer(serializers.ModelSerializer):
- class Meta:
- model = VMSnapshotProduct
- fields = '__all__'
-
-
-    # verify that vm.owner == request.user
- def validate_vm(self, value):
- if not value.owner == self.context['request'].user:
- raise serializers.ValidationError("VM {} not found for owner {}.".format(value,
- self.context['request'].user))
- disks = VMDiskProduct.objects.filter(vm=value)
-
- if len(disks) == 0:
- raise serializers.ValidationError("VM {} does not have any disks, cannot snapshot".format(value.uuid))
-
- return value
-
- pricing = {}
- pricing['per_gb_ssd'] = 0.012
- pricing['per_gb_hdd'] = 0.0006
- pricing['recurring_period'] = 'per_day'
diff --git a/uncloud/uncloud_vm/tests.py b/uncloud/uncloud_vm/tests.py
deleted file mode 100644
index 8d7994f..0000000
--- a/uncloud/uncloud_vm/tests.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import datetime
-
-import parsedatetime
-
-from django.test import TestCase
-from django.contrib.auth import get_user_model
-from django.utils import timezone
-from django.core.exceptions import ValidationError
-
-from uncloud_vm.models import VMDiskImageProduct, VMDiskProduct, VMProduct, VMHost
-from uncloud_pay.models import Order
-
-User = get_user_model()
-cal = parsedatetime.Calendar()
-
-
-# If you want to check the test database using some GUI/cli tool
-# then use the following connection parameters
-
-# host: localhost
-# database: test_uncloud
-# user: root
-# password:
-# port: 5432
-
-class VMTestCase(TestCase):
- @classmethod
- def setUpClass(cls):
- # Setup vm host
- cls.vm_host, created = VMHost.objects.get_or_create(
- hostname='serverx.placey.ungleich.ch', physical_cores=32, usable_cores=320,
- usable_ram_in_gb=512.0, status='active'
- )
- super().setUpClass()
-
- def setUp(self) -> None:
- # Setup two users as it is common to test with different user
- self.user = User.objects.create_user(
- username='testuser', email='test@test.com', first_name='Test', last_name='User'
- )
- self.user2 = User.objects.create_user(
- username='Meow', email='meow123@test.com', first_name='Meow', last_name='Cat'
- )
- super().setUp()
-
- def create_sample_vm(self, owner):
- one_month_later, parse_status = cal.parse("1 month later")
- return VMProduct.objects.create(
- vmhost=self.vm_host, cores=2, ram_in_gb=4, owner=owner,
- order=Order.objects.create(
- owner=owner,
- creation_date=datetime.datetime.now(tz=timezone.utc),
- starting_date=datetime.datetime.now(tz=timezone.utc),
- ending_date=datetime.datetime(*one_month_later[:6], tzinfo=timezone.utc),
- recurring_price=4.0, one_time_price=5.0, recurring_period='per_month'
- )
- )
-
- def test_disk_product(self):
- """Ensures that a VMDiskProduct can only be created from a VMDiskImageProduct
- that is in status 'active'"""
-
- vm = self.create_sample_vm(owner=self.user)
-
- pending_disk_image = VMDiskImageProduct.objects.create(
- owner=self.user, name='pending_disk_image', is_os_image=True, is_public=True, size_in_gb=10,
- status='pending'
- )
- try:
- vm_disk_product = VMDiskProduct.objects.create(
- owner=self.user, vm=vm, image=pending_disk_image, size_in_gb=10
- )
- except ValidationError:
- vm_disk_product = None
-
- self.assertIsNone(
- vm_disk_product,
- msg='VMDiskProduct created with disk image whose status is not active.'
- )
-
- def test_vm_disk_product_creation(self):
- """Ensure that a user can only create a VMDiskProduct for an existing VM"""
-
- disk_image = VMDiskImageProduct.objects.create(
- owner=self.user, name='disk_image', is_os_image=True, is_public=True, size_in_gb=10,
- status='active'
- )
-
- with self.assertRaises(ValidationError, msg='User created a VMDiskProduct for non-existing VM'):
- # Create VMProduct object but don't save it in database
- vm = VMProduct()
-
- vm_disk_product = VMDiskProduct.objects.create(
- owner=self.user, vm=vm, image=disk_image, size_in_gb=10
- )
-
- def test_vm_disk_product_creation_for_someone_else(self):
- """Ensure that a user can only create a VMDiskProduct for his/her own VM"""
-
- # Create a VM which is ownership of self.user2
- someone_else_vm = self.create_sample_vm(owner=self.user2)
-
- # 'self.user' would try to create a VMDiskProduct for 'user2's VM
- with self.assertRaises(ValidationError, msg='User created a VMDiskProduct for someone else VM.'):
- vm_disk_product = VMDiskProduct.objects.create(
- owner=self.user, vm=someone_else_vm,
- size_in_gb=10,
- image=VMDiskImageProduct.objects.create(
- owner=self.user, name='disk_image', is_os_image=True, is_public=True, size_in_gb=10,
- status='active'
- )
- )
diff --git a/uncloud/ungleich_service/admin.py b/uncloud/ungleich_service/admin.py
deleted file mode 100644
index 8c38f3f..0000000
--- a/uncloud/ungleich_service/admin.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from django.contrib import admin
-
-# Register your models here.
diff --git a/uncloud/ungleich_service/migrations/0001_initial.py b/uncloud/ungleich_service/migrations/0001_initial.py
deleted file mode 100644
index 5b843c8..0000000
--- a/uncloud/ungleich_service/migrations/0001_initial.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Generated by Django 3.0.3 on 2020-03-17 11:45
-
-from django.conf import settings
-from django.db import migrations, models
-import django.db.models.deletion
-import uuid
-
-
-class Migration(migrations.Migration):
-
- initial = True
-
- dependencies = [
- ('uncloud_vm', '0003_remove_vmhost_vms'),
- migrations.swappable_dependency(settings.AUTH_USER_MODEL),
- ('uncloud_pay', '0001_initial'),
- ]
-
- operations = [
- migrations.CreateModel(
- name='MatrixServiceProduct',
- fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('status', models.CharField(choices=[('PENDING', 'Pending'), ('AWAITING_PAYMENT', 'Awaiting payment'), ('BEING_CREATED', 'Being created'), ('ACTIVE', 'Active'), ('DELETED', 'Deleted')], default='PENDING', max_length=32)),
- ('domain', models.CharField(default='domain.tld', max_length=255)),
- ('order', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.Order')),
- ('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
- ('vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.VMProduct')),
- ],
- options={
- 'abstract': False,
- },
- ),
- ]
diff --git a/uncloud/ungleich_service/models.py b/uncloud/ungleich_service/models.py
deleted file mode 100644
index 9d6a8ac..0000000
--- a/uncloud/ungleich_service/models.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import uuid
-
-from django.db import models
-from uncloud_pay.models import Product, RecurringPeriod
-from uncloud_vm.models import VMProduct
-
-class MatrixServiceProduct(Product):
- monthly_managment_fee = 20
-
- description = "Managed Matrix HomeServer"
-
- # Specific to Matrix-as-a-Service
- vm = models.ForeignKey(
- VMProduct, on_delete=models.CASCADE
- )
- domain = models.CharField(max_length=255, default='domain.tld')
-
- def recurring_price(self, recurring_period=RecurringPeriod.PER_MONTH):
- if recurring_period == RecurringPeriod.PER_MONTH:
- return self.monthly_managment_fee
- else:
-            raise Exception('Invalid recurring period for Matrix Service Product pricing.')
-
- @staticmethod
- def allowed_recurring_periods():
- return list(filter(
- lambda pair: pair[0] in [RecurringPeriod.PER_MONTH],
- RecurringPeriod.choices))
-
- @property
- def one_time_price(self):
- return 30
diff --git a/uncloud/ungleich_service/serializers.py b/uncloud/ungleich_service/serializers.py
deleted file mode 100644
index b4038b7..0000000
--- a/uncloud/ungleich_service/serializers.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from rest_framework import serializers
-from .models import MatrixServiceProduct
-from uncloud_vm.serializers import ManagedVMProductSerializer
-from uncloud_vm.models import VMProduct
-from uncloud_pay.models import RecurringPeriod
-
-class MatrixServiceProductSerializer(serializers.ModelSerializer):
- vm = ManagedVMProductSerializer()
-
- # Custom field used at creation (= ordering) only.
- recurring_period = serializers.ChoiceField(
- choices=MatrixServiceProduct.allowed_recurring_periods())
-
- class Meta:
- model = MatrixServiceProduct
- fields = ['uuid', 'order', 'owner', 'status', 'vm', 'domain', 'recurring_period']
- read_only_fields = ['uuid', 'order', 'owner', 'status']
diff --git a/uncloud/ungleich_service/tests.py b/uncloud/ungleich_service/tests.py
deleted file mode 100644
index 7ce503c..0000000
--- a/uncloud/ungleich_service/tests.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from django.test import TestCase
-
-# Create your tests here.
diff --git a/uncloud/ungleich_service/views.py b/uncloud/ungleich_service/views.py
deleted file mode 100644
index 47c15e2..0000000
--- a/uncloud/ungleich_service/views.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from rest_framework import viewsets, permissions
-from rest_framework.response import Response
-from django.db import transaction
-
-from .models import MatrixServiceProduct
-from .serializers import MatrixServiceProductSerializer
-
-from uncloud_pay.helpers import ProductViewSet
-from uncloud_pay.models import Order
-from uncloud_vm.models import VMProduct
-
-class MatrixServiceProductViewSet(ProductViewSet):
- permission_classes = [permissions.IsAuthenticated]
- serializer_class = MatrixServiceProductSerializer
-
- def get_queryset(self):
- return MatrixServiceProduct.objects.filter(owner=self.request.user)
-
- @transaction.atomic
- def create(self, request):
- # Extract serializer data.
- serializer = self.get_serializer(data=request.data)
- serializer.is_valid(raise_exception=True)
- order_recurring_period = serializer.validated_data.pop("recurring_period")
-
- # Create base order.
- order = Order.objects.create(
- recurring_period=order_recurring_period,
- owner=request.user
- )
- order.save()
-
-        # Create underlying VM.
- # TODO: move this logic to a method for use with other
- # products.
- vm_data = serializer.validated_data.pop('vm')
- vm_data['owner'] = request.user
- vm_data['order'] = order
- vm = VMProduct.objects.create(**vm_data)
-
- # XXX: Move this to some kind of on_create hook in parent
- # Product class?
- order.add_record(
- vm.one_time_price,
- vm.recurring_price(order.recurring_period),
- vm.description)
-
- # Create service.
- service = serializer.save(
- order=order,
- owner=self.request.user,
- vm=vm)
-
- # XXX: Move this to some kind of on_create hook in parent
- # Product class?
- order.add_record(
- service.one_time_price,
- service.recurring_price(order.recurring_period),
- service.description)
-
- return Response(serializer.data)
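
The *create()* flow above builds one Order and then appends one record for the
underlying VM and one for the Matrix service; bills later sum both. A small
sketch of that accumulation (only the 30/20 service prices come from the model
above, the VM prices are made up):

.. code-block:: python

    records = [
        # (one_time_price, recurring_price, description)
        (0, 12, 'VM: 2 cores, 4 GB RAM'),        # illustrative VM pricing
        (30, 20, 'Managed Matrix HomeServer'),   # from MatrixServiceProduct
    ]

    one_time_total = sum(r[0] for r in records)
    recurring_total = sum(r[1] for r in records)
    print(one_time_total, recurring_total)  # 30 once, 32 per recurring period
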
diff --git a/uncloud/urls.py b/uncloud/urls.py
new file mode 100644
index 0000000..169be7f
--- /dev/null
+++ b/uncloud/urls.py
@@ -0,0 +1,94 @@
+"""uncloud URL Configuration
+
+The `urlpatterns` list routes URLs to views. For more information please see:
+ https://docs.djangoproject.com/en/3.0/topics/http/urls/
+"""
+
+from django.contrib import admin
+from django.urls import path, include
+from django.conf import settings
+from django.conf.urls.static import static
+
+from rest_framework import routers
+from rest_framework.schemas import get_schema_view
+
+#from opennebula import views as oneviews
+from uncloud import views as uncloudviews
+from uncloud_auth import views as authviews
+from uncloud_net import views as netviews
+from uncloud_pay import views as payviews
+from uncloud_vm import views as vmviews
+from uncloud_service import views as serviceviews
+
+router = routers.DefaultRouter()
+
+# Beta endpoints
+router.register(r'beta/vm', vmviews.NicoVMProductViewSet, basename='nicovmproduct')
+
+# VM
+router.register(r'v1/vm/snapshot', vmviews.VMSnapshotProductViewSet, basename='vmsnapshotproduct')
+router.register(r'v1/vm/diskimage', vmviews.VMDiskImageProductViewSet, basename='vmdiskimageproduct')
+router.register(r'v1/vm/disk', vmviews.VMDiskProductViewSet, basename='vmdiskproduct')
+router.register(r'v1/vm/vm', vmviews.VMProductViewSet, basename='vmproduct')
+
+
+# creates VM from os image
+#router.register(r'vm/ipv6onlyvm', vmviews.VMProductViewSet, basename='vmproduct')
+# ... AND adds IPv4 mapping
+#router.register(r'vm/dualstackvm', vmviews.VMProductViewSet, basename='vmproduct')
+
+# Services
+router.register(r'v1/service/matrix', serviceviews.MatrixServiceProductViewSet, basename='matrixserviceproduct')
+router.register(r'v1/service/generic', serviceviews.GenericServiceProductViewSet, basename='genericserviceproduct')
+
+
+
+# Pay
+router.register(r'v1/my/address', payviews.BillingAddressViewSet, basename='billingaddress')
+router.register(r'v1/my/bill', payviews.BillViewSet, basename='bill')
+router.register(r'v1/my/order', payviews.OrderViewSet, basename='order')
+router.register(r'v1/my/payment', payviews.PaymentViewSet, basename='payment')
+router.register(r'v1/my/payment-method', payviews.PaymentMethodViewSet, basename='payment-method')
+
+# admin/staff urls
+router.register(r'v1/admin/bill', payviews.AdminBillViewSet, basename='admin/bill')
+router.register(r'v1/admin/payment', payviews.AdminPaymentViewSet, basename='admin/payment')
+router.register(r'v1/admin/order', payviews.AdminOrderViewSet, basename='admin/order')
+router.register(r'v1/admin/vmhost', vmviews.VMHostViewSet)
+router.register(r'v1/admin/vmcluster', vmviews.VMClusterViewSet)
+#router.register(r'v1/admin/vpnpool', netviews.VPNPoolViewSet)
+#router.register(r'v1/admin/opennebula', oneviews.VMViewSet, basename='opennebula')
+
+# User/Account
+router.register(r'v1/my/user', authviews.UserViewSet, basename='user')
+router.register(r'v1/admin/user', authviews.AdminUserViewSet, basename='useradmin')
+router.register(r'v1/user/register', authviews.AccountManagementViewSet, basename='user/register')
+
+
+################################################################################
+# v2
+
+# Net
+router.register(r'v2/net/wireguardvpn', netviews.WireGuardVPNViewSet, basename='wireguardvpnnetwork')
+router.register(r'v2/net/wireguardvpnsizes', netviews.WireGuardVPNSizes, basename='wireguardvpnnetworksizes')
+
+
+
+urlpatterns = [
+ path(r'api/', include(router.urls)),
+
+ path('api-auth/', include('rest_framework.urls', namespace='rest_framework')), # for login to REST API
+ path('openapi', get_schema_view(
+ title="uncloud",
+ description="uncloud API",
+ version="1.0.0"
+ ), name='openapi-schema'),
+
+ # web/ = stuff to view in the browser
+# path('web/vpn/create/', netviews.WireGuardVPNCreateView.as_view(), name="vpncreate"),
+ path('login/', authviews.LoginView.as_view(), name="login"),
+ path('logout/', authviews.LogoutView.as_view(), name="logout"),
+ path('admin/', admin.site.urls),
+ path('cc/reg/', payviews.RegisterCard.as_view(), name="cc_register"),
+ path('', uncloudviews.UncloudIndex.as_view(), name="uncloudindex"),
+]
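
Each *router.register()* call above expands into a set of named URL patterns
under the given prefix, all mounted below */api/* by
*path('api/', include(router.urls))*. A minimal sketch of that mechanism,
assuming a configured Django project with DRF installed (the viewset and prefix
are illustrative):

.. code-block:: python

    from rest_framework import routers, viewsets
    from rest_framework.response import Response

    class PingViewSet(viewsets.ViewSet):
        def list(self, request):
            return Response({'ping': 'pong'})

    router = routers.DefaultRouter()
    router.register(r'v1/ping', PingViewSet, basename='ping')

    # Generates named routes such as 'ping-list' under the prefix 'v1/ping/'.
    for url in router.urls:
        print(url)
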
diff --git a/uncloud/views.py b/uncloud/views.py
new file mode 100644
index 0000000..198abd0
--- /dev/null
+++ b/uncloud/views.py
@@ -0,0 +1,4 @@
+from django.views.generic.base import TemplateView
+
+class UncloudIndex(TemplateView):
+ template_name = "uncloud/index.html"
diff --git a/uncloud/uncloud/wsgi.py b/uncloud/wsgi.py
similarity index 100%
rename from uncloud/uncloud/wsgi.py
rename to uncloud/wsgi.py
diff --git a/uncloud_auth/__init__.py b/uncloud_auth/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uncloud/uncloud_auth/admin.py b/uncloud_auth/admin.py
similarity index 100%
rename from uncloud/uncloud_auth/admin.py
rename to uncloud_auth/admin.py
diff --git a/uncloud/uncloud_auth/apps.py b/uncloud_auth/apps.py
similarity index 100%
rename from uncloud/uncloud_auth/apps.py
rename to uncloud_auth/apps.py
diff --git a/uncloud_auth/management/commands/make-admin.py b/uncloud_auth/management/commands/make-admin.py
new file mode 100644
index 0000000..9157439
--- /dev/null
+++ b/uncloud_auth/management/commands/make-admin.py
@@ -0,0 +1,21 @@
+from django.core.management.base import BaseCommand
+from django.contrib.auth import get_user_model
+import sys
+
+class Command(BaseCommand):
+ help = 'Give Admin rights to existing user'
+
+ def add_arguments(self, parser):
+ parser.add_argument('username', type=str)
+ parser.add_argument('--superuser', action='store_true')
+
+ def handle(self, *args, **options):
+ user = get_user_model().objects.get(username=options['username'])
+ user.is_staff = True
+
+ if options['superuser']:
+ user.is_superuser = True
+
+ user.save()
+
+ print(f"{user.username} is now admin (superuser={user.is_superuser})")
diff --git a/uncloud/uncloud_auth/migrations/0001_initial.py b/uncloud_auth/migrations/0001_initial.py
similarity index 88%
rename from uncloud/uncloud_auth/migrations/0001_initial.py
rename to uncloud_auth/migrations/0001_initial.py
index a1f8d00..b263dc6 100644
--- a/uncloud/uncloud_auth/migrations/0001_initial.py
+++ b/uncloud_auth/migrations/0001_initial.py
@@ -1,7 +1,8 @@
-# Generated by Django 3.0.3 on 2020-03-03 16:49
+# Generated by Django 3.1 on 2020-12-13 10:38
import django.contrib.auth.models
import django.contrib.auth.validators
+import django.core.validators
from django.db import migrations, models
import django.utils.timezone
@@ -11,7 +12,7 @@ class Migration(migrations.Migration):
initial = True
dependencies = [
- ('auth', '0011_update_proxy_permissions'),
+ ('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
@@ -23,12 +24,13 @@ class Migration(migrations.Migration):
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
- ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
+ ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
+ ('maximum_credit', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0)])),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
diff --git a/uncloud_auth/migrations/__init__.py b/uncloud_auth/migrations/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uncloud/uncloud_auth/models.py b/uncloud_auth/models.py
similarity index 81%
rename from uncloud/uncloud_auth/models.py
rename to uncloud_auth/models.py
index c3a0912..90463e1 100644
--- a/uncloud/uncloud_auth/models.py
+++ b/uncloud_auth/models.py
@@ -4,8 +4,6 @@ from django.core.validators import MinValueValidator
from uncloud import AMOUNT_DECIMALS, AMOUNT_MAX_DIGITS
-from uncloud_pay.models import get_balance_for_user
-
class User(AbstractUser):
"""
We use the standard user and add a maximum credit that is allowed
@@ -17,7 +15,3 @@ class User(AbstractUser):
max_digits=AMOUNT_MAX_DIGITS,
decimal_places=AMOUNT_DECIMALS,
validators=[MinValueValidator(0)])
-
- @property
- def balance(self):
- return get_balance_for_user(self)
diff --git a/uncloud_auth/serializers.py b/uncloud_auth/serializers.py
new file mode 100644
index 0000000..c3f6694
--- /dev/null
+++ b/uncloud_auth/serializers.py
@@ -0,0 +1,72 @@
+from django.contrib.auth import get_user_model
+from django.db import transaction
+from ldap3.core.exceptions import LDAPEntryAlreadyExistsResult
+from rest_framework import serializers
+
+from uncloud import AMOUNT_DECIMALS, AMOUNT_MAX_DIGITS
+from uncloud_pay.models import BillingAddress, get_balance_for_user
+
+from .ungleich_ldap import LdapManager
+
+
+class UserSerializer(serializers.ModelSerializer):
+    # The balance property was dropped from the User model in this change,
+    # so expose it here via uncloud_pay instead.
+    balance = serializers.SerializerMethodField()
+
+    class Meta:
+        model = get_user_model()
+        read_only_fields = [ 'username', 'maximum_credit' ]
+        fields = read_only_fields + [ 'balance', 'email' ] # , 'primary_billing_address' ]
+
+    def get_balance(self, obj):
+        return get_balance_for_user(obj)
+
+ def validate(self, data):
+ """
+ Ensure that the primary billing address belongs to the user
+ """
+        # The following check is WIP and currently raises exceptions;
+        # re-enable it once primary_billing_address is fully supported.
+ # if 'primary_billing_address' in data:
+ # if not data['primary_billing_address'].owner == self.instance:
+ # raise serializers.ValidationError('Invalid data')
+
+ return data
+
+ def update(self, instance, validated_data):
+ ldap_manager = LdapManager()
+ return_val, _ = ldap_manager.change_user_details(
+ instance.username, {'mail': validated_data.get('email')}
+ )
+ if not return_val:
+ raise serializers.ValidationError('Couldn\'t update email')
+ instance.email = validated_data.get('email')
+ instance.save()
+ return instance
+
+
+class UserRegistrationSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = get_user_model()
+ fields = ['username', 'first_name', 'last_name', 'email', 'password']
+ extra_kwargs = {
+ 'password': {'style': {'input_type': 'password'}},
+ 'first_name': {'allow_blank': False, 'required': True},
+ 'last_name': {'allow_blank': False, 'required': True},
+ 'email': {'allow_blank': False, 'required': True},
+ }
+
+ def create(self, validated_data):
+ ldap_manager = LdapManager()
+ try:
+ data = {
+ 'user': validated_data['username'],
+ 'password': validated_data['password'],
+ 'email': validated_data['email'],
+ 'firstname': validated_data['first_name'],
+ 'lastname': validated_data['last_name'],
+ }
+ ldap_manager.create_user(**data)
+ except LDAPEntryAlreadyExistsResult:
+ raise serializers.ValidationError(
+ {'username': ['A user with that username already exists.']}
+ )
+ else:
+ return get_user_model().objects.create_user(**validated_data)
+
+
+class ImportUserSerializer(serializers.Serializer):
+ username = serializers.CharField()
diff --git a/uncloud_auth/templates/uncloud_auth/login.html b/uncloud_auth/templates/uncloud_auth/login.html
new file mode 100644
index 0000000..04f9a15
--- /dev/null
+++ b/uncloud_auth/templates/uncloud_auth/login.html
@@ -0,0 +1,13 @@
+{% extends 'uncloud/base.html' %}
+
+{% block body %}
+
+
+
+
+
+{% endblock %}
diff --git a/uncloud_auth/uldap.py b/uncloud_auth/uldap.py
new file mode 100644
index 0000000..aa90c77
--- /dev/null
+++ b/uncloud_auth/uldap.py
@@ -0,0 +1,42 @@
+import ldap
+# from django.conf import settings
+
+AUTH_LDAP_SERVER_URI = "ldaps://ldap1.ungleich.ch,ldaps://ldap2.ungleich.ch"
+AUTH_LDAP_BIND_DN="uid=django-create,ou=system,dc=ungleich,dc=ch"
+AUTH_LDAP_BIND_PASSWORD="kS#e+v\zjKn]L!,RIu2}V+DUS"
+# AUTH_LDAP_USER_SEARCH = LDAPSearch("dc=ungleich,dc=ch",
+# ldap.SCOPE_SUBTREE,
+# "(uid=%(user)s)")
+
+
+
+ldap_object = ldap.initialize(AUTH_LDAP_SERVER_URI)
+cancelid = ldap_object.bind(AUTH_LDAP_BIND_DN, AUTH_LDAP_BIND_PASSWORD)
+
+res = ldap_object.search_s("dc=ungleich,dc=ch", ldap.SCOPE_SUBTREE, "(uid=nico)")
+print(res)
+
+# class LDAP(object):
+# """
+# Managing users in LDAP
+
+# Requires the following settings?
+
+# LDAP_USER_DN: where to create users in the tree
+
+# LDAP_ADMIN_DN: which DN to use for managing users
+#     LDAP_ADMIN_PASSWORD: which password to use
+
+#     This module will reuse information from django_auth_ldap, including:
+
+# AUTH_LDAP_SERVER_URI
+
+# """
+# def __init__(self):
+# pass
+
+# def create_user(self):
+# pass
+
+# def change_password(self):
+# pass
diff --git a/uncloud_auth/ungleich_ldap.py b/uncloud_auth/ungleich_ldap.py
new file mode 100644
index 0000000..f22b423
--- /dev/null
+++ b/uncloud_auth/ungleich_ldap.py
@@ -0,0 +1,284 @@
+import base64
+import hashlib
+import logging
+import random
+
+import ldap3
+from django.conf import settings
+
+logger = logging.getLogger(__name__)
+
+
+class LdapManager:
+ __instance = None
+ def __new__(cls):
+ if LdapManager.__instance is None:
+ LdapManager.__instance = object.__new__(cls)
+ return LdapManager.__instance
+
+ def __init__(self):
+ """
+ Initialize the LDAP subsystem.
+ """
+ self.rng = random.SystemRandom()
+ self.server = ldap3.Server(settings.AUTH_LDAP_SERVER)
+
+
+ def get_admin_conn(self):
+ """
+ Return a bound :class:`ldap3.Connection` instance which has write
+ permissions on the dn in which the user accounts reside.
+ """
+ conn = self.get_conn(user=settings.LDAP_ADMIN_DN,
+ password=settings.LDAP_ADMIN_PASSWORD,
+ raise_exceptions=True)
+ conn.bind()
+ return conn
+
+
+ def get_conn(self, **kwargs):
+ """
+ Return an unbound :class:`ldap3.Connection` which talks to the configured
+ LDAP server.
+
+ The *kwargs* are passed to the constructor of :class:`ldap3.Connection` and
+ can be used to set *user*, *password* and other useful arguments.
+ """
+ return ldap3.Connection(self.server, **kwargs)
+
+
+ def _ssha_password(self, password):
+ """
+ Apply the SSHA password hashing scheme to the given *password*.
+ *password* must be a :class:`bytes` object, containing the utf-8
+ encoded password.
+
+ Return a :class:`bytes` object containing ``ascii``-compatible data
+ which can be used as LDAP value, e.g. after armoring it once more using
+ base64 or decoding it to unicode from ``ascii``.
+ """
+ SALT_BYTES = 15
+
+ sha1 = hashlib.sha1()
+ salt = self.rng.getrandbits(SALT_BYTES * 8).to_bytes(SALT_BYTES,
+ "little")
+ sha1.update(password)
+ sha1.update(salt)
+
+ digest = sha1.digest()
+ passwd = b"{SSHA}" + base64.b64encode(digest + salt)
+ return passwd
+
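+    # Illustrative note (not part of the original code): the value returned by
+    # _ssha_password has the shape b"{SSHA}" + base64(sha1(password + salt) + salt),
+    # i.e. the standard LDAP SSHA format. The salt is random, so e.g.
+    # LdapManager()._ssha_password(b"secret") yields a different string on every
+    # call while still verifying against the same password.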
+
+ def create_user(self, user, password, firstname, lastname, email):
+ conn = self.get_admin_conn()
+ uidNumber = self._get_max_uid() + 1
+ logger.debug("uidNumber={uidNumber}".format(uidNumber=uidNumber))
+ user_exists = True
+ while user_exists:
+ user_exists, _ = self.check_user_exists(
+ "",
+ '(&(objectClass=inetOrgPerson)(objectClass=posixAccount)'
+ '(objectClass=top)(uidNumber={uidNumber}))'.format(
+ uidNumber=uidNumber
+ )
+ )
+ if user_exists:
+ logger.debug(
+ "{uid} exists. Trying next.".format(uid=uidNumber)
+ )
+ uidNumber += 1
+ logger.debug("{uid} does not exist. Using it".format(uid=uidNumber))
+ self._set_max_uid(uidNumber)
+ try:
+ uid = user # user.encode("utf-8")
+ conn.add("uid={uid},{customer_dn}".format(
+ uid=uid, customer_dn=settings.LDAP_CUSTOMER_DN
+ ),
+ ["inetOrgPerson", "posixAccount", "ldapPublickey"],
+ {
+ "uid": [uid],
+ "sn": [lastname.encode("utf-8")],
+ "givenName": [firstname.encode("utf-8")],
+ "cn": [uid],
+ "displayName": ["{} {}".format(firstname, lastname).encode("utf-8")],
+ "uidNumber": [str(uidNumber)],
+ "gidNumber": [str(settings.LDAP_CUSTOMER_GROUP_ID)],
+ "loginShell": ["/bin/bash"],
+ "homeDirectory": ["/home/{}".format(user).encode("utf-8")],
+ "mail": email.encode("utf-8"),
+ "userPassword": [self._ssha_password(
+ password.encode("utf-8")
+ )]
+ }
+ )
+ logger.debug('Created user %s %s' % (user.encode('utf-8'),
+ uidNumber))
+ except Exception as ex:
+ logger.debug('Could not create user %s' % user.encode('utf-8'))
+ logger.error("Exception: " + str(ex))
+ raise
+ finally:
+ conn.unbind()
+
+
+ def change_password(self, uid, new_password):
+ """
+        Changes the password of the user identified by uid
+
+        :param uid: str The uid that identifies the user
+        :param new_password: str The new password string
+        :return: True if the password was changed successfully, False otherwise
+ """
+ conn = self.get_admin_conn()
+
+ # Make sure the user exists first to change his/her details
+ user_exists, entries = self.check_user_exists(
+ uid=uid,
+ search_base=settings.ENTIRE_SEARCH_BASE
+ )
+ return_val = False
+ if user_exists:
+ try:
+ return_val = conn.modify(
+ entries[0].entry_dn,
+ {
+ "userpassword": (
+ ldap3.MODIFY_REPLACE,
+ [self._ssha_password(new_password.encode("utf-8"))]
+ )
+ }
+ )
+ except Exception as ex:
+ logger.error("Exception: " + str(ex))
+ else:
+ logger.error("User {} not found".format(uid))
+
+ conn.unbind()
+ return return_val
+
+ def change_user_details(self, uid, details):
+ """
+        Updates the details of the user identified by uid with the values
+        given in the details dictionary.
+
+ Assumes that all attributes passed in kwargs are valid.
+
+ :param uid: str The uid that identifies the user
+ :param details: dict A dictionary containing the new values
+ :return: True if user details were updated successfully False otherwise
+ """
+ conn = self.get_admin_conn()
+
+ # Make sure the user exists first to change his/her details
+ user_exists, entries = self.check_user_exists(
+ uid=uid,
+ search_base=settings.ENTIRE_SEARCH_BASE
+ )
+
+ return_val = False
+ if user_exists:
+ details_dict = {k: (ldap3.MODIFY_REPLACE, [v.encode("utf-8")]) for
+ k, v in details.items()}
+ try:
+ return_val = conn.modify(entries[0].entry_dn, details_dict)
+ msg = "success"
+ except Exception as ex:
+ msg = str(ex)
+ logger.error("Exception: " + msg)
+ finally:
+ conn.unbind()
+ else:
+ msg = "User {} not found".format(uid)
+ logger.error(msg)
+ conn.unbind()
+ return return_val, msg
+
+ def check_user_exists(self, uid, search_filter="", attributes=None,
+ search_base=settings.LDAP_CUSTOMER_DN):
+ """
+ Check if the user with the given uid exists in the customer group.
+
+ :param uid: str representing the user
+ :param search_filter: str representing the filter condition to find
+                             users. If it is empty, the search finds the user with
+ the given uid.
+ :param attributes: list A list of str representing all the attributes
+ to be obtained in the result entries
+ :param search_base: str
+ :return: tuple (bool, [ldap3.abstract.entry.Entry ..])
+ A bool indicating if the user exists
+ A list of all entries obtained in the search
+ """
+ conn = self.get_admin_conn()
+ entries = []
+ try:
+ result = conn.search(
+ search_base=search_base,
+                search_filter=search_filter if search_filter else
+ '(uid={uid})'.format(uid=uid),
+ attributes=attributes
+ )
+ entries = conn.entries
+ finally:
+ conn.unbind()
+ return result, entries
+
+ def delete_user(self, uid):
+ """
+ Deletes the user with the given uid from ldap
+
+ :param uid: str representing the user
+ :return: True if the delete was successful False otherwise
+ """
+ conn = self.get_admin_conn()
+ try:
+ return_val = conn.delete(
+ ("uid={uid}," + settings.LDAP_CUSTOMER_DN).format(uid=uid),
+ )
+ msg = "success"
+ except Exception as ex:
+ msg = str(ex)
+ logger.error("Exception: " + msg)
+ return_val = False
+ finally:
+ conn.unbind()
+ return return_val, msg
+
+ def _set_max_uid(self, max_uid):
+ """
+ a utility function to save max_uid value to a file
+
+ :param max_uid: an integer representing the max uid
+ :return:
+ """
+ with open(settings.LDAP_MAX_UID_FILE_PATH, 'w+') as handler:
+ handler.write(str(max_uid))
+
+ def _get_max_uid(self):
+ """
+ A utility function to read the max uid value that was previously set
+
+ :return: An integer representing the max uid value that was previously
+ set
+ """
+ try:
+ with open(settings.LDAP_MAX_UID_FILE_PATH, 'r+') as handler:
+ try:
+ return_value = int(handler.read())
+ except ValueError as ve:
+ logger.error(
+                    "Error reading int value from {}. {} "
+                    "Returning default value {} instead".format(
+                        settings.LDAP_MAX_UID_FILE_PATH,
+ str(ve),
+ settings.LDAP_DEFAULT_START_UID
+ )
+ )
+ return_value = settings.LDAP_DEFAULT_START_UID
+ return return_value
+ except FileNotFoundError as fnfe:
+ logger.error("File not found : " + str(fnfe))
+ return_value = settings.LDAP_DEFAULT_START_UID
+ logger.error("So, returning UID={}".format(return_value))
+ return return_value
diff --git a/uncloud_auth/views.py b/uncloud_auth/views.py
new file mode 100644
index 0000000..9310a4c
--- /dev/null
+++ b/uncloud_auth/views.py
@@ -0,0 +1,77 @@
+from django.contrib.auth import views as auth_views
+from django.contrib.auth import logout
+
+from django_auth_ldap.backend import LDAPBackend
+from rest_framework import mixins, permissions, status, viewsets
+from rest_framework.decorators import action
+from rest_framework.response import Response
+
+from .serializers import *
+
+
+class LoginView(auth_views.LoginView):
+ template_name = 'uncloud_auth/login.html'
+
+class LogoutView(auth_views.LogoutView):
+ pass
+# template_name = 'uncloud_auth/logo.html'
+
+
+class UserViewSet(viewsets.GenericViewSet):
+ permission_classes = [permissions.IsAuthenticated]
+ serializer_class = UserSerializer
+
+ def get_queryset(self):
+ return self.request.user
+
+ def list(self, request, format=None):
+ # This is a bit stupid: we have a user, we create a queryset by
+ # matching on the username. But I don't know a "nicer" way.
+ # Nico, 2020-03-18
+ user = request.user
+ serializer = self.get_serializer(user, context = {'request': request})
+ return Response(serializer.data)
+
+ @action(detail=False, methods=['post'])
+ def change_email(self, request):
+ serializer = self.get_serializer(
+ request.user, data=request.data, context={'request': request}
+ )
+ serializer.is_valid(raise_exception=True)
+ serializer.save()
+ return Response(serializer.data)
+
+
+class AccountManagementViewSet(mixins.CreateModelMixin, viewsets.GenericViewSet):
+ serializer_class = UserRegistrationSerializer
+
+ def create(self, request, *args, **kwargs):
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ self.perform_create(serializer)
+ headers = self.get_success_headers(serializer.data)
+ return Response(
+ serializer.data, status=status.HTTP_201_CREATED, headers=headers
+ )
+
+
+class AdminUserViewSet(viewsets.ReadOnlyModelViewSet):
+ permission_classes = [permissions.IsAdminUser]
+
+ def get_serializer_class(self):
+ if self.action == 'import_from_ldap':
+ return ImportUserSerializer
+ else:
+ return UserSerializer
+
+ def get_queryset(self):
+ return get_user_model().objects.all()
+
+ @action(detail=False, methods=['post'], url_path='import_from_ldap')
+ def import_from_ldap(self, request, pk=None):
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ ldap_username = serializer.validated_data.pop("username")
+ user = LDAPBackend().populate_user(ldap_username)
+
+ return Response(UserSerializer(user, context = {'request': request}).data)
diff --git a/uncloud_net/__init__.py b/uncloud_net/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uncloud_net/admin.py b/uncloud_net/admin.py
new file mode 100644
index 0000000..ca6aaa1
--- /dev/null
+++ b/uncloud_net/admin.py
@@ -0,0 +1,7 @@
+from django.contrib import admin
+
+from .models import *
+
+
+for m in [ ReverseDNSEntry, WireGuardVPNPool, WireGuardVPN ]:
+ admin.site.register(m)
diff --git a/uncloud/uncloud_net/apps.py b/uncloud_net/apps.py
similarity index 100%
rename from uncloud/uncloud_net/apps.py
rename to uncloud_net/apps.py
diff --git a/uncloud_net/forms.py b/uncloud_net/forms.py
new file mode 100644
index 0000000..ad4e013
--- /dev/null
+++ b/uncloud_net/forms.py
@@ -0,0 +1,11 @@
+from django import forms
+
+from .models import *
+from .selectors import *
+
+class WireGuardVPNForm(forms.ModelForm):
+ network_size = forms.ChoiceField(choices=allowed_vpn_network_reservation_size)
+
+ class Meta:
+ model = WireGuardVPN
+ fields = [ "wireguard_public_key" ]
diff --git a/uncloud_net/management/commands/vpn.py b/uncloud_net/management/commands/vpn.py
new file mode 100644
index 0000000..9fdc80d
--- /dev/null
+++ b/uncloud_net/management/commands/vpn.py
@@ -0,0 +1,44 @@
+import sys
+from datetime import datetime
+
+from django.core.management.base import BaseCommand
+
+from django.contrib.auth import get_user_model
+
+from opennebula.models import VM as VMModel
+from uncloud_vm.models import VMHost, VMProduct, VMNetworkCard, VMDiskImageProduct, VMDiskProduct, VMCluster
+from uncloud_net.models import WireGuardVPNPool
+
+import logging
+log = logging.getLogger(__name__)
+
+
+
+peer_template="""
+# {username}
+[Peer]
+PublicKey = {public_key}
+AllowedIPs = {vpnnetwork}
+"""
+
+class Command(BaseCommand):
+    help = 'Generate the WireGuard configuration for a VPN host'
+
+    def add_arguments(self, parser):
+        parser.add_argument('--hostname',
+                            help='Name of this VPN host',
+                            required=True)
+
+    def handle(self, *args, **options):
+        self.create_vpn_config(options['hostname'])
+
+ def create_vpn_config(self, hostname):
+ configs = []
+
+        for pool in WireGuardVPNPool.objects.filter(vpn_server_hostname=hostname):
+            configs.append(pool.wireguard_config)
+
+ print(configs)
diff --git a/uncloud_net/migrations/0001_initial.py b/uncloud_net/migrations/0001_initial.py
new file mode 100644
index 0000000..6794156
--- /dev/null
+++ b/uncloud_net/migrations/0001_initial.py
@@ -0,0 +1,62 @@
+# Generated by Django 3.1 on 2020-12-13 13:42
+
+from django.conf import settings
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='MACAdress',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='WireGuardVPNPool',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('network', models.GenericIPAddressField(unique=True)),
+ ('network_mask', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(128)])),
+ ('subnetwork_mask', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(128)])),
+ ('vpn_server_hostname', models.CharField(max_length=256)),
+ ('wireguard_private_key', models.CharField(max_length=48)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='WireGuardVPNFreeLeases',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('pool_index', models.IntegerField(unique=True)),
+ ('vpnpool', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_net.wireguardvpnpool')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='WireGuardVPN',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('pool_index', models.IntegerField(unique=True)),
+ ('wireguard_public_key', models.CharField(max_length=48)),
+ ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
+ ('vpnpool', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_net.wireguardvpnpool')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ReverseDNSEntry',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('ip_address', models.GenericIPAddressField(unique=True)),
+ ('name', models.CharField(max_length=253)),
+ ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
+ ],
+ ),
+ ]
diff --git a/uncloud_net/migrations/0002_wireguardvpnpool_wireguard_public_key.py b/uncloud_net/migrations/0002_wireguardvpnpool_wireguard_public_key.py
new file mode 100644
index 0000000..479aba1
--- /dev/null
+++ b/uncloud_net/migrations/0002_wireguardvpnpool_wireguard_public_key.py
@@ -0,0 +1,19 @@
+# Generated by Django 3.1 on 2020-12-13 17:04
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud_net', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='wireguardvpnpool',
+ name='wireguard_public_key',
+ field=models.CharField(default='', max_length=48),
+ preserve_default=False,
+ ),
+ ]
diff --git a/uncloud_net/migrations/0003_wireguardvpnpool_wg_name.py b/uncloud_net/migrations/0003_wireguardvpnpool_wg_name.py
new file mode 100644
index 0000000..9ecf52c
--- /dev/null
+++ b/uncloud_net/migrations/0003_wireguardvpnpool_wg_name.py
@@ -0,0 +1,19 @@
+# Generated by Django 3.1 on 2020-12-13 17:31
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud_net', '0002_wireguardvpnpool_wireguard_public_key'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='wireguardvpnpool',
+ name='wg_name',
+ field=models.CharField(default='wg0', max_length=15),
+ preserve_default=False,
+ ),
+ ]
diff --git a/uncloud_net/migrations/0004_auto_20201213_1734.py b/uncloud_net/migrations/0004_auto_20201213_1734.py
new file mode 100644
index 0000000..24e46e7
--- /dev/null
+++ b/uncloud_net/migrations/0004_auto_20201213_1734.py
@@ -0,0 +1,17 @@
+# Generated by Django 3.1 on 2020-12-13 17:34
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud_net', '0003_wireguardvpnpool_wg_name'),
+ ]
+
+ operations = [
+ migrations.AddConstraint(
+ model_name='wireguardvpnpool',
+ constraint=models.UniqueConstraint(fields=('wg_name', 'vpn_server_hostname'), name='unique_interface_name_per_host'),
+ ),
+ ]
diff --git a/uncloud_net/migrations/0005_auto_20201220_1837.py b/uncloud_net/migrations/0005_auto_20201220_1837.py
new file mode 100644
index 0000000..1dbabe6
--- /dev/null
+++ b/uncloud_net/migrations/0005_auto_20201220_1837.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.1 on 2020-12-20 18:37
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud_net', '0004_auto_20201213_1734'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='wireguardvpn',
+ name='wireguard_public_key',
+ field=models.CharField(max_length=48, unique=True),
+ ),
+ ]
diff --git a/uncloud_net/migrations/0006_auto_20201224_1626.py b/uncloud_net/migrations/0006_auto_20201224_1626.py
new file mode 100644
index 0000000..c0dd2ef
--- /dev/null
+++ b/uncloud_net/migrations/0006_auto_20201224_1626.py
@@ -0,0 +1,17 @@
+# Generated by Django 3.1 on 2020-12-24 16:26
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud_net', '0005_auto_20201220_1837'),
+ ]
+
+ operations = [
+ migrations.AddConstraint(
+ model_name='wireguardvpn',
+ constraint=models.UniqueConstraint(fields=('vpnpool', 'wireguard_public_key'), name='wg_key_unique_per_pool'),
+ ),
+ ]
diff --git a/uncloud_net/migrations/__init__.py b/uncloud_net/migrations/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uncloud_net/models.py b/uncloud_net/models.py
new file mode 100644
index 0000000..0c8b02a
--- /dev/null
+++ b/uncloud_net/models.py
@@ -0,0 +1,192 @@
+import uuid
+import ipaddress
+
+from django.db import models
+from django.contrib.auth import get_user_model
+from django.core.validators import MinValueValidator, MaxValueValidator
+from django.core.exceptions import FieldError, ValidationError
+
+from uncloud_pay.models import Order
+
+class WireGuardVPNPool(models.Model):
+ """
+ Network address pools from which VPNs can be created
+ """
+
+ class Meta:
+ constraints = [
+ models.UniqueConstraint(fields=['wg_name', 'vpn_server_hostname' ],
+ name='unique_interface_name_per_host')
+ ]
+
+
+    # Linux interface names are restricted to at most 15 characters
+ wg_name = models.CharField(max_length=15)
+
+ network = models.GenericIPAddressField(unique=True)
+ network_mask = models.IntegerField(validators=[MinValueValidator(0),
+ MaxValueValidator(128)])
+
+ subnetwork_mask = models.IntegerField(validators=[
+ MinValueValidator(0),
+ MaxValueValidator(128)
+ ])
+
+ vpn_server_hostname = models.CharField(max_length=256)
+ wireguard_private_key = models.CharField(max_length=48)
+ wireguard_public_key = models.CharField(max_length=48)
+
+ @property
+ def max_pool_index(self):
+ """
+ Return the highest possible network / last network id
+ """
+
+ bits = self.subnetwork_mask - self.network_mask
+
+ return (2**bits)-1
+
+ @property
+ def ip_network(self):
+ return ipaddress.ip_network(f"{self.network}/{self.network_mask}")
+
+ def __str__(self):
+ return f"{self.ip_network} (subnets: /{self.subnetwork_mask})"
+
+ @property
+ def wireguard_config(self):
+ wireguard_config = [ f"[Interface]\nListenPort = 51820\nPrivateKey = {self.wireguard_private_key}\n" ]
+
+ peers = []
+
+ for vpn in self.wireguardvpn_set.all():
+ public_key = vpn.wireguard_public_key
+ peer_network = f"{vpn.address}/{self.subnetwork_mask}"
+ owner = vpn.owner
+
+ peers.append(f"# Owner: {owner}\n[Peer]\nPublicKey = {public_key}\nAllowedIPs = {peer_network}\n\n")
+
+ wireguard_config.extend(peers)
+
+ return "\n".join(wireguard_config)
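+
+    # Sketch of the text this property produces (illustrative values only):
+    #
+    #   [Interface]
+    #   ListenPort = 51820
+    #   PrivateKey = <pool wireguard_private_key>
+    #
+    #   # Owner: alice
+    #   [Peer]
+    #   PublicKey = <peer wireguard_public_key>
+    #   AllowedIPs = 2001:db8:0:1::/64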
+
+
+class WireGuardVPN(models.Model):
+ """
+ Created VPNNetworks
+ """
+ owner = models.ForeignKey(get_user_model(),
+ on_delete=models.CASCADE)
+ vpnpool = models.ForeignKey(WireGuardVPNPool,
+ on_delete=models.CASCADE)
+
+ pool_index = models.IntegerField(unique=True)
+
+ wireguard_public_key = models.CharField(max_length=48, unique=True)
+
+ class Meta:
+ constraints = [
+ models.UniqueConstraint(fields=['vpnpool', 'wireguard_public_key'],
+ name='wg_key_unique_per_pool')
+ ]
+
+
+ @property
+ def network_mask(self):
+ return self.vpnpool.subnetwork_mask
+
+ @property
+ def vpn_server(self):
+ return self.vpnpool.vpn_server_hostname
+
+ @property
+ def vpn_server_public_key(self):
+ return self.vpnpool.wireguard_public_key
+
+ @property
+ def address(self):
+ """
+        Locate this reservation's subnetwork inside the pool's supernet:
+        the pool network is split into /subnetwork_mask subnets and
+        pool_index selects which one belongs to this VPN.
+ """
+
+ net = self.vpnpool.ip_network
+ subnet = net[(2**(128-self.vpnpool.subnetwork_mask)) * self.pool_index]
+
+ return str(subnet)
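+
+    # Worked example (illustrative, assuming an IPv6 pool): with network
+    # 2001:db8::/48 and subnetwork_mask 64, each /64 spans 2**(128-64)
+    # addresses, so pool_index 1 maps to net[2**64] = 2001:db8:0:1:: and
+    # max_pool_index is 2**(64-48) - 1 = 65535.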
+
+ def __str__(self):
+ return f"{self.address} ({self.pool_index})"
+
+
+class WireGuardVPNFreeLeases(models.Model):
+ """
+    Pool indexes of previously used (now released) VPN networks, kept for re-use
+ """
+ vpnpool = models.ForeignKey(WireGuardVPNPool,
+ on_delete=models.CASCADE)
+
+ pool_index = models.IntegerField(unique=True)
+
+################################################################################
+
+class MACAdress(models.Model):
+ default_prefix = 0x420000000000
+
+
+class ReverseDNSEntry(models.Model):
+ """
+ A reverse DNS entry
+ """
+ owner = models.ForeignKey(get_user_model(),
+ on_delete=models.CASCADE)
+
+ ip_address = models.GenericIPAddressField(null=False, unique=True)
+
+ name = models.CharField(max_length=253, null=False)
+
+ @property
+ def reverse_pointer(self):
+ return ipaddress.ip_address(self.ip_address).reverse_pointer
+
+ def implement(self):
+ """
+ The implement function implements the change
+ """
+
+ # Get all DNS entries (?) / update this DNS entry
+ # convert to DNS name
+ #
+ pass
+
+
+ def save(self, *args, **kwargs):
+ # Product.objects.filter(config__parameters__contains='reverse_dns_network')
+ # FIXME: check if order is still active / not replaced
+
+ allowed = False
+ product = None
+
+ for order in Order.objects.filter(config__parameters__reverse_dns_network__isnull=False,
+ owner=self.owner):
+ network = order.config['parameters']['reverse_dns_network']
+
+ net = ipaddress.ip_network(network)
+ addr = ipaddress.ip_address(self.ip_address)
+
+ if addr in net:
+ allowed = True
+ product = order.product
+ break
+
+
+ if not allowed:
+ raise ValidationError(f"User {self.owner} does not have the right to create reverse DNS entry for {self.ip_address}")
+
+ super().save(*args, **kwargs)
+
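+    # Illustrative example (assumed order data): an Order whose config contains
+    # {"parameters": {"reverse_dns_network": "2001:db8::/48"}} allows its owner
+    # to create ReverseDNSEntry rows for any address inside 2001:db8::/48.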
+
+ def __str__(self):
+ return f"{self.ip_address} - {self.name}"
diff --git a/uncloud_net/selectors.py b/uncloud_net/selectors.py
new file mode 100644
index 0000000..6e12e8b
--- /dev/null
+++ b/uncloud_net/selectors.py
@@ -0,0 +1,43 @@
+from django.db import transaction
+from django.db.models import Count, F
+from .models import *
+
+def get_suitable_pools(subnetwork_mask):
+ """
+ Find suitable pools for a certain network size.
+
+    First, filter for all pools that offer the requested subnetwork_mask.
+
+    Then keep only those pools that are not fully exhausted:
+
+    the number of possible subnetworks in a pool is 2^(subnetwork_mask - network_mask),
+
+    while the number of used subnetworks is given by the number of WireGuardVPN reservations.
+
+ """
+
+ return WireGuardVPNPool.objects.annotate(
+ num_reservations=Count('wireguardvpn'),
+ max_reservations=2**(F('subnetwork_mask')-F('network_mask'))).filter(
+ num_reservations__lt=F('max_reservations'),
+ subnetwork_mask=subnetwork_mask)
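+
+# Illustrative example (assumed data): a pool with network_mask=48 and
+# subnetwork_mask=64 is annotated with max_reservations = 2**16 above, so
+# get_suitable_pools(64) keeps returning it until 65536 WireGuardVPN rows
+# reference it.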
+
+
+def allowed_vpn_network_reservation_size():
+ """
+ Find all possible sizes of subnetworks that are available.
+
+ Select all pools with free networks.
+
+ Get their subnetwork sizes, reduce to a set
+
+ """
+
+ pools = WireGuardVPNPool.objects.annotate(num_reservations=Count('wireguardvpn'),
+ max_reservations=2**(F('subnetwork_mask')-F('network_mask'))).filter(
+ num_reservations__lt=F('max_reservations'))
+
+ # Need to return set of tuples, see
+ # https://docs.djangoproject.com/en/3.1/ref/models/fields/#field-choices
+# return set([ (pool.subnetwork_mask, pool.subnetwork_mask) for pool in pools ])
+ return set([pool.subnetwork_mask for pool in pools ])
diff --git a/uncloud_net/serializers.py b/uncloud_net/serializers.py
new file mode 100644
index 0000000..09baa59
--- /dev/null
+++ b/uncloud_net/serializers.py
@@ -0,0 +1,57 @@
+import base64
+
+from django.contrib.auth import get_user_model
+from django.utils.translation import gettext_lazy as _
+from rest_framework import serializers
+
+from .models import *
+from .services import *
+from .selectors import *
+
+
+class WireGuardVPNSerializer(serializers.ModelSerializer):
+ address = serializers.CharField(read_only=True)
+ vpn_server = serializers.CharField(read_only=True)
+ vpn_server_public_key = serializers.CharField(read_only=True)
+ network_mask = serializers.IntegerField()
+
+ class Meta:
+ model = WireGuardVPN
+ fields = [ 'wireguard_public_key', 'address', 'network_mask', 'vpn_server',
+ 'vpn_server_public_key' ]
+
+ extra_kwargs = {
+ 'network_mask': {'write_only': True }
+ }
+
+
+ def validate_network_mask(self, value):
+ msg = _(f"No pool for network size {value}")
+ sizes = allowed_vpn_network_reservation_size()
+
+        if value not in sizes:
+ raise serializers.ValidationError(msg)
+
+ return value
+
+ def validate_wireguard_public_key(self, value):
+ msg = _("Supplied key is not a valid wireguard public key")
+
+ """
+ Verify wireguard key.
+ See https://lists.zx2c4.com/pipermail/wireguard/2020-December/006221.html
+ """
+
+ try:
+ decoded_key = base64.standard_b64decode(value)
+ except Exception as e:
+ raise serializers.ValidationError(msg)
+
+ if not len(decoded_key) == 32:
+ raise serializers.ValidationError(msg)
+
+ return value
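+
+    # Example of a value that passes this check (illustrative): a WireGuard
+    # public key is 32 bytes of Curve25519 data, i.e. a 44-character base64
+    # string; something like base64.standard_b64encode(os.urandom(32)).decode()
+    # has the right shape (os is not imported here, this is only a sketch).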
+
+
+class WireGuardVPNSizesSerializer(serializers.Serializer):
+ size = serializers.IntegerField(min_value=0, max_value=128)
diff --git a/uncloud_net/services.py b/uncloud_net/services.py
new file mode 100644
index 0000000..4f80c44
--- /dev/null
+++ b/uncloud_net/services.py
@@ -0,0 +1,47 @@
+from django.db import transaction
+from django.core.exceptions import ValidationError
+
+from .models import *
+from .selectors import *
+from .tasks import *
+
+@transaction.atomic
+def create_wireguard_vpn(owner, public_key, network_mask):
+
+ pool = get_suitable_pools(network_mask)[0]
+ count = pool.wireguardvpn_set.count()
+
+ # Try re-using previously used networks first
+ try:
+ free_lease = WireGuardVPNFreeLeases.objects.get(vpnpool=pool)
+
+ vpn = WireGuardVPN.objects.create(owner=owner,
+ vpnpool=pool,
+ pool_index=free_lease.pool_index,
+ wireguard_public_key=public_key)
+
+ free_lease.delete()
+
+ except WireGuardVPNFreeLeases.DoesNotExist:
+
+ # First object
+ if count == 0:
+ vpn = WireGuardVPN.objects.create(owner=owner,
+ vpnpool=pool,
+ pool_index=0,
+ wireguard_public_key=public_key)
+
+ else: # Select last network and try +1 it
+ last_net = WireGuardVPN.objects.filter(vpnpool=pool).order_by('pool_index').last()
+
+ next_index = last_net.pool_index + 1
+
+            if next_index <= pool.max_pool_index:
+                vpn = WireGuardVPN.objects.create(owner=owner,
+                                                  vpnpool=pool,
+                                                  pool_index=next_index,
+                                                  wireguard_public_key=public_key)
+            else:
+                # Pool exhausted: without this guard the function would fall
+                # through and fail with an UnboundLocalError on "return vpn".
+                raise ValidationError("No free network left in pool {}".format(pool))
+
+    configure_wireguard_server(pool)
+    return vpn
diff --git a/uncloud_net/tasks.py b/uncloud_net/tasks.py
new file mode 100644
index 0000000..78ae80c
--- /dev/null
+++ b/uncloud_net/tasks.py
@@ -0,0 +1,60 @@
+from celery import shared_task
+from .models import *
+
+from uncloud.models import UncloudTask
+
+import os
+import subprocess
+import logging
+import uuid
+
+log = logging.getLogger(__name__)
+
+@shared_task
+def whereami():
+ print(os.uname())
+ return os.uname()
+
+def configure_wireguard_server(wireguardvpnpool):
+ """
+ - Create wireguard config (DB query -> string)
+ - Submit config to cdist worker
+    - Change config locally on the worker / commit / push
+
+ """
+
+ config = wireguardvpnpool.wireguard_config
+ server = wireguardvpnpool.vpn_server_hostname
+
+ log.info(f"Configuring VPN server {server} (async)")
+
+ task_id = uuid.UUID(cdist_configure_wireguard_server.apply_async((config, server)).id)
+ UncloudTask.objects.create(task_id=task_id)
+
+
+@shared_task
+def cdist_configure_wireguard_server(config, server):
+ """
+ Create config and configure server.
+
+ To be executed on the cdist workers.
+ """
+
+    dirname = "/home/app/.cdist/type/__ungleich_wireguard/files/"
+    fname = os.path.join(dirname, server)
+
+ log.info(f"Configuring VPN server {server} (on cdist host)")
+ with open(fname, "w") as fd:
+ fd.write(config)
+
+ log.debug("git committing wireguard changes")
+ subprocess.run(f"cd {dirname} && git pull && git add {server} && git commit -m 'Updating config for {server}' && git push",
+ shell=True, check=True)
+
+ log.debug(f"Configuring VPN server {server} with cdist")
+ subprocess.run(f"cdist config {server}", shell=True, check=True)
+
+ # FIXME:
+ # ensure logs are on the server
+ # ensure exit codes are known
+ return True
diff --git a/uncloud_net/templates/uncloud_net/wireguardvpn_form.html b/uncloud_net/templates/uncloud_net/wireguardvpn_form.html
new file mode 100644
index 0000000..1463f41
--- /dev/null
+++ b/uncloud_net/templates/uncloud_net/wireguardvpn_form.html
@@ -0,0 +1,25 @@
+{% extends 'uncloud/base.html' %}
+
+{% block body %}
+
+
+
+
+ Create a VPN Network
+
+ Create a new wireguard based VPN network.
+
+
+
+
+
+
+
+
+
+
+{% endblock %}
diff --git a/uncloud_net/tests.py b/uncloud_net/tests.py
new file mode 100644
index 0000000..4491551
--- /dev/null
+++ b/uncloud_net/tests.py
@@ -0,0 +1,102 @@
+from django.test import TestCase
+from rest_framework.test import APIRequestFactory, force_authenticate
+
+from rest_framework.reverse import reverse
+from django.contrib.auth import get_user_model
+from django.core.exceptions import ValidationError, FieldError
+
+from .views import *
+from .models import *
+
+from uncloud_pay.models import BillingAddress, Order
+from uncloud.models import UncloudNetwork
+
+class UncloudNetworkTests(TestCase):
+ def test_invalid_IPv4_network(self):
+ with self.assertRaises(FieldError):
+ UncloudNetwork.objects.create(network_address="192.168.1.0",
+ network_mask=33)
+
+class VPNTests(TestCase):
+ def setUp(self):
+ self.user = get_user_model().objects.create_user('django-test-user', 'noreply@ungleich.ch')
+ self.admin_user = get_user_model().objects.create_user('django-test-adminuser',
+ 'noreply-admin@ungleich.ch')
+
+
+
+ self.admin_user.is_staff = True
+ self.admin_user.save()
+
+ self.pool_network = '2001:db8::'
+ self.pool_network2 = '2001:db8:1::'
+ self.pool_network_size = '48'
+ self.pool_subnetwork_size = '64'
+ self.pool_vpn_hostname = 'vpn.example.org'
+ self.pool_wireguard_private_key = 'MOz8kk0m4jhNtAXlge0qzexZh1MipIhu4HJwtdvZ2EY='
+
+ self.vpn_wireguard_public_key = 'B2b78eWBIXPMM1x4DDjkCDZepS0qDgcLN3T3PjcgXkY='
+
+ self.vpnpool = VPNPool.objects.get_or_create(network=self.pool_network,
+ network_size=self.pool_network_size,
+ subnetwork_size=self.pool_subnetwork_size,
+ vpn_hostname=self.pool_vpn_hostname,
+ wireguard_private_key=self.pool_wireguard_private_key
+ )
+
+ self.factory = APIRequestFactory()
+
+
+ def test_create_vpnpool(self):
+ url = reverse("vpnpool-list")
+ view = VPNPoolViewSet.as_view({'post': 'create'})
+ request = self.factory.post(url, { 'network': self.pool_network2,
+ 'network_size': self.pool_network_size,
+ 'subnetwork_size': self.pool_subnetwork_size,
+ 'vpn_hostname': self.pool_vpn_hostname,
+ 'wireguard_private_key': self.pool_wireguard_private_key
+
+ })
+ force_authenticate(request, user=self.admin_user)
+ response = view(request)
+
+ # This raises an exception if the request was not successful
+ # No assert needed
+ pool = VPNPool.objects.get(network=self.pool_network2)
+
+ # def test_create_vpn(self):
+ # url = reverse("vpnnetwork-list")
+ # view = VPNNetworkViewSet.as_view({'post': 'create'})
+ # request = self.factory.post(url, { 'network_size': self.pool_subnetwork_size,
+ # 'wireguard_public_key': self.vpn_wireguard_public_key
+
+ # })
+ # force_authenticate(request, user=self.user)
+
+
+ # # we don't have a billing address -> should raise an error
+ # # with self.assertRaises(ValidationError):
+ # # response = view(request)
+
+ # addr = BillingAddress.objects.get_or_create(
+ # owner=self.user,
+ # active=True,
+ # defaults={'organization': 'ungleich',
+ # 'name': 'Nico Schottelius',
+ # 'street': 'Hauptstrasse 14',
+ # 'city': 'Luchsingen',
+ # 'postal_code': '8775',
+ # 'country': 'CH' }
+ # )
+
+ # # This should work now
+ # response = view(request)
+
+ # # Verify that an order was created successfully - there should only be one order at
+ # # this point in time
+ # order = Order.objects.get(owner=self.user)
+
+
+ def tearDown(self):
+ self.user.delete()
+ self.admin_user.delete()
diff --git a/uncloud_net/views.py b/uncloud_net/views.py
new file mode 100644
index 0000000..77ba952
--- /dev/null
+++ b/uncloud_net/views.py
@@ -0,0 +1,70 @@
+from django.views.generic.edit import CreateView
+from django.contrib.auth.mixins import LoginRequiredMixin
+from django.contrib.messages.views import SuccessMessageMixin
+from rest_framework.response import Response
+
+from django.shortcuts import render
+
+from rest_framework import viewsets, permissions
+
+from .models import *
+from .serializers import *
+from .selectors import *
+from .services import *
+from .forms import *
+from .tasks import *
+
+class WireGuardVPNViewSet(viewsets.ModelViewSet):
+ serializer_class = WireGuardVPNSerializer
+ permission_classes = [permissions.IsAuthenticated]
+
+ def get_queryset(self):
+ if self.request.user.is_superuser:
+ obj = WireGuardVPN.objects.all()
+ else:
+ obj = WireGuardVPN.objects.filter(owner=self.request.user)
+
+ return obj
+
+ def create(self, request):
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+
+ vpn = create_wireguard_vpn(
+ owner=self.request.user,
+ public_key=serializer.validated_data['wireguard_public_key'],
+ network_mask=serializer.validated_data['network_mask']
+ )
+ configure_wireguard_server(vpn.vpnpool)
+ return Response(WireGuardVPNSerializer(vpn).data)
+
+
+class WireGuardVPNCreateView(LoginRequiredMixin, SuccessMessageMixin, CreateView):
+ model = WireGuardVPN
+
+ login_url = '/login/'
+ success_url = '/'
+    success_message = "%(address)s was created successfully"
+
+ form_class = WireGuardVPNForm
+
+    def get_success_message(self, cleaned_data):
+        return self.success_message % dict(cleaned_data,
+                                           address=self.object.address)
+
+class WireGuardVPNSizes(viewsets.ViewSet):
+    def list(self, request):
+        sizes = allowed_vpn_network_reservation_size()
+        sizes = [ { 'size': size } for size in sizes ]
+
+        return Response(WireGuardVPNSizesSerializer(sizes, many=True).data)
+
+
+
+# class VPNPoolViewSet(viewsets.ModelViewSet):
+# serializer_class = VPNPoolSerializer
+# permission_classes = [permissions.IsAdminUser]
+# queryset = VPNPool.objects.all()
diff --git a/uncloud_pay/__init__.py b/uncloud_pay/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/uncloud_pay/__init__.py
@@ -0,0 +1 @@
+
diff --git a/uncloud_pay/admin.py b/uncloud_pay/admin.py
new file mode 100644
index 0000000..2c72274
--- /dev/null
+++ b/uncloud_pay/admin.py
@@ -0,0 +1,92 @@
+from django.contrib import admin
+from django.template.response import TemplateResponse
+from django.urls import path
+from django.shortcuts import render
+from django.conf.urls import url
+
+from uncloud_pay.views import BillViewSet
+from hardcopy import bytestring_to_pdf
+from django.core.files.temp import NamedTemporaryFile
+from django.http import FileResponse
+from django.template.loader import render_to_string
+
+
+from uncloud_pay.models import *
+
+
+class BillRecordInline(admin.TabularInline):
+ model = BillRecord
+
+class RecurringPeriodInline(admin.TabularInline):
+ model = ProductToRecurringPeriod
+
+class ProductAdmin(admin.ModelAdmin):
+ inlines = [ RecurringPeriodInline ]
+
+class BillAdmin(admin.ModelAdmin):
+ inlines = [ BillRecordInline ]
+
+ def get_urls(self):
+ """
+ Create URLs for PDF view
+ """
+
+ info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
+ pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
+
+ url_patterns = [
+ pat(r'^([0-9]+)/as_pdf/$', self.as_pdf),
+ pat(r'^([0-9]+)/as_html/$', self.as_html),
+ ] + super().get_urls()
+
+ return url_patterns
+
+ def as_pdf(self, request, object_id):
+ bill = self.get_object(request, object_id=object_id)
+
+ if bill is None:
+ raise self._get_404_exception(object_id)
+
+ output_file = NamedTemporaryFile()
+ bill_html = render_to_string("bill.html.j2", {'bill': bill,
+ 'bill_records': bill.billrecord_set.all()
+ })
+
+ bytestring_to_pdf(bill_html.encode('utf-8'), output_file)
+ response = FileResponse(output_file, content_type="application/pdf")
+ response['Content-Disposition'] = f'filename="bill_{bill}.pdf"'
+
+ return response
+
+ def as_html(self, request, object_id):
+ bill = self.get_object(request, object_id=object_id)
+
+ if bill is None:
+ raise self._get_404_exception(object_id)
+
+        return render(request, 'bill.html.j2',
+                      {'bill': bill,
+                       'bill_records': bill.billrecord_set.all()
+                      })
+
+
+admin.site.register(Bill, BillAdmin)
+admin.site.register(ProductToRecurringPeriod)
+admin.site.register(Product, ProductAdmin)
+
+for m in [ Order, BillRecord, BillingAddress, RecurringPeriod, VATRate, StripeCustomer ]:
+ admin.site.register(m)
diff --git a/uncloud/uncloud_pay/apps.py b/uncloud_pay/apps.py
similarity index 100%
rename from uncloud/uncloud_pay/apps.py
rename to uncloud_pay/apps.py
diff --git a/uncloud/uncloud_pay/helpers.py b/uncloud_pay/helpers.py
similarity index 100%
rename from uncloud/uncloud_pay/helpers.py
rename to uncloud_pay/helpers.py
diff --git a/uncloud_pay/management/commands/.gitignore b/uncloud_pay/management/commands/.gitignore
new file mode 100644
index 0000000..cf5c7fa
--- /dev/null
+++ b/uncloud_pay/management/commands/.gitignore
@@ -0,0 +1,2 @@
+# Customer tests
+customer-*.py
diff --git a/uncloud_pay/management/commands/add-opennebula-vm-orders.py b/uncloud_pay/management/commands/add-opennebula-vm-orders.py
new file mode 100644
index 0000000..e0b6758
--- /dev/null
+++ b/uncloud_pay/management/commands/add-opennebula-vm-orders.py
@@ -0,0 +1,152 @@
+import datetime
+import sys
+
+from django.contrib.auth import get_user_model
+from django.core.management.base import BaseCommand
+from django.utils import timezone
+
+from uncloud_pay.models import (
+ BillingAddress
+)
+from uncloud_vm.models import (
+ VMDiskType, VMProduct
+)
+
+
+def vm_price_2020(cpu=1, ram=2, v6only=False):
+ if v6only:
+ discount = 9
+ else:
+ discount = 0
+
+ return cpu * 3 + ram * 4 - discount
+
+
+def disk_price_2020(size_in_gb, disk_type):
+ if disk_type == VMDiskType.CEPH_SSD:
+ price = 3.5 / 10
+ elif disk_type == VMDiskType.CEPH_HDD:
+ price = 1.5 / 100
+ else:
+ raise Exception("not yet defined price")
+
+ return size_in_gb * price
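+
+# Worked examples (arithmetic only; the currency/unit is assumed to be defined
+# by the billing system): vm_price_2020(cpu=2, ram=8) = 2*3 + 8*4 = 38
+# (29 with the v6only discount), and disk_price_2020(50, VMDiskType.CEPH_SSD)
+# = 50 * 3.5/10 = 17.5.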
+
+
+class Command(BaseCommand):
+ help = 'Adding VMs / creating orders for user'
+
+ def add_arguments(self, parser):
+ parser.add_argument('--username', type=str, required=True)
+
+ def handle(self, *args, **options):
+ user = get_user_model().objects.get(username=options['username'])
+
+ addr, created = BillingAddress.objects.get_or_create(
+ owner=user,
+ active=True,
+ defaults={'organization': 'Undefined organisation',
+ 'full_name': 'Undefined name',
+ 'street': 'Undefined Street',
+ 'city': 'Undefined city',
+ 'postal_code': '8750',
+ 'country': 'CH',
+ 'active': True
+ }
+ )
+
+ # 25206 + SSD
+ vm25206 = VMProduct.objects.create(name="one-25206", cores=1,
+ ram_in_gb=4, owner=user)
+ vm25206.create_order_at(
+ timezone.make_aware(datetime.datetime(2020, 3, 3)))
+
+ # vm25206_ssd = VMDiskProduct.objects.create(vm=vm25206, owner=user, size_in_gb=30)
+ # vm25206_ssd.create_order_at(timezone.make_aware(datetime.datetime(2020,3,3)))
+
+ # change 1
+ vm25206.cores = 2
+ vm25206.ram_in_gb = 8
+ vm25206.save()
+ vm25206.create_or_update_order(
+ when_to_start=timezone.make_aware(datetime.datetime(2020, 4, 17)))
+
+ sys.exit(0)
+
+ # change 2
+ # vm25206_ssd.size_in_gb = 50
+ # vm25206_ssd.save()
+ # vm25206_ssd.create_or_update_order(when_to_start=timezone.make_aware(datetime.datetime(2020,8,5)))
+
+ # 25206 done.
+
+ # 25615
+ vm25615 = VMProduct.objects.create(name="one-25615", cores=1,
+ ram_in_gb=4, owner=user)
+ vm25615.create_order_at(
+ timezone.make_aware(datetime.datetime(2020, 3, 3)))
+
+ # Change 2020-04-17
+ vm25615.cores = 2
+ vm25615.ram_in_gb = 8
+ vm25615.save()
+ vm25615.create_or_update_order(
+ when_to_start=timezone.make_aware(datetime.datetime(2020, 4, 17)))
+
+ # vm25615_ssd = VMDiskProduct(vm=vm25615, owner=user, size_in_gb=30)
+ # vm25615_ssd.create_order_at(timezone.make_aware(datetime.datetime(2020,3,3)))
+ # vm25615_ssd.save()
+
+ vm25208 = VMProduct.objects.create(name="one-25208", cores=1,
+ ram_in_gb=4, owner=user)
+ vm25208.create_order_at(
+ timezone.make_aware(datetime.datetime(2020, 3, 5)))
+
+ vm25208.cores = 2
+ vm25208.ram_in_gb = 8
+ vm25208.save()
+ vm25208.create_or_update_order(
+ when_to_start=timezone.make_aware(datetime.datetime(2020, 4, 17)))
+
+ Bill.create_next_bills_for_user(user, ending_date=end_of_month(
+ timezone.make_aware(datetime.datetime(2020, 7, 31))))
+
+ sys.exit(0)
+
+ vm25615_ssd.size_in_gb = 50
+ vm25615_ssd.save()
+ vm25615_ssd.create_or_update_order(
+ when_to_start=timezone.make_aware(datetime.datetime(2020, 8, 5)))
+
+ vm25208_ssd = VMDiskProduct.objects.create(vm=vm25208,
+ owner=user,
+ size_in_gb=30)
+
+ vm25208_ssd.size_in_gb = 50
+ vm25208_ssd.save()
+ vm25208_ssd.create_or_update_order(
+ when_to_start=timezone.make_aware(datetime.datetime(2020, 8, 5)))
+
+ # 25207
+ vm25207 = VMProduct.objects.create(name="OpenNebula 25207",
+ cores=1,
+ ram_in_gb=4,
+ owner=user)
+
+ vm25207_ssd = VMDiskProduct.objects.create(vm=vm25207,
+ owner=user,
+ size_in_gb=30)
+
+ vm25207_ssd.size_in_gb = 50
+ vm25207_ssd.save()
+ vm25207_ssd.create_or_update_order(
+ when_to_start=timezone.make_aware(datetime.datetime(2020, 8, 5)))
+
+ vm25207.cores = 2
+ vm25207.ram_in_gb = 8
+ vm25207.save()
+ vm25207.create_or_update_order(
+ when_to_start=timezone.make_aware(datetime.datetime(2020, 6, 19)))
+
+ # FIXES: check starting times (they are slightly different)
+ # add vm 25236
diff --git a/uncloud_pay/management/commands/bootstrap-user.py b/uncloud_pay/management/commands/bootstrap-user.py
new file mode 100644
index 0000000..b78e80c
--- /dev/null
+++ b/uncloud_pay/management/commands/bootstrap-user.py
@@ -0,0 +1,40 @@
+from django.core.management.base import BaseCommand
+from django.contrib.auth import get_user_model
+import datetime
+
+from uncloud_pay.models import *
+
+class Command(BaseCommand):
+ help = 'Bootstrap user (for testing)'
+
+ def add_arguments(self, parser):
+ parser.add_argument('--username', type=str, required=True)
+
+ def handle(self, *args, **options):
+ user = get_user_model().objects.get(username=options['username'])
+
+ addr = BillingAddress.objects.get_or_create(
+ owner=user,
+ active=True,
+ defaults={'organization': 'ungleich',
+                                         'full_name': 'Nico Schottelius',
+ 'street': 'Hauptstrasse 14',
+ 'city': 'Luchsingen',
+ 'postal_code': '8775',
+ 'country': 'CH' }
+ )
+
+
+ bills = Bill.objects.filter(owner=user)
+
+ # not even one bill? create!
+ if bills:
+ bill = bills[0]
+ else:
+ bill = Bill.objects.create(owner=user)
+
+ # find any order that is associated to this bill
+ orders = Order.objects.filter(owner=user)
+
+ print(f"Addr: {addr}")
+ print(f"Bill: {bill}")
diff --git a/uncloud/uncloud_pay/management/commands/charge-negative-balance.py b/uncloud_pay/management/commands/charge-negative-balance.py
similarity index 95%
rename from uncloud/uncloud_pay/management/commands/charge-negative-balance.py
rename to uncloud_pay/management/commands/charge-negative-balance.py
index 24d53bf..8ee8736 100644
--- a/uncloud/uncloud_pay/management/commands/charge-negative-balance.py
+++ b/uncloud_pay/management/commands/charge-negative-balance.py
@@ -1,6 +1,6 @@
from django.core.management.base import BaseCommand
from uncloud_auth.models import User
-from uncloud_pay.models import Order, Bill, PaymentMethod, get_balance_for
+from uncloud_pay.models import Order, Bill, PaymentMethod, get_balance_for_user
from datetime import timedelta
from django.utils import timezone
@@ -15,7 +15,7 @@ class Command(BaseCommand):
users = User.objects.all()
print("Processing {} users.".format(users.count()))
for user in users:
- balance = get_balance_for(user)
+ balance = get_balance_for_user(user)
if balance < 0:
print("User {} has negative balance ({}), charging.".format(user.username, balance))
payment_method = PaymentMethod.get_primary_for(user)
diff --git a/uncloud/uncloud_pay/management/commands/generate-bills.py b/uncloud_pay/management/commands/generate-bills.py
similarity index 100%
rename from uncloud/uncloud_pay/management/commands/generate-bills.py
rename to uncloud_pay/management/commands/generate-bills.py
diff --git a/uncloud/uncloud_pay/management/commands/handle-overdue-bills.py b/uncloud_pay/management/commands/handle-overdue-bills.py
similarity index 100%
rename from uncloud/uncloud_pay/management/commands/handle-overdue-bills.py
rename to uncloud_pay/management/commands/handle-overdue-bills.py
diff --git a/uncloud_pay/management/commands/import-vat-rates.py b/uncloud_pay/management/commands/import-vat-rates.py
new file mode 100644
index 0000000..46848cd
--- /dev/null
+++ b/uncloud_pay/management/commands/import-vat-rates.py
@@ -0,0 +1,35 @@
+from django.core.management.base import BaseCommand
+from uncloud_pay.models import VATRate
+
+import urllib
+import csv
+import sys
+import io
+
+class Command(BaseCommand):
+ help = '''Imports VAT Rates. Assume vat rates of format https://github.com/kdeldycke/vat-rates/blob/master/vat_rates.csv'''
+ vat_url = "https://raw.githubusercontent.com/ungleich/vat-rates/main/vat_rates.csv"
+
+
+ def add_arguments(self, parser):
+ parser.add_argument('--vat-url', default=self.vat_url)
+
+ def handle(self, *args, **options):
+ vat_url = options['vat_url']
+ url_open = urllib.request.urlopen(vat_url)
+
+ # map to fileio using stringIO
+ csv_file = io.StringIO(url_open.read().decode('utf-8'))
+ reader = csv.DictReader(csv_file)
+
+ for row in reader:
+# print(row)
+ obj, created = VATRate.objects.get_or_create(
+ starting_date=row["start_date"],
+ ending_date=row["stop_date"] if row["stop_date"] != "" else None,
+ territory_codes=row["territory_codes"],
+ currency_code=row["currency_code"],
+ rate=row["rate"],
+ rate_type=row["rate_type"],
+ description=row["description"]
+ )
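+
+# Usage sketch (assumed invocation): python manage.py import-vat-rates,
+# optionally with --vat-url pointing at a CSV in the same format as
+# https://github.com/kdeldycke/vat-rates/blob/master/vat_rates.csv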
diff --git a/uncloud_pay/migrations/0001_initial.py b/uncloud_pay/migrations/0001_initial.py
new file mode 100644
index 0000000..b1b68c5
--- /dev/null
+++ b/uncloud_pay/migrations/0001_initial.py
@@ -0,0 +1,181 @@
+# Generated by Django 3.1 on 2020-12-13 10:38
+
+from django.conf import settings
+import django.core.validators
+from django.db import migrations, models
+import django.db.models.deletion
+import django.utils.timezone
+import uncloud.models
+import uncloud_pay.models
+
+
+class Migration(migrations.Migration):
+
+ initial = True
+
+ dependencies = [
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
+ ('uncloud_auth', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Bill',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('creation_date', models.DateTimeField(auto_now_add=True)),
+ ('starting_date', models.DateTimeField(default=uncloud_pay.models.start_of_this_month)),
+ ('ending_date', models.DateTimeField()),
+ ('due_date', models.DateField(default=uncloud_pay.models.default_payment_delay)),
+ ('is_final', models.BooleanField(default=False)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='BillingAddress',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('full_name', models.CharField(max_length=256)),
+ ('organization', models.CharField(blank=True, max_length=256, null=True)),
+ ('street', models.CharField(max_length=256)),
+ ('city', models.CharField(max_length=256)),
+ ('postal_code', models.CharField(max_length=64)),
+ ('country', uncloud.models.CountryField(blank=True, choices=[('AD', 'Andorra'), ('AE', 'United Arab Emirates'), ('AF', 'Afghanistan'), ('AG', 'Antigua & Barbuda'), ('AI', 'Anguilla'), ('AL', 'Albania'), ('AM', 'Armenia'), ('AN', 'Netherlands Antilles'), ('AO', 'Angola'), ('AQ', 'Antarctica'), ('AR', 'Argentina'), ('AS', 'American Samoa'), ('AT', 'Austria'), ('AU', 'Australia'), ('AW', 'Aruba'), ('AZ', 'Azerbaijan'), ('BA', 'Bosnia and Herzegovina'), ('BB', 'Barbados'), ('BD', 'Bangladesh'), ('BE', 'Belgium'), ('BF', 'Burkina Faso'), ('BG', 'Bulgaria'), ('BH', 'Bahrain'), ('BI', 'Burundi'), ('BJ', 'Benin'), ('BM', 'Bermuda'), ('BN', 'Brunei Darussalam'), ('BO', 'Bolivia'), ('BR', 'Brazil'), ('BS', 'Bahama'), ('BT', 'Bhutan'), ('BV', 'Bouvet Island'), ('BW', 'Botswana'), ('BY', 'Belarus'), ('BZ', 'Belize'), ('CA', 'Canada'), ('CC', 'Cocos (Keeling) Islands'), ('CF', 'Central African Republic'), ('CG', 'Congo'), ('CH', 'Switzerland'), ('CI', 'Ivory Coast'), ('CK', 'Cook Iislands'), ('CL', 'Chile'), ('CM', 'Cameroon'), ('CN', 'China'), ('CO', 'Colombia'), ('CR', 'Costa Rica'), ('CU', 'Cuba'), ('CV', 'Cape Verde'), ('CX', 'Christmas Island'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DE', 'Germany'), ('DJ', 'Djibouti'), ('DK', 'Denmark'), ('DM', 'Dominica'), ('DO', 'Dominican Republic'), ('DZ', 'Algeria'), ('EC', 'Ecuador'), ('EE', 'Estonia'), ('EG', 'Egypt'), ('EH', 'Western Sahara'), ('ER', 'Eritrea'), ('ES', 'Spain'), ('ET', 'Ethiopia'), ('FI', 'Finland'), ('FJ', 'Fiji'), ('FK', 'Falkland Islands (Malvinas)'), ('FM', 'Micronesia'), ('FO', 'Faroe Islands'), ('FR', 'France'), ('FX', 'France, Metropolitan'), ('GA', 'Gabon'), ('GB', 'United Kingdom (Great Britain)'), ('GD', 'Grenada'), ('GE', 'Georgia'), ('GF', 'French Guiana'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GL', 'Greenland'), ('GM', 'Gambia'), ('GN', 'Guinea'), ('GP', 'Guadeloupe'), ('GQ', 'Equatorial Guinea'), ('GR', 'Greece'), ('GS', 'South Georgia and the South Sandwich Islands'), ('GT', 'Guatemala'), ('GU', 'Guam'), ('GW', 'Guinea-Bissau'), ('GY', 'Guyana'), ('HK', 'Hong Kong'), ('HM', 'Heard & McDonald Islands'), ('HN', 'Honduras'), ('HR', 'Croatia'), ('HT', 'Haiti'), ('HU', 'Hungary'), ('ID', 'Indonesia'), ('IE', 'Ireland'), ('IL', 'Israel'), ('IN', 'India'), ('IO', 'British Indian Ocean Territory'), ('IQ', 'Iraq'), ('IR', 'Islamic Republic of Iran'), ('IS', 'Iceland'), ('IT', 'Italy'), ('JM', 'Jamaica'), ('JO', 'Jordan'), ('JP', 'Japan'), ('KE', 'Kenya'), ('KG', 'Kyrgyzstan'), ('KH', 'Cambodia'), ('KI', 'Kiribati'), ('KM', 'Comoros'), ('KN', 'St. 
Kitts and Nevis'), ('KP', "Korea, Democratic People's Republic of"), ('KR', 'Korea, Republic of'), ('KW', 'Kuwait'), ('KY', 'Cayman Islands'), ('KZ', 'Kazakhstan'), ('LA', "Lao People's Democratic Republic"), ('LB', 'Lebanon'), ('LC', 'Saint Lucia'), ('LI', 'Liechtenstein'), ('LK', 'Sri Lanka'), ('LR', 'Liberia'), ('LS', 'Lesotho'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('LV', 'Latvia'), ('LY', 'Libyan Arab Jamahiriya'), ('MA', 'Morocco'), ('MC', 'Monaco'), ('MD', 'Moldova, Republic of'), ('MG', 'Madagascar'), ('MH', 'Marshall Islands'), ('ML', 'Mali'), ('MN', 'Mongolia'), ('MM', 'Myanmar'), ('MO', 'Macau'), ('MP', 'Northern Mariana Islands'), ('MQ', 'Martinique'), ('MR', 'Mauritania'), ('MS', 'Monserrat'), ('MT', 'Malta'), ('MU', 'Mauritius'), ('MV', 'Maldives'), ('MW', 'Malawi'), ('MX', 'Mexico'), ('MY', 'Malaysia'), ('MZ', 'Mozambique'), ('NA', 'Namibia'), ('NC', 'New Caledonia'), ('NE', 'Niger'), ('NF', 'Norfolk Island'), ('NG', 'Nigeria'), ('NI', 'Nicaragua'), ('NL', 'Netherlands'), ('NO', 'Norway'), ('NP', 'Nepal'), ('NR', 'Nauru'), ('NU', 'Niue'), ('NZ', 'New Zealand'), ('OM', 'Oman'), ('PA', 'Panama'), ('PE', 'Peru'), ('PF', 'French Polynesia'), ('PG', 'Papua New Guinea'), ('PH', 'Philippines'), ('PK', 'Pakistan'), ('PL', 'Poland'), ('PM', 'St. Pierre & Miquelon'), ('PN', 'Pitcairn'), ('PR', 'Puerto Rico'), ('PT', 'Portugal'), ('PW', 'Palau'), ('PY', 'Paraguay'), ('QA', 'Qatar'), ('RE', 'Reunion'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('SA', 'Saudi Arabia'), ('SB', 'Solomon Islands'), ('SC', 'Seychelles'), ('SD', 'Sudan'), ('SE', 'Sweden'), ('SG', 'Singapore'), ('SH', 'St. Helena'), ('SI', 'Slovenia'), ('SJ', 'Svalbard & Jan Mayen Islands'), ('SK', 'Slovakia'), ('SL', 'Sierra Leone'), ('SM', 'San Marino'), ('SN', 'Senegal'), ('SO', 'Somalia'), ('SR', 'Suriname'), ('ST', 'Sao Tome & Principe'), ('SV', 'El Salvador'), ('SY', 'Syrian Arab Republic'), ('SZ', 'Swaziland'), ('TC', 'Turks & Caicos Islands'), ('TD', 'Chad'), ('TF', 'French Southern Territories'), ('TG', 'Togo'), ('TH', 'Thailand'), ('TJ', 'Tajikistan'), ('TK', 'Tokelau'), ('TM', 'Turkmenistan'), ('TN', 'Tunisia'), ('TO', 'Tonga'), ('TP', 'East Timor'), ('TR', 'Turkey'), ('TT', 'Trinidad & Tobago'), ('TV', 'Tuvalu'), ('TW', 'Taiwan, Province of China'), ('TZ', 'Tanzania, United Republic of'), ('UA', 'Ukraine'), ('UG', 'Uganda'), ('UM', 'United States Minor Outlying Islands'), ('US', 'United States of America'), ('UY', 'Uruguay'), ('UZ', 'Uzbekistan'), ('VA', 'Vatican City State (Holy See)'), ('VC', 'St. Vincent & the Grenadines'), ('VE', 'Venezuela'), ('VG', 'British Virgin Islands'), ('VI', 'United States Virgin Islands'), ('VN', 'Viet Nam'), ('VU', 'Vanuatu'), ('WF', 'Wallis & Futuna Islands'), ('WS', 'Samoa'), ('YE', 'Yemen'), ('YT', 'Mayotte'), ('YU', 'Yugoslavia'), ('ZA', 'South Africa'), ('ZM', 'Zambia'), ('ZR', 'Zaire'), ('ZW', 'Zimbabwe')], default='CH', max_length=2)),
+ ('vat_number', models.CharField(blank=True, default='', max_length=100)),
+ ('active', models.BooleanField(default=False)),
+ ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Product',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('name', models.CharField(max_length=256, unique=True)),
+ ('description', models.CharField(max_length=1024)),
+ ('config', models.JSONField()),
+ ('currency', models.CharField(choices=[('CHF', 'Swiss Franc'), ('EUR', 'Euro'), ('USD', 'US Dollar')], default='CHF', max_length=32)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='RecurringPeriod',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('name', models.CharField(max_length=100, unique=True)),
+ ('duration_seconds', models.IntegerField(unique=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='StripeCustomer',
+ fields=[
+ ('owner', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='uncloud_auth.user')),
+ ('stripe_id', models.CharField(max_length=32)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='VATRate',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('starting_date', models.DateField(blank=True, null=True)),
+ ('ending_date', models.DateField(blank=True, null=True)),
+ ('territory_codes', models.TextField(blank=True, default='')),
+ ('currency_code', models.CharField(max_length=10)),
+ ('rate', models.FloatField()),
+ ('rate_type', models.TextField(blank=True, default='')),
+ ('description', models.TextField(blank=True, default='')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ProductToRecurringPeriod',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('is_default', models.BooleanField(default=False)),
+ ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.product')),
+ ('recurring_period', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.recurringperiod')),
+ ],
+ ),
+ migrations.AddField(
+ model_name='product',
+ name='recurring_periods',
+ field=models.ManyToManyField(through='uncloud_pay.ProductToRecurringPeriod', to='uncloud_pay.RecurringPeriod'),
+ ),
+ migrations.CreateModel(
+ name='PaymentMethod',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('source', models.CharField(choices=[('stripe', 'Stripe'), ('unknown', 'Unknown')], default='stripe', max_length=256)),
+ ('description', models.TextField()),
+ ('primary', models.BooleanField(default=False, editable=False)),
+ ('stripe_payment_method_id', models.CharField(blank=True, max_length=32, null=True)),
+ ('stripe_setup_intent_id', models.CharField(blank=True, max_length=32, null=True)),
+ ('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Payment',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0)])),
+ ('source', models.CharField(choices=[('wire', 'Wire Transfer'), ('stripe', 'Stripe'), ('voucher', 'Voucher'), ('referral', 'Referral'), ('unknown', 'Unknown')], default='unknown', max_length=256)),
+ ('timestamp', models.DateTimeField(auto_now_add=True)),
+ ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Order',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('description', models.TextField()),
+ ('config', models.JSONField()),
+ ('creation_date', models.DateTimeField(auto_now_add=True)),
+ ('starting_date', models.DateTimeField(default=django.utils.timezone.now)),
+ ('ending_date', models.DateTimeField(blank=True, null=True)),
+ ('one_time_price', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0)])),
+ ('recurring_price', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0)])),
+ ('currency', models.CharField(choices=[('CHF', 'Swiss Franc'), ('EUR', 'Euro'), ('USD', 'US Dollar')], default='CHF', max_length=32)),
+ ('should_be_billed', models.BooleanField(default=True)),
+ ('billing_address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.billingaddress')),
+ ('depends_on', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='parent_of', to='uncloud_pay.order')),
+ ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
+ ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.product')),
+ ('recurring_period', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.recurringperiod')),
+ ('replaces', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='replaced_by', to='uncloud_pay.order')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='BillRecord',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('creation_date', models.DateTimeField(auto_now_add=True)),
+ ('starting_date', models.DateTimeField()),
+ ('ending_date', models.DateTimeField()),
+ ('is_recurring_record', models.BooleanField()),
+ ('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.bill')),
+ ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.order')),
+ ],
+ ),
+ migrations.AddField(
+ model_name='bill',
+ name='billing_address',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.billingaddress'),
+ ),
+ migrations.AddField(
+ model_name='bill',
+ name='owner',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
+ ),
+ migrations.AddConstraint(
+ model_name='producttorecurringperiod',
+ constraint=models.UniqueConstraint(condition=models.Q(is_default=True), fields=('product',), name='one_default_recurring_period_per_product'),
+ ),
+ migrations.AddConstraint(
+ model_name='producttorecurringperiod',
+ constraint=models.UniqueConstraint(fields=('product', 'recurring_period'), name='recurring_period_once_per_product'),
+ ),
+ migrations.AddConstraint(
+ model_name='billingaddress',
+ constraint=models.UniqueConstraint(condition=models.Q(active=True), fields=('owner',), name='one_active_billing_address_per_user'),
+ ),
+ migrations.AddConstraint(
+ model_name='bill',
+ constraint=models.UniqueConstraint(fields=('owner', 'starting_date', 'ending_date'), name='one_bill_per_month_per_user'),
+ ),
+ ]
diff --git a/uncloud_pay/migrations/__init__.py b/uncloud_pay/migrations/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uncloud_pay/models.py b/uncloud_pay/models.py
new file mode 100644
index 0000000..18e6f85
--- /dev/null
+++ b/uncloud_pay/models.py
@@ -0,0 +1,1263 @@
+import logging
+import itertools
+import datetime
+from math import ceil
+from calendar import monthrange
+from decimal import Decimal
+from functools import reduce
+
+from django.db import models
+from django.db.models import Q
+from django.contrib.auth import get_user_model
+from django.contrib.contenttypes.fields import GenericForeignKey
+from django.contrib.contenttypes.models import ContentType
+from django.utils.translation import gettext_lazy as _
+from django.core.validators import MinValueValidator
+from django.utils import timezone
+from django.core.exceptions import ObjectDoesNotExist, ValidationError
+from django.conf import settings
+
+import uncloud_pay.stripe
+from uncloud import AMOUNT_DECIMALS, AMOUNT_MAX_DIGITS
+from uncloud.models import UncloudAddress
+
+# Used to generate bill due dates.
+BILL_PAYMENT_DELAY=datetime.timedelta(days=10)
+
+# Initialize logger.
+logger = logging.getLogger(__name__)
+
+def start_of_month(a_day):
+ """ Returns first of the month of a given datetime object"""
+ return a_day.replace(day=1,hour=0,minute=0,second=0, microsecond=0)
+
+def end_of_month(a_day):
+ """ Returns first of the month of a given datetime object"""
+
+ _, last_day = monthrange(a_day.year, a_day.month)
+ return a_day.replace(day=last_day,hour=23,minute=59,second=59, microsecond=0)
+
+def start_of_this_month():
+ """ Returns first of this month"""
+ a_day = timezone.now()
+ return a_day.replace(day=1,hour=0,minute=0,second=0, microsecond=0)
+
+def end_of_this_month():
+ """ Returns first of this month"""
+ a_day = timezone.now()
+
+ _, last_day = monthrange(a_day.year, a_day.month)
+ return a_day.replace(day=last_day,hour=23,minute=59,second=59, microsecond=0)
+
+def end_before(a_date):
+ """ Return suitable datetimefield for ending just before a_date """
+ return a_date - datetime.timedelta(seconds=1)
+
+def start_after(a_date):
+ """ Return suitable datetimefield for starting just after a_date """
+ return a_date + datetime.timedelta(seconds=1)
+
+def default_payment_delay():
+ return timezone.now() + BILL_PAYMENT_DELAY
+
+class Currency(models.TextChoices):
+ """
+ Possible currencies to be billed
+ """
+ CHF = 'CHF', _('Swiss Franc')
+ EUR = 'EUR', _('Euro')
+ USD = 'USD', _('US Dollar')
+
+
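+# A user's balance is the sum of all their payments minus the sum of all their
+# bills; a negative balance therefore means the user still owes money (this is
+# what the charge-negative-balance management command looks for).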
+def get_balance_for_user(user):
+ bills = reduce(
+ lambda acc, entry: acc + entry.sum,
+ Bill.objects.filter(owner=user),
+ 0)
+ payments = reduce(
+ lambda acc, entry: acc + entry.amount,
+ Payment.objects.filter(owner=user),
+ 0)
+ return payments - bills
+
+###
+# Stripe
+
+class StripeCustomer(models.Model):
+ owner = models.OneToOneField( get_user_model(),
+ primary_key=True,
+ on_delete=models.CASCADE)
+ stripe_id = models.CharField(max_length=32)
+
+ def __str__(self):
+ return self.owner.username
+
+###
+# Payments and Payment Methods.
+
+class Payment(models.Model):
+ owner = models.ForeignKey(get_user_model(),
+ on_delete=models.CASCADE)
+
+ amount = models.DecimalField(
+ default=0.0,
+ max_digits=AMOUNT_MAX_DIGITS,
+ decimal_places=AMOUNT_DECIMALS,
+ validators=[MinValueValidator(0)])
+
+ source = models.CharField(max_length=256,
+ choices = (
+ ('wire', 'Wire Transfer'),
+ ('stripe', 'Stripe'),
+ ('voucher', 'Voucher'),
+ ('referral', 'Referral'),
+ ('unknown', 'Unknown')
+ ),
+ default='unknown')
+ timestamp = models.DateTimeField(editable=False, auto_now_add=True)
+
+ # We override save() in order to activate products awaiting payment.
+ def save(self, *args, **kwargs):
+ # _state.adding is switched to false after super(...) call.
+ being_created = self._state.adding
+
+ unpaid_bills_before_payment = Bill.get_unpaid_for(self.owner)
+ super(Payment, self).save(*args, **kwargs) # Save payment in DB.
+ unpaid_bills_after_payment = Bill.get_unpaid_for(self.owner)
+
+ newly_paid_bills = list(
+ set(unpaid_bills_before_payment) - set(unpaid_bills_after_payment))
+ for bill in newly_paid_bills:
+ bill.activate_products()
+
+
+class PaymentMethod(models.Model):
+ owner = models.ForeignKey(get_user_model(),
+ on_delete=models.CASCADE,
+ editable=False)
+ source = models.CharField(max_length=256,
+ choices = (
+ ('stripe', 'Stripe'),
+ ('unknown', 'Unknown'),
+ ),
+ default='stripe')
+ description = models.TextField()
+ primary = models.BooleanField(default=False, editable=False)
+
+ # Only used for "Stripe" source
+ stripe_payment_method_id = models.CharField(max_length=32, blank=True, null=True)
+ stripe_setup_intent_id = models.CharField(max_length=32, blank=True, null=True)
+
+ @property
+ def stripe_card_last4(self):
+ if self.source == 'stripe' and self.active:
+ payment_method = uncloud_pay.stripe.get_payment_method(
+ self.stripe_payment_method_id)
+ return payment_method.card.last4
+ else:
+ return None
+
+ @property
+ def active(self):
+ if self.source == 'stripe' and self.stripe_payment_method_id != None:
+ return True
+ else:
+ return False
+
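+ # Charging with the 'stripe' source: look up the user's StripeCustomer
+ # mapping, create a PaymentIntent via uncloud_pay.stripe.charge_customer()
+ # and, if the charge succeeded, record the resulting Payment locally.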
+ def charge(self, amount):
+ if not self.active:
+ raise Exception('This payment method is inactive.')
+
+ if amount < 0: # Make sure we don't charge negative amount by errors...
+ raise Exception('Cannot charge negative amount.')
+
+ if self.source == 'stripe':
+ stripe_customer = StripeCustomer.objects.get(owner=self.owner).stripe_id
+ stripe_payment = uncloud_pay.stripe.charge_customer(
+ amount, stripe_customer, self.stripe_payment_method_id)
+ if 'paid' in stripe_payment and stripe_payment['paid'] == False:
+ raise Exception(stripe_payment['error'])
+ else:
+ payment = Payment.objects.create(
+ owner=self.owner, source=self.source, amount=amount)
+
+ return payment
+ else:
+ raise Exception('This payment method is unsupported/cannot be charged.')
+
+ def set_as_primary_for(self, user):
+ methods = PaymentMethod.objects.filter(owner=user, primary=True)
+ for method in methods:
+ print(method)
+ method.primary = False
+ method.save()
+
+ self.primary = True
+ self.save()
+
+ def get_primary_for(user):
+ methods = PaymentMethod.objects.filter(owner=user)
+ for method in methods:
+ # Do we want to do something with non-primary method?
+ if method.active and method.primary:
+ return method
+
+ return None
+
+ class Meta:
+ # TODO: limit to one primary method per user.
+ # unique_together is no good since it won't allow more than one
+ # non-primary method.
+ pass
+
+# See https://docs.djangoproject.com/en/dev/ref/models/fields/#field-choices-enum-types
+class RecurringPeriodDefaultChoices(models.IntegerChoices):
+ """
+ This is an old class that is being superseded by the database model below
+ """
+ PER_365D = 365*24*3600, _('Per 365 days')
+ PER_30D = 30*24*3600, _('Per 30 days')
+ PER_WEEK = 7*24*3600, _('Per Week')
+ PER_DAY = 24*3600, _('Per Day')
+ PER_HOUR = 3600, _('Per Hour')
+ PER_MINUTE = 60, _('Per Minute')
+ PER_SECOND = 1, _('Per Second')
+ ONE_TIME = 0, _('Onetime')
+
+# RecurringPeriods
+class RecurringPeriod(models.Model):
+ """
+ Available recurring periods.
+ Seeded by default from RecurringPeriodDefaultChoices
+ """
+
+ name = models.CharField(max_length=100, unique=True)
+ duration_seconds = models.IntegerField(unique=True)
+
+ @classmethod
+ def populate_db_defaults(cls):
+ for (seconds, name) in RecurringPeriodDefaultChoices.choices:
+ obj, created = cls.objects.get_or_create(name=name,
+ defaults={ 'duration_seconds': seconds })
+
+ @staticmethod
+ def secs_to_name(secs):
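+ # Rough human-readable rendering, e.g. 93600 seconds -> "1 days 2 hours 0 seconds".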
+ name = ""
+ days = 0
+ hours = 0
+
+ if secs > 24*3600:
+ days = secs // (24*3600)
+ secs -= (days*24*3600)
+
+ if secs > 3600:
+ hours = secs // 3600
+ secs -= hours*3600
+
+ return f"{days} days {hours} hours {secs} seconds"
+
+ def __str__(self):
+ duration = self.secs_to_name(self.duration_seconds)
+
+ return f"{self.name} ({duration})"
+
+
+###
+# Bills.
+
+class BillingAddress(UncloudAddress):
+ owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
+ vat_number = models.CharField(max_length=100, default="", blank=True)
+ active = models.BooleanField(default=False)
+
+ class Meta:
+ constraints = [
+ models.UniqueConstraint(fields=['owner'],
+ condition=Q(active=True),
+ name='one_active_billing_address_per_user')
+ ]
+
+ @classmethod
+ def populate_db_defaults(cls):
+ """
+ Ensure we have at least one billing address that is associated with the uncloud-admin.
+
+ This way we are sure that an UncloudProvider can be created.
+
+ Cannot use get_or_create as that looks for exactly one.
+
+ """
+
+ owner = get_user_model().objects.get(username=settings.UNCLOUD_ADMIN_NAME)
+ billing_address = cls.objects.filter(owner=owner).first()
+
+ if not billing_address:
+ billing_address = cls.objects.create(owner=owner,
+ organization="uncloud admins",
+ name="Uncloud Admin",
+ street="Uncloudstreet. 42",
+ city="Luchsingen",
+ postal_code="8775",
+ country="CH",
+ active=True)
+
+
+ @staticmethod
+ def get_address_for(user):
+ return BillingAddress.objects.get(owner=user, active=True)
+
+ def __str__(self):
+ return "{} - {}, {}, {} {}, {}".format(
+ self.owner,
+ self.full_name, self.street, self.postal_code, self.city,
+ self.country)
+
+###
+# VAT
+
+class VATRate(models.Model):
+ starting_date = models.DateField(blank=True, null=True)
+ ending_date = models.DateField(blank=True, null=True)
+ territory_codes = models.TextField(blank=True, default='')
+ currency_code = models.CharField(max_length=10)
+ rate = models.FloatField()
+ rate_type = models.TextField(blank=True, default='')
+ description = models.TextField(blank=True, default='')
+
+ @staticmethod
+ def get_for_country(country_code):
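+ # Returns the currently valid rate for a territory (a row with a starting
+ # date and no ending date) or 0 when no matching VATRate exists.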
+ vat_rate = None
+ try:
+ vat_rate = VATRate.objects.get(
+ territory_codes=country_code, starting_date__isnull=False, ending_date=None
+ )
+ return vat_rate.rate
+ except VATRate.DoesNotExist as dne:
+ logger.debug(str(dne))
+ logger.debug("Did not find VAT rate for %s, returning 0" % country_code)
+ return 0
+
+
+ def __str__(self):
+ return f"{self.territory_codes}: {self.starting_date} - {self.ending_date}: {self.rate_type}"
+
+###
+# Products
+
+class Product(models.Model):
+ """
+ A product is something a user can order. To record the pricing, we
+ create orders that each define a state in time.
+
+ A product can have *one* one_time_order and/or *one*
+ recurring_order.
+
+ If either of them needs to be updated, a new order of the same
+ type is created and linked to the previous order.
+
+ """
+
+ name = models.CharField(max_length=256, unique=True)
+ description = models.CharField(max_length=1024)
+ config = models.JSONField()
+ recurring_periods = models.ManyToManyField(RecurringPeriod, through='ProductToRecurringPeriod')
+ currency = models.CharField(max_length=32, choices=Currency.choices, default=Currency.CHF)
+
+ @property
+ def default_recurring_period(self):
+ """
+ Return the default recurring Period
+ """
+ return self.recurring_periods.get(producttorecurringperiod__is_default=True)
+
+ @classmethod
+ def populate_db_defaults(cls):
+ recurring_period = RecurringPeriod.objects.get(name="Per 30 days")
+
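+ # Each entry under config['features'] defines the allowed range (min/max)
+ # and the per-unit one-time and recurring prices that
+ # Order.calculate_prices_and_config() uses when an order is created.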
+ obj, created = cls.objects.get_or_create(name="Dual Stack Virtual Machine v1",
+ description="A standard virtual machine",
+ currency=Currency.CHF,
+ config={
+ 'features': {
+ 'cores':
+ { 'min': 1,
+ 'max': 48,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 3
+ },
+ 'ram_gb':
+ { 'min': 1,
+ 'max': 256,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 4
+ },
+ 'ssd_gb':
+ { 'min': 10,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 0.35
+ },
+ 'hdd_gb':
+ { 'min': 0,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 15/1000
+ },
+ 'additional_ipv4_address':
+ { 'min': 0,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 8
+ },
+ }
+ }
+ )
+
+ obj.recurring_periods.add(recurring_period, through_defaults= { 'is_default': True })
+
+ obj, created = cls.objects.get_or_create(name="Dual Stack Virtual Machine v2",
+ description="A standard virtual machine",
+ currency=Currency.CHF,
+ config={
+ 'features': {
+ 'base':
+ { 'min': 1,
+ 'max': 1,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 1
+ },
+ 'cores':
+ { 'min': 1,
+ 'max': 48,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 3
+ },
+ 'ram_gb':
+ { 'min': 1,
+ 'max': 256,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 4
+ },
+ 'ssd_gb':
+ { 'min': 10,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 0.35
+ },
+ 'hdd_gb':
+ { 'min': 0,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 15/1000
+ },
+ 'additional_ipv4_address':
+ { 'min': 0,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 9
+ },
+ }
+ }
+ )
+
+ obj.recurring_periods.add(recurring_period, through_defaults= { 'is_default': True })
+
+ obj, created = cls.objects.get_or_create(name="reverse DNS",
+ description="Reverse DNS network",
+ currency=Currency.CHF,
+ config={
+ 'parameters': [
+ 'network'
+ ]
+ })
+ obj.recurring_periods.add(recurring_period, through_defaults= { 'is_default': True })
+
+
+ def __str__(self):
+ return f"{self.name} - {self.description}"
+
+ @property
+ def recurring_orders(self):
+ return self.orders.order_by('id').exclude(recurring_period=RecurringPeriod.objects.get(name="Onetime"))
+
+ @property
+ def last_recurring_order(self):
+ return self.recurring_orders.last()
+
+ @property
+ def one_time_orders(self):
+ return self.orders.order_by('id').filter(recurring_period=RecurringPeriod.objects.get(name="Onetime"))
+
+ @property
+ def last_one_time_order(self):
+ return self.one_time_orders.last()
+
+ def create_order(self, when_to_start=None, recurring_period=None):
+ billing_address = BillingAddress.get_address_for(self.owner)
+
+ if not billing_address:
+ raise ValidationError("Cannot order without a billing address")
+
+ if not when_to_start:
+ when_to_start = timezone.now()
+
+ if not recurring_period:
+ recurring_period = self.default_recurring_period
+
+
+ # Create one time order if we did not create one already
+ if self.one_time_price > 0 and not self.last_one_time_order:
+ one_time_order = Order.objects.create(owner=self.owner,
+ billing_address=billing_address,
+ starting_date=when_to_start,
+ price=self.one_time_price,
+ recurring_period=RecurringPeriod.objects.get(name="ONE_TIME"),
+ description=str(self))
+ self.orders.add(one_time_order)
+ else:
+ one_time_order = None
+
+ if recurring_period != RecurringPeriod.objects.get(name="ONE_TIME"):
+ if one_time_order:
+ recurring_order = Order.objects.create(owner=self.owner,
+ billing_address=billing_address,
+ starting_date=when_to_start,
+ price=self.recurring_price,
+ recurring_period=recurring_period,
+ depends_on=one_time_order,
+ description=str(self))
+ else:
+ recurring_order = Order.objects.create(owner=self.owner,
+ billing_address=billing_address,
+ starting_date=when_to_start,
+ price=self.recurring_price,
+ recurring_period=recurring_period,
+ description=str(self))
+ self.orders.add(recurring_order)
+
+
+ # FIXME: this could/should be part of Order (?)
+ def create_or_update_recurring_order(self, when_to_start=None, recurring_period=None):
+ if not self.recurring_price:
+ return
+
+ if not recurring_period:
+ recurring_period = self.default_recurring_period
+
+ if not when_to_start:
+ when_to_start = timezone.now()
+
+ if self.last_recurring_order:
+ if self.recurring_price < self.last_recurring_order.price:
+
+ if when_to_start < self.last_recurring_order.next_cancel_or_downgrade_date():
+ when_to_start = start_after(self.last_recurring_order.next_cancel_or_downgrade_date())
+
+ when_to_end = end_before(when_to_start)
+
+ new_order = Order.objects.create(owner=self.owner,
+ billing_address=self.last_recurring_order.billing_address,
+ starting_date=when_to_start,
+ price=self.recurring_price,
+ recurring_period=recurring_period,
+ description=str(self),
+ replaces=self.last_recurring_order)
+
+ self.last_recurring_order.replace_with(new_order)
+ self.orders.add(new_order)
+ else:
+ self.create_order(when_to_start, recurring_period)
+
+ @property
+ def is_recurring(self):
+ return self.recurring_price > 0
+
+ @property
+ def billing_address(self):
+ return self.order.billing_address
+
+ def discounted_price_by_period(self, requested_period):
+ """
+ Each product has a standard recurring period for which
+ we define a price, e.g. a VPN is typically priced per year, a VM per month.
+
+ The user can opt-in to use a different period, which influences the price:
+ The longer a user commits, the higher the discount.
+
+ Products can also be limited in the available periods. For instance
+ a VPN only makes sense to be bought for at least one day.
+
+ Rules are as follows:
+
+ given a standard recurring period of ..., changing to ... modifies price ...
+
+
+ # One month for free if buying / year, compared to a month: about 8.33% discount
+ per_year -> per_month -> /11
+ per_month -> per_year -> *11
+
+ # Month has 30.42 days on average. About 7.9% discount to go monthly
+ per_month -> per_day -> /28
+ per_day -> per_month -> *28
+
+ # Day has 24h, give one for free
+ per_day -> per_hour -> /23
+ per_hour -> per_day  -> *23
+
+
+ Examples
+
+ VPN @ 120CHF/y becomes
+ - 10.91 CHF/month (130.91 CHF/year)
+ - 0.39 CHF/day (142.21 CHF/year)
+
+ VM @ 15 CHF/month becomes
+ - 165 CHF/year (13.75 CHF/month)
+ - 0.54 CHF/day (16.30 CHF/month)
+
+ """
+
+ # FIXME: This logic needs to be phased out / replaced by product specific (?)
+ # proportions. Maybe using the RecurringPeriod table to link the possible discounts/add ups
+
+ if self.default_recurring_period == RecurringPeriod.PER_365D:
+ if requested_period == RecurringPeriod.PER_365D:
+ return self.recurring_price
+ if requested_period == RecurringPeriod.PER_30D:
+ return self.recurring_price/11.
+ if requested_period == RecurringPeriod.PER_DAY:
+ return self.recurring_price/11./28.
+
+ elif self.default_recurring_period == RecurringPeriod.PER_30D:
+ if requested_period == RecurringPeriod.PER_365D:
+ return self.recurring_price*11
+ if requested_period == RecurringPeriod.PER_30D:
+ return self.recurring_price
+ if requested_period == RecurringPeriod.PER_DAY:
+ return self.recurring_price/28.
+
+ elif self.default_recurring_period == RecurringPeriod.PER_DAY:
+ if requested_period == RecurringPeriod.PER_365D:
+ return self.recurring_price*11*28
+ if requested_period == RecurringPeriod.PER_30D:
+ return self.recurring_price*28
+ if requested_period == RecurringPeriod.PER_DAY:
+ return self.recurring_price
+ else:
+ # FIXME: use the right type of exception here!
+ raise Exception("Did not implement the discounter for this case")
+
+
+ def save(self, *args, **kwargs):
+ # try:
+ # ba = BillingAddress.get_address_for(self.owner)
+ # except BillingAddress.DoesNotExist:
+ # raise ValidationError("User does not have a billing address")
+
+ # if not ba.active:
+ # raise ValidationError("User does not have an active billing address")
+
+
+ # Verify the required JSON fields
+
+ super().save(*args, **kwargs)
+
+
+
+###
+# Orders.
+
+class Order(models.Model):
+ """
+ Orders are assumed IMMUTABLE and used as the SOURCE OF TRUTH for generating
+ bills. Do **NOT** mutate them!
+
+ A one time order is "closed" (does not need to be billed anymore)
+ if it has one bill record. Having more than one is a programming
+ error.
+
+ A recurring order is closed if it has been replaced
+ (replaced_by__isnull=False) AND the ending_date is set AND it was
+ billed the last time it needed to be billed (how to check the last
+ item?)
+
+ BOTH are closed, if they are ended/closed AND have been fully
+ charged.
+
+ Fully charged == fully billed: sum_of_order_usage == sum_of_bill_records
+
+ """
+
+ owner = models.ForeignKey(get_user_model(),
+ on_delete=models.CASCADE,
+ editable=True)
+
+ billing_address = models.ForeignKey(BillingAddress,
+ on_delete=models.CASCADE)
+
+ description = models.TextField()
+
+ product = models.ForeignKey(Product, blank=False, null=False, on_delete=models.CASCADE)
+ config = models.JSONField()
+
+ creation_date = models.DateTimeField(auto_now_add=True)
+ starting_date = models.DateTimeField(default=timezone.now)
+ ending_date = models.DateTimeField(blank=True, null=True)
+
+ recurring_period = models.ForeignKey(RecurringPeriod,
+ on_delete=models.CASCADE,
+ editable=True)
+
+ one_time_price = models.DecimalField(default=0.0,
+ max_digits=AMOUNT_MAX_DIGITS,
+ decimal_places=AMOUNT_DECIMALS,
+ validators=[MinValueValidator(0)])
+
+ recurring_price = models.DecimalField(default=0.0,
+ max_digits=AMOUNT_MAX_DIGITS,
+ decimal_places=AMOUNT_DECIMALS,
+ validators=[MinValueValidator(0)])
+
+ currency = models.CharField(max_length=32, choices=Currency.choices, default=Currency.CHF)
+
+ replaces = models.ForeignKey('self',
+ related_name='replaced_by',
+ on_delete=models.CASCADE,
+ blank=True,
+ null=True)
+
+ depends_on = models.ForeignKey('self',
+ related_name='parent_of',
+ on_delete=models.CASCADE,
+ blank=True,
+ null=True)
+
+ should_be_billed = models.BooleanField(default=True)
+
+ @property
+ def earliest_ending_date(self):
+ """
+ Recurring orders cannot end before finishing at least one recurring period.
+
+ One time orders have a recurring period of 0, so this works universally
+ """
+
+ return self.starting_date + datetime.timedelta(seconds=self.recurring_period.duration_seconds)
+
+
+ def next_cancel_or_downgrade_date(self, until_when=None):
+ """
+ Return the next proper ending date after n times the
+ recurring_period, where n is an integer that applies for downgrading
+ or cancelling.
+ """
+
+ if not until_when:
+ until_when = timezone.now()
+
+ if until_when < self.starting_date:
+ raise ValidationError("Cannot end before start of start of order")
+
+ if self.recurring_period.duration_seconds > 0:
+ delta = until_when - self.starting_date
+
+ num_times = ceil(delta.total_seconds() / self.recurring_period.duration_seconds)
+
+ next_date = self.starting_date + datetime.timedelta(seconds=num_times * self.recurring_period.duration_seconds)
+ else:
+ next_date = self.starting_date
+
+ return next_date
+
+ def get_ending_date_for_bill(self, bill):
+ """
+ Determine the ending date given a specific bill
+ """
+
+ # If the order was quit, bill only up to its ending date / finish it (????)
+ # Probably not a good idea -- FIXME: maybe continue until the usual period end
+ if self.ending_date:
+ this_ending_date = self.ending_date
+ else:
+ if self.next_cancel_or_downgrade_date(bill.ending_date) > bill.ending_date:
+ this_ending_date = self.next_cancel_or_downgrade_date(bill.ending_date)
+ else:
+ this_ending_date = bill.ending_date
+
+ return this_ending_date
+
+
+ @property
+ def count_billed(self):
+ """
+ How many times (what quantity) this order has been billed so far.
+ This is mainly meant for recurring orders, but also works for one time orders.
+ """
+
+ return sum([ br.quantity for br in self.billrecord_set.all() ])
+
+ def count_used(self, when=None):
+ """
+ How many times (what quantity) this order has been used so far.
+ This is mainly meant for recurring orders, but also works for one time orders.
+ """
+
+ if self.is_one_time:
+ return 1
+
+ if not when:
+ when = timezone.now()
+
+ # Cannot be used after it ended
+ if self.ending_date and when > self.ending_date:
+ when = self.ending_date
+
+ return (when - self.starting_date).total_seconds() / self.recurring_period.duration_seconds
+
+ @property
+ def all_usage_billed(self, when=None):
+ """
+ Returns true if this order does not need any further billing
+ ever. In other words: is this order "closed"?
+ """
+
+ if self.count_billed == self.count_used(when):
+ return True
+ else:
+ return False
+
+ @property
+ def is_closed(self):
+ if self.all_usage_billed and self.ending_date:
+ return True
+ else:
+ return False
+
+ @property
+ def is_recurring(self):
+ return not self.recurring_period == RecurringPeriod.objects.get(name="ONE_TIME")
+
+ @property
+ def is_one_time(self):
+ return not self.is_recurring
+
+ def replace_with(self, new_order):
+ new_order.replaces = self
+ self.ending_date = end_before(new_order.starting_date)
+ self.save()
+
+ def update_order(self, config, starting_date=None):
+ """
+ Updating an order means creating a new order that references the previous order
+ """
+
+ if not starting_date:
+ starting_date = timezone.now()
+
+ new_order = self.__class__(owner=self.owner,
+ billing_address=self.billing_address,
+ description=self.description,
+ product=self.product,
+ config=config,
+ starting_date=starting_date,
+ currency=self.currency
+ )
+
+ (new_order.one_time_price, new_order.recurring_price, new_order.config) = new_order.calculate_prices_and_config()
+
+
+
+ new_order.replaces = self
+ new_order.save()
+
+ self.ending_date = end_before(new_order.starting_date)
+ self.save()
+
+ return new_order
+
+
+ def create_bill_record(self, bill):
+ br = None
+
+ # Note: check for != 0 not > 0, as we allow discounts to be expressed with < 0
+ if self.one_time_price != 0 and self.billrecord_set.count() == 0:
+ br = BillRecord.objects.create(bill=bill,
+ order=self,
+ starting_date=self.starting_date,
+ ending_date=self.starting_date,
+ is_recurring_record=False)
+
+ if self.recurring_price != 0:
+ br = BillRecord.objects.filter(bill=bill, order=self, is_recurring_record=True).first()
+
+ if br:
+ self.update_bill_record_for_recurring_order(br, bill)
+ else:
+ br = self.create_new_bill_record_for_recurring_order(bill)
+
+ return br
+
+ def update_bill_record_for_recurring_order(self,
+ bill_record,
+ bill):
+ """
+ Possibly update a bill record according to the information in the bill
+ """
+
+ # If the order has an ending date set, we might need to adjust the bill_record
+ if self.ending_date:
+ if bill_record.ending_date != self.ending_date:
+ bill_record.ending_date = self.ending_date
+
+ else:
+ # recurring, not terminated, should run until at least the end of the bill
+ if bill_record.ending_date < bill.ending_date:
+ bill_record.ending_date = bill.ending_date
+
+ bill_record.save()
+
+ def create_new_bill_record_for_recurring_order(self, bill):
+ """
+ Create a new bill record
+ """
+
+ last_bill_record = BillRecord.objects.filter(order=self, is_recurring_record=True).order_by('id').last()
+
+ starting_date=self.starting_date
+
+ if last_bill_record:
+ # We already charged beyond the end of this bill's period
+ if last_bill_record.ending_date >= bill.ending_date:
+ return
+
+ # This order is terminated or replaced
+ if self.ending_date:
+ # And the last bill record already covered us -> nothing to be done anymore
+ if last_bill_record.ending_date == self.ending_date:
+ return
+
+ starting_date = start_after(last_bill_record.ending_date)
+
+ ending_date = self.get_ending_date_for_bill(bill)
+
+ return BillRecord.objects.create(bill=bill,
+ order=self,
+ starting_date=starting_date,
+ ending_date=ending_date,
+ is_recurring_record=True)
+
+ def calculate_prices_and_config(self):
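+ # For every feature declared on the product, take the ordered value (or the
+ # product's minimum when unset), validate it against min/max and accumulate
+ # value * one_time_price_per_unit and value * recurring_price_per_unit.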
+ one_time_price = 0
+ recurring_price = 0
+
+ if self.config:
+ config = self.config
+
+ if 'features' not in self.config:
+ self.config['features'] = {}
+
+ else:
+ config = {
+ 'features': {}
+ }
+
+ # FIXME: adjust prices to the selected recurring_period
+
+ if 'features' in self.product.config:
+ for feature in self.product.config['features']:
+
+ # Set min to 0 if not specified
+ min_val = self.product.config['features'][feature].get('min', 0)
+
+ # self.config might not even have 'features', so we cannot use .get() on it
+ try:
+ value = self.config['features'][feature]
+ except (KeyError, TypeError):
+ value = self.product.config['features'][feature]['min']
+
+ # Set max to current value if not specified
+ max_val = self.product.config['features'][feature].get('max', value)
+
+
+ if value < min_val or value > max_val:
+ raise ValidationError(f"Feature '{feature}' must be at least {min_val} and at maximum {max_val}. Value is: {value}")
+
+ one_time_price += self.product.config['features'][feature]['one_time_price_per_unit'] * value
+ recurring_price += self.product.config['features'][feature]['recurring_price_per_unit'] * value
+ config['features'][feature] = value
+
+ return (one_time_price, recurring_price, config)
+
+ def check_parameters(self):
+ if 'parameters' in self.product.config:
+ for parameter in self.product.config['parameters']:
+ if not parameter in self.config['parameters']:
+ raise ValidationError(f"Required parameter '{parameter}' is missing.")
+
+
+ def save(self, *args, **kwargs):
+ # Calculate the price of the order when we create it
+ # IMMUTABLE fields -- need to create new order to modify them
+ # However this is not enforced here...
+ if self._state.adding:
+ (self.one_time_price, self.recurring_price, self.config) = self.calculate_prices_and_config()
+
+ if self.recurring_period_id is None:
+ self.recurring_period = self.product.default_recurring_period
+
+ try:
+ prod_period = self.product.recurring_periods.get(producttorecurringperiod__recurring_period=self.recurring_period)
+ except ObjectDoesNotExist:
+ raise ValidationError(f"Recurring Period {self.recurring_period} not allowed for product {self.product}")
+
+ self.check_parameters()
+
+ if self.ending_date and self.ending_date < self.starting_date:
+ raise ValidationError("End date cannot be before starting date")
+
+
+ super().save(*args, **kwargs)
+
+
+ def __str__(self):
+ try:
+ conf = " ".join([ f"{key}:{val}" for key,val in self.config['features'].items() if val != 0 ])
+ except KeyError:
+ conf = ""
+
+ return f"Order {self.id}: {self.description} {conf}"
+
+class Bill(models.Model):
+ """
+ A bill is a representation of usage at a specific time
+ """
+ owner = models.ForeignKey(get_user_model(),
+ on_delete=models.CASCADE)
+
+ creation_date = models.DateTimeField(auto_now_add=True)
+ starting_date = models.DateTimeField(default=start_of_this_month)
+ ending_date = models.DateTimeField()
+ due_date = models.DateField(default=default_payment_delay)
+
+
+ billing_address = models.ForeignKey(BillingAddress,
+ on_delete=models.CASCADE,
+ editable=True,
+ null=False)
+
+ # FIXME: editable=True -> is in the admin, but also editable in DRF
+ # Maybe filter fields in the serializer?
+
+ is_final = models.BooleanField(default=False)
+
+ class Meta:
+ constraints = [
+ models.UniqueConstraint(fields=['owner',
+ 'starting_date',
+ 'ending_date' ],
+ name='one_bill_per_month_per_user')
+ ]
+
+ def close(self):
+ """
+ Close/finish a bill
+ """
+
+ self.is_final = True
+ self.save()
+
+ @property
+ def sum(self):
+ bill_records = BillRecord.objects.filter(bill=self)
+ return sum([ br.sum for br in bill_records ])
+
+ @property
+ def vat_rate(self):
+ """
+ Handling VAT is a tricky business - thus we only implement the cases
+ that we clearly know and leave it open to fellow developers to implement
+ correct handling for other cases.
+
+ Case CH:
+
+ - If the customer is in .ch -> apply standard rate
+ - If the customer is in EU AND private -> apply country specific rate
+ - If the customer is in EU AND business -> do not apply VAT
+ - If the customer is outside EU and outside CH -> do not apply VAT
+ """
+
+ provider = UncloudProvider.objects.get()
+
+ # Assume always VAT inside the country
+ if provider.country == self.billing_address.country:
+ vat_rate = VATRate.objects.get(country=provider.country,
+ when=self.ending_date)
+ elif self.billing_address.country in EU:
+ # FIXME: need to check for validated vat number
+ if self.billing_address.vat_number:
+ return 0
+ else:
+ return VATRate.objects.get(country=self.billing_address.country,
+ when=self.ending_date)
+ else: # non-EU, non-national
+ return 0
+
+
+ @classmethod
+ def create_bills_for_all_users(cls):
+ """
+ Create next bill for each user
+ """
+
+ for owner in get_user_model().objects.all():
+ cls.create_next_bills_for_user(owner)
+
+ @classmethod
+ def create_next_bills_for_user(cls, owner, ending_date=None):
+ """
+ Create one bill per billing address, as the VAT rates might be different
+ for each address
+ """
+
+ bills = []
+
+ for billing_address in BillingAddress.objects.filter(owner=owner):
+ bills.append(cls.create_next_bill_for_user_address(billing_address, ending_date))
+
+ return bills
+
+ @classmethod
+ def create_next_bill_for_user_address(cls, billing_address, ending_date=None):
+ """
+ Create the next bill for a specific billing address of a user
+ """
+
+ owner = billing_address.owner
+
+ all_orders = Order.objects.filter(owner=owner,
+ billing_address=billing_address).order_by('id')
+
+ bill = cls.get_or_create_bill(billing_address, ending_date=ending_date)
+
+ for order in all_orders:
+ order.create_bill_record(bill)
+
+ return bill
+
+
+ @classmethod
+ def get_or_create_bill(cls, billing_address, ending_date=None):
+ """
+ Get / reuse last bill if it is not yet closed
+
+ Create bill, if there is no bill or if bill is closed.
+ """
+
+ last_bill = cls.objects.filter(billing_address=billing_address).order_by('id').last()
+
+ all_orders = Order.objects.filter(billing_address=billing_address).order_by('id')
+ first_order = all_orders.first()
+
+ bill = None
+
+ # Get date & bill from previous bill, if it exists
+ if last_bill:
+ if not last_bill.is_final:
+ bill = last_bill
+ starting_date = last_bill.starting_date
+ ending_date = bill.ending_date
+ else:
+ starting_date = last_bill.ending_date + datetime.timedelta(seconds=1)
+ else:
+ # Might be an idea to make this the start of the month, too
+ if first_order:
+ starting_date = first_order.starting_date
+ else:
+ starting_date = timezone.now()
+
+ if not ending_date:
+ ending_date = end_of_month(starting_date)
+
+ if not bill:
+ bill = cls.objects.create(
+ owner=billing_address.owner,
+ starting_date=starting_date,
+ ending_date=ending_date,
+ billing_address=billing_address)
+
+
+ return bill
+
+ def __str__(self):
+ return f"Bill {self.owner}-{self.id}"
+
+
+class BillRecord(models.Model):
+ """
+ Entry of a bill, dynamically generated from an order.
+ """
+
+ bill = models.ForeignKey(Bill, on_delete=models.CASCADE)
+ order = models.ForeignKey(Order, on_delete=models.CASCADE)
+
+ creation_date = models.DateTimeField(auto_now_add=True)
+ starting_date = models.DateTimeField()
+ ending_date = models.DateTimeField()
+
+ is_recurring_record = models.BooleanField(blank=False, null=False)
+
+ @property
+ def quantity(self):
+ """ Determine the quantity by the duration"""
+ if not self.is_recurring_record:
+ return 1
+
+ record_delta = self.ending_date - self.starting_date
+
+ return record_delta.total_seconds()/self.order.recurring_period.duration_seconds
+
+ @property
+ def sum(self):
+ if self.is_recurring_record:
+ return self.order.recurring_price * Decimal(self.quantity)
+ else:
+ return self.order.one_time_price
+
+ @property
+ def price(self):
+ if self.is_recurring_record:
+ return self.order.recurring_price
+ else:
+ return self.order.one_time_price
+
+ def __str__(self):
+ if self.is_recurring_record:
+ bill_line = f"{self.starting_date} - {self.ending_date}: {self.quantity} x {self.order}"
+ else:
+ bill_line = f"{self.starting_date}: {self.order}"
+
+ return bill_line
+
+ def save(self, *args, **kwargs):
+ if self.ending_date < self.starting_date:
+ raise ValidationError("End date cannot be before starting date")
+
+ super().save(*args, **kwargs)
+
+
+class ProductToRecurringPeriod(models.Model):
+ """
+ Intermediate many-to-many mapping class that allows storing the default
+ recurring period for a product
+ """
+
+ recurring_period = models.ForeignKey(RecurringPeriod, on_delete=models.CASCADE)
+ product = models.ForeignKey(Product, on_delete=models.CASCADE)
+
+ is_default = models.BooleanField(default=False)
+
+ class Meta:
+ constraints = [
+ models.UniqueConstraint(fields=['product'],
+ condition=Q(is_default=True),
+ name='one_default_recurring_period_per_product'),
+ models.UniqueConstraint(fields=['product', 'recurring_period'],
+ name='recurring_period_once_per_product')
+ ]
+
+ def __str__(self):
+ return f"{self.product} - {self.recurring_period} (default: {self.is_default})"
diff --git a/uncloud_pay/serializers.py b/uncloud_pay/serializers.py
new file mode 100644
index 0000000..9214105
--- /dev/null
+++ b/uncloud_pay/serializers.py
@@ -0,0 +1,112 @@
+from django.contrib.auth import get_user_model
+from rest_framework import serializers
+from uncloud_auth.serializers import UserSerializer
+from django.utils.translation import gettext_lazy as _
+
+from .models import *
+
+###
+# Payments and Payment Methods.
+
+class PaymentSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = Payment
+ fields = '__all__'
+
+class PaymentMethodSerializer(serializers.ModelSerializer):
+ stripe_card_last4 = serializers.IntegerField()
+
+ class Meta:
+ model = PaymentMethod
+ fields = ['uuid', 'source', 'description', 'primary', 'stripe_card_last4', 'active']
+
+class UpdatePaymentMethodSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = PaymentMethod
+ fields = ['description', 'primary']
+
+class ChargePaymentMethodSerializer(serializers.Serializer):
+ amount = serializers.DecimalField(max_digits=10, decimal_places=2)
+
+class CreatePaymentMethodSerializer(serializers.ModelSerializer):
+ please_visit = serializers.CharField(read_only=True)
+ class Meta:
+ model = PaymentMethod
+ fields = ['source', 'description', 'primary', 'please_visit']
+
+###
+# Orders & Products.
+
+class OrderSerializer(serializers.ModelSerializer):
+ owner = serializers.PrimaryKeyRelatedField(queryset=get_user_model().objects.all())
+
+ def __init__(self, *args, **kwargs):
+ # Don't pass the 'fields' arg up to the superclass
+ admin = kwargs.pop('admin', None)
+
+ # Instantiate the superclass normally
+ super(OrderSerializer, self).__init__(*args, **kwargs)
+
+ # Only allows owner in admin mode.
+ if not admin:
+ self.fields.pop('owner')
+
+ def create(self, validated_data):
+ billing_address = BillingAddress.get_address_for(validated_data["owner"])
+ instance = Order(billing_address=billing_address, **validated_data)
+ instance.save()
+
+ return instance
+
+ def validate_owner(self, value):
+ if BillingAddress.get_address_for(value) == None:
+ raise serializers.ValidationError("Owner does not have a valid billing address.")
+
+ return value
+
+ class Meta:
+ model = Order
+ read_only_fields = ['replaced_by', 'depends_on']
+ fields = ['uuid', 'owner', 'description', 'creation_date', 'starting_date', 'ending_date',
+ 'bill', 'recurring_period', 'recurring_price', 'one_time_price'] + read_only_fields
+
+
+###
+# Bills
+
+# TODO: remove magic numbers for decimal fields
+class BillRecordSerializer(serializers.Serializer):
+ order = serializers.HyperlinkedRelatedField(
+ view_name='order-detail',
+ read_only=True)
+ description = serializers.CharField()
+ one_time_price = serializers.DecimalField(AMOUNT_MAX_DIGITS, AMOUNT_DECIMALS)
+ recurring_price = serializers.DecimalField(AMOUNT_MAX_DIGITS, AMOUNT_DECIMALS)
+# recurring_period = serializers.ChoiceField()
+ recurring_count = serializers.DecimalField(AMOUNT_MAX_DIGITS, AMOUNT_DECIMALS)
+ vat_rate = serializers.DecimalField(AMOUNT_MAX_DIGITS, AMOUNT_DECIMALS)
+ vat_amount = serializers.DecimalField(AMOUNT_MAX_DIGITS, AMOUNT_DECIMALS)
+ amount = serializers.DecimalField(AMOUNT_MAX_DIGITS, AMOUNT_DECIMALS)
+ total = serializers.DecimalField(AMOUNT_MAX_DIGITS, AMOUNT_DECIMALS)
+
+class BillingAddressSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = BillingAddress
+ fields = ['uuid', 'organization', 'name', 'street', 'city', 'postal_code', 'country', 'vat_number']
+
+class BillSerializer(serializers.ModelSerializer):
+ billing_address = BillingAddressSerializer(read_only=True)
+ records = BillRecordSerializer(many=True, read_only=True)
+
+ class Meta:
+ model = Bill
+ fields = ['uuid', 'reference', 'owner', 'amount', 'vat_amount', 'total',
+ 'due_date', 'creation_date', 'starting_date', 'ending_date',
+ 'records', 'final', 'billing_address']
+
+# We do not want users to mutate the country / VAT number of an address, as it
+# will change VAT on existing bills.
+class UpdateBillingAddressSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = BillingAddress
+ fields = ['uuid', 'street', 'city', 'postal_code']
diff --git a/uncloud_pay/stripe.py b/uncloud_pay/stripe.py
new file mode 100644
index 0000000..2ed4ef2
--- /dev/null
+++ b/uncloud_pay/stripe.py
@@ -0,0 +1,114 @@
+import stripe
+import stripe.error
+import logging
+
+from django.core.exceptions import ObjectDoesNotExist
+from django.conf import settings
+
+import uncloud_pay.models
+
+# Static stripe configuration used below.
+CURRENCY = 'chf'
+
+# README: We use the Payment Intent API as described on
+# https://stripe.com/docs/payments/save-and-reuse
+
+# For internal use only.
+stripe.api_key = settings.STRIPE_KEY
+
+# Helper (decorator) used to catch errors raised by stripe logic.
+# Errors that should not be shown to the end user are logged and re-raised with a generic message.
+def handle_stripe_error(f):
+ def handle_problems(*args, **kwargs):
+ response = {
+ 'paid': False,
+ 'response_object': None,
+ 'error': None
+ }
+
+ common_message = "Currently it is not possible to make payments. Please try agin later."
+ try:
+ response_object = f(*args, **kwargs)
+ return response_object
+ except stripe.error.CardError as e:
+ # Since it's a decline, stripe.error.CardError will be caught
+ body = e.json_body
+ logging.error(str(e))
+
+ raise e # For error handling.
+ except stripe.error.RateLimitError:
+ logging.error("Too many requests made to the API too quickly.")
+ raise Exception(common_message)
+ except stripe.error.InvalidRequestError as e:
+ logging.error(str(e))
+ raise Exception('Invalid parameters.')
+ except stripe.error.AuthenticationError as e:
+ # Authentication with Stripe's API failed
+ # (maybe you changed API keys recently)
+ logging.error(str(e))
+ raise Exception(common_message)
+ except stripe.error.APIConnectionError as e:
+ logging.error(str(e))
+ raise Exception(common_message)
+ except stripe.error.StripeError as e:
+ # XXX: maybe send email
+ logging.error(str(e))
+ raise Exception(common_message)
+ except Exception as e:
+ # maybe send email
+ logging.error(str(e))
+ raise Exception(common_message)
+
+ return handle_problems
+
+# Actual Stripe logic.
+
+def public_api_key():
+ return settings.STRIPE_PUBLIC_KEY
+
+def get_customer_id_for(user):
+ try:
+ # .get() raise if there is no matching entry.
+ return uncloud_pay.models.StripeCustomer.objects.get(owner=user).stripe_id
+ except ObjectDoesNotExist:
+ # No entry yet - making a new one.
+ try:
+ customer = create_customer(user.username, user.email)
+ uncloud_stripe_mapping = uncloud_pay.models.StripeCustomer.objects.create(
+ owner=user, stripe_id=customer.id)
+ return uncloud_stripe_mapping.stripe_id
+ except Exception as e:
+ return None
+
+@handle_stripe_error
+def create_setup_intent(customer_id):
+ return stripe.SetupIntent.create(customer=customer_id)
+
+@handle_stripe_error
+def get_setup_intent(setup_intent_id):
+ return stripe.SetupIntent.retrieve(setup_intent_id)
+
+def get_payment_method(payment_method_id):
+ return stripe.PaymentMethod.retrieve(payment_method_id)
+
+@handle_stripe_error
+def charge_customer(amount, customer_id, card_id):
+ # Amount is in CHF but stripes requires smallest possible unit.
+ # https://stripe.com/docs/api/payment_intents/create#create_payment_intent-amount
+ adjusted_amount = int(amount * 100)
+ return stripe.PaymentIntent.create(
+ amount=adjusted_amount,
+ currency=CURRENCY,
+ customer=customer_id,
+ payment_method=card_id,
+ off_session=True,
+ confirm=True,
+ )
+
+@handle_stripe_error
+def create_customer(name, email):
+ return stripe.Customer.create(name=name, email=email)
+
+@handle_stripe_error
+def get_customer(customer_id):
+ return stripe.Customer.retrieve(customer_id)
diff --git a/uncloud_pay/templates/bill.html.j2 b/uncloud_pay/templates/bill.html.j2
new file mode 100644
index 0000000..c227f43
--- /dev/null
+++ b/uncloud_pay/templates/bill.html.j2
@@ -0,0 +1,1061 @@
+{% load static %}
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>{{ bill }}</title>
+  </head>
+  <body>
+    <div class="sender">
+      ungleich glarus ag<br>
+      Bahnhofstrasse 1<br>
+      8783 Linthal<br>
+      Switzerland
+    </div>
+
+    <div class="recipient">
+      {{ bill.billing_address.organization }}<br>
+      {{ bill.billing_address.name }}<br>
+      {{ bill.owner.email }}<br>
+      {{ bill.billing_address.street }}<br>
+      {{ bill.billing_address.country }} {{ bill.billing_address.postal_code }} {{ bill.billing_address.city }}
+    </div>
+
+    <div class="bill-meta">
+      {{ bill.starting_date|date:"c" }} -
+      {{ bill.ending_date|date:"c" }}<br>
+      Bill id: {{ bill }}<br>
+      Due: {{ bill.due_date }}
+    </div>
+
+    <h1>Invoice</h1>
+
+    <table>
+      <thead>
+        <tr>
+          <th>Detail</th>
+          <th>Price/Unit</th>
+          <th>Units</th>
+          <th>Total price</th>
+        </tr>
+      </thead>
+      <tbody>
+        {% for record in bill_records %}
+        <tr>
+          <td>{{ record.starting_date|date:"c" }}
+            - {{ record.ending_date|date:"c" }}
+            {{ record.order }}
+          </td>
+          <td>{{ record.price|floatformat:2 }}</td>
+          <td>{{ record.quantity|floatformat:2 }}</td>
+          <td>{{ record.sum|floatformat:2 }}</td>
+        </tr>
+        {% endfor %}
+      </tbody>
+      <tfoot>
+        <tr>
+          <td>Total (excl. VAT)</td>
+          <td colspan="3">{{ bill.amount }}</td>
+        </tr>
+        <tr>
+          <td>VAT 7.7%</td>
+          <td colspan="3">{{ bill.vat_amount|floatformat:2 }}</td>
+        </tr>
+        <tr>
+          <td>Total amount to be paid</td>
+          <td colspan="3">{{ bill.sum|floatformat:2 }}</td>
+        </tr>
+      </tfoot>
+    </table>
+  </body>
+</html>
diff --git a/uncloud_pay/templates/error.html.j2 b/uncloud_pay/templates/error.html.j2
new file mode 100644
index 0000000..ba9209c
--- /dev/null
+++ b/uncloud_pay/templates/error.html.j2
@@ -0,0 +1,18 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>Error</title>
+  </head>
+  <body>
+    <h1>Error</h1>
+    <p>{{ error }}</p>
+  </body>
+</html>
diff --git a/uncloud_pay/templates/stripe-payment.html.j2 b/uncloud_pay/templates/stripe-payment.html.j2
new file mode 100644
index 0000000..6c59740
--- /dev/null
+++ b/uncloud_pay/templates/stripe-payment.html.j2
@@ -0,0 +1,76 @@
+
+
+
+ Stripe Card Registration
+
+
+
+
+
+
+
+
+
+
+    Registering Stripe Credit Card
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/uncloud_pay/templates/uncloud_pay/stripe.html b/uncloud_pay/templates/uncloud_pay/stripe.html
new file mode 100644
index 0000000..3051bf0
--- /dev/null
+++ b/uncloud_pay/templates/uncloud_pay/stripe.html
@@ -0,0 +1,72 @@
+{% extends 'uncloud/base.html' %}
+
+{% block header %}
+
+
+{% endblock %}
+
+{% block body %}
+
+
+    Registering Stripe Credit Card
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+{% endblock %}
diff --git a/uncloud_pay/tests.py b/uncloud_pay/tests.py
new file mode 100644
index 0000000..ca91cc9
--- /dev/null
+++ b/uncloud_pay/tests.py
@@ -0,0 +1,465 @@
+from django.test import TestCase
+from django.contrib.auth import get_user_model
+from datetime import datetime, date, timedelta
+from django.utils import timezone
+
+from .models import *
+from uncloud_service.models import GenericServiceProduct
+
+import json
+
+chocolate_product_config = {
+ 'features': {
+ 'gramm':
+ { 'min': 100,
+ 'max': 5000,
+ 'one_time_price_per_unit': 0.2,
+ 'recurring_price_per_unit': 0
+ },
+ },
+}
+
+chocolate_order_config = {
+ 'features': {
+ 'gramm': 500,
+ }
+}
+
+chocolate_one_time_price = chocolate_order_config['features']['gramm'] * chocolate_product_config['features']['gramm']['one_time_price_per_unit']
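+# With the sample order above: 500 g * 0.2 per gramm = 100 (one-time price).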
+
+vm_product_config = {
+ 'features': {
+ 'cores':
+ { 'min': 1,
+ 'max': 48,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 4
+ },
+ 'ram_gb':
+ { 'min': 1,
+ 'max': 256,
+ 'one_time_price_per_unit': 0,
+ 'recurring_price_per_unit': 4
+ },
+ },
+}
+
+vm_order_config = {
+ 'features': {
+ 'cores': 2,
+ 'ram_gb': 2
+ }
+}
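+# With vm_product_config above, this order recurs at 2 * 4 + 2 * 4 = 16 per period.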
+
+vm_order_downgrade_config = {
+ 'features': {
+ 'cores': 1,
+ 'ram_gb': 1
+ }
+}
+
+vm_order_upgrade_config = {
+ 'features': {
+ 'cores': 4,
+ 'ram_gb': 4
+ }
+}
+
+
+class ProductTestCase(TestCase):
+ """
+ Test products and products <-> order interaction
+ """
+
+ def setUp(self):
+ self.user = get_user_model().objects.create(
+ username='random_user',
+ email='jane.random@domain.tld')
+
+ self.ba = BillingAddress.objects.create(
+ owner=self.user,
+ organization = 'Test org',
+ street="unknown",
+ city="unknown",
+ postal_code="somewhere else",
+ active=True)
+
+ RecurringPeriod.populate_db_defaults()
+ self.default_recurring_period = RecurringPeriod.objects.get(name="Per 30 days")
+
+ def test_create_product(self):
+ """
+ Create a sample product
+ """
+
+ p = Product.objects.create(name="Testproduct",
+ description="Only for testing",
+ config=vm_product_config)
+
+ p.recurring_periods.add(self.default_recurring_period,
+ through_defaults= { 'is_default': True })
+
+
+class OrderTestCase(TestCase):
+ """
+ The heart of ordering products
+ """
+
+ def setUp(self):
+ self.user = get_user_model().objects.create(
+ username='random_user',
+ email='jane.random@domain.tld')
+
+ self.ba = BillingAddress.objects.create(
+ owner=self.user,
+ organization = 'Test org',
+ street="unknown",
+ city="unknown",
+ postal_code="somewhere else",
+ active=True)
+
+ self.product = Product.objects.create(name="Testproduct",
+ description="Only for testing",
+ config=vm_product_config)
+
+ RecurringPeriod.populate_db_defaults()
+ self.default_recurring_period = RecurringPeriod.objects.get(name="Per 30 days")
+
+ self.product.recurring_periods.add(self.default_recurring_period,
+ through_defaults= { 'is_default': True })
+
+
+ def test_order_invalid_recurring_period(self):
+ """
+        Order a product with a recurring period that is not added to the product
+ """
+
+ o = Order.objects.create(owner=self.user,
+ billing_address=self.ba,
+ product=self.product,
+ config=vm_order_config)
+
+
+ def test_order_product(self):
+ """
+ Order a product, ensure the order has correct price setup
+ """
+
+ o = Order.objects.create(owner=self.user,
+ billing_address=self.ba,
+ product=self.product)
+
+ self.assertEqual(o.one_time_price, 0)
+ self.assertEqual(o.recurring_price, 16)
+
+ def test_change_order(self):
+ """
+ Change an order and ensure that
+ - a new order is created
+ - the price is correct in the new order
+ """
+ order1 = Order.objects.create(owner=self.user,
+ billing_address=self.ba,
+ product=self.product,
+ config=vm_order_config)
+
+
+ self.assertEqual(order1.one_time_price, 0)
+ self.assertEqual(order1.recurring_price, 16)
+
+
+class ModifyOrderTestCase(TestCase):
+ """
+ Test typical order flows like
+ - cancelling
+ - downgrading
+ - upgrading
+ """
+
+ def setUp(self):
+ self.user = get_user_model().objects.create(
+ username='random_user',
+ email='jane.random@domain.tld')
+
+ self.ba = BillingAddress.objects.create(
+ owner=self.user,
+ organization = 'Test org',
+ street="unknown",
+ city="unknown",
+ postal_code="somewhere else",
+ active=True)
+
+ self.product = Product.objects.create(name="Testproduct",
+ description="Only for testing",
+ config=vm_product_config)
+
+ RecurringPeriod.populate_db_defaults()
+ self.default_recurring_period = RecurringPeriod.objects.get(name="Per 30 days")
+
+ self.product.recurring_periods.add(self.default_recurring_period,
+ through_defaults= { 'is_default': True })
+
+
+ def test_change_order(self):
+ """
+ Test changing an order
+
+ Expected result:
+
+ - Old order should be closed before new order starts
+        - New order should start at its starting date
+ """
+
+ user = self.user
+
+ starting_price = 16
+ downgrade_price = 8
+
+ starting_date = timezone.make_aware(datetime.datetime(2019,3,3))
+ ending1_date = starting_date + datetime.timedelta(days=15)
+ change1_date = start_after(ending1_date)
+
+ bill_ending_date = change1_date + datetime.timedelta(days=1)
+
+
+ order1 = Order.objects.create(owner=self.user,
+ billing_address=BillingAddress.get_address_for(self.user),
+ product=self.product,
+ config=vm_order_config,
+ starting_date=starting_date)
+
+ order1.update_order(vm_order_downgrade_config, starting_date=change1_date)
+
+ bills = Bill.create_next_bills_for_user(user, ending_date=bill_ending_date)
+
+ bill = bills[0]
+ bill_records = BillRecord.objects.filter(bill=bill)
+
+ self.assertEqual(len(bill_records), 2)
+
+ self.assertEqual(bill_records[0].starting_date, starting_date)
+ self.assertEqual(bill_records[0].ending_date, ending1_date)
+
+ self.assertEqual(bill_records[1].starting_date, change1_date)
+
+
+
+ def test_downgrade_product(self):
+ """
+ Test downgrading behaviour:
+
+ We create a recurring product (recurring time: 30 days) and downgrade after 15 days.
+
+ We create the bill right AFTER the end of the first order.
+
+ Expected result:
+
+ - First bill record for 30 days
+ - Second bill record starting after 30 days
+ - Bill contains two bill records
+
+ """
+
+ user = self.user
+
+ starting_price = 16
+ downgrade_price = 8
+
+ starting_date = timezone.make_aware(datetime.datetime(2019,3,3))
+ first_order_should_end_at = starting_date + datetime.timedelta(days=30)
+ change1_date = start_after(starting_date + datetime.timedelta(days=15))
+ bill_ending_date = change1_date + datetime.timedelta(days=1)
+
+ order1 = Order.objects.create(owner=self.user,
+ billing_address=BillingAddress.get_address_for(self.user),
+ product=self.product,
+ config=vm_order_config,
+ starting_date=starting_date)
+
+ order1.update_order(vm_order_downgrade_config, starting_date=change1_date)
+
+ bills = Bill.create_next_bills_for_user(user, ending_date=bill_ending_date)
+
+ bill = bills[0]
+ bill_records = BillRecord.objects.filter(bill=bill)
+
+ self.assertEqual(len(bill_records), 2)
+
+ self.assertEqual(bill_records[0].starting_date, starting_date)
+ self.assertEqual(bill_records[0].order.ending_date, first_order_should_end_at)
+
+
+class BillTestCase(TestCase):
+ """
+ Test aspects of billing / creating a bill
+ """
+
+ def setUp(self):
+ RecurringPeriod.populate_db_defaults()
+
+ self.user_without_address = get_user_model().objects.create(
+ username='no_home_person',
+ email='far.away@domain.tld')
+
+ self.user = get_user_model().objects.create(
+ username='jdoe',
+ email='john.doe@domain.tld')
+
+ self.recurring_user = get_user_model().objects.create(
+ username='recurrent_product_user',
+ email='jane.doe@domain.tld')
+
+ self.user_addr = BillingAddress.objects.create(
+ owner=self.user,
+ organization = 'Test org',
+ street="unknown",
+ city="unknown",
+ postal_code="unknown",
+ active=True)
+
+ self.recurring_user_addr = BillingAddress.objects.create(
+ owner=self.recurring_user,
+ organization = 'Test org',
+ street="Somewhere",
+ city="Else",
+ postal_code="unknown",
+ active=True)
+
+ self.order_meta = {}
+ self.order_meta[1] = {
+ 'starting_date': timezone.make_aware(datetime.datetime(2020,3,3)),
+ 'ending_date': timezone.make_aware(datetime.datetime(2020,4,17)),
+ 'price': 15,
+ 'description': 'One chocolate bar'
+ }
+
+ self.chocolate = Product.objects.create(name="Swiss Chocolate",
+ description="Not only for testing, but for joy",
+ config=chocolate_product_config)
+
+
+ self.vm = Product.objects.create(name="Super Fast VM",
+ description="Zooooom",
+ config=vm_product_config)
+
+
+ RecurringPeriod.populate_db_defaults()
+ self.default_recurring_period = RecurringPeriod.objects.get(name="Per 30 days")
+
+ self.onetime_recurring_period = RecurringPeriod.objects.get(name="Onetime")
+
+ self.chocolate.recurring_periods.add(self.onetime_recurring_period,
+ through_defaults= { 'is_default': True })
+
+ self.vm.recurring_periods.add(self.default_recurring_period,
+ through_defaults= { 'is_default': True })
+
+
+ # used for generating multiple bills
+ self.bill_dates = [
+ timezone.make_aware(datetime.datetime(2020,3,31)),
+ timezone.make_aware(datetime.datetime(2020,4,30)),
+ timezone.make_aware(datetime.datetime(2020,5,31)),
+ ]
+
+
+ def order_chocolate(self):
+ return Order.objects.create(
+ owner=self.user,
+ recurring_period=RecurringPeriod.objects.get(name="Onetime"),
+ product=self.chocolate,
+ billing_address=BillingAddress.get_address_for(self.user),
+ starting_date=self.order_meta[1]['starting_date'],
+ ending_date=self.order_meta[1]['ending_date'],
+ config=chocolate_order_config)
+
+ def order_vm(self, owner=None):
+
+ if not owner:
+ owner = self.recurring_user
+
+ return Order.objects.create(
+ owner=owner,
+ product=self.vm,
+ config=vm_order_config,
+ billing_address=BillingAddress.get_address_for(self.recurring_user),
+ starting_date=timezone.make_aware(datetime.datetime(2020,3,3)),
+ )
+
+
+
+
+ def test_bill_one_time_one_bill_record(self):
+ """
+ Ensure there is only 1 bill record per order
+ """
+
+ order = self.order_chocolate()
+
+ bill = Bill.create_next_bill_for_user_address(self.user_addr)
+
+ self.assertEqual(order.billrecord_set.count(), 1)
+
+ def test_bill_sum_onetime(self):
+ """
+ Check the bill sum for a single one time order
+ """
+
+ order = self.order_chocolate()
+ bill = Bill.create_next_bill_for_user_address(self.user_addr)
+ self.assertEqual(bill.sum, chocolate_one_time_price)
+
+
+ def test_bill_creates_record_for_recurring_order(self):
+ """
+ Ensure there is only 1 bill record per order
+ """
+
+ order = self.order_vm()
+ bill = Bill.create_next_bill_for_user_address(self.recurring_user_addr)
+
+ self.assertEqual(order.billrecord_set.count(), 1)
+ self.assertEqual(bill.billrecord_set.count(), 1)
+
+
+ def test_new_bill_after_closing(self):
+ """
+        After closing a bill, if the user has a recurring product,
+        the next bill run should create a new bill
+ """
+
+ order = self.order_vm()
+
+ for ending_date in self.bill_dates:
+ b = Bill.create_next_bill_for_user_address(self.recurring_user_addr, ending_date)
+ b.close()
+
+ bill_count = Bill.objects.filter(owner=self.recurring_user).count()
+
+ self.assertEqual(len(self.bill_dates), bill_count)
+
+
+
+class BillingAddressTestCase(TestCase):
+ def setUp(self):
+ self.user = get_user_model().objects.create(
+ username='random_user',
+ email='jane.random@domain.tld')
+
+
+ def test_user_no_address(self):
+ """
+ Raise an error, when there is no address
+ """
+
+ self.assertRaises(uncloud_pay.models.BillingAddress.DoesNotExist,
+ BillingAddress.get_address_for,
+ self.user)
diff --git a/uncloud_pay/views.py b/uncloud_pay/views.py
new file mode 100644
index 0000000..53d6ef4
--- /dev/null
+++ b/uncloud_pay/views.py
@@ -0,0 +1,398 @@
+from django.contrib.auth.mixins import LoginRequiredMixin
+from django.views.generic.base import TemplateView
+
+
+from django.shortcuts import render
+from django.db import transaction
+from django.contrib.auth import get_user_model
+from rest_framework import viewsets, mixins, permissions, status, views
+from rest_framework.renderers import TemplateHTMLRenderer
+from rest_framework.response import Response
+from rest_framework.decorators import action
+from rest_framework.reverse import reverse
+from rest_framework.decorators import renderer_classes
+from vat_validator import validate_vat, vies
+from vat_validator.countries import EU_COUNTRY_CODES
+from hardcopy import bytestring_to_pdf
+from django.core.files.temp import NamedTemporaryFile
+from django.http import FileResponse
+from django.template.loader import render_to_string
+from copy import deepcopy
+
+import json
+import logging
+
+from .models import *
+from .serializers import *
+from datetime import datetime
+from vat_validator import sanitize_vat
+import uncloud_pay.stripe as uncloud_stripe
+
+logger = logging.getLogger(__name__)
+
+###
+# Payments and Payment Methods.
+
+class PaymentViewSet(viewsets.ReadOnlyModelViewSet):
+ serializer_class = PaymentSerializer
+ permission_classes = [permissions.IsAuthenticated]
+
+ def get_queryset(self):
+ return Payment.objects.filter(owner=self.request.user)
+
+class OrderViewSet(viewsets.ReadOnlyModelViewSet):
+ serializer_class = OrderSerializer
+ permission_classes = [permissions.IsAuthenticated]
+
+ def get_queryset(self):
+ return Order.objects.filter(owner=self.request.user)
+
+
+class RegisterCard(LoginRequiredMixin, TemplateView):
+ login_url = '/login/'
+
+ # This is not supposed to be "static" --
+ # the idea is to be able to switch the provider when needed
+ template_name = "uncloud_pay/stripe.html"
+
+ def get_context_data(self, **kwargs):
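+        # Create a Stripe SetupIntent for this user's customer; its client_secret
+        # is what Stripe.js needs in the card registration template.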
+ customer_id = uncloud_stripe.get_customer_id_for(self.request.user)
+ setup_intent = uncloud_stripe.create_setup_intent(customer_id)
+
+ context = super().get_context_data(**kwargs)
+ context['client_secret'] = setup_intent.client_secret
+ context['username'] = self.request.user
+        context['stripe_pk'] = uncloud_stripe.public_api_key()
+ return context
+
+
+class PaymentMethodViewSet(viewsets.ModelViewSet):
+ permission_classes = [permissions.IsAuthenticated]
+
+ def get_serializer_class(self):
+ if self.action == 'create':
+ return CreatePaymentMethodSerializer
+ elif self.action == 'update':
+ return UpdatePaymentMethodSerializer
+ elif self.action == 'charge':
+ return ChargePaymentMethodSerializer
+ else:
+ return PaymentMethodSerializer
+
+ def get_queryset(self):
+ return PaymentMethod.objects.filter(owner=self.request.user)
+
+ # XXX: Handling of errors is far from great down there.
+ @transaction.atomic
+ def create(self, request):
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+
+ # Set newly created method as primary if no other method is.
+        if PaymentMethod.get_primary_for(request.user) is None:
+ serializer.validated_data['primary'] = True
+
+ if serializer.validated_data['source'] == "stripe":
+ # Retrieve Stripe customer ID for user.
+ customer_id = uncloud_stripe.get_customer_id_for(request.user)
+            if customer_id is None:
+ return Response(
+ {'error': 'Could not resolve customer stripe ID.'},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ try:
+ setup_intent = uncloud_stripe.create_setup_intent(customer_id)
+ except Exception as e:
+ return Response({'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ payment_method = PaymentMethod.objects.create(
+ owner=request.user,
+ stripe_setup_intent_id=setup_intent.id,
+ **serializer.validated_data)
+
+ # TODO: find a way to use reverse properly:
+ # https://www.django-rest-framework.org/api-guide/reverse/
+ path = "payment-method/{}/register-stripe-cc".format(
+ payment_method.uuid)
+ stripe_registration_url = reverse('api-root', request=request) + path
+ return Response({'please_visit': stripe_registration_url})
+ else:
+ serializer.save(owner=request.user, **serializer.validated_data)
+ return Response(serializer.data)
+
+ @action(detail=True, methods=['post'])
+ def charge(self, request, pk=None):
+ payment_method = self.get_object()
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ amount = serializer.validated_data['amount']
+ try:
+ payment = payment_method.charge(amount)
+ output_serializer = PaymentSerializer(payment)
+ return Response(output_serializer.data)
+ except Exception as e:
+ return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ @action(detail=True, methods=['get'], url_path='register-stripe-cc', renderer_classes=[TemplateHTMLRenderer])
+ def register_stripe_cc(self, request, pk=None):
+ payment_method = self.get_object()
+
+ if payment_method.source != 'stripe':
+ return Response(
+ {'error': 'This is not a Stripe-based payment method.'},
+ template_name='error.html.j2')
+
+ if payment_method.active:
+ return Response(
+ {'error': 'This payment method is already active'},
+ template_name='error.html.j2')
+
+ try:
+ setup_intent = uncloud_stripe.get_setup_intent(
+ payment_method.stripe_setup_intent_id)
+ except Exception as e:
+ return Response(
+ {'error': str(e)},
+ template_name='error.html.j2')
+
+ # TODO: find a way to use reverse properly:
+ # https://www.django-rest-framework.org/api-guide/reverse/
+ callback_path= "payment-method/{}/activate-stripe-cc/".format(
+ payment_method.uuid)
+ callback = reverse('api-root', request=request) + callback_path
+
+ # Render stripe card registration form.
+ template_args = {
+ 'client_secret': setup_intent.client_secret,
+            'stripe_pk': uncloud_stripe.public_api_key(),
+ 'callback': callback
+ }
+ return Response(template_args, template_name='stripe-payment.html.j2')
+
+ @action(detail=True, methods=['post'], url_path='activate-stripe-cc')
+ def activate_stripe_cc(self, request, pk=None):
+ payment_method = self.get_object()
+ try:
+ setup_intent = uncloud_stripe.get_setup_intent(
+ payment_method.stripe_setup_intent_id)
+ except Exception as e:
+ return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+        # The card has been registered; fetch the resulting payment method.
+        logger.debug(setup_intent)
+ if setup_intent.payment_method:
+ payment_method.stripe_payment_method_id = setup_intent.payment_method
+ payment_method.save()
+
+ return Response({
+ 'uuid': payment_method.uuid,
+ 'activated': payment_method.active})
+ else:
+ error = 'Could not fetch payment method from stripe. Please try again.'
+ return Response({'error': error})
+
+ @action(detail=True, methods=['post'], url_path='set-as-primary')
+ def set_as_primary(self, request, pk=None):
+ payment_method = self.get_object()
+ payment_method.set_as_primary_for(request.user)
+
+ serializer = self.get_serializer(payment_method)
+ return Response(serializer.data)
+
+###
+# Bills and Orders.
+
+class BillViewSet(viewsets.ReadOnlyModelViewSet):
+ serializer_class = BillSerializer
+ permission_classes = [permissions.IsAuthenticated]
+
+ def get_queryset(self):
+ return Bill.objects.filter(owner=self.request.user)
+
+
+ @action(detail=False, methods=['get'])
+ def unpaid(self, request):
+ serializer = self.get_serializer(
+ Bill.get_unpaid_for(self.request.user),
+ many=True)
+ return Response(serializer.data)
+
+ @action(detail=True, methods=['get'])
+ def download(self, *args, **kwargs):
+ """
+        Render the bill as a PDF and return it as a file download.
+ """
+ bill = self.get_object()
+ provider = UncloudProvider.get_provider()
+ output_file = NamedTemporaryFile()
+ bill_html = render_to_string("bill.html.j2", {'bill': bill})
+
+ bytestring_to_pdf(bill_html.encode('utf-8'), output_file)
+ response = FileResponse(output_file, content_type="application/pdf")
+ response['Content-Disposition'] = 'filename="{}_{}.pdf"'.format(
+ bill.reference, bill.uuid
+ )
+
+ return response
+
+
+class BillingAddressViewSet(mixins.CreateModelMixin,
+ mixins.RetrieveModelMixin,
+ mixins.UpdateModelMixin,
+ mixins.ListModelMixin,
+ viewsets.GenericViewSet):
+ permission_classes = [permissions.IsAuthenticated]
+
+ def get_serializer_class(self):
+ if self.action == 'update':
+ return UpdateBillingAddressSerializer
+ else:
+ return BillingAddressSerializer
+
+ def get_queryset(self):
+ return self.request.user.billingaddress_set.all()
+
+ def create(self, request):
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+
+ # Validate VAT numbers.
+ country = serializer.validated_data["country"]
+
+ # We ignore empty VAT numbers.
+ if 'vat_number' in serializer.validated_data and serializer.validated_data["vat_number"] != "":
+ vat_number = serializer.validated_data["vat_number"]
+
+ if not validate_vat(country, vat_number):
+ return Response(
+ {'error': 'Malformed VAT number.'},
+ status=status.HTTP_400_BAD_REQUEST)
+ elif country in EU_COUNTRY_CODES:
+                # XXX: making a synchronous call to a third-party API here might not be a good idea.
+ try:
+ vies_state = vies.check_vat(country, vat_number)
+ if not vies_state.valid:
+ return Response(
+ {'error': 'European VAT number does not exist in VIES.'},
+ status=status.HTTP_400_BAD_REQUEST)
+ except Exception as e:
+ logger.warning(e)
+ return Response(
+                        {'error': 'Could not validate EU VAT number against VIES. Please try again later.'},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+
+ serializer.save(owner=request.user)
+ return Response(serializer.data)
+
+###
+# Admin stuff.
+
+class AdminPaymentViewSet(viewsets.ModelViewSet):
+ serializer_class = PaymentSerializer
+ permission_classes = [permissions.IsAdminUser]
+
+ def get_queryset(self):
+ return Payment.objects.all()
+
+ def create(self, request):
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ serializer.save(timestamp=datetime.now())
+
+ headers = self.get_success_headers(serializer.data)
+ return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
+
+# Bills are generated from orders and should not be created or updated by hand.
+class AdminBillViewSet(BillViewSet):
+ serializer_class = BillSerializer
+ permission_classes = [permissions.IsAdminUser]
+
+ def get_queryset(self):
+ return Bill.objects.all()
+
+ @action(detail=False, methods=['get'])
+ def unpaid(self, request):
+ unpaid_bills = []
+ # XXX: works but we can do better than number of users + 1 SQL requests...
+ for user in get_user_model().objects.all():
+            unpaid_bills = unpaid_bills + Bill.get_unpaid_for(user)
+
+ serializer = self.get_serializer(unpaid_bills, many=True)
+ return Response(serializer.data)
+
+ @action(detail=False, methods=['post'])
+ def generate(self, request):
+ users = get_user_model().objects.all()
+
+ generated_bills = []
+ for user in users:
+ now = timezone.now()
+ generated_bills = generated_bills + Bill.generate_for(
+ year=now.year,
+ month=now.month,
+ user=user)
+
+ return Response(
+ map(lambda b: b.reference, generated_bills),
+ status=status.HTTP_200_OK)
+
+class AdminOrderViewSet(mixins.ListModelMixin,
+ mixins.RetrieveModelMixin,
+ mixins.CreateModelMixin,
+ mixins.UpdateModelMixin,
+ viewsets.GenericViewSet):
+ serializer_class = OrderSerializer
+ permission_classes = [permissions.IsAdminUser]
+
+ def get_serializer(self, *args, **kwargs):
+ return self.serializer_class(*args, **kwargs, admin=True)
+
+ def get_queryset(self):
+ return Order.objects.all()
+
+ # Updates create a new order and terminate the 'old' one.
+ @transaction.atomic
+ def update(self, request, *args, **kwargs):
+ order = self.get_object()
+ partial = kwargs.pop('partial', False)
+ serializer = self.get_serializer(order, data=request.data, partial=partial)
+ serializer.is_valid(raise_exception=True)
+
+ # Clone existing order for replacement.
+ replacing_order = deepcopy(order)
+
+ # Yes, that's how you make a new entry in DB:
+ # https://docs.djangoproject.com/en/3.0/topics/db/queries/#copying-model-instances
+ replacing_order.pk = None
+
+ for attr, value in serializer.validated_data.items():
+ setattr(replacing_order, attr, value)
+
+ # Save replacing order and terminate 'previous' one.
+ replacing_order.save()
+ order.replaced_by = replacing_order
+ order.save()
+ order.terminate()
+
+        return Response(self.get_serializer(replacing_order).data)
+
+ @action(detail=True, methods=['post'])
+ def terminate(self, request, pk):
+ order = self.get_object()
+ if order.is_terminated:
+ return Response(
+ {'error': 'Order is already terminated.'},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ else:
+ order.terminate()
+ return Response({}, status=status.HTTP_200_OK)
diff --git a/uncloud_service/__init__.py b/uncloud_service/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uncloud/uncloud_net/admin.py b/uncloud_service/admin.py
similarity index 100%
rename from uncloud/uncloud_net/admin.py
rename to uncloud_service/admin.py
diff --git a/uncloud/ungleich_service/apps.py b/uncloud_service/apps.py
similarity index 71%
rename from uncloud/ungleich_service/apps.py
rename to uncloud_service/apps.py
index 184e181..190bd35 100644
--- a/uncloud/ungleich_service/apps.py
+++ b/uncloud_service/apps.py
@@ -2,4 +2,4 @@ from django.apps import AppConfig
class UngleichServiceConfig(AppConfig):
- name = 'ungleich_service'
+ name = 'uncloud_service'
diff --git a/uncloud_service/models.py b/uncloud_service/models.py
new file mode 100644
index 0000000..a37e42b
--- /dev/null
+++ b/uncloud_service/models.py
@@ -0,0 +1,63 @@
+from django.db import models
+from uncloud_pay.models import Product, RecurringPeriod, AMOUNT_MAX_DIGITS, AMOUNT_DECIMALS
+from uncloud_vm.models import VMProduct, VMDiskImageProduct
+from django.core.validators import MinValueValidator
+
+class MatrixServiceProduct(models.Model):
+ monthly_managment_fee = 20
+
+ description = "Managed Matrix HomeServer"
+
+ # Specific to Matrix-as-a-Service
+ vm = models.ForeignKey(
+ VMProduct, on_delete=models.CASCADE
+ )
+ domain = models.CharField(max_length=255, default='domain.tld')
+
+    # Default recurring price is PER_30D, see Product class.
+ # def recurring_price(self, recurring_period=RecurringPeriod.PER_30D):
+ # return self.monthly_managment_fee
+
+ @staticmethod
+ def base_image():
+ # TODO: find a way to safely reference debian 10 image.
+        # return VMDiskImageProduct.objects.get(uuid="93e564c5-adb3-4741-941f-718f76075f02")
+ return False
+
+ # @staticmethod
+ # def allowed_recurring_periods():
+ # return list(filter(
+ # lambda pair: pair[0] in [RecurringPeriod.PER_30D],
+ # RecurringPeriod.choices))
+
+ @property
+ def one_time_price(self):
+ return 30
+
+class GenericServiceProduct(models.Model):
+ custom_description = models.TextField()
+ custom_recurring_price = models.DecimalField(default=0.0,
+ max_digits=AMOUNT_MAX_DIGITS,
+ decimal_places=AMOUNT_DECIMALS,
+ validators=[MinValueValidator(0)])
+ custom_one_time_price = models.DecimalField(default=0.0,
+ max_digits=AMOUNT_MAX_DIGITS,
+ decimal_places=AMOUNT_DECIMALS,
+ validators=[MinValueValidator(0)])
+
+ @property
+ def recurring_price(self):
+ # FIXME: handle recurring_period somehow.
+ return self.custom_recurring_price
+
+ @property
+ def description(self):
+ return self.custom_description
+
+ @property
+ def one_time_price(self):
+ return self.custom_one_time_price
+
+ @staticmethod
+ def allowed_recurring_periods():
+ return RecurringPeriod.choices
diff --git a/uncloud_service/serializers.py b/uncloud_service/serializers.py
new file mode 100644
index 0000000..bc6d753
--- /dev/null
+++ b/uncloud_service/serializers.py
@@ -0,0 +1,60 @@
+from rest_framework import serializers
+from .models import *
+from uncloud_vm.serializers import ManagedVMProductSerializer
+from uncloud_vm.models import VMProduct
+from uncloud_pay.models import RecurringPeriod, BillingAddress
+
+# XXX: the OrderSomethingProductSerializer classes add a lot of
+# boilerplate: can we reduce it somehow?
+
+class MatrixServiceProductSerializer(serializers.ModelSerializer):
+ vm = ManagedVMProductSerializer()
+
+ class Meta:
+ model = MatrixServiceProduct
+ fields = ['order', 'owner', 'status', 'vm', 'domain',
+ 'recurring_period']
+ read_only_fields = ['order', 'owner', 'status']
+
+class OrderMatrixServiceProductSerializer(MatrixServiceProductSerializer):
+ # recurring_period = serializers.ChoiceField(
+ # choices=MatrixServiceProduct.allowed_recurring_periods())
+
+ def __init__(self, *args, **kwargs):
+ super(OrderMatrixServiceProductSerializer, self).__init__(*args, **kwargs)
+ self.fields['billing_address'] = serializers.ChoiceField(
+ choices=BillingAddress.get_addresses_for(
+ self.context['request'].user)
+ )
+
+ class Meta:
+ model = MatrixServiceProductSerializer.Meta.model
+ fields = MatrixServiceProductSerializer.Meta.fields + [
+ 'recurring_period', 'billing_address'
+ ]
+ read_only_fields = MatrixServiceProductSerializer.Meta.read_only_fields
+
+class GenericServiceProductSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = GenericServiceProduct
+ fields = ['order', 'owner', 'status', 'custom_recurring_price',
+ 'custom_description', 'custom_one_time_price']
+ read_only_fields = [ 'owner', 'status']
+
+class OrderGenericServiceProductSerializer(GenericServiceProductSerializer):
+ # recurring_period = serializers.ChoiceField(
+ # choices=GenericServiceProduct.allowed_recurring_periods())
+
+ def __init__(self, *args, **kwargs):
+ super(OrderGenericServiceProductSerializer, self).__init__(*args, **kwargs)
+ self.fields['billing_address'] = serializers.ChoiceField(
+ choices=BillingAddress.get_addresses_for(
+ self.context['request'].user)
+ )
+
+ class Meta:
+ model = GenericServiceProductSerializer.Meta.model
+ fields = GenericServiceProductSerializer.Meta.fields + [
+ 'recurring_period', 'billing_address'
+ ]
+ read_only_fields = GenericServiceProductSerializer.Meta.read_only_fields
diff --git a/uncloud/uncloud_net/tests.py b/uncloud_service/tests.py
similarity index 100%
rename from uncloud/uncloud_net/tests.py
rename to uncloud_service/tests.py
diff --git a/uncloud_service/views.py b/uncloud_service/views.py
new file mode 100644
index 0000000..abd4a05
--- /dev/null
+++ b/uncloud_service/views.py
@@ -0,0 +1,128 @@
+from rest_framework import viewsets, permissions
+from rest_framework.response import Response
+from django.db import transaction
+from django.utils import timezone
+
+from .models import *
+from .serializers import *
+
+from uncloud_pay.helpers import ProductViewSet
+from uncloud_pay.models import Order
+from uncloud_vm.models import VMProduct, VMDiskProduct
+
+def create_managed_vm(cores, ram, disk_size, image, order):
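+    # Build the disk and the VM in memory, link them to each other, then persist both.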
+ # Create VM
+ disk = VMDiskProduct(
+ owner=order.owner,
+ order=order,
+ size_in_gb=disk_size,
+ image=image)
+ vm = VMProduct(
+ name="Managed Service Host",
+ owner=order.owner,
+ cores=cores,
+ ram_in_gb=ram,
+ primary_disk=disk)
+ disk.vm = vm
+
+ vm.save()
+ disk.save()
+
+ return vm
+
+
+class MatrixServiceProductViewSet(ProductViewSet):
+ permission_classes = [permissions.IsAuthenticated]
+ serializer_class = MatrixServiceProductSerializer
+
+ def get_queryset(self):
+ return MatrixServiceProduct.objects.filter(owner=self.request.user)
+
+ def get_serializer_class(self):
+ if self.action == 'create':
+ return OrderMatrixServiceProductSerializer
+ else:
+ return MatrixServiceProductSerializer
+
+ @transaction.atomic
+ def create(self, request):
+ # Extract serializer data.
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ order_recurring_period = serializer.validated_data.pop("recurring_period")
+ order_billing_address = serializer.validated_data.pop("billing_address")
+
+        # Create base order.
+ order = Order.objects.create(
+ recurring_period=order_recurring_period,
+ owner=request.user,
+ billing_address=order_billing_address,
+ starting_date=timezone.now()
+ )
+ order.save()
+
+        # Create underlying VM.
+ data = serializer.validated_data.pop('vm')
+ vm = create_managed_vm(
+ order=order,
+ cores=data['cores'],
+ ram=data['ram_in_gb'],
+ disk_size=data['primary_disk']['size_in_gb'],
+ image=MatrixServiceProduct.base_image())
+
+ # Create service.
+ service = serializer.save(
+ order=order,
+ owner=request.user,
+ vm=vm)
+
+ return Response(serializer.data)
+
+class GenericServiceProductViewSet(ProductViewSet):
+ permission_classes = [permissions.IsAuthenticated]
+
+ def get_queryset(self):
+ return GenericServiceProduct.objects.filter(owner=self.request.user)
+
+ def get_serializer_class(self):
+ if self.action == 'create':
+ return OrderGenericServiceProductSerializer
+ else:
+ return GenericServiceProductSerializer
+
+ @transaction.atomic
+ def create(self, request):
+ # Extract serializer data.
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ order_recurring_period = serializer.validated_data.pop("recurring_period")
+ order_billing_address = serializer.validated_data.pop("billing_address")
+
+ # Create base order.
+ order = Order.objects.create(
+ recurring_period=order_recurring_period,
+ owner=request.user,
+ billing_address=order_billing_address,
+ starting_date=timezone.now()
+ )
+ order.save()
+
+ # Create service.
+ service = serializer.save(order=order, owner=request.user)
+
+ # XXX: Move this to some kind of on_create hook in parent
+ # Product class?
+ order.add_record(
+ service.one_time_price,
+ service.recurring_price,
+ service.description)
+
+ return Response(serializer.data)
diff --git a/uncloud_storage/__init__.py b/uncloud_storage/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uncloud/uncloud_pay/admin.py b/uncloud_storage/admin.py
similarity index 100%
rename from uncloud/uncloud_pay/admin.py
rename to uncloud_storage/admin.py
diff --git a/uncloud/uncloud_storage/apps.py b/uncloud_storage/apps.py
similarity index 100%
rename from uncloud/uncloud_storage/apps.py
rename to uncloud_storage/apps.py
diff --git a/uncloud/uncloud_storage/models.py b/uncloud_storage/models.py
similarity index 100%
rename from uncloud/uncloud_storage/models.py
rename to uncloud_storage/models.py
diff --git a/uncloud/uncloud_pay/tests.py b/uncloud_storage/tests.py
similarity index 100%
rename from uncloud/uncloud_pay/tests.py
rename to uncloud_storage/tests.py
diff --git a/uncloud/uncloud_net/views.py b/uncloud_storage/views.py
similarity index 100%
rename from uncloud/uncloud_net/views.py
rename to uncloud_storage/views.py
diff --git a/uncloud_vm/__init__.py b/uncloud_vm/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uncloud_vm/admin.py b/uncloud_vm/admin.py
new file mode 100644
index 0000000..6f3bc50
--- /dev/null
+++ b/uncloud_vm/admin.py
@@ -0,0 +1,19 @@
+from django.contrib import admin
+
+# Register your models here.
+from uncloud_vm.models import *
+from uncloud_pay.models import Order
+
+class VMDiskInline(admin.TabularInline):
+ model = VMDiskProduct
+
+class OrderInline(admin.TabularInline):
+ model = Order
+
+class VMProductAdmin(admin.ModelAdmin):
+ inlines = [
+ VMDiskInline
+ ]
+
+admin.site.register(VMProduct, VMProductAdmin)
+admin.site.register(VMDiskProduct)
diff --git a/uncloud/uncloud_vm/apps.py b/uncloud_vm/apps.py
similarity index 100%
rename from uncloud/uncloud_vm/apps.py
rename to uncloud_vm/apps.py
diff --git a/uncloud_vm/management/commands/vm.py b/uncloud_vm/management/commands/vm.py
new file mode 100644
index 0000000..667c5ad
--- /dev/null
+++ b/uncloud_vm/management/commands/vm.py
@@ -0,0 +1,119 @@
+import json
+import logging
+
+import uncloud.secrets as secrets
+
+from django.core.management.base import BaseCommand
+from django.contrib.auth import get_user_model
+
+from uncloud_vm.models import VMSnapshotProduct, VMProduct, VMHost, VMCluster
+from datetime import datetime
+
+class Command(BaseCommand):
+ help = 'Select VM Host for VMs'
+
+ def add_arguments(self, parser):
+ parser.add_argument('--this-hostname', required=True)
+ parser.add_argument('--this-cluster', required=True)
+
+ parser.add_argument('--create-vm-snapshots', action='store_true')
+ parser.add_argument('--schedule-vms', action='store_true')
+ parser.add_argument('--start-vms', action='store_true')
+
+
+ def handle(self, *args, **options):
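+        # Dispatch to each selected sub-command (e.g. --schedule-vms, --start-vms).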
+ for cmd in [ 'create_vm_snapshots', 'schedule_vms', 'start_vms' ]:
+ if options[cmd]:
+ f = getattr(self, cmd)
+                f(*args, **options)
+
+ def schedule_vms(self, *args, **options):
+ for pending_vm in VMProduct.objects.filter(status='PENDING'):
+ cores_needed = pending_vm.cores
+ ram_needed = pending_vm.ram_in_gb
+
+ # Database filtering
+ possible_vmhosts = VMHost.objects.filter(physical_cores__gte=cores_needed)
+
+ # Logical filtering
+ possible_vmhosts = [ vmhost for vmhost in possible_vmhosts
+ if vmhost.available_cores >=cores_needed
+ and vmhost.available_ram_in_gb >= ram_needed ]
+
+ if not possible_vmhosts:
+                logging.error("No suitable host found - cannot schedule VM {}".format(pending_vm))
+ continue
+
+ vmhost = possible_vmhosts[0]
+ pending_vm.vmhost = vmhost
+ pending_vm.status = 'SCHEDULED'
+ pending_vm.save()
+
+ print("Scheduled VM {} on VMHOST {}".format(pending_vm, pending_vm.vmhost))
+
+ print(self)
+
+ def start_vms(self, *args, **options):
+ vmhost = VMHost.objects.get(hostname=options['this_hostname'])
+
+ if not vmhost:
+            raise Exception("No vmhost {} exists".format(options['this_hostname']))
+
+ # not active? done here
+        if vmhost.status != 'ACTIVE':
+ return
+
+ vms_to_start = VMProduct.objects.filter(vmhost=vmhost,
+ status='SCHEDULED')
+ for vm in vms_to_start:
+ """ run qemu:
+ check if VM is not already active / qemu running
+ prepare / create the Qemu arguments
+ """
+ print("Starting VM {}".format(VM))
+
+ def check_vms(self, *args, **options):
+ """
+ Check if all VMs that are supposed to run are running
+ """
+
+ def modify_vms(self, *args, **options):
+ """
+ Check all VMs that are requested to be modified and restart them
+ """
+
+ def create_vm_snapshots(self, *args, **options):
+        this_cluster = VMCluster.objects.get(name=options['this_cluster'])
+
+ for snapshot in VMSnapshotProduct.objects.filter(status='PENDING',
+ cluster=this_cluster):
+ if not snapshot.extra_data:
+ snapshot.extra_data = {}
+
+ # TODO: implement locking here
+ if 'creating_hostname' in snapshot.extra_data:
+ pass
+
+ snapshot.extra_data['creating_hostname'] = options['this_hostname']
+ snapshot.extra_data['creating_start'] = str(datetime.now())
+ snapshot.save()
+
+ # something on the line of:
+            # for disk in vm.disks:
+ # rbd snap create pool/image-name@snapshot name
+ # snapshot.extra_data['snapshots']
+ # register the snapshot names in extra_data (?)
+
+ print(snapshot)
+
+ def check_health(self, *args, **options):
+ pending_vms = VMProduct.objects.filter(status='PENDING')
+        vmhosts = VMHost.objects.filter(status='ACTIVE')
+
+ # 1. Check that all active hosts reported back N seconds ago
+ # 2. Check that no VM is running on a dead host
+ # 3. Migrate VMs if necessary
+ # 4. Check that no VMs have been pending for longer than Y seconds
+
+ # If VM snapshots exist without a VM -> notify user (?)
+
+ print("Nothing is good, you should implement me")
diff --git a/uncloud/uncloud_vm/migrations/0001_initial.py b/uncloud_vm/migrations/0001_initial.py
similarity index 53%
rename from uncloud/uncloud_vm/migrations/0001_initial.py
rename to uncloud_vm/migrations/0001_initial.py
index f9f40d8..4ec089a 100644
--- a/uncloud/uncloud_vm/migrations/0001_initial.py
+++ b/uncloud_vm/migrations/0001_initial.py
@@ -1,9 +1,8 @@
-# Generated by Django 3.0.3 on 2020-03-05 10:34
+# Generated by Django 3.1 on 2020-12-13 10:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
-import uuid
class Migration(migrations.Migration):
@@ -11,80 +10,76 @@ class Migration(migrations.Migration):
initial = True
dependencies = [
- ('uncloud_pay', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
+ migrations.CreateModel(
+ name='VMCluster',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('extra_data', models.JSONField(blank=True, editable=False, null=True)),
+ ('name', models.CharField(max_length=128, unique=True)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
migrations.CreateModel(
name='VMDiskImageProduct',
fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('extra_data', models.JSONField(blank=True, editable=False, null=True)),
('name', models.CharField(max_length=256)),
('is_os_image', models.BooleanField(default=False)),
- ('is_public', models.BooleanField(default=False)),
+ ('is_public', models.BooleanField(default=False, editable=False)),
('size_in_gb', models.FloatField(blank=True, null=True)),
('import_url', models.URLField(blank=True, null=True)),
('image_source', models.CharField(max_length=128, null=True)),
('image_source_type', models.CharField(max_length=128, null=True)),
- ('storage_class', models.CharField(choices=[('hdd', 'HDD'), ('ssd', 'SSD')], default='ssd', max_length=32)),
- ('status', models.CharField(choices=[('pending', 'Pending'), ('creating', 'Creating'), ('active', 'Active'), ('disabled', 'Disabled'), ('unusable', 'Unusable'), ('deleted', 'Deleted')], default='pending', max_length=32)),
+ ('storage_class', models.CharField(choices=[('HDD', 'HDD'), ('SSD', 'SSD')], default='SSD', max_length=32)),
+ ('status', models.CharField(choices=[('PENDING', 'Pending'), ('AWAITING_PAYMENT', 'Awaiting payment'), ('BEING_CREATED', 'Being created'), ('SCHEDULED', 'Scheduled'), ('ACTIVE', 'Active'), ('MODIFYING', 'Modifying'), ('DELETED', 'Deleted'), ('DISABLED', 'Disabled'), ('UNUSABLE', 'Unusable')], default='PENDING', max_length=32)),
('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
+ options={
+ 'abstract': False,
+ },
),
migrations.CreateModel(
name='VMHost',
fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('extra_data', models.JSONField(blank=True, editable=False, null=True)),
('hostname', models.CharField(max_length=253, unique=True)),
('physical_cores', models.IntegerField(default=0)),
('usable_cores', models.IntegerField(default=0)),
('usable_ram_in_gb', models.FloatField(default=0)),
- ('status', models.CharField(choices=[('pending', 'Pending'), ('creating', 'Creating'), ('active', 'Active'), ('disabled', 'Disabled'), ('unusable', 'Unusable'), ('deleted', 'Deleted')], default='pending', max_length=32)),
- ('vms', models.TextField(default='')),
+ ('status', models.CharField(choices=[('PENDING', 'Pending'), ('AWAITING_PAYMENT', 'Awaiting payment'), ('BEING_CREATED', 'Being created'), ('SCHEDULED', 'Scheduled'), ('ACTIVE', 'Active'), ('MODIFYING', 'Modifying'), ('DELETED', 'Deleted'), ('DISABLED', 'Disabled'), ('UNUSABLE', 'Unusable')], default='PENDING', max_length=32)),
+ ('vmcluster', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.vmcluster')),
],
+ options={
+ 'abstract': False,
+ },
),
migrations.CreateModel(
name='VMProduct',
fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('status', models.CharField(choices=[('PENDING', 'Pending'), ('AWAITING_PAYMENT', 'Awaiting payment'), ('BEING_CREATED', 'Being created'), ('ACTIVE', 'Active'), ('DELETED', 'Deleted')], default='PENDING', max_length=32)),
- ('name', models.CharField(max_length=32)),
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('name', models.CharField(blank=True, max_length=32, null=True)),
('cores', models.IntegerField()),
('ram_in_gb', models.FloatField()),
- ('vmid', models.IntegerField(null=True)),
- ('order', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.Order')),
- ('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
- ('vmhost', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.VMHost')),
+ ('vmcluster', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.vmcluster')),
+ ('vmhost', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.vmhost')),
],
- options={
- 'abstract': False,
- },
- ),
- migrations.CreateModel(
- name='VMWithOSProduct',
- fields=[
- ('vmproduct_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='uncloud_vm.VMProduct')),
- ],
- options={
- 'abstract': False,
- },
- bases=('uncloud_vm.vmproduct',),
),
migrations.CreateModel(
name='VMSnapshotProduct',
fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('status', models.CharField(choices=[('PENDING', 'Pending'), ('AWAITING_PAYMENT', 'Awaiting payment'), ('BEING_CREATED', 'Being created'), ('ACTIVE', 'Active'), ('DELETED', 'Deleted')], default='PENDING', max_length=32)),
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gb_ssd', models.FloatField(editable=False)),
('gb_hdd', models.FloatField(editable=False)),
- ('order', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.Order')),
- ('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
- ('vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.VMProduct')),
+ ('vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='snapshots', to='uncloud_vm.vmproduct')),
],
- options={
- 'abstract': False,
- },
),
migrations.CreateModel(
name='VMNetworkCard',
@@ -92,17 +87,25 @@ class Migration(migrations.Migration):
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mac_address', models.BigIntegerField()),
('ip_address', models.GenericIPAddressField(blank=True, null=True)),
- ('vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.VMProduct')),
+ ('vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.vmproduct')),
],
),
migrations.CreateModel(
name='VMDiskProduct',
fields=[
- ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('size_in_gb', models.FloatField(blank=True)),
- ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.VMDiskImageProduct')),
- ('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
- ('vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.VMProduct')),
+ ('disk_type', models.CharField(choices=[('ceph/ssd', 'Ceph Ssd'), ('ceph/hdd', 'Ceph Hdd'), ('local/ssd', 'Local Ssd'), ('local/hdd', 'Local Hdd')], default='ceph/ssd', max_length=20)),
+ ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.vmdiskimageproduct')),
+ ('vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.vmproduct')),
],
),
+ migrations.CreateModel(
+ name='VMWithOSProduct',
+ fields=[
+ ('vmproduct_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='uncloud_vm.vmproduct')),
+ ('primary_disk', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='uncloud_vm.vmdiskproduct')),
+ ],
+ bases=('uncloud_vm.vmproduct',),
+ ),
]
diff --git a/uncloud_vm/migrations/0002_vmproduct_owner.py b/uncloud_vm/migrations/0002_vmproduct_owner.py
new file mode 100644
index 0000000..3b96a87
--- /dev/null
+++ b/uncloud_vm/migrations/0002_vmproduct_owner.py
@@ -0,0 +1,21 @@
+# Generated by Django 3.1.4 on 2021-04-14 10:40
+
+from django.conf import settings
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
+ ('uncloud_vm', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='vmproduct',
+ name='owner',
+ field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
+ ),
+ ]
diff --git a/uncloud_vm/migrations/0003_vmproduct_created_order_at.py b/uncloud_vm/migrations/0003_vmproduct_created_order_at.py
new file mode 100644
index 0000000..8f5d0c4
--- /dev/null
+++ b/uncloud_vm/migrations/0003_vmproduct_created_order_at.py
@@ -0,0 +1,20 @@
+# Generated by Django 3.1.4 on 2021-04-14 10:46
+
+import datetime
+from django.db import migrations, models
+from django.utils.timezone import utc
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud_vm', '0002_vmproduct_owner'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='vmproduct',
+ name='created_order_at',
+ field=models.DateTimeField(default=datetime.datetime(2021, 4, 14, 10, 46, 14, 96330, tzinfo=utc)),
+ ),
+ ]
diff --git a/uncloud_vm/migrations/0004_auto_20210414_1048.py b/uncloud_vm/migrations/0004_auto_20210414_1048.py
new file mode 100644
index 0000000..20214bc
--- /dev/null
+++ b/uncloud_vm/migrations/0004_auto_20210414_1048.py
@@ -0,0 +1,24 @@
+# Generated by Django 3.1.4 on 2021-04-14 10:48
+
+import datetime
+from django.db import migrations, models
+from django.utils.timezone import utc
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud_vm', '0003_vmproduct_created_order_at'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='vmproduct',
+ name='created_order_at',
+ ),
+ migrations.AddField(
+ model_name='vmproduct',
+ name='create_order_at',
+ field=models.DateTimeField(default=datetime.datetime(2021, 4, 14, 10, 48, 6, 641056, tzinfo=utc)),
+ ),
+ ]
diff --git a/uncloud_vm/migrations/0005_auto_20210414_1119.py b/uncloud_vm/migrations/0005_auto_20210414_1119.py
new file mode 100644
index 0000000..ef9df79
--- /dev/null
+++ b/uncloud_vm/migrations/0005_auto_20210414_1119.py
@@ -0,0 +1,24 @@
+# Generated by Django 3.1.4 on 2021-04-14 11:19
+
+import datetime
+from django.db import migrations, models
+from django.utils.timezone import utc
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud_vm', '0004_auto_20210414_1048'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='vmproduct',
+ name='create_order_at',
+ ),
+ migrations.AddField(
+ model_name='vmproduct',
+ name='created_order_at',
+ field=models.DateTimeField(default=datetime.datetime(2021, 4, 14, 11, 19, 39, 447274, tzinfo=utc)),
+ ),
+ ]
diff --git a/uncloud_vm/migrations/0006_auto_20210414_1122.py b/uncloud_vm/migrations/0006_auto_20210414_1122.py
new file mode 100644
index 0000000..2c302fb
--- /dev/null
+++ b/uncloud_vm/migrations/0006_auto_20210414_1122.py
@@ -0,0 +1,20 @@
+# Generated by Django 3.1.4 on 2021-04-14 11:22
+
+import datetime
+from django.db import migrations, models
+from django.utils.timezone import utc
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('uncloud_vm', '0005_auto_20210414_1119'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='vmproduct',
+ name='created_order_at',
+ field=models.DateTimeField(default=datetime.datetime(2021, 4, 14, 11, 22, 11, 352536, tzinfo=utc)),
+ ),
+ ]
diff --git a/uncloud_vm/migrations/__init__.py b/uncloud_vm/migrations/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/uncloud/uncloud_vm/models.py b/uncloud_vm/models.py
similarity index 53%
rename from uncloud/uncloud_vm/models.py
rename to uncloud_vm/models.py
index 57b54cf..952cde9 100644
--- a/uncloud/uncloud_vm/models.py
+++ b/uncloud_vm/models.py
@@ -1,33 +1,26 @@
-import uuid
+import datetime
+from django.utils import timezone
from django.db import models
from django.contrib.auth import get_user_model
-# Uncomment if you override model's clean method
-# from django.core.exceptions import ValidationError
-
from uncloud_pay.models import Product, RecurringPeriod
+from uncloud.models import UncloudModel, UncloudStatus
+
import uncloud_pay.models as pay_models
import uncloud_storage.models
-STATUS_CHOICES = (
- ('pending', 'Pending'), # Initial state
- ('creating', 'Creating'), # Creating VM/image/etc.
- ('active', 'Active'), # Is usable / active
- ('disabled', 'Disabled'), # Is usable, but cannot be used for new things
- ('unusable', 'Unusable'), # Has some kind of error
- ('deleted', 'Deleted'), # Does not exist anymore, only DB entry as a log
-)
-
-STATUS_DEFAULT = 'pending'
-
-
-class VMHost(models.Model):
- uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
+class VMCluster(UncloudModel):
+ name = models.CharField(max_length=128, unique=True)
+class VMHost(UncloudModel):
# 253 is the maximum DNS name length
hostname = models.CharField(max_length=253, unique=True)
+ vmcluster = models.ForeignKey(
+ VMCluster, on_delete=models.CASCADE, editable=False, blank=True, null=True
+ )
+
# indirectly gives a maximum number of cores / VM - f.i. 32
physical_cores = models.IntegerField(default=0)
@@ -38,7 +31,7 @@ class VMHost(models.Model):
usable_ram_in_gb = models.FloatField(default=0)
status = models.CharField(
- max_length=32, choices=STATUS_CHOICES, default=STATUS_DEFAULT
+ max_length=32, choices=UncloudStatus.choices, default=UncloudStatus.PENDING
)
@property
@@ -51,55 +44,61 @@ class VMHost(models.Model):
@property
def available_ram_in_gb(self):
- return self.usable_ram_in_gb - sum([vm.ram_in_gb for vm in self.vms ])
+ return self.usable_ram_in_gb - self.used_ram_in_gb
@property
def available_cores(self):
return self.usable_cores - sum([vm.cores for vm in self.vms ])
-class VMProduct(Product):
+
+class VMProduct(models.Model):
+ owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE,
+ blank=True, null=True)
vmhost = models.ForeignKey(
VMHost, on_delete=models.CASCADE, editable=False, blank=True, null=True
)
- # VM-specific. The name is only intended for customers: it's a pain to
- # remember IDs (speaking from experience as ungleich customer)!
+ vmcluster = models.ForeignKey(
+ VMCluster, on_delete=models.CASCADE, editable=False, blank=True, null=True
+ )
+
name = models.CharField(max_length=32, blank=True, null=True)
cores = models.IntegerField()
ram_in_gb = models.FloatField()
+    # timezone.now is passed as a callable so the default is evaluated at
+    # save time, not frozen to the timestamp at which the module was imported.
+    created_order_at = models.DateTimeField(default=timezone.now)
+ @property
+ def recurring_price(self):
+ return self.cores * 3 + self.ram_in_gb * 4
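+    # Illustrative example (the unit/currency of the result is not specified
+    # here): a VM with 2 cores and 4 GB RAM costs 2 * 3 + 4 * 4 = 22 per
+    # recurring period.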
- def recurring_price(self, recurring_period=RecurringPeriod.PER_MONTH):
- # TODO: move magic numbers in variables
- if recurring_period == RecurringPeriod.PER_MONTH:
- return self.cores * 3 + self.ram_in_gb * 4
- elif recurring_period == RecurringPeriod.PER_HOUR:
- return self.cores * 4.0/(30 * 24) + self.ram_in_gb * 4.5/(30* 24)
- else:
- raise Exception('Invalid recurring period for VM Product pricing.')
-
- def __str__(self):
- return "VM {} ({}): {} cores {} gb ram".format(self.uuid,
- self.name,
- self.cores,
- self.ram_in_gb)
@property
def description(self):
return "Virtual machine '{}': {} core(s), {}GB memory".format(
self.name, self.cores, self.ram_in_gb)
- @staticmethod
- def allowed_recurring_periods():
- return list(filter(
- lambda pair: pair[0] in [RecurringPeriod.PER_MONTH, RecurringPeriod.PER_HOUR],
- RecurringPeriod.choices))
+ # @staticmethod
+ # def allowed_recurring_periods():
+ # return list(filter(
+ # lambda pair: pair[0] in [RecurringPeriod.PER_365D,
+ # RecurringPeriod.PER_30D, RecurringPeriod.PER_HOUR],
+ # RecurringPeriod.choices))
+
+ def create_order_at(self, dt):
+ self.created_order_at = dt
+
+ def create_or_update_order(self, when_to_start):
+ self.created_order_at = when_to_start
+
+ def __str__(self):
+ return f"VM id={self.id},name={self.name},cores={self.cores},ram_in_gb={self.ram_in_gb}"
+
class VMWithOSProduct(VMProduct):
- pass
+ primary_disk = models.ForeignKey('VMDiskProduct', on_delete=models.CASCADE, null=True)
-class VMDiskImageProduct(models.Model):
+class VMDiskImageProduct(UncloudModel):
"""
Images are used for cloning/linking.
@@ -107,16 +106,13 @@ class VMDiskImageProduct(models.Model):
"""
- uuid = models.UUIDField(
- primary_key=True, default=uuid.uuid4, editable=False
- )
owner = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, editable=False
)
name = models.CharField(max_length=256)
is_os_image = models.BooleanField(default=False)
- is_public = models.BooleanField(default=False)
+ is_public = models.BooleanField(default=False, editable=False) # only allow admins to set this
size_in_gb = models.FloatField(null=True, blank=True)
import_url = models.URLField(null=True, blank=True)
@@ -128,15 +124,25 @@ class VMDiskImageProduct(models.Model):
default = uncloud_storage.models.StorageClass.SSD)
status = models.CharField(
- max_length=32, choices=STATUS_CHOICES, default=STATUS_DEFAULT
+ max_length=32, choices=UncloudStatus.choices, default=UncloudStatus.PENDING
)
def __str__(self):
- return "VMDiskImage {} ({}): {} gb".format(self.uuid,
+ return "VMDiskImage {} ({}): {} gb".format(self.id,
self.name,
self.size_in_gb)
+# See https://docs.djangoproject.com/en/dev/ref/models/fields/#field-choices-enum-types
+class VMDiskType(models.TextChoices):
+ """
+ Types of disks that can be attached to VMs
+ """
+ CEPH_SSD = 'ceph/ssd'
+ CEPH_HDD = 'ceph/hdd'
+ LOCAL_SSD = 'local/ssd'
+ LOCAL_HDD = 'local/hdd'
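+    # Example: VMDiskType.CEPH_SSD == 'ceph/ssd', and VMDiskType.choices
+    # yields (value, label) pairs that feed the CharField choices of
+    # VMDiskProduct.disk_type below.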
+
class VMDiskProduct(models.Model):
"""
@@ -147,30 +153,33 @@ class VMDiskProduct(models.Model):
    It can be enlarged, but not shrunk, compared to the VMDiskImageProduct.
"""
- uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
- owner = models.ForeignKey(get_user_model(),
- on_delete=models.CASCADE,
- editable=False)
-
vm = models.ForeignKey(VMProduct, on_delete=models.CASCADE)
- image = models.ForeignKey(VMDiskImageProduct, on_delete=models.CASCADE)
+
+ image = models.ForeignKey(VMDiskImageProduct, on_delete=models.CASCADE,
+ blank=True, null=True)
size_in_gb = models.FloatField(blank=True)
- # Sample code for clean method
+ disk_type = models.CharField(
+ max_length=20,
+ choices=VMDiskType.choices,
+ default=VMDiskType.CEPH_SSD)
- # Ensures that a VMDiskProduct can only be created from a VMDiskImageProduct
- # that is in status 'active'
+ def __str__(self):
+ return f"Disk {self.size_in_gb}GB ({self.disk_type}) for {self.vm}"
- # def clean(self):
- # if self.image.status != 'active':
- # raise ValidationError({
- # 'image': 'VM Disk must be created from an active disk image.'
- # })
+ @property
+ def recurring_price(self):
+ if self.disk_type == VMDiskType.CEPH_SSD:
+ price_per_gb = 3.5/10
+ elif self.disk_type == VMDiskType.CEPH_HDD:
+ price_per_gb = 1.5/100
+ elif self.disk_type == VMDiskType.LOCAL_SSD:
+ price_per_gb = 3.5/10
+        elif self.disk_type == VMDiskType.LOCAL_HDD:
+ price_per_gb = 1.5/100
- def save(self, *args, **kwargs):
- self.full_clean()
- super().save(*args, **kwargs)
+ return self.size_in_gb * price_per_gb
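+    # Illustrative example (same unspecified unit as the VM prices above):
+    # a 100 GB ceph/ssd disk costs 100 * 3.5/10 = 35 per recurring period,
+    # whereas a 100 GB ceph/hdd disk costs 100 * 1.5/100 = 1.5.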
class VMNetworkCard(models.Model):
@@ -182,7 +191,7 @@ class VMNetworkCard(models.Model):
null=True)
-class VMSnapshotProduct(Product):
+class VMSnapshotProduct(models.Model):
gb_ssd = models.FloatField(editable=False)
gb_hdd = models.FloatField(editable=False)
diff --git a/uncloud_vm/serializers.py b/uncloud_vm/serializers.py
new file mode 100644
index 0000000..a60d10b
--- /dev/null
+++ b/uncloud_vm/serializers.py
@@ -0,0 +1,143 @@
+from django.contrib.auth import get_user_model
+
+from rest_framework import serializers
+
+from .models import *
+from uncloud_pay.models import RecurringPeriod, BillingAddress
+
+# XXX: does not seem to be used?
+
+GB_SSD_PER_DAY = 0.012
+GB_HDD_PER_DAY = 0.0006
+
+
+###
+# Admin views.
+
+class VMHostSerializer(serializers.HyperlinkedModelSerializer):
+ vms = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
+
+ class Meta:
+ model = VMHost
+ fields = '__all__'
+ read_only_fields = [ 'vms' ]
+
+class VMClusterSerializer(serializers.HyperlinkedModelSerializer):
+ class Meta:
+ model = VMCluster
+ fields = '__all__'
+
+
+###
+# Disks.
+
+class VMDiskProductSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = VMDiskProduct
+ fields = '__all__'
+
+class CreateVMDiskProductSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = VMDiskProduct
+ fields = ['size_in_gb', 'image']
+
+class CreateManagedVMDiskProductSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = VMDiskProduct
+ fields = ['size_in_gb']
+
+class VMDiskImageProductSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = VMDiskImageProduct
+ fields = '__all__'
+
+class VMSnapshotProductSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = VMSnapshotProduct
+ fields = '__all__'
+
+
+ # verify that vm.owner == user.request
+ def validate_vm(self, value):
+ if not value.owner == self.context['request'].user:
+ raise serializers.ValidationError("VM {} not found for owner {}.".format(value,
+ self.context['request'].user))
+ disks = VMDiskProduct.objects.filter(vm=value)
+
+ if len(disks) == 0:
+ raise serializers.ValidationError("VM {} does not have any disks, cannot snapshot".format(value.id))
+
+ return value
+
+ pricing = {}
+ pricing['per_gb_ssd'] = 0.012
+ pricing['per_gb_hdd'] = 0.0006
+ pricing['recurring_period'] = 'per_day'
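+    # Note: these hard-coded per-day rates duplicate GB_SSD_PER_DAY and
+    # GB_HDD_PER_DAY defined at the top of this module; they could reference
+    # those constants instead of repeating the numbers.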
+
+###
+# VMs
+
+# Helper used in uncloud_service for services allocating VM.
+class ManagedVMProductSerializer(serializers.ModelSerializer):
+ """
+ Managed VM serializer used in ungleich_service app.
+ """
+ primary_disk = CreateManagedVMDiskProductSerializer()
+ class Meta:
+ model = VMWithOSProduct
+ fields = [ 'cores', 'ram_in_gb', 'primary_disk']
+
+class VMProductSerializer(serializers.ModelSerializer):
+ primary_disk = CreateVMDiskProductSerializer()
+ snapshots = VMSnapshotProductSerializer(many=True, read_only=True)
+ disks = VMDiskProductSerializer(many=True, read_only=True)
+
+ class Meta:
+ model = VMWithOSProduct
+ fields = ['order', 'owner', 'status', 'name', 'cores',
+ 'ram_in_gb', 'primary_disk', 'snapshots', 'disks', 'extra_data']
+ read_only_fields = ['order', 'owner', 'status']
+
+class OrderVMProductSerializer(VMProductSerializer):
+ # recurring_period = serializers.ChoiceField(
+ # choices=VMWithOSProduct.allowed_recurring_periods())
+
+ def __init__(self, *args, **kwargs):
+ super(VMProductSerializer, self).__init__(*args, **kwargs)
+
+ class Meta:
+ model = VMProductSerializer.Meta.model
+ fields = VMProductSerializer.Meta.fields + [ 'recurring_period' ]
+ read_only_fields = VMProductSerializer.Meta.read_only_fields
+
+# Nico's playground.
+class NicoVMProductSerializer(serializers.ModelSerializer):
+ snapshots = VMSnapshotProductSerializer(many=True, read_only=True)
+ order = serializers.StringRelatedField()
+
+ class Meta:
+ model = VMProduct
+ read_only_fields = ['order', 'owner', 'status',
+ 'vmhost', 'vmcluster', 'snapshots',
+ 'extra_data' ]
+ fields = read_only_fields + [ 'name',
+ 'cores',
+ 'ram_in_gb'
+ ]
+
+class DCLVMProductSerializer(serializers.HyperlinkedModelSerializer):
+ """
+ Create an interface similar to standard DCL
+ """
+
+ # Custom field used at creation (= ordering) only.
+ # recurring_period = serializers.ChoiceField(
+ # choices=VMProduct.allowed_recurring_periods())
+
+ os_disk_uuid = serializers.UUIDField()
+ # os_disk_size =
+
+ class Meta:
+ model = VMProduct
diff --git a/uncloud_vm/tests.py b/uncloud_vm/tests.py
new file mode 100644
index 0000000..e5d403f
--- /dev/null
+++ b/uncloud_vm/tests.py
@@ -0,0 +1,98 @@
+import datetime
+
+import parsedatetime
+
+from django.test import TestCase
+from django.contrib.auth import get_user_model
+from django.utils import timezone
+from django.core.exceptions import ValidationError
+
+from uncloud_vm.models import VMDiskImageProduct, VMDiskProduct, VMProduct, VMHost
+from uncloud_pay.models import Order, RecurringPeriod
+
+User = get_user_model()
+cal = parsedatetime.Calendar()
+
+
+# If you want to check the test database using some GUI/cli tool
+# then use the following connecting parameters
+
+# host: localhost
+# database: test_uncloud
+# user: root
+# password:
+# port: 5432
+
+class VMTestCase(TestCase):
+ @classmethod
+ def setUpClass(cls):
+ # Setup vm host
+ cls.vm_host, created = VMHost.objects.get_or_create(
+ hostname='serverx.placey.ungleich.ch', physical_cores=32, usable_cores=320,
+ usable_ram_in_gb=512.0, status='active'
+ )
+ super().setUpClass()
+
+ def setUp(self) -> None:
+ # Setup two users as it is common to test with different user
+ self.user = User.objects.create_user(
+ username='testuser', email='test@test.com', first_name='Test', last_name='User'
+ )
+ self.user2 = User.objects.create_user(
+ username='Meow', email='meow123@test.com', first_name='Meow', last_name='Cat'
+ )
+ super().setUp()
+
+ def create_sample_vm(self, owner):
+ one_month_later, parse_status = cal.parse("1 month later")
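+        # parsedatetime's Calendar.parse returns a (time_struct, parse_status)
+        # tuple; one_month_later[:6] below gives (year, month, day, hour,
+        # minute, second) for the order's ending_date.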
+ return VMProduct.objects.create(
+ vmhost=self.vm_host, cores=2, ram_in_gb=4, owner=owner,
+ order=Order.objects.create(
+ owner=owner,
+ creation_date=datetime.datetime.now(tz=timezone.utc),
+ starting_date=datetime.datetime.now(tz=timezone.utc),
+ ending_date=datetime.datetime(*one_month_later[:6], tzinfo=timezone.utc),
+ recurring_period=RecurringPeriod.PER_MONTH
+ )
+ )
+
+# TODO: the logic tested by this test is not implemented yet.
+# def test_disk_product(self):
+# """Ensures that a VMDiskProduct can only be created from a VMDiskImageProduct
+# that is in status 'active'"""
+#
+# vm = self.create_sample_vm(owner=self.user)
+#
+# pending_disk_image = VMDiskImageProduct.objects.create(
+# owner=self.user, name='pending_disk_image', is_os_image=True, is_public=True, size_in_gb=10,
+# status='pending'
+# )
+# try:
+# vm_disk_product = VMDiskProduct.objects.create(
+# owner=self.user, vm=vm, image=pending_disk_image, size_in_gb=10
+# )
+# except ValidationError:
+# vm_disk_product = None
+#
+# self.assertIsNone(
+# vm_disk_product,
+# msg='VMDiskProduct created with disk image whose status is not active.'
+# )
+
+# TODO: the logic tested by this test is not implemented yet.
+# def test_vm_disk_product_creation_for_someone_else(self):
+# """Ensure that a user can only create a VMDiskProduct for his/her own VM"""
+#
+# # Create a VM which is ownership of self.user2
+# someone_else_vm = self.create_sample_vm(owner=self.user2)
+#
+# # 'self.user' would try to create a VMDiskProduct for 'user2's VM
+# with self.assertRaises(ValidationError, msg='User created a VMDiskProduct for someone else VM.'):
+# vm_disk_product = VMDiskProduct.objects.create(
+# owner=self.user, vm=someone_else_vm,
+# size_in_gb=10,
+# image=VMDiskImageProduct.objects.create(
+# owner=self.user, name='disk_image', is_os_image=True, is_public=True, size_in_gb=10,
+# status='active'
+# )
+# )
diff --git a/uncloud/uncloud_vm/views.py b/uncloud_vm/views.py
similarity index 68%
rename from uncloud/uncloud_vm/views.py
rename to uncloud_vm/views.py
index 7b5fa4f..67f8656 100644
--- a/uncloud/uncloud_vm/views.py
+++ b/uncloud_vm/views.py
@@ -1,5 +1,6 @@
from django.db import transaction
from django.shortcuts import render
+from django.utils import timezone
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
@@ -8,23 +9,18 @@ from rest_framework import viewsets, permissions
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
-from .models import VMHost, VMProduct, VMSnapshotProduct, VMDiskProduct, VMDiskImageProduct
-from uncloud_pay.models import Order
+from .models import VMHost, VMProduct, VMWithOSProduct, VMSnapshotProduct, VMDiskProduct, VMDiskImageProduct, VMCluster
+from uncloud_pay.models import Order, BillingAddress
-from .serializers import (VMHostSerializer, VMProductSerializer,
- VMSnapshotProductSerializer, VMDiskImageProductSerializer,
- VMDiskProductSerializer, DCLVMProductSerializer)
+from .serializers import *
from uncloud_pay.helpers import ProductViewSet
-
import datetime
-class VMHostViewSet(viewsets.ModelViewSet):
- serializer_class = VMHostSerializer
- queryset = VMHost.objects.all()
- permission_classes = [permissions.IsAdminUser]
+###
+# Generic disk image views. Do not require orders / billing.
-class VMDiskImageProductMineViewSet(viewsets.ModelViewSet):
+class VMDiskImageProductViewSet(ProductViewSet):
permission_classes = [permissions.IsAuthenticated]
serializer_class = VMDiskImageProductSerializer
@@ -32,7 +28,7 @@ class VMDiskImageProductMineViewSet(viewsets.ModelViewSet):
if self.request.user.is_superuser:
obj = VMDiskImageProduct.objects.all()
else:
- obj = VMDiskImageProduct.objects.filter(owner=self.request.user)
+ obj = VMDiskImageProduct.objects.filter(owner=self.request.user) | VMDiskImageProduct.objects.filter(is_public=True)
return obj
@@ -49,7 +45,6 @@ class VMDiskImageProductMineViewSet(viewsets.ModelViewSet):
serializer.save(owner=request.user)
return Response(serializer.data)
-
class VMDiskImageProductPublicViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = [permissions.IsAuthenticated]
serializer_class = VMDiskImageProductSerializer
@@ -57,6 +52,9 @@ class VMDiskImageProductPublicViewSet(viewsets.ReadOnlyModelViewSet):
def get_queryset(self):
return VMDiskImageProduct.objects.filter(is_public=True)
+###
+# User VM disk and snapshots.
+
class VMDiskProductViewSet(viewsets.ModelViewSet):
"""
Let a user modify their own VMDisks
@@ -65,7 +63,12 @@ class VMDiskProductViewSet(viewsets.ModelViewSet):
serializer_class = VMDiskProductSerializer
def get_queryset(self):
- return VMDiskProduct.objects.filter(owner=self.request.user)
+ if self.request.user.is_superuser:
+ obj = VMDiskProduct.objects.all()
+ else:
+ obj = VMDiskProduct.objects.filter(owner=self.request.user)
+
+ return obj
def create(self, request):
serializer = VMDiskProductSerializer(data=request.data, context={'request': request})
@@ -80,57 +83,20 @@ class VMDiskProductViewSet(viewsets.ModelViewSet):
if size_in_gb < serializer.validated_data['image'].size_in_gb:
            raise ValidationError(detail={ 'error_message': 'Size is smaller than original image' })
-
serializer.save(owner=request.user, size_in_gb=size_in_gb)
return Response(serializer.data)
-
-
-class VMProductViewSet(ProductViewSet):
- permission_classes = [permissions.IsAuthenticated]
- serializer_class = VMProductSerializer
-
- def get_queryset(self):
- if self.request.user.is_superuser:
- obj = VMProduct.objects.all()
- else:
- obj = VMProduct.objects.filter(owner=self.request.user)
-
- return obj
-
- # Use a database transaction so that we do not get half-created structure
- # if something goes wrong.
- @transaction.atomic
- def create(self, request):
- # Extract serializer data.
- serializer = VMProductSerializer(data=request.data, context={'request': request})
- serializer.is_valid(raise_exception=True)
- order_recurring_period = serializer.validated_data.pop("recurring_period")
-
- # Create base order.
- order = Order.objects.create(
- recurring_period=order_recurring_period,
- owner=request.user
- )
- order.save()
-
- # Create VM.
- vm = serializer.save(owner=request.user, order=order)
-
- # Add Product record to order (VM is mutable, allows to keep history in order).
- # XXX: Move this to some kind of on_create hook in parent Product class?
- order.add_record(vm.one_time_price,
- vm.recurring_price(order.recurring_period), vm.description)
-
- return Response(serializer.data)
-
-
class VMSnapshotProductViewSet(viewsets.ModelViewSet):
permission_classes = [permissions.IsAuthenticated]
serializer_class = VMSnapshotProductSerializer
def get_queryset(self):
- return VMSnapshotProduct.objects.filter(owner=self.request.user)
+ if self.request.user.is_superuser:
+ obj = VMSnapshotProduct.objects.all()
+ else:
+ obj = VMSnapshotProduct.objects.filter(owner=self.request.user)
+
+ return obj
def create(self, request):
serializer = VMSnapshotProductSerializer(data=request.data, context={'request': request})
@@ -162,7 +128,99 @@ class VMSnapshotProductViewSet(viewsets.ModelViewSet):
return Response(serializer.data)
+###
+# User VMs.
+class VMProductViewSet(ProductViewSet):
+ permission_classes = [permissions.IsAuthenticated]
+
+ def get_queryset(self):
+ if self.request.user.is_superuser:
+ obj = VMWithOSProduct.objects.all()
+ else:
+ obj = VMWithOSProduct.objects.filter(owner=self.request.user)
+
+ return obj
+
+ def get_serializer_class(self):
+ if self.action == 'create':
+ return OrderVMProductSerializer
+ else:
+ return VMProductSerializer
+
+ # Use a database transaction so that we do not get half-created structure
+ # if something goes wrong.
+ @transaction.atomic
+ def create(self, request):
+ # Extract serializer data.
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ order_recurring_period = serializer.validated_data.pop("recurring_period")
+
+ # Create disk image.
+ disk = VMDiskProduct(owner=request.user,
+ **serializer.validated_data.pop("primary_disk"))
+ vm = VMWithOSProduct(owner=request.user, primary_disk=disk,
+ **serializer.validated_data)
+ disk.vm = vm # XXX: Is this really needed?
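+        # Yes: VMDiskProduct.vm is a non-nullable ForeignKey, so the disk
+        # cannot be saved without pointing at its VM.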
+
+ # Create VM and Disk orders.
+ vm_order = Order.from_product(
+ vm,
+ recurring_period=order_recurring_period,
+ starting_date=timezone.now()
+ )
+
+ disk_order = Order.from_product(
+ disk,
+ recurring_period=order_recurring_period,
+ starting_date=timezone.now(),
+ depends_on=vm_order
+ )
+
+
+ # Commit to DB.
+ vm.order = vm_order
+ vm.save()
+ vm_order.save()
+
+ disk.order = disk_order
+ disk_order.save()
+ disk.save()
+
+ return Response(VMProductSerializer(vm, context={'request': request}).data)
+
+class NicoVMProductViewSet(ProductViewSet):
+ permission_classes = [permissions.IsAuthenticated]
+ serializer_class = NicoVMProductSerializer
+
+ def get_queryset(self):
+ obj = VMProduct.objects.filter(owner=self.request.user)
+ return obj
+
+ def create(self, request):
+ serializer = self.serializer_class(data=request.data, context={'request': request})
+ serializer.is_valid(raise_exception=True)
+ vm = serializer.save(owner=request.user)
+
+ return Response(serializer.data)
+
+
+###
+# Admin stuff.
+
+class VMHostViewSet(viewsets.ModelViewSet):
+ serializer_class = VMHostSerializer
+ queryset = VMHost.objects.all()
+ permission_classes = [permissions.IsAdminUser]
+
+class VMClusterViewSet(viewsets.ModelViewSet):
+ serializer_class = VMClusterSerializer
+ queryset = VMCluster.objects.all()
+ permission_classes = [permissions.IsAdminUser]
+
+##
+# Nico's playground.
# Also create:
# - /dcl/available_os
@@ -200,9 +258,4 @@ class DCLCreateVMProductViewSet(ProductViewSet):
# Create VM.
vm = serializer.save(owner=request.user, order=order)
- # Add Product record to order (VM is mutable, allows to keep history in order).
- # XXX: Move this to some kind of on_create hook in parent Product class?
- order.add_record(vm.one_time_price,
- vm.recurring_price(order.recurring_period), vm.description)
-
return Response(serializer.data)