Rename / prepare for merge with uncloud repo

This commit is contained in:
Nico Schottelius 2020-04-02 19:31:03 +02:00
commit 7a6c8739f6
118 changed files with 1499 additions and 0 deletions

View file

@ -0,0 +1,3 @@
from django.contrib import admin
# Register your models here.

View file

@ -0,0 +1,5 @@
from django.apps import AppConfig
class OpennebulaConfig(AppConfig):
    """Django application configuration for the ``opennebula`` app."""
    # Dotted Python path of the application package.
    name = 'opennebula'

View file

@ -0,0 +1,74 @@
import json
import uncloud.secrets as secrets
from xmlrpc.client import ServerProxy as RPCClient
from django.core.management.base import BaseCommand
from xmltodict import parse
from enum import IntEnum
from opennebula.models import VM as VMModel
from uncloud_vm.models import VMHost
from django_auth_ldap.backend import LDAPBackend
class HostStates(IntEnum):
    """
    OpenNebula host state codes.

    The following flags are copied from
    https://docs.opennebula.org/5.8/integration/system_interfaces/api.html#schemas-for-host
    """
    INIT = 0 # Initial state for enabled hosts
    MONITORING_MONITORED = 1 # Monitoring the host (from monitored)
    MONITORED = 2 # The host has been successfully monitored
    ERROR = 3 # An error occurred while monitoring the host
    DISABLED = 4 # The host is disabled
    MONITORING_ERROR = 5 # Monitoring the host (from error)
    MONITORING_INIT = 6 # Monitoring the host (from init)
    MONITORING_DISABLED = 7 # Monitoring the host (from disabled)
    OFFLINE = 8 # The host is totally offline
class Command(BaseCommand):
    """Synchronize hypervisor host state from OpenNebula into VMHost records."""
    help = 'Synchronize Host information from OpenNebula'

    def add_arguments(self, parser):
        # No command-specific arguments.
        pass

    def handle(self, *args, **options):
        with RPCClient(secrets.OPENNEBULA_URL) as rpc_client:
            success, response, *_ = rpc_client.one.hostpool.info(secrets.OPENNEBULA_USER_PASS)
            if success:
                # json round-trip turns xmltodict's OrderedDicts into plain
                # dicts/lists.
                response = json.loads(json.dumps(parse(response)))
                host_pool = response.get('HOST_POOL', {}).get('HOST', [])
                # xmltodict yields a single dict (not a list) when the pool
                # contains exactly one host; normalize so the loop always
                # iterates over host dicts instead of dict keys.
                if isinstance(host_pool, dict):
                    host_pool = [host_pool]
                for host in host_pool:
                    host_share = host.get('HOST_SHARE', {})
                    host_name = host.get('NAME')
                    state = int(host.get('STATE', HostStates.OFFLINE.value))
                    # Map OpenNebula states onto the coarse uncloud status set.
                    if state == HostStates.MONITORED:
                        status = 'active'
                    elif state == HostStates.DISABLED:
                        status = 'disabled'
                    else:
                        status = 'unusable'
                    # xmltodict parses all scalars as strings; convert
                    # explicitly instead of relying on implicit coercion.
                    usable_cores = int(host_share.get('TOTAL_CPU', 0))
                    usable_ram_in_kb = int(host_share.get('TOTAL_MEM', 0))
                    usable_ram_in_gb = int(usable_ram_in_kb / 2 ** 20)
                    # vms cannot be created like this -- Nico, 2020-03-17
                    # vms = host.get('VMS', {}) or {}
                    # vms = vms.get('ID', []) or []
                    # vms = ','.join(vms)
                    VMHost.objects.update_or_create(
                        hostname=host_name,
                        defaults={
                            'usable_cores': usable_cores,
                            'usable_ram_in_gb': usable_ram_in_gb,
                            'status': status
                        }
                    )
            else:
                # The RPC call failed; surface OpenNebula's error message.
                print(response)

View file

@ -0,0 +1,47 @@
import json
import uncloud.secrets as secrets
from xmlrpc.client import ServerProxy as RPCClient
from django_auth_ldap.backend import LDAPBackend
from django.core.management.base import BaseCommand
from xmltodict import parse
from opennebula.models import VM as VMModel
class Command(BaseCommand):
    """Synchronize VM state from OpenNebula into local opennebula.VM records."""
    help = 'Synchronize VM information from OpenNebula'

    def add_arguments(self, parser):
        # No command-specific arguments.
        pass

    def handle(self, *args, **options):
        with RPCClient(secrets.OPENNEBULA_URL) as rpc_client:
            # Filter flags: -2 = all VMs visible to the user, -1/-1 = whole
            # id range, -1 = any state.
            success, response, *_ = rpc_client.one.vmpool.infoextended(
                secrets.OPENNEBULA_USER_PASS, -2, -1, -1, -1
            )
            if success:
                # json round-trip converts xmltodict's OrderedDicts into
                # plain dicts/lists; guard against an empty pool instead of
                # letting a KeyError escape.
                pool = json.loads(json.dumps(parse(response))).get('VM_POOL') or {}
                vms = pool.get('VM') or []
                # xmltodict yields a single dict (not a list) when the pool
                # contains exactly one VM; normalize to a list.
                if isinstance(vms, dict):
                    vms = [vms]
                unknown_user = set()
                backend = LDAPBackend()
                for vm in vms:
                    vm_id = vm['ID']
                    vm_owner = vm['UNAME']
                    # Creates/updates the Django user from LDAP; returns None
                    # when the username is not found in the directory.
                    user = backend.populate_user(username=vm_owner)
                    if not user:
                        unknown_user.add(vm_owner)
                    else:
                        VMModel.objects.update_or_create(
                            vmid=vm_id,
                            defaults={'data': vm, 'owner': user}
                        )
                print('User not found in ldap:', unknown_user)
            else:
                # The RPC call failed; surface OpenNebula's error message.
                print(response)

View file

@ -0,0 +1,193 @@
import sys
from datetime import datetime
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.contrib.auth import get_user_model
from opennebula.models import VM as VMModel
from uncloud_vm.models import VMHost, VMProduct, VMNetworkCard, VMDiskImageProduct, VMDiskProduct
from uncloud_pay.models import Order
import logging
log = logging.getLogger(__name__)
def convert_mac_to_int(mac_address: str):
    """Return the integer value of a MAC address string.

    Octet separators (':', '.', '-' and spaces) are stripped and the
    remaining hexadecimal digits are parsed as a single number.
    """
    # Delete every separator character in one pass, then parse as hex.
    hex_digits = mac_address.translate(str.maketrans('', '', ':.- '))
    return int(hex_digits, base=16)
def get_vm_price(core, ram, ssd_size, hdd_size, n_of_ipv4, n_of_ipv6):
    """Return the recurring price of a VM from its resource amounts.

    Prices per unit: 3 per core, 4 per GB RAM, 3.5 per 10 GB SSD,
    1.5 per 100 GB HDD, 8 per IPv4 address; IPv6 addresses are free.
    """
    price = (
        3 * core
        + 4 * ram
        + (3.5 * ssd_size / 10.)
        + (1.5 * hdd_size / 100.)
        + 8 * n_of_ipv4
        + 0 * n_of_ipv6
    )
    # TODO: Find some reason about the following magical subtraction.
    return price - 8
def create_nics(one_vm, vm_product):
    """Create or refresh one VMNetworkCard per NIC of an OpenNebula VM.

    The card is identified by (integer MAC address, VM); the stored IP is
    the NIC's IPv4 address, falling back to its global IPv6 address.
    """
    for interface in one_vm.nics:
        mac_as_int = convert_mac_to_int(interface.get('MAC'))
        address = interface.get('IP', None) or interface.get('IP6_GLOBAL', None)
        VMNetworkCard.objects.update_or_create(
            mac_address=mac_as_int,
            vm=vm_product,
            defaults={'ip_address': address},
        )
def sync_disk_and_image(one_vm, vm_product, disk_owner):
    """
    Reconcile the disks of an uncloud VM with its OpenNebula counterpart.

    a) Check all opennebula disk if they are in the uncloud VM, if not add
    b) Check all uncloud disks and remove them if they are not in the opennebula VM

    one_vm: the opennebula.models.VM source record
    vm_product: the uncloud VMProduct the disks belong to
    disk_owner: user that will own the created VMDiskImageProducts
    """
    # 1-based disk counter within this VM; part of the disk identity below.
    vmdisknum = 0
    # extra_data dicts of every OpenNebula disk seen, for the cleanup pass.
    one_disks_extra_data = []
    for disk in one_vm.disks:
        vmowner = one_vm.owner
        name = disk.get('image')
        vmdisknum += 1
        log.info("Checking disk {} for VM {}".format(name, one_vm))
        # All imported images are treated as private, active OS images.
        is_os_image, is_public, status = True, False, 'active'
        image_size_in_gb = disk.get('image_size_in_gb')
        disk_size_in_gb = disk.get('size_in_gb')
        storage_class = disk.get('storage_class')
        image_source = disk.get('source')
        image_source_type = disk.get('source_type')
        # Images are keyed by name; the owner is the CLI-supplied disk_owner,
        # not the VM owner.
        image, _ = VMDiskImageProduct.objects.update_or_create(
            name=name,
            defaults={
                'owner': disk_owner,
                'is_os_image': is_os_image,
                'is_public': is_public,
                'size_in_gb': image_size_in_gb,
                'storage_class': storage_class,
                'image_source': image_source,
                'image_source_type': image_source_type,
                'status': status
            }
        )
        # identify vmdisk from opennebula - primary mapping key
        extra_data = {
            'opennebula_vm': one_vm.vmid,
            'opennebula_size_in_gb': disk_size_in_gb,
            'opennebula_source': disk.get('opennebula_source'),
            'opennebula_disk_num': vmdisknum
        }
        # Save for comparing later
        one_disks_extra_data.append(extra_data)
        # Create the VMDiskProduct only if no disk with this exact
        # extra_data exists yet (the extra_data dict is the mapping key).
        try:
            vm_disk = VMDiskProduct.objects.get(extra_data=extra_data)
        except VMDiskProduct.DoesNotExist:
            vm_disk = VMDiskProduct.objects.create(
                owner=vmowner,
                vm=vm_product,
                image=image,
                size_in_gb=disk_size_in_gb,
                extra_data=extra_data
            )
    # Now remove all disks that are not in above extra_data list
    for disk in VMDiskProduct.objects.filter(vm=vm_product):
        extra_data = disk.extra_data
        if not extra_data in one_disks_extra_data:
            log.info("Removing disk {} from VM {}".format(disk, vm_product))
            disk.delete()
    disks = [ disk.extra_data for disk in VMDiskProduct.objects.filter(vm=vm_product) ]
    log.info("VM {} has disks: {}".format(vm_product, disks))
class Command(BaseCommand):
    """Migrate every imported OpenNebula VM into a regular uncloud VMProduct."""
    help = 'Migrate Opennebula VM to regular (uncloud) vm'
    def add_arguments(self, parser):
        parser.add_argument('--disk-owner', required=True, help="The user who owns the the opennebula disks")
    def handle(self, *args, **options):
        log.debug("{} {}".format(args, options))
        # Raises DoesNotExist if the --disk-owner username is unknown.
        disk_owner = get_user_model().objects.get(username=options['disk_owner'])
        for one_vm in VMModel.objects.all():
            # VMs without a host history cannot be placed; skip them.
            if not one_vm.last_host:
                log.warning("No VMHost for VM {} - VM might be on hold - skipping".format(one_vm.vmid))
                continue
            # The VMHost must already exist (created by the host sync);
            # a missing host is a hard error that aborts the whole run.
            try:
                vmhost = VMHost.objects.get(hostname=one_vm.last_host)
            except VMHost.DoesNotExist:
                log.error("VMHost {} does not exist, aborting".format(one_vm.last_host))
                raise
            cores = one_vm.cores
            ram_in_gb = one_vm.ram_in_gb
            owner = one_vm.owner
            status = 'active'
            # Sum disk sizes per storage pool; 'one' and 'ssd' pools count
            # as SSD, 'hdd' as HDD.
            ssd_size = sum([ disk['size_in_gb'] for disk in one_vm.disks if disk['pool_name'] in ['ssd', 'one'] ])
            hdd_size = sum([ disk['size_in_gb'] for disk in one_vm.disks if disk['pool_name'] in ['hdd'] ])
            # List of IPv4 addresses and Global IPv6 addresses
            ipv4, ipv6 = one_vm.ips
            # TODO: Insert actual/real creation_date, starting_date, ending_date
            # instead of pseudo one we are putting currently
            creation_date = starting_date = datetime.now(tz=timezone.utc)
            # Price calculation based on datacenterlight.ch
            one_time_price = 0
            recurring_period = 'per_month'
            recurring_price = get_vm_price(cores, ram_in_gb,
                                           ssd_size, hdd_size,
                                           len(ipv4), len(ipv6))
            # Re-use an existing VMProduct for this OpenNebula id, or create
            # a fresh one together with its Order.
            try:
                vm_product = VMProduct.objects.get(extra_data__opennebula_id=one_vm.vmid)
            except VMProduct.DoesNotExist:
                order = Order.objects.create(
                    owner=owner,
                    creation_date=creation_date,
                    starting_date=starting_date
                )
                vm_product = VMProduct(
                    extra_data={ 'opennebula_id': one_vm.vmid },
                    name=one_vm.uncloud_name,
                    order=order
                )
            # we don't use update_or_create, as filtering by json AND setting json
            # at the same time does not work
            vm_product.vmhost = vmhost
            vm_product.owner = owner
            vm_product.cores = cores
            vm_product.ram_in_gb = ram_in_gb
            vm_product.status = status
            vm_product.save()
            # Create VMNetworkCards
            create_nics(one_vm, vm_product)
            # Create VMDiskImageProduct and VMDiskProduct
            sync_disk_and_image(one_vm, vm_product, disk_owner=disk_owner)

View file

@ -0,0 +1,28 @@
# Generated by Django 3.0.3 on 2020-02-23 17:12
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Initial schema of the opennebula app: a VM record keyed by the
    # OpenNebula vmid, holding the raw JSON data and the owning user.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='VM',
            fields=[
                ('vmid', models.IntegerField(primary_key=True, serialize=False)),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                ('data', django.contrib.postgres.fields.jsonb.JSONField()),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]

View file

@ -0,0 +1,27 @@
# Generated by Django 3.0.3 on 2020-02-25 13:35
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Replace the integer 'vmid' primary key (and the separate 'uuid'
    # field) with a single UUID 'id' primary key.
    dependencies = [
        ('opennebula', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='vm',
            name='uuid',
        ),
        migrations.RemoveField(
            model_name='vm',
            name='vmid',
        ),
        migrations.AddField(
            model_name='vm',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, unique=True),
        ),
    ]

View file

@ -0,0 +1,19 @@
# Generated by Django 3.0.3 on 2020-02-25 14:28
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Change the 'id' primary key from a UUIDField to a CharField
    # (max 64 chars); new rows still default to a generated uuid4.
    dependencies = [
        ('opennebula', '0002_auto_20200225_1335'),
    ]
    operations = [
        migrations.AlterField(
            model_name='vm',
            name='id',
            field=models.CharField(default=uuid.uuid4, max_length=64, primary_key=True, serialize=False, unique=True),
        ),
    ]

View file

@ -0,0 +1,23 @@
# Generated by Django 3.0.3 on 2020-02-25 18:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Drop the char 'id' primary key and restore the integer 'vmid' as PK.
    dependencies = [
        ('opennebula', '0003_auto_20200225_1428'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='vm',
            name='id',
        ),
        migrations.AddField(
            model_name='vm',
            name='vmid',
            # default=42 is a throwaway value only used to fill existing
            # rows during the migration (preserve_default=False).
            field=models.IntegerField(default=42, primary_key=True, serialize=False),
            preserve_default=False,
        ),
    ]

View file

@ -0,0 +1,91 @@
import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import JSONField
# ungleich specific
# ungleich specific: maps OpenNebula storage pool names to uncloud
# storage classes (the legacy 'one' pool is SSD-backed).
storage_class_mapping = {
    'one': 'ssd',
    'ssd': 'ssd',
    'hdd': 'hdd'
}
class VM(models.Model):
    """Raw OpenNebula VM as imported by the sync command.

    The full OpenNebula VM document (parsed from XML) is stored in ``data``;
    the properties below pull typed values out of that JSON structure.
    """
    # OpenNebula's numeric VM id is reused as primary key.
    vmid = models.IntegerField(primary_key=True)
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    # Entire OpenNebula VM document as nested dicts/lists.
    data = JSONField()
    @property
    def uncloud_name(self):
        """Name used for the corresponding uncloud VMProduct."""
        return "opennebula-{}".format(self.vmid)
    @property
    def cores(self):
        # Raises KeyError if TEMPLATE/VCPU is absent — presumably every
        # imported VM has VCPU set; TODO confirm against the sync data.
        return int(self.data['TEMPLATE']['VCPU'])
    @property
    def ram_in_gb(self):
        # OpenNebula stores MEMORY in MB; may return a float (e.g. 0.5).
        return int(self.data['TEMPLATE']['MEMORY'])/1024
    @property
    def disks(self):
        """
        Return the VM's disks as a list of normalized dicts.

        If there is no disk then the key DISK does not exist.
        If there is only one disk, we have a dictionary in the database.
        If there are multiple disks, we have a list of dictionaries in the database.
        """
        disks = []
        if 'DISK' in self.data['TEMPLATE']:
            # Normalize the single-disk case to a one-element list.
            if type(self.data['TEMPLATE']['DISK']) is dict:
                disks = [self.data['TEMPLATE']['DISK']]
            else:
                disks = self.data['TEMPLATE']['DISK']
        # SIZE/ORIGINAL_SIZE are in MB; converted to GB (may be float).
        # Raises KeyError on disks lacking any of these fields or with a
        # pool name missing from storage_class_mapping.
        disks = [
            {
                'size_in_gb': int(d['SIZE'])/1024,
                'opennebula_source': d['SOURCE'],
                'opennebula_name': d['IMAGE'],
                'image_size_in_gb': int(d['ORIGINAL_SIZE'])/1024,
                'pool_name': d['POOL_NAME'],
                'image': d['IMAGE'],
                'source': d['SOURCE'],
                'source_type': d['TM_MAD'],
                'storage_class': storage_class_mapping[d['POOL_NAME']]
            }
            for d in disks
        ]
        return disks
    @property
    def last_host(self):
        # HISTORY_RECORDS/HISTORY may be absent or explicitly null; the
        # `or {}` guards turn null into an empty dict before .get().
        return ((self.data.get('HISTORY_RECORDS', {}) or {}).get('HISTORY', {}) or {}).get('HOSTNAME', None)
    @property
    def graphics(self):
        """GRAPHICS section of the template, or {} when not configured."""
        return self.data.get('TEMPLATE', {}).get('GRAPHICS', {})
    @property
    def nics(self):
        """Return the NICs as a list (a single NIC is stored as a dict)."""
        _nics = self.data.get('TEMPLATE', {}).get('NIC', {})
        if isinstance(_nics, dict):
            _nics = [_nics]
        return _nics
    @property
    def ips(self):
        """Return ([IPv4 addresses], [global IPv6 addresses]) of all NICs."""
        ipv4, ipv6 = [], []
        for nic in self.nics:
            ip = nic.get('IP')
            ip6 = nic.get('IP6_GLOBAL')
            if ip:
                ipv4.append(ip)
            if ip6:
                ipv6.append(ip6)
        return ipv4, ipv6

View file

@ -0,0 +1,10 @@
from rest_framework import serializers
from opennebula.models import VM
class OpenNebulaVMSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing the stored OpenNebula document and the derived
    properties of the VM model."""
    class Meta:
        model = VM
        # Raw model fields first, then the computed properties.
        fields = [
            'vmid',
            'owner',
            'data',
            'uncloud_name',
            'cores',
            'ram_in_gb',
            'disks',
            'nics',
            'ips',
        ]

View file

@ -0,0 +1,3 @@
from django.test import TestCase
# Create your tests here.

View file

@ -0,0 +1,16 @@
from rest_framework import viewsets, permissions
from .models import VM
from .serializers import OpenNebulaVMSerializer
class VMViewSet(viewsets.ModelViewSet):
    """API endpoint for imported OpenNebula VMs.

    Superusers can access every VM; other authenticated users only the
    VMs they own.
    """
    permission_classes = [permissions.IsAuthenticated]
    serializer_class = OpenNebulaVMSerializer

    def get_queryset(self):
        requester = self.request.user
        if requester.is_superuser:
            return VM.objects.all()
        # Non-superusers are restricted to their own VMs.
        return VM.objects.filter(owner=requester)