Compare commits

2 commits: master...uncloud-ap

| Author | SHA1 | Date |
|---|---|---|
| | f1bb1ee3ca | |
| | e5dd5e45c6 | |

500 changed files with 1314 additions and 55743 deletions
.gitignore (vendored), 15 lines changed

@@ -1,12 +1,6 @@
.idea/
.vscode/
__pycache__/
.idea
.vscode

pay.conf
log.txt
test.py
STRIPE
venv/

uncloud/docs/build
logs.txt

@@ -22,8 +16,3 @@ uncloud/version.py
build/
venv/
dist/
.history/
*.iso
*.sqlite3
.DS_Store
static/CACHE/
@@ -1,18 +0,0 @@
stages:
  - lint
  - test

run-tests:
  stage: test
  image: code.ungleich.ch:5050/uncloud/uncloud/uncloud-ci:latest
  services:
    - postgres:latest
  variables:
    DATABASE_HOST: postgres
    DATABASE_USER: postgres
    POSTGRES_HOST_AUTH_METHOD: trust
  coverage: /^TOTAL.+?(\d+\%)$/
  script:
    - pip install -r requirements.txt
    - coverage run --source='.' ./manage.py test
    - coverage report
README.md, 71 lines changed

@@ -1,70 +1,3 @@
# Uncloud
# ucloud

Cloud management platform, the ungleich way.


[![pipeline status](https://code.ungleich.ch/uncloud/uncloud/badges/master/pipeline.svg)](https://code.ungleich.ch/uncloud/uncloud/commits/master)
[![coverage report](https://code.ungleich.ch/uncloud/uncloud/badges/master/coverage.svg)](https://code.ungleich.ch/uncloud/uncloud/commits/master)

## Useful commands

* `./manage.py import-vat-rates path/to/csv`
* `./manage.py createsuperuser`

## Development setup

Install system dependencies:

* On Fedora, you will need the following packages: `python3-virtualenv python3-devel openldap-devel gcc chromium`
* On Debian/Ubuntu: `sudo apt-get install libpq-dev python-dev libxml2-dev libxslt1-dev libldap2-dev libsasl2-dev libffi-dev`

NOTE: you will need to configure an LDAP server and credentials for authentication. See `uncloud/settings.py`.

```
# Initialize virtualenv.
» virtualenv .venv
Using base prefix '/usr'
New python executable in /home/fnux/Workspace/ungleich/uncloud/uncloud/.venv/bin/python3
Also creating executable in /home/fnux/Workspace/ungleich/uncloud/uncloud/.venv/bin/python
Installing setuptools, pip, wheel...
done.

# Enter virtualenv.
» source .venv/bin/activate

# Install dependencies.
» pip install -r requirements.txt
[...]

# Run migrations.
» ./manage.py migrate
Operations to perform:
  Apply all migrations: admin, auth, contenttypes, opennebula, sessions, uncloud_auth, uncloud_net, uncloud_pay, uncloud_service, uncloud_vm
Running migrations:
[...]

# Run webserver.
» ./manage.py runserver
Watching for file changes with StatReloader
Performing system checks...

System check identified no issues (0 silenced).
May 07, 2020 - 10:17:08
Django version 3.0.6, using settings 'uncloud.settings'
Starting development server at http://127.0.0.1:8000/
Quit the server with CONTROL-C.
```
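
The NOTE above points at `uncloud/settings.py` for the LDAP configuration, but the diff does not show what that configuration looks like. A minimal sketch, assuming the django-auth-ldap backend (an assumption; server, bind DN and search base below are placeholders, not taken from this repository):

```
# Hypothetical excerpt for uncloud/settings.py -- assumes the django-auth-ldap package.
import ldap
from django_auth_ldap.config import LDAPSearch

AUTH_LDAP_SERVER_URI = "ldaps://ldap1.ungleich.ch"              # placeholder server
AUTH_LDAP_BIND_DN = "uid=service,ou=system,dc=example,dc=ch"    # placeholder credentials
AUTH_LDAP_BIND_PASSWORD = "change-me"
AUTH_LDAP_USER_SEARCH = LDAPSearch(
    "ou=users,dc=example,dc=ch", ldap.SCOPE_SUBTREE, "(uid=%(user)s)"
)

AUTHENTICATION_BACKENDS = [
    "django_auth_ldap.backend.LDAPBackend",
    "django.contrib.auth.backends.ModelBackend",
]
```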

### Run Background Job Queue

We use Django Q to handle asynchronous code and background cron jobs.
Before starting the workers, make sure that Redis (or whichever Django Q broker you use) is running; its settings can be edited in the settings file.

```
./manage.py qcluster
```
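
The `qcluster` command reads its broker configuration from the Django settings, which are not shown in this diff. A minimal sketch of a Redis-backed `Q_CLUSTER` block, assuming the django-q package (all values illustrative):

```
# Hypothetical Q_CLUSTER block for uncloud/settings.py -- assumes django-q with a Redis broker.
Q_CLUSTER = {
    'name': 'uncloud',    # arbitrary cluster name
    'workers': 4,         # number of worker processes
    'timeout': 60,        # seconds a task may run
    'retry': 120,         # should be larger than timeout
    'redis': {
        'host': '127.0.0.1',
        'port': 6379,
        'db': 0,
    },
}
```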

### Note on PGSQL

If you want to use Postgres:

* Install and configure PGSQL on your base system.
* OR use a container! `podman run --rm -p 5432:5432 -e POSTGRES_HOST_AUTH_METHOD=trust -it postgres:latest`

Check out https://ungleich.ch/ucloud/ for the ucloud documentation.
@@ -1,6 +0,0 @@
* Intro
This file lists issues that should be handled, are small and likely
not yet high prio.
* Issues
** TODO Register preferred address in User model
** TODO Allow to specify different recurring periods
@@ -1,55 +0,0 @@
"""
investigate into a simple python function that maps an ldap user to a vat percentage. Basically you need to
lookup the customer address, check if she is a business/registered tax number and if not apply the local
vat
"""

import iso3166
import datetime

from csv import DictReader


def get_vat(street_address, city, postal_code, country, vat_number=None):
    vat = {
        'Austria': [
            {'period': '1984-01-01/', 'rate': 0.2},
            {'period': '1976-01-01/1984-01-01', 'rate': 0.18},
            {'period': '1973-01-01/1976-01-01', 'rate': 0.16},
        ]
    }
    return iso3166.countries.get(country)
    # return iso3166.countries_by_name[country]


def main():
    # vat = get_vat(
    #     street_address='82 Nasheman-e-Iqbal near Wapda Town',
    #     city='Lahore',
    #     postal_code=53700,
    #     country='Pakistan',
    # )
    # print(vat)
    vat_rates = {}
    with open('vat_rates.csv', newline='') as csvfile:
        reader = DictReader(csvfile)
        for row in reader:
            territory_codes = row['territory_codes'].split('\n')
            for code in territory_codes:
                if code not in vat_rates:
                    vat_rates[code] = {}

                start_date = row['start_date']
                stop_data = row['stop_date']
                time_period = f'{start_date}|{stop_data}'
                r = row.copy()
                del r['start_date']
                del r['stop_date']
                del r['territory_codes']
                vat_rates[code][time_period] = r
    print(vat_rates)


if __name__ == '__main__':
    main()
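
The script above only prints the parsed `vat_rates` mapping; the diff does not show how a rate would be looked up for a given date. A small sketch of such a lookup, assuming `start_date`/`stop_date` are ISO date strings as stored in the CSV (the helper name is hypothetical):

```
from datetime import date

def rate_for(vat_rates, territory_code, on_day):
    """Return the rate row whose [start_date, stop_date) period contains on_day."""
    for period, row in vat_rates.get(territory_code, {}).items():
        start, stop = period.split('|')
        starts_ok = not start or date.fromisoformat(start) <= on_day
        stops_ok = not stop or on_day < date.fromisoformat(stop)
        if starts_ok and stops_ok:
            return row
    return None

# Example: rate_for(vat_rates, 'AT', date(2020, 5, 1))
```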
@@ -1,46 +0,0 @@
import importlib
import sys
import os

from os.path import join as join_path
from xmlrpc.client import ServerProxy as RPCClient

root = os.path.dirname(os.getcwd())
sys.path.append(join_path(root, 'uncloud'))
secrets = importlib.import_module('uncloud.secrets')


class OpenNebula:
    def __init__(self, url, session_string):
        self.session_string = session_string
        self.client = RPCClient(secrets.OPENNEBULA_URL)

    def create_user(self, username, password, authentication_driver='', group_id=None):
        # https://docs.opennebula.org/5.10/integration/system_interfaces/api.html#one-user-allocate

        if group_id is None:
            group_id = []

        return self.client.one.user.allocate(
            self.session_string,
            username,
            password,
            authentication_driver,
            group_id
        )

    def chmod(self, vm_id, user_id=-1, group_id=-1):
        # https://docs.opennebula.org/5.10/integration/system_interfaces/api.html#one-vm-chown

        return self.client.one.vm.chown(self.session_string, vm_id, user_id, group_id)


one = OpenNebula(secrets.OPENNEBULA_URL, secrets.OPENNEBULA_USER_PASS)

# Create User in OpenNebula
# success, response, *_ = one.create_user(username='meow12345', password='hello_world')
# print(success, response)

# Change owner of a VM
# success, response, *_ = one.chmod(vm_id=25589, user_id=706)
# print(success, response)
@@ -1,18 +0,0 @@
#!/bin/sh

dbhost=$1; shift

ssh -L5432:localhost:5432 "$dbhost" &

python manage.py "$@"


# command only needs to be active while manage command is running

# -T no pseudo terminal

# alternatively: commands output shell code

# ssh uncloud@dbhost "python manage.py --hostname xxx ..."
@@ -1,51 +0,0 @@
# uncloud-pay

The generic product/payment system.

## Installation

```shell script
pip3 install -r requirements.txt
```

## Getting Started

```shell script
python ucloud_pay.py
```

## Usage

#### 1. Adding products
```shell script
http --json http://[::]:5000/product/add username=your_username_here password=your_password_here specs:=@ipv6-only-vm.json
```

#### 2. Listing products
```shell script
http --json http://[::]:5000/product/list
```

#### 3. Registering a user's payment method (credit card for now, using Stripe)

```shell script
http --json http://[::]:5000/user/register_payment card_number=4111111111111111 cvc=123 expiry_year=2020 expiry_month=8 card_holder_name="The test user" username=your_username_here password=your_password_here line1="your_billing_address" city="your_city" country="your_country"
```

#### 4. Ordering products

The user has to buy a membership first.

```shell script
http --json http://[::]:5000/product/order username=your_username_here password=your_password_here product_id=membership pay=True
```

```shell script
http --json http://[::]:5000/product/order username=your_username_here password=your_password_here product_id=ipv6-only-vm cpu=1 ram=1 os-disk-space=10 os=alpine pay=True
```

#### 5. Listing a user's orders

```shell script
http --json POST http://[::]:5000/order/list username=your_username_here password=your_password_here
```
@@ -1,21 +0,0 @@
import os

from ungleich_common.ldap.ldap_manager import LdapManager
from ungleich_common.std.configparser import StrictConfigParser
from ungleich_common.etcd.etcd_wrapper import EtcdWrapper

config_file = os.environ.get('meow-pay-config-file', default='pay.conf')

config = StrictConfigParser(allow_no_value=True)
config.read(config_file)

etcd_client = EtcdWrapper(
    host=config.get('etcd', 'host'), port=config.get('etcd', 'port'),
    ca_cert=config.get('etcd', 'ca_cert'), cert_key=config.get('etcd', 'cert_key'),
    cert_cert=config.get('etcd', 'cert_cert')
)

ldap_manager = LdapManager(
    server=config.get('ldap', 'server'), admin_dn=config.get('ldap', 'admin_dn'),
    admin_password=config.get('ldap', 'admin_password')
)
@@ -1,213 +0,0 @@
from flask import Flask, request
from flask_restful import Resource, Api
import etcd3
import json
import logging
from functools import wraps

from ldaptest import is_valid_ldap_user


def authenticate(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not getattr(func, 'authenticated', True):
            return func(*args, **kwargs)

        # pass in username/password !
        acct = basic_authentication()  # custom account lookup function

        if acct:
            return func(*args, **kwargs)

        flask_restful.abort(401)
    return wrapper


def readable_errors(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except etcd3.exceptions.ConnectionFailedError as e:
            raise UncloudException('Cannot connect to etcd: is etcd running and reachable? {}'.format(e))
        except etcd3.exceptions.ConnectionTimeoutError as e:
            raise UncloudException('etcd connection timeout. {}'.format(e))

    return wrapper


class DB(object):
    def __init__(self, config, prefix="/"):
        self.config = config

        # Root for everything
        self.base_prefix = '/nicohack'

        # Can be set from outside
        self.prefix = prefix

        self.connect()

    @readable_errors
    def connect(self):
        self._db_clients = []
        for endpoint in self.config.etcd_hosts:
            client = etcd3.client(host=endpoint, **self.config.etcd_args)
            self._db_clients.append(client)

    def realkey(self, key):
        return "{}{}/{}".format(self.base_prefix,
                                self.prefix,
                                key)

    @readable_errors
    def get(self, key, as_json=False, **kwargs):
        value, _ = self._db_clients[0].get(self.realkey(key), **kwargs)

        if as_json:
            value = json.loads(value)

        return value

    @readable_errors
    def set(self, key, value, as_json=False, **kwargs):
        if as_json:
            value = json.dumps(value)

        # FIXME: iterate over clients in case of failure ?
        return self._db_clients[0].put(self.realkey(key), value, **kwargs)


class Membership(Resource):
    def __init__(self, config):
        self.config = config

    def get(self):
        data = request.get_json(silent=True) or {}
        print("{} {}".format(data, config))
        return {'message': 'Order successful'}, 200

    def post(self):
        data = request.get_json(silent=True) or {}
        print("{} {}".format(data, config))
        return {'message': 'Order 2x successful'}, 200


class Order(Resource):
    def __init__(self, config):
        self.config = config

    @staticmethod
    def post():
        data = request.get_json(silent=True) or {}
        print("{} {}".format(data, config))


class Product(Resource):
    def __init__(self, config):
        self.config = config

        self.products = []
        self.products.append(
            {
                "name": "membership-free",
                "description": """
                This membership gives you access to the API and includes a VPN
                with 1 IPv6 address.
                See https://redmine.ungleich.ch/issues/7747?
                """,
                "uuid": "a3883466-0012-4d01-80ff-cbf7469957af",
                "recurring": True,
                "recurring_time_frame": "per_year",
                "features": [
                    {
                        "name": "membership",
                        "price_one_time": 0,
                        "price_recurring": 0
                    }
                ]
            }
        )
        self.products.append(
            {
                "name": "membership-standard",
                "description": """
                This membership gives you access to the API and includes an IPv6-VPN with
                one IPv6 address ("Road warrior")
                See https://redmine.ungleich.ch/issues/7747?
                """,
                "uuid": "1d85296b-0863-4dd6-a543-a6d5a4fbe4a6",
                "recurring": True,
                "recurring_time_frame": "per_month",
                "features": [
                    {
                        "name": "membership",
                        "price_one_time": 0,
                        "price_recurring": 5
                    }
                ]
            }
        )
        self.products.append(
            {
                "name": "membership-premium",
                "description": """
                This membership gives you access to the API and includes an
                IPv6-VPN with a /48 IPv6 network.
                See https://redmine.ungleich.ch/issues/7747?
                """,
                "uuid": "bfd63fd2-d227-436f-a8b8-600de74dd6ce",
                "recurring": True,
                "recurring_time_frame": "per_month",
                "features": [
                    {
                        "name": "membership",
                        "price_one_time": 0,
                        "price_recurring": 5
                    }
                ]
            }
        )
        self.products.append(
            {
                "name": "ipv6-vpn-with-/48",
                "description": """
                An IPv6 VPN with a /48 network included.
                """,
                "uuid": "fe5753f8-6fe1-4dc4-9b73-7b803de4c597",
                "recurring": True,
                "recurring_time_frame": "per_year",
                "features": [
                    {
                        "name": "vpn",
                        "price_one_time": 0,
                        "price_recurring": 120
                    }
                ]
            }
        )

    @staticmethod
    def post():
        data = request.get_json(silent=True) or {}
        print("{} {}".format(data, config))

    def get(self):
        data = request.get_json(silent=True) or {}
        print("{} {}".format(data, config))

        return self.products


if __name__ == '__main__':
    app = Flask(__name__)

    config = {}

    config['etcd_url'] = "https://etcd1.ungleich.ch"
    config['ldap_url'] = "ldaps://ldap1.ungleich.ch"

    api = Api(app)
    api.add_resource(Order, '/orders', resource_class_args=(config, ))
    api.add_resource(Product, '/products', resource_class_args=(config, ))
    api.add_resource(Membership, '/membership', resource_class_args=(config, ))

    app.run(host='::', port=5000, debug=True)
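
For reference, a short sketch of how the `DB` wrapper in the file above would be used. The config object here is hypothetical and only mirrors the attributes (`etcd_hosts`, `etcd_args`) that `DB.connect()` reads:

```
from types import SimpleNamespace

# Hypothetical config carrying the attributes DB.connect() expects.
cfg = SimpleNamespace(etcd_hosts=['localhost'], etcd_args={})

db = DB(cfg, prefix='/products')
db.set('vm-small', {'cpu': 1, 'ram': 2}, as_json=True)   # stored under /nicohack/products/vm-small
print(db.get('vm-small', as_json=True))                  # -> {'cpu': 1, 'ram': 2}
```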
@@ -1,87 +0,0 @@
import logging

import parsedatetime

from datetime import datetime
from stripe_utils import StripeUtils


def get_plan_id_from_product(product):
    plan_id = 'ucloud-v1-'
    plan_id += product['name'].strip().replace(' ', '-')
    return plan_id


def get_pricing(price_in_chf_cents, product_type, recurring_period):
    if product_type == 'recurring':
        return 'CHF {}/{}'.format(price_in_chf_cents/100, recurring_period)
    elif product_type == 'one-time':
        return 'CHF {} (One time charge)'.format(price_in_chf_cents/100)


def get_user_friendly_product(product_dict):
    uf_product = {
        'name': product_dict['name'],
        'description': product_dict['description'],
        'product_id': product_dict['usable-id'],
        'pricing': get_pricing(
            product_dict['price'], product_dict['type'], product_dict['recurring_period']
        )
    }
    if product_dict['type'] == 'recurring':
        uf_product['minimum_subscription_period'] = product_dict['minimum_subscription_period']
    return uf_product


def get_token(card_number, cvc, exp_month, exp_year):
    stripe_utils = StripeUtils()
    token_response = stripe_utils.get_token_from_card(
        card_number, cvc, exp_month, exp_year
    )
    if token_response['response_object']:
        return token_response['response_object'].id
    else:
        return None


def resolve_product(usable_id, etcd_client):
    products = etcd_client.get_prefix('/v1/products/', value_in_json=True)
    for p in products:
        if p.value['usable-id'] == usable_id:
            return p.value
    return None


def calculate_charges(specification, data):
    logging.debug('Calculating charges for specs:{} and data:{}'.format(specification, data))
    one_time_charge = 0
    recurring_charge = 0
    for feature_name, feature_detail in specification['features'].items():
        if feature_detail['constant']:
            data[feature_name] = 1

        if feature_detail['unit']['type'] != 'str':
            one_time_charge += feature_detail['one_time_fee']
            recurring_charge += (
                feature_detail['price_per_unit_per_period'] * data[feature_name] /
                feature_detail['unit']['value']
            )
    return one_time_charge, recurring_charge


def is_order_valid(order_timestamp, renewal_period):
    """
    Sample Code Usage

    >> current_datetime, status = cal.parse('Now')
    >> current_datetime = datetime(*current_datetime[:6])

    >> print('Is order valid: ', is_order_valid(current_datetime, '1 month'))
    >> True
    """
    cal = parsedatetime.Calendar()

    renewal_datetime, status = cal.parse(renewal_period)
    renewal_datetime = datetime(*renewal_datetime[:6])

    return order_timestamp <= renewal_datetime
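
To make the pricing arithmetic in `calculate_charges` concrete, here is a worked example using the `ipv6-only-vm` spec that appears later in this diff (the order data is chosen for illustration, matching the README usage example):

```
spec = {'features': {
    'cpu': {'unit': {'value': 1, 'type': 'int'}, 'price_per_unit_per_period': 3, 'one_time_fee': 0, 'constant': False},
    'ram': {'unit': {'value': 1, 'type': 'int'}, 'price_per_unit_per_period': 4, 'one_time_fee': 0, 'constant': False},
    'os-disk-space': {'unit': {'value': 10, 'type': 'int'}, 'price_per_unit_per_period': 4, 'one_time_fee': 0, 'constant': False},
    'os': {'unit': {'value': 1, 'type': 'str'}, 'price_per_unit_per_period': 0, 'one_time_fee': 0, 'constant': False},
}}
data = {'cpu': 1, 'ram': 1, 'os-disk-space': 10, 'os': 'alpine'}

one_time, recurring = calculate_charges(spec, data)
# one_time == 0
# recurring == 3*1/1 + 4*1/1 + 4*10/10 == 11 CHF per recurring period ('os' is a str feature and is skipped)
```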
@@ -1,28 +0,0 @@
{
  "usable-id": "ipv6-only-django-hosting",
  "active": true,
  "name": "IPv6 Only Django Hosting",
  "description": "Host your Django application on our shiny IPv6 Only VM",
  "recurring_period": "month",
  "quantity": "inf",
  "features": {
    "cpu": {
      "unit": {"value": 1, "type": "int"},
      "price_per_unit_per_period": 3,
      "one_time_fee": 0,
      "constant": false
    },
    "ram": {
      "unit": {"value": 1, "type": "int"},
      "price_per_unit_per_period": 4,
      "one_time_fee": 0,
      "constant": false
    },
    "os-disk-space": {
      "unit": {"value": 10, "type": "int"},
      "one_time_fee": 0,
      "price_per_unit_per_period": 3.5,
      "constant": false
    }
  }
}
@@ -1,34 +0,0 @@
{
  "usable-id": "ipv6-only-vm",
  "active": true,
  "name": "IPv6 Only VM",
  "description": "IPv6 Only VM are accessible to only those having IPv6 for themselves",
  "recurring_period": "month",
  "quantity": "inf",
  "features": {
    "cpu": {
      "unit": {"value": 1, "type": "int"},
      "price_per_unit_per_period": 3,
      "one_time_fee": 0,
      "constant": false
    },
    "ram": {
      "unit": {"value": 1, "type": "int"},
      "price_per_unit_per_period": 4,
      "one_time_fee": 0,
      "constant": false
    },
    "os-disk-space": {
      "unit": {"value": 10, "type": "int"},
      "one_time_fee": 0,
      "price_per_unit_per_period": 4,
      "constant": false
    },
    "os": {
      "unit": {"value": 1, "type": "str"},
      "one_time_fee": 0,
      "price_per_unit_per_period": 0,
      "constant": false
    }
  }
}
@@ -1,16 +0,0 @@
{
  "usable-id": "ipv6-only-vpn",
  "active": true,
  "name": "IPv6 Only VPN",
  "description": "IPv6 VPN enable you to access IPv6 only websites and more",
  "recurring_period": "month",
  "quantity": "inf",
  "features": {
    "vpn": {
      "unit": {"value": 1, "type": "int"},
      "price_per_unit_per_period": 10,
      "one_time_fee": 0,
      "constant": true
    }
  }
}
@@ -1,16 +0,0 @@
{
  "usable-id": "ipv6-box",
  "active": true,
  "name": "IPv6 Box",
  "description": "A ready-to-go IPv6 Box: it creates a VPN to ungleich and distributes IPv6 addresses to all your computers.",
  "recurring_period": "eternity",
  "quantity": 4,
  "features": {
    "ipv6-box": {
      "unit": {"value": 1, "type": "int"},
      "price_per_unit_per_period": 0,
      "one_time_fee": 250,
      "constant": true
    }
  }
}
@@ -1,17 +0,0 @@
{
  "usable-id": "membership",
  "active": true,
  "name": "Membership",
  "description": "Membership to use uncloud-pay",
  "recurring_period": "month",
  "quantity": "inf",
  "features": {
    "membership": {
      "unit": {"value": 1, "type": "int"},
      "price_per_unit_per_period": 5,
      "one_time_fee": 0,
      "constant": true
    }
  },
  "max_per_user": "1"
}
@@ -1,7 +0,0 @@
stripe
flask
Flask-RESTful
git+https://code.ungleich.ch/ahmedbilal/ungleich-common/#egg=ungleich-common-etcd&subdirectory=etcd
git+https://code.ungleich.ch/ahmedbilal/ungleich-common/#egg=ungleich-common-ldap&subdirectory=ldap
git+https://code.ungleich.ch/ahmedbilal/ungleich-common/#egg=ungleich-common-std&subdirectory=std
git+https://code.ungleich.ch/ahmedbilal/ungleich-common/#egg=ungleich-common-schemas&subdirectory=schemas
@@ -1,17 +0,0 @@
[etcd]
host = 127.0.0.1
port = 2379
ca_cert
cert_cert
cert_key

[stripe]
private_key=stripe_private_key

[app]
port = 5000

[ldap]
server = ldap_server_url
admin_dn = ldap_admin_dn
admin_password = ldap_admin_password
@@ -1,136 +0,0 @@
import logging
import config
import json
import math

from config import ldap_manager, etcd_client
from helper import resolve_product
from ungleich_common.schemas.schemas import BaseSchema, Field, ValidationException


class AddProductSchema(BaseSchema):
    def __init__(self, data):
        super().__init__()
        self.add_schema(UserCredentialSchema, data)
        self.specs = Field('specs', dict, **self.get(data, 'specs'))
        self.update = Field('update', bool, **self.get(data, 'update', return_default=True, default=False))

    def validation(self):
        user = self.objects['user']
        user = json.loads(user.entry_to_json())
        uid, ou, *dc = user['dn'].replace('ou=', '').replace('dc=', '').replace('uid=', '').split(',')
        if ou != config.config.get('ldap', 'internal_user_ou', fallback='users'):
            raise ValidationException('You do not have access to create product.')

        product = resolve_product(self.specs.value['usable-id'], etcd_client)
        if product:
            self.objects['product'] = product


class AddressSchema(BaseSchema):
    def __init__(self, data):
        super().__init__()
        self.line1 = Field('line1', str, **self.get(data, 'line1'))
        self.line2 = Field('line2', str, **self.get(data, 'line2', return_default=True))
        self.city = Field('city', str, **self.get(data, 'city'))
        self.country = Field('country', str, **self.get(data, 'country'))
        self.state = Field('state', str, **self.get(data, 'state', return_default=True))
        self.postal_code = Field('postal_code', str, **self.get(data, 'postal_code', return_default=True))


class UserRegisterPaymentSchema(BaseSchema):
    def __init__(self, data):
        super().__init__()

        self.add_schema(UserCredentialSchema, data)
        self.add_schema(AddressSchema, data, under_field_name='address')

        self.card_number = Field('card_number', str, **self.get(data, 'card_number'))
        self.cvc = Field('cvc', str, **self.get(data, 'cvc'))
        self.expiry_year = Field('expiry_year', int, **self.get(data, 'expiry_year'))
        self.expiry_month = Field('expiry_month', int, **self.get(data, 'expiry_month'))
        self.card_holder_name = Field('card_holder_name', str, **self.get(data, 'card_holder_name'))


class UserCredentialSchema(BaseSchema):
    def __init__(self, data):
        super().__init__()
        self.username = Field('username', str, **self.get(data, 'username'))
        self.password = Field('password', str, **self.get(data, 'password'))

    def validation(self):
        try:
            entry = ldap_manager.is_password_valid(self.username.value, self.password.value, query_key='uid')
        except ValueError:
            raise ValidationException('No user with \'{}\' username found. You can create account at '
                                      'https://account.ungleich.ch'.format(self.username.value))
        except Exception:
            raise ValidationException('Invalid username/password.')
        else:
            self.objects['user'] = entry


class ProductOrderSchema(BaseSchema):
    def __init__(self, data):
        super().__init__()
        self.product_id = Field(
            'product_id', str, **self.get(data, 'product_id'), validators=[self.product_id_validation]
        )
        self.pay_consent = Field('pay', bool, **self.get(data, 'pay', return_default=True, default=False))
        self.add_schema(UserCredentialSchema, data)

    def product_id_validation(self):
        product = resolve_product(self.product_id.value, etcd_client)
        if product:
            product['quantity'] = float(product['quantity'])
            self.product_id.value = product['uuid']
            self.objects['product'] = product
            logging.debug('Got product {}'.format(product))

            if not product['active']:
                raise ValidationException('Product is not active at the moment.')

            if product['quantity'] <= 0:
                raise ValidationException('Out of stock.')
        else:
            raise ValidationException('No such product exists.')

    def validation(self):
        username = self.objects['user'].uid
        customer_previous_orders = etcd_client.get_prefix('/v1/user/{}'.format(username), value_in_json=True)
        customer_previous_orders = [o.value for o in customer_previous_orders]
        membership = next(filter(lambda o: o['product'] == 'membership', customer_previous_orders), None)
        if membership is None and self.objects['product']['usable-id'] != 'membership':
            raise ValidationException('Please buy membership first to use this facility')
        max_quantity_user_can_order = float(self.objects['product'].get('max_per_user', math.inf))
        previous_order_of_same_product = [
            o for o in customer_previous_orders if o['product'] == self.objects['product']['usable-id']
        ]
        if len(previous_order_of_same_product) >= max_quantity_user_can_order:
            raise ValidationException(
                'You cannot buy {} more than {} times'.format(
                    self.objects['product']['name'], int(max_quantity_user_can_order)
                )
            )


class OrderListSchema(BaseSchema):
    def __init__(self, data):
        super().__init__()
        self.add_schema(UserCredentialSchema, data)


def make_return_message(err, status_code=200):
    logging.debug('message: {}'.format(str(err)))
    return {'message': str(err)}, status_code


def create_schema(specification, data):
    fields = {}
    for feature_name, feature_detail in specification['features'].items():
        if not feature_detail['constant']:
            fields[feature_name] = Field(
                feature_name, eval(feature_detail['unit']['type']), **BaseSchema.get(data, feature_name)
            )

    return type('{}Schema'.format(specification['name']), (BaseSchema,), fields)
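
`create_schema` above builds a per-product schema class at runtime from the non-constant features of a product spec (note that it `eval`s the unit type string, so only trusted specs should ever reach it). A short sketch of how it is used, mirroring the call in `ucloud_pay.py` further down; the spec and order data here are illustrative:

```
spec = {'name': 'IPv6 Only VM', 'features': {
    'cpu': {'unit': {'value': 1, 'type': 'int'}, 'constant': False},
    'os': {'unit': {'value': 1, 'type': 'str'}, 'constant': False},
}}
order_data = {'cpu': 1, 'os': 'alpine'}

ProductSchema = create_schema(spec, order_data)  # dynamically built BaseSchema subclass
schema = ProductSchema()
schema.is_valid()                                # runs the generated Field validations
print(schema.get_cleaned_values())
```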
@@ -1,7 +0,0 @@
import stripe_utils
import os


if __name__ == '__main__':
    s = stripe_utils.StripeUtils(os.environ['STRIPE_PRIVATE_KEY'])
    print(s.get_stripe_customer_from_email('coder.purple+2002@gmail.com'))
@@ -1,491 +0,0 @@
import re
import stripe
import stripe.error
import logging

from config import etcd_client as client, config as config

stripe.api_key = config.get('stripe', 'private_key')


def handle_stripe_error(f):
    def handle_problems(*args, **kwargs):
        response = {
            'paid': False,
            'response_object': None,
            'error': None
        }

        common_message = "Currently it's not possible to make payments."
        try:
            response_object = f(*args, **kwargs)
            response = {
                'response_object': response_object,
                'error': None
            }
            return response
        except stripe.error.CardError as e:
            # Since it's a decline, stripe.error.CardError will be caught
            body = e.json_body
            err = body['error']
            response.update({'error': err['message']})
            logging.error(str(e))
            return response
        except stripe.error.RateLimitError:
            response.update(
                {'error': "Too many requests made to the API too quickly"})
            return response
        except stripe.error.InvalidRequestError as e:
            logging.error(str(e))
            response.update({'error': "Invalid parameters"})
            return response
        except stripe.error.AuthenticationError as e:
            # Authentication with Stripe's API failed
            # (maybe you changed API keys recently)
            logging.error(str(e))
            response.update({'error': common_message})
            return response
        except stripe.error.APIConnectionError as e:
            logging.error(str(e))
            response.update({'error': common_message})
            return response
        except stripe.error.StripeError as e:
            # maybe send email
            logging.error(str(e))
            response.update({'error': common_message})
            return response
        except Exception as e:
            # maybe send email
            logging.error(str(e))
            response.update({'error': common_message})
            return response

    return handle_problems


class StripeUtils(object):
    CURRENCY = 'chf'
    INTERVAL = 'month'
    SUCCEEDED_STATUS = 'succeeded'
    STRIPE_PLAN_ALREADY_EXISTS = 'Plan already exists'
    STRIPE_NO_SUCH_PLAN = 'No such plan'
    PLAN_EXISTS_ERROR_MSG = 'Plan {} exists already.\nCreating a local StripePlan now.'
    PLAN_DOES_NOT_EXIST_ERROR_MSG = 'Plan {} does not exist.'

    def __init__(self, private_key):
        self.stripe = stripe
        stripe.api_key = private_key

    @handle_stripe_error
    def card_exists(self, customer, cc_number, exp_month, exp_year, cvc):
        token_obj = stripe.Token.create(
            card={
                'number': cc_number,
                'exp_month': exp_month,
                'exp_year': exp_year,
                'cvc': cvc,
            },
        )
        cards = stripe.Customer.list_sources(
            customer,
            limit=20,
            object='card'
        )

        for card in cards.data:
            if (card.fingerprint == token_obj.card.fingerprint and
                    int(card.exp_month) == int(exp_month) and int(card.exp_year) == int(exp_year)):
                return True
        return False

    @staticmethod
    def get_stripe_customer_from_email(email):
        customer = stripe.Customer.list(limit=1, email=email)
        return customer.data[0] if len(customer.data) == 1 else None

    @staticmethod
    def update_customer_token(customer, token):
        customer.source = token
        customer.save()

    @handle_stripe_error
    def get_token_from_card(self, cc_number, cvc, expiry_month, expiry_year):
        token_obj = stripe.Token.create(
            card={
                'number': cc_number,
                'exp_month': expiry_month,
                'exp_year': expiry_year,
                'cvc': cvc,
            },
        )
        return token_obj

    @handle_stripe_error
    def associate_customer_card(self, stripe_customer_id, token,
                                set_as_default=False):
        customer = stripe.Customer.retrieve(stripe_customer_id)
        card = customer.sources.create(source=token)
        if set_as_default:
            customer.default_source = card.id
            customer.save()
        return True

    @handle_stripe_error
    def dissociate_customer_card(self, stripe_customer_id, card_id):
        customer = stripe.Customer.retrieve(stripe_customer_id)
        card = customer.sources.retrieve(card_id)
        card.delete()

    @handle_stripe_error
    def update_customer_card(self, customer_id, token):
        customer = stripe.Customer.retrieve(customer_id)
        current_card_token = customer.default_source
        customer.sources.retrieve(current_card_token).delete()
        customer.source = token
        customer.save()
        credit_card_raw_data = customer.sources.data.pop()
        new_card_data = {
            'last4': credit_card_raw_data.last4,
            'brand': credit_card_raw_data.brand
        }
        return new_card_data

    @handle_stripe_error
    def get_card_details(self, customer_id):
        customer = stripe.Customer.retrieve(customer_id)
        credit_card_raw_data = customer.sources.data.pop()
        card_details = {
            'last4': credit_card_raw_data.last4,
            'brand': credit_card_raw_data.brand,
            'exp_month': credit_card_raw_data.exp_month,
            'exp_year': credit_card_raw_data.exp_year,
            'fingerprint': credit_card_raw_data.fingerprint,
            'card_id': credit_card_raw_data.id
        }
        return card_details

    @handle_stripe_error
    def get_all_invoices(self, customer_id, created_gt):
        return_list = []
        has_more_invoices = True
        starting_after = False
        while has_more_invoices:
            if starting_after:
                invoices = stripe.Invoice.list(
                    limit=10, customer=customer_id, created={'gt': created_gt},
                    starting_after=starting_after
                )
            else:
                invoices = stripe.Invoice.list(
                    limit=10, customer=customer_id, created={'gt': created_gt}
                )
            has_more_invoices = invoices.has_more
            for invoice in invoices.data:
                sub_ids = []
                for line in invoice.lines.data:
                    if line.type == 'subscription':
                        sub_ids.append(line.id)
                    elif line.type == 'invoiceitem':
                        sub_ids.append(line.subscription)
                    else:
                        sub_ids.append('')
                invoice_details = {
                    'created': invoice.created,
                    'receipt_number': invoice.receipt_number,
                    'invoice_number': invoice.number,
                    'paid_at': invoice.status_transitions.paid_at if invoice.paid else 0,
                    'period_start': invoice.period_start,
                    'period_end': invoice.period_end,
                    'billing_reason': invoice.billing_reason,
                    'discount': invoice.discount.coupon.amount_off if invoice.discount else 0,
                    'total': invoice.total,
                    # to see how many line items we have in this invoice and
                    # then later check if we have more than 1
                    'lines_data_count': len(invoice.lines.data) if invoice.lines.data is not None else 0,
                    'invoice_id': invoice.id,
                    'lines_meta_data_csv': ','.join(
                        [line.metadata.VM_ID if hasattr(line.metadata, 'VM_ID') else '' for line in invoice.lines.data]
                    ),
                    'subscription_ids_csv': ','.join(sub_ids),
                    'line_items': invoice.lines.data
                }
                starting_after = invoice.id
                return_list.append(invoice_details)
        return return_list

    @handle_stripe_error
    def get_cards_details_from_token(self, token):
        stripe_token = stripe.Token.retrieve(token)
        card_details = {
            'last4': stripe_token.card.last4,
            'brand': stripe_token.card.brand,
            'exp_month': stripe_token.card.exp_month,
            'exp_year': stripe_token.card.exp_year,
            'fingerprint': stripe_token.card.fingerprint,
            'card_id': stripe_token.card.id
        }
        return card_details

    def check_customer(self, stripe_cus_api_id, user, token):
        try:
            customer = stripe.Customer.retrieve(stripe_cus_api_id)
        except stripe.error.InvalidRequestError:
            customer = self.create_customer(token, user.email, user.name)
            user.stripecustomer.stripe_id = customer.get(
                'response_object').get('id')
            user.stripecustomer.save()
        if type(customer) is dict:
            customer = customer['response_object']
        return customer

    @handle_stripe_error
    def get_customer(self, stripe_api_cus_id):
        customer = stripe.Customer.retrieve(stripe_api_cus_id)
        # data = customer.get('response_object')
        return customer

    @handle_stripe_error
    def create_customer(self, token, email, name=None, address=None):
        if name is None or name.strip() == "":
            name = email
        customer = self.stripe.Customer.create(
            source=token,
            description=name,
            email=email,
            address=address
        )
        return customer

    @handle_stripe_error
    def make_charge(self, amount=None, customer=None):
        _amount = float(amount)
        amount = int(_amount * 100)  # stripe amount unit, in cents
        charge = self.stripe.Charge.create(
            amount=amount,  # in cents
            currency=self.CURRENCY,
            customer=customer
        )
        return charge

    @staticmethod
    def _get_all_stripe_plans():
        all_stripe_plans = client.get("/v1/stripe_plans")
        all_stripe_plans_set = set()
        if all_stripe_plans:
            all_stripe_plans_obj = all_stripe_plans.value
            if all_stripe_plans_obj and len(all_stripe_plans_obj['plans']) > 0:
                all_stripe_plans_set = set(all_stripe_plans_obj["plans"])
        return all_stripe_plans_set

    @staticmethod
    def _save_all_stripe_plans(stripe_plans):
        client.put("/v1/stripe_plans", {"plans": list(stripe_plans)})

    @handle_stripe_error
    def get_or_create_stripe_plan(self, product_name, amount, stripe_plan_id,
                                  interval=INTERVAL):
        """
        This function checks if a StripePlan with the given
        stripe_plan_id already exists. If it exists then the function
        returns this object otherwise it creates a new StripePlan and
        returns the new object.

        :param amount: The amount in CHF cents
        :param product_name: The name of the Stripe plan (product) to be created.
        :param stripe_plan_id: The id of the Stripe plan to be
            created. Use get_stripe_plan_id_string function to
            obtain the name of the plan to be created
        :param interval: The interval for subscription {month, year}. Defaults
            to month if not provided
        :return: The StripePlan object if it exists else creates a
            Plan object in Stripe and a local StripePlan and
            returns it. Returns None in case of Stripe error
        """
        _amount = float(amount)
        amount = int(_amount * 100)  # stripe amount unit, in cents
        all_stripe_plans = self._get_all_stripe_plans()
        if stripe_plan_id in all_stripe_plans:
            logging.debug("{} plan exists in db.".format(stripe_plan_id))
        else:
            logging.debug(("{} plan DOES NOT exist in db. "
                           "Creating").format(stripe_plan_id))
            try:
                plan_obj = self.stripe.Plan.retrieve(id=stripe_plan_id)
                logging.debug("{} plan exists in Stripe".format(stripe_plan_id))
                all_stripe_plans.add(stripe_plan_id)
            except stripe.error.InvalidRequestError as e:
                if "No such plan" in str(e):
                    logging.debug("Plan {} does not exist in Stripe, Creating")
                    plan_obj = self.stripe.Plan.create(
                        amount=amount,
                        product={'name': product_name},
                        interval=interval,
                        currency=self.CURRENCY,
                        id=stripe_plan_id)
                    logging.debug(self.PLAN_EXISTS_ERROR_MSG.format(stripe_plan_id))
                    all_stripe_plans.add(stripe_plan_id)
            self._save_all_stripe_plans(all_stripe_plans)
        return stripe_plan_id

    @handle_stripe_error
    def delete_stripe_plan(self, stripe_plan_id):
        """
        Deletes the Plan in Stripe and also deletes the local db copy
        of the plan if it exists

        :param stripe_plan_id: The stripe plan id that needs to be
            deleted
        :return: True if the plan was deleted successfully from
            Stripe, False otherwise.
        """
        return_value = False
        try:
            plan = self.stripe.Plan.retrieve(stripe_plan_id)
            plan.delete()
            return_value = True
            all_stripe_plans = self._get_all_stripe_plans()
            all_stripe_plans.remove(stripe_plan_id)
            self._save_all_stripe_plans(all_stripe_plans)
        except stripe.error.InvalidRequestError as e:
            if self.STRIPE_NO_SUCH_PLAN in str(e):
                logging.debug(
                    self.PLAN_DOES_NOT_EXIST_ERROR_MSG.format(stripe_plan_id))
        return return_value

    @handle_stripe_error
    def subscribe_customer_to_plan(self, customer, plans, trial_end=None):
        """
        Subscribes the given customer to the list of given plans

        :param customer: The stripe customer identifier
        :param plans: A list of stripe plans.
        :param trial_end: An integer representing when the Stripe subscription
            is supposed to end
            Ref: https://stripe.com/docs/api/python#create_subscription-items
            e.g.
            plans = [
                {
                    "plan": "dcl-v1-cpu-2-ram-5gb-ssd-10gb",
                },
            ]
        :return: The subscription StripeObject
        """

        subscription_result = self.stripe.Subscription.create(
            customer=customer, items=plans, trial_end=trial_end
        )
        return subscription_result

    @handle_stripe_error
    def set_subscription_metadata(self, subscription_id, metadata):
        subscription = stripe.Subscription.retrieve(subscription_id)
        subscription.metadata = metadata
        subscription.save()

    @handle_stripe_error
    def unsubscribe_customer(self, subscription_id):
        """
        Cancels a given subscription

        :param subscription_id: The Stripe subscription id string
        :return:
        """
        sub = stripe.Subscription.retrieve(subscription_id)
        return sub.delete()

    @handle_stripe_error
    def make_payment(self, customer, amount, token):
        charge = self.stripe.Charge.create(
            amount=amount,  # in cents
            currency=self.CURRENCY,
            customer=customer
        )
        return charge

    @staticmethod
    def get_stripe_plan_id(cpu, ram, ssd, version, app='dcl', hdd=None,
                           price=None):
        """
        Returns the Stripe plan id string of the form
        `dcl-v1-cpu-2-ram-5gb-ssd-10gb` based on the input parameters

        :param cpu: The number of cores
        :param ram: The size of the RAM in GB
        :param ssd: The size of ssd storage in GB
        :param hdd: The size of hdd storage in GB
        :param version: The version of the Stripe plans
        :param app: The application to which the stripe plan belongs
            to. By default it is 'dcl'
        :param price: The price for this plan
        :return: A string of the form `dcl-v1-cpu-2-ram-5gb-ssd-10gb`
        """
        dcl_plan_string = 'cpu-{cpu}-ram-{ram}gb-ssd-{ssd}gb'.format(cpu=cpu,
                                                                     ram=ram,
                                                                     ssd=ssd)
        if hdd is not None:
            dcl_plan_string = '{dcl_plan_string}-hdd-{hdd}gb'.format(
                dcl_plan_string=dcl_plan_string, hdd=hdd)
        stripe_plan_id_string = '{app}-v{version}-{plan}'.format(
            app=app,
            version=version,
            plan=dcl_plan_string
        )
        if price is not None:
            stripe_plan_id_string_with_price = '{}-{}chf'.format(
                stripe_plan_id_string,
                round(price, 2)
            )
            return stripe_plan_id_string_with_price
        else:
            return stripe_plan_id_string

    @staticmethod
    def get_vm_config_from_stripe_id(stripe_id):
        """
        Given a string like "dcl-v1-cpu-2-ram-5gb-ssd-10gb" return different
        configuration params as a dict

        :param stripe_id|str
        :return: dict
        """
        pattern = re.compile(r'^dcl-v(\d+)-cpu-(\d+)-ram-(\d+\.?\d*)gb-ssd-(\d+)gb-?(\d*\.?\d*)(chf)?$')
        match_res = pattern.match(stripe_id)
        if match_res is not None:
            price = None
            try:
                price = match_res.group(5)
            except IndexError:
                logging.debug("Did not find price in {}".format(stripe_id))
            return {
                'version': match_res.group(1),
                'cores': match_res.group(2),
                'ram': match_res.group(3),
                'ssd': match_res.group(4),
                'price': price
            }

    @staticmethod
    def get_stripe_plan_name(cpu, memory, disk_size, price):
        """
        Returns the Stripe plan name
        :return:
        """
        return "{cpu} Cores, {memory} GB RAM, {disk_size} GB SSD, " \
               "{price} CHF".format(
                   cpu=cpu,
                   memory=memory,
                   disk_size=disk_size,
                   price=round(price, 2)
               )

    @handle_stripe_error
    def set_subscription_meta_data(self, subscription_id, meta_data):
        """
        Adds VM metadata to a subscription
        :param subscription_id: Stripe identifier for the subscription
        :param meta_data: A dict of meta data to be added
        :return:
        """
        subscription = stripe.Subscription.retrieve(subscription_id)
        subscription.metadata = meta_data
        subscription.save()
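
As a quick illustration of the plan-id round trip at the end of `stripe_utils.py` above (both methods are static, so no Stripe key is needed; the expected output in the comments follows from the format string and the regular expression, not from running the service):

```
plan_id = StripeUtils.get_stripe_plan_id(cpu=2, ram=5, ssd=10, version=1, price=15)
# -> 'dcl-v1-cpu-2-ram-5gb-ssd-10gb-15chf'

print(StripeUtils.get_vm_config_from_stripe_id(plan_id))
# -> {'version': '1', 'cores': '2', 'ram': '5', 'ssd': '10', 'price': '15'}
```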
@ -1,338 +0,0 @@
|
|||
import logging
|
||||
|
||||
from datetime import datetime
|
||||
from uuid import uuid4
|
||||
|
||||
from flask import Flask, request
|
||||
from flask_restful import Resource, Api
|
||||
from werkzeug.exceptions import HTTPException
|
||||
from config import etcd_client as client, config as config
|
||||
from stripe_utils import StripeUtils
|
||||
from schemas import (
|
||||
make_return_message, ValidationException, UserRegisterPaymentSchema,
|
||||
AddProductSchema, ProductOrderSchema, OrderListSchema, create_schema
|
||||
)
|
||||
from helper import get_plan_id_from_product, calculate_charges
|
||||
|
||||
|
||||
class ListProducts(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
products = client.get_prefix('/v1/products/')
|
||||
products = [
|
||||
product
|
||||
for product in [p.value for p in products]
|
||||
if product['active']
|
||||
]
|
||||
prod_dict = {}
|
||||
for p in products:
|
||||
prod_dict[p['usable-id']] = {
|
||||
'name': p['name'],
|
||||
'description': p['description'],
|
||||
}
|
||||
logger.debug('Products = {}'.format(prod_dict))
|
||||
return prod_dict, 200
|
||||
|
||||
class AddProduct(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.get_json(silent=True) or {}
|
||||
|
||||
try:
|
||||
logger.debug('Got data: {}'.format(str(data)))
|
||||
validator = AddProductSchema(data)
|
||||
validator.is_valid()
|
||||
except ValidationException as err:
|
||||
return make_return_message(err, 400)
|
||||
else:
|
||||
cleaned_values = validator.get_cleaned_values()
|
||||
previous_product = cleaned_values.get('product', None)
|
||||
if previous_product:
|
||||
if not cleaned_values['update']:
|
||||
return make_return_message('Product already exists. Pass --update to update the product.')
|
||||
else:
|
||||
product_uuid = previous_product.pop('uuid')
|
||||
else:
|
||||
product_uuid = uuid4().hex
|
||||
|
||||
product_value = cleaned_values['specs']
|
||||
|
||||
product_key = '/v1/products/{}'.format(product_uuid)
|
||||
product_value['uuid'] = product_uuid
|
||||
|
||||
logger.debug('Adding product data: {}'.format(str(product_value)))
|
||||
client.put(product_key, product_value)
|
||||
if not previous_product:
|
||||
return make_return_message('Product created.')
|
||||
else:
|
||||
return make_return_message('Product updated.')
|
||||
|
||||
################################################################################
|
||||
# Nico-ok-marker
|
||||
|
||||
|
||||
class UserRegisterPayment(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.get_json(silent=True) or {}
|
||||
|
||||
try:
|
||||
logger.debug('Got data: {}'.format(str(data)))
|
||||
validator = UserRegisterPaymentSchema(data)
|
||||
validator.is_valid()
|
||||
except ValidationException as err:
|
||||
return make_return_message(err, 400)
|
||||
else:
|
||||
cleaned_values = validator.get_cleaned_values()
|
||||
last4 = data['card_number'].strip()[-4:]
|
||||
|
||||
stripe_utils = StripeUtils()
|
||||
|
||||
# Does customer already exist ?
|
||||
stripe_customer = stripe_utils.get_stripe_customer_from_email(cleaned_values['user']['mail'])
|
||||
|
||||
# Does customer already exist ?
|
||||
if stripe_customer is not None:
|
||||
logger.debug('Customer {}-{} exists already'.format(
|
||||
cleaned_values['username'], cleaned_values['user']['mail'])
|
||||
)
|
||||
|
||||
# Check if the card already exists
|
||||
ce_response = stripe_utils.card_exists(
|
||||
stripe_customer.id, cc_number=data['card_number'],
|
||||
exp_month=int(data['expiry_month']),
|
||||
exp_year=int(data['expiry_year']),
|
||||
cvc=data['cvc'])
|
||||
|
||||
if ce_response['response_object']:
|
||||
message = 'The given card ending in {} exists already.'.format(last4)
|
||||
return make_return_message(message, 400)
|
||||
|
||||
elif ce_response['response_object'] is False:
|
||||
# Associate card with user
|
||||
logger.debug('Adding card ending in {}'.format(last4))
|
||||
token_response = stripe_utils.get_token_from_card(
|
||||
data['card_number'], data['cvc'], data['expiry_month'],
|
||||
data['expiry_year']
|
||||
)
|
||||
if token_response['response_object']:
|
||||
logger.debug('Token {}'.format(token_response['response_object'].id))
|
||||
resp = stripe_utils.associate_customer_card(
|
||||
stripe_customer.id, token_response['response_object'].id
|
||||
)
|
||||
if resp['response_object']:
|
||||
return make_return_message(
|
||||
'Card ending in {} registered as your payment source'.format(last4)
|
||||
)
|
||||
else:
|
||||
return make_return_message('Error with payment gateway. Contact support', 400)
|
||||
else:
|
||||
return make_return_message('Error: {}'.format(ce_response['error']), 400)
|
||||
else:
|
||||
# Stripe customer does not exist, create a new one
|
||||
logger.debug(
|
||||
'Customer {} does not exist, creating new'.format(cleaned_values['user']['mail'])
|
||||
)
|
||||
token_response = stripe_utils.get_token_from_card(
|
||||
cleaned_values['card_number'], cleaned_values['cvc'],
|
||||
cleaned_values['expiry_month'], cleaned_values['expiry_year']
|
||||
)
|
||||
if token_response['response_object']:
|
||||
logger.debug('Token {}'.format(token_response['response_object'].id))
|
||||
|
||||
# Create stripe customer
|
||||
stripe_customer_resp = stripe_utils.create_customer(
|
||||
name=cleaned_values['card_holder_name'],
|
||||
token=token_response['response_object'].id,
|
||||
email=cleaned_values['user']['mail'],
|
||||
address=cleaned_values['address']
|
||||
)
|
||||
stripe_customer = stripe_customer_resp['response_object']
|
||||
|
||||
if stripe_customer:
|
||||
logger.debug('Created stripe customer {}'.format(stripe_customer.id))
|
||||
return make_return_message(
|
||||
'Card ending in {} registered as your payment source'.format(last4)
|
||||
)
|
||||
else:
|
||||
return make_return_message('Error with card. Contact support', 400)
|
||||
else:
|
||||
return make_return_message('Error with payment gateway. Contact support', 400)
|
||||
|
||||
|
||||
class ProductOrder(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.get_json(silent=True) or {}
|
||||
|
||||
try:
|
||||
validator = ProductOrderSchema(data)
|
||||
validator.is_valid()
|
||||
except ValidationException as err:
|
||||
return make_return_message(err, 400)
|
||||
else:
|
||||
cleaned_values = validator.get_cleaned_values()
|
||||
stripe_utils = StripeUtils()
|
||||
|
||||
product = cleaned_values['product']
|
||||
|
||||
# Check the user has a payment source added
|
||||
stripe_customer = stripe_utils.get_stripe_customer_from_email(cleaned_values['user']['mail'])
|
||||
|
||||
if not stripe_customer or len(stripe_customer.sources) == 0:
|
||||
return make_return_message('Please register your payment method first.', 400)
|
||||
|
||||
try:
|
||||
product_schema = create_schema(product, data)
|
||||
product_schema = product_schema()
|
||||
product_schema.is_valid()
|
||||
except ValidationException as err:
|
||||
return make_return_message(err, 400)
|
||||
else:
|
||||
transformed_data = product_schema.get_cleaned_values()
|
||||
logger.debug('Tranformed data: {}'.format(transformed_data))
|
||||
one_time_charge, recurring_charge = calculate_charges(product, transformed_data)
|
||||
recurring_charge = int(recurring_charge)
|
||||
|
||||
if not cleaned_values['pay']:
|
||||
return make_return_message(
|
||||
'You would be charged {} CHF one time and {} CHF every {}. '
|
||||
'Add --pay to command to order.'.format(
|
||||
one_time_charge, recurring_charge, product['recurring_period']
|
||||
)
|
||||
)
|
||||
|
||||
with client.client.lock('product-order') as _:
|
||||
# Initiate a one-time/subscription based on product type
|
||||
if recurring_charge > 0:
|
||||
logger.debug('Product {} is recurring payment'.format(product['name']))
|
||||
plan_id = get_plan_id_from_product(product)
|
||||
res = stripe_utils.get_or_create_stripe_plan(
|
||||
product_name=product['name'],
|
||||
stripe_plan_id=plan_id, amount=recurring_charge,
|
||||
interval=product['recurring_period'],
|
||||
)
|
||||
if res['response_object']:
|
||||
logger.debug('Obtained plan {}'.format(plan_id))
|
||||
subscription_res = stripe_utils.subscribe_customer_to_plan(
|
||||
stripe_customer.id,
|
||||
[{'plan': plan_id}]
|
||||
)
|
||||
subscription_obj = subscription_res['response_object']
|
||||
if subscription_obj is None or subscription_obj.status != 'active':
|
||||
return make_return_message(
|
||||
'Error subscribing to plan. Detail: {}'.format(subscription_res['error']), 400
|
||||
)
|
||||
else:
|
||||
order_obj = {
|
||||
'order-id': uuid4().hex,
|
||||
'ordered-at': datetime.now().isoformat(),
|
||||
'product': product['usable-id'],
|
||||
'one-time-price': one_time_charge,
|
||||
'recurring-price': recurring_charge,
|
||||
'recurring-period': product['recurring_period']
|
||||
}
|
||||
client.put(
|
||||
'/v1/user/{}/orders/{}'.format(
|
||||
cleaned_values['username'], order_obj['order-id']
|
||||
), order_obj
|
||||
)
|
||||
product['quantity'] -= 1
|
||||
client.put('/v1/products/{}'.format(product['uuid']), product)
|
||||
|
||||
return {
|
||||
'message': 'Order Successful.',
|
||||
**order_obj
|
||||
}
|
||||
else:
|
||||
logger.error('Could not create plan {}'.format(plan_id))
|
||||
return make_return_message('Something wrong happened. Contact administrator', 400)
|
||||
|
||||
elif recurring_charge == 0 and one_time_charge > 0:
|
||||
logger.debug('Product {} is a one-time payment'.format(product['name']))
|
||||
charge_response = stripe_utils.make_charge(
|
||||
amount=one_time_charge,
|
||||
customer=stripe_customer.id
|
||||
)
|
||||
stripe_onetime_charge = charge_response.get('response_object')
|
||||
|
||||
# Check if the payment was approved
|
||||
if not stripe_onetime_charge:
|
||||
msg = charge_response.get('error')
|
||||
return make_return_message('Error processing the charge. Details: {}'.format(msg), 400)
|
||||
|
||||
order_obj = {
|
||||
'order-id': uuid4().hex,
|
||||
'ordered-at': datetime.now().isoformat(),
|
||||
'product': product['usable-id'],
|
||||
'one-time-price': one_time_charge,
|
||||
}
|
||||
client.put(
|
||||
'/v1/user/{}/orders/{}'.format(cleaned_values['username'], order_obj['order-id']),
|
||||
order_obj
|
||||
)
|
||||
product['quantity'] -= 1
|
||||
client.put('/v1/products/{}'.format(product['uuid']), product)
|
||||
|
||||
return {'message': 'Order successful', **order_obj}, 200
|
||||
|
||||
|
||||
class OrderList(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.get_json(silent=True) or {}
|
||||
|
||||
try:
|
||||
validator = OrderListSchema(data)
|
||||
validator.is_valid()
|
||||
except ValidationException as err:
|
||||
return make_return_message(err, 400)
|
||||
else:
|
||||
cleaned_values = validator.get_cleaned_values()
|
||||
orders = client.get_prefix('/v1/user/{}/orders'.format(cleaned_values['username']))
|
||||
orders_dict = {
|
||||
order.value['order-id']: {
|
||||
**order.value
|
||||
}
|
||||
for order in orders
|
||||
}
|
||||
logger.debug('Orders = {}'.format(orders_dict))
|
||||
return {'orders': orders_dict}, 200
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
logger = logging.getLogger()
|
||||
logger.setLevel(logging.DEBUG)
|
||||
log_formater = logging.Formatter('[%(filename)s:%(lineno)d] %(message)s')
|
||||
|
||||
stream_logger = logging.StreamHandler()
|
||||
stream_logger.setFormatter(log_formater)
|
||||
|
||||
# file_logger = logging.FileHandler('log.txt')
|
||||
# file_logger.setLevel(logging.DEBUG)
|
||||
# file_logger.setFormatter(log_formater)
|
||||
|
||||
logger.addHandler(stream_logger)
|
||||
# logger.addHandler(file_logger)
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
api = Api(app)
|
||||
api.add_resource(ListProducts, '/product/list')
|
||||
api.add_resource(AddProduct, '/product/add')
|
||||
api.add_resource(ProductOrder, '/product/order')
|
||||
api.add_resource(UserRegisterPayment, '/user/register_payment')
|
||||
api.add_resource(OrderList, '/order/list')
|
||||
|
||||
app.run(host='::', port=config.get('app', 'port', fallback=5000), debug=True)
|
||||
|
||||
|
||||
@app.errorhandler(Exception)
|
||||
def handle_exception(e):
|
||||
app.logger.error(e)
|
||||
# pass through HTTP errors
|
||||
if isinstance(e, HTTPException):
|
||||
return e
|
||||
|
||||
# now you're handling non-HTTP exceptions only
|
||||
return {'message': 'Server Error'}, 500
|
|
@ -1,11 +0,0 @@
|
|||
## TODO 2020-02-22
|
||||
|
||||
* ~~move the current rest api to /opennebula~~
|
||||
* ~~make the /opennebula api only accessible by an admin account~~
|
||||
* ~~create a new filtered api on /vm/list that~~
|
||||
* ~~a) requires authentication~~
|
||||
* ~~b) only shows the VMs of the current user~~
|
||||
* ~~the new api should not contain all details, but: cpus (as read by the vcpu field), ram, ips, disks~~
|
||||
* ~~also make a (random) uuid the primary key for VMs - everything in this uncloud hack will use uuids as the id~~
|
||||
* ~~still expose the opennebula id as opennebula_id~~
|
||||
* ~~note put all secrets/configs into uncloud.secrets - I added a sample file into the repo~~
|
|
@ -1,102 +0,0 @@
|
|||
* snapshot feature
|
||||
** product: vm-snapshot
|
||||
** flow
|
||||
*** list all my VMs
|
||||
**** get the uuid of the VM I want to take a snapshot of
|
||||
*** request a snapshot
|
||||
```
|
||||
vmuuid=$(http nicocustomer
|
||||
http -a nicocustomer:xxx http://uncloud.ch/vm/create_snapshot uuid=
|
||||
password=...
|
||||
```
|
||||
** backend realisation
|
||||
*** list snapshots
|
||||
- have them in the DB
|
||||
- create an entry on create
|
||||
*** creating snapshots
|
||||
- vm sync / fsync?
|
||||
- rbd snapshot
|
||||
- host/cluster mapping?
|
||||
- need image(s)
|
||||
|
||||
* steps
|
||||
** DONE authenticate via ldap
|
||||
CLOSED: [2020-02-20 Thu 19:05]
|
||||
** DONE Make classes / views require authentication
|
||||
CLOSED: [2020-02-20 Thu 19:05]
|
||||
** TODO register credit card
|
||||
*** TODO find out what saving with us
|
||||
*** Info
|
||||
**** should not be fully saved in the DB
|
||||
**** model needs to be a bit different
|
||||
* Decide where to save sensitive data
|
||||
** stripe access key, etc.
|
||||
* python requirements (nicohack202002)
|
||||
django djangorestframework django-auth-ldap stripe
|
||||
* os package requirements (alpine)
|
||||
openldap-dev
|
||||
* VPN case
|
||||
** put on /orders with uuid
|
||||
** register cc
|
||||
* CC
|
||||
** TODO check whether we can register or not at stripe
|
||||
* membership
|
||||
** required for "smaller" / "shorter" products
|
||||
|
||||
* TODO Membership missing
|
||||
* Flows to be implemented - see https://redmine.ungleich.ch/issues/7609
|
||||
** Membership
|
||||
*** 5 CHF
|
||||
** Django Hosting
|
||||
*** One time payment 35 CHF
|
||||
*** Monthly payment depends on VM size
|
||||
*** Parameters: same as IPv6 only VM
|
||||
** IPv6 VPN
|
||||
*** Parameters: none
|
||||
*** Is for free if the customer has an active VM
|
||||
** IPv6 only VM
|
||||
*** Parameters: cores, ram, os_disk_size, OS
|
||||
* Django rest framework
|
||||
** viewset: .list and .create
|
||||
** view: .get .post
|
||||
* TODO register CC
|
||||
* DONE list products
|
||||
CLOSED: [2020-02-24 Mon 20:15]
|
||||
* An ungleich account - can be registered for free on
|
||||
https://account.ungleich.ch
|
||||
* httpie installed (provides the http command)
|
||||
|
||||
## Get a membership
|
||||
|
||||
|
||||
## Registering a payment method
|
||||
|
||||
To be able to pay for the membership, you will need to register a
|
||||
credit card or apply for payment via bill (TO BE IMPLEMENTED).
|
||||
|
||||
### Register credit card
|
||||
|
||||
```
|
||||
http POST https://api.ungleich.ch/membership \
|
||||
username=nico password=yourpassword \
|
||||
cc_number=.. \
|
||||
cc_
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Request payment via bill
|
||||
|
||||
|
||||
|
||||
|
||||
## Create the membership
|
||||
|
||||
|
||||
```
|
||||
http POST https://api.ungleich.ch/membership username=nico password=yourpassword
|
||||
|
||||
```
|
||||
|
||||
## List available products
|
|
@ -1,6 +0,0 @@
|
|||
* TODO register CC
|
||||
* TODO list products
|
||||
* ahmed
|
||||
** schemas
|
||||
*** field: is_valid? - used by schemas
|
||||
*** definition of a "schema"
|
|
@ -1,4 +0,0 @@
|
|||
db.sqlite3
|
||||
uncloud/secrets.py
|
||||
debug.log
|
||||
uncloud/local_settings.py
|
|
@ -1,36 +0,0 @@
|
|||
Hacking
|
||||
=======
|
||||
Using uncloud in hacking (aka development) mode.
|
||||
|
||||
|
||||
Get the code
|
||||
------------
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
git clone https://code.ungleich.ch/uncloud/uncloud.git
|
||||
|
||||
|
||||
|
||||
Install python requirements
|
||||
---------------------------
|
||||
You need to have python3 installed.
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
cd uncloud
|
||||
python -m venv venv
|
||||
. ./venv/bin/activate
|
||||
./bin/uncloud-run-reinstall
|
||||
|
||||
|
||||
|
||||
Install os requirements
|
||||
-----------------------
|
||||
Install the following software packages: **dnsmasq**.
|
||||
|
||||
If you already have a working IPv6 SLAAC and DNS setup,
|
||||
this step can be skipped.
|
||||
|
||||
Note that you need at least one /64 IPv6 network to run uncloud.
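
A minimal sketch of such a setup with dnsmasq (the bridge name ``br0`` and
the documentation prefix ``2001:db8:1::/64`` are assumptions; adapt both to
your environment):

.. code-block:: sh

   # Announce the prefix via router advertisements so VMs configure
   # themselves with SLAAC.
   dnsmasq --interface=br0 --enable-ra --dhcp-range=2001:db8:1::,ra-only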
|
|
@ -1,66 +0,0 @@
|
|||
VM images
|
||||
==================================
|
||||
|
||||
Overview
|
||||
---------
|
||||
|
||||
ucloud tries to be least invasive towards VMs and only requires
|
||||
strictly necessary changes for running in a virtualised
|
||||
environment. This includes configurations for:
|
||||
|
||||
* Configuring the network
|
||||
* Managing access via ssh keys
|
||||
* Resizing the attached disk(s)
|
||||
|
||||
Upstream images
|
||||
---------------
|
||||
|
||||
The 'official' uncloud images are defined in the `uncloud/images
|
||||
<https://code.ungleich.ch/uncloud/images>`_ repository.
|
||||
|
||||
How to make your own Uncloud images
|
||||
------------------------------------
|
||||
|
||||
.. note::
|
||||
It is fairly easy to create your own images for uncloud, as the common
|
||||
operations (which are detailed below) can be automatically handled by the
|
||||
`uncloud/uncloud-init <https://code.ungleich.ch/uncloud/uncloud-init>`_ tool.
|
||||
|
||||
Network configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
All VMs in ucloud are required to support IPv6. The primary network
|
||||
configuration is always done using SLAAC. A VM thus needs only to be
|
||||
configured to (see the sketch after this list):
|
||||
|
||||
* accept router advertisements on all network interfaces
|
||||
* use the router advertisements to configure the network interfaces
|
||||
* accept the DNS entries from the router advertisements
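
A guest-side sketch of these settings (``eth0`` is an assumed interface
name; picking up the advertised DNS servers additionally needs RDNSS
support in the guest's resolver setup):

.. code-block:: sh

   # Accept router advertisements even when forwarding is enabled (value 2)
   sysctl -w net.ipv6.conf.eth0.accept_ra=2
   # Build addresses from the advertised prefix (SLAAC)
   sysctl -w net.ipv6.conf.eth0.autoconf=1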
|
||||
|
||||
|
||||
Configuring SSH keys
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To be able to access the VM, ucloud supports provisioning SSH keys.
|
||||
|
||||
To accept ssh keys in your VM, request the URL
|
||||
*http://metadata/ssh_keys*. Add the content to the appropriate user's
|
||||
**authorized_keys** file. Below you find sample code to accomplish
|
||||
this task:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
tmp=$(mktemp)
|
||||
curl -s http://metadata/ssh_keys > "$tmp"
|
||||
touch ~/.ssh/authorized_keys # ensure it exists
|
||||
cat ~/.ssh/authorized_keys >> "$tmp"
|
||||
sort "$tmp" | uniq > ~/.ssh/authorized_keys
|
||||
|
||||
|
||||
Disk resize
|
||||
~~~~~~~~~~~
|
||||
In virtualised environments, the disk sizes might grow. The operating
|
||||
system should detect disks that are bigger than the existing partition
|
||||
table and resize accordingly. This task is OS-specific.
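
A sketch of how an image could handle this at boot, assuming an ext4 root
filesystem on ``/dev/vda1`` (``growpart`` is shipped by cloud-utils; device
and filesystem are assumptions):

.. code-block:: sh

   # Grow partition 1 of /dev/vda to fill the disk, then grow the filesystem
   growpart /dev/vda 1
   resize2fs /dev/vda1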
|
||||
|
||||
ucloud does not support shrinking disks due to the complexity and
|
||||
intra OS dependencies.
|
|
@ -1,89 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
import logging
|
||||
import sys
|
||||
import importlib
|
||||
import argparse
|
||||
import os
|
||||
|
||||
from etcd3.exceptions import ConnectionFailedError
|
||||
|
||||
from uncloud.common import settings
|
||||
from uncloud import UncloudException
|
||||
from uncloud.common.cli import resolve_otp_credentials
|
||||
|
||||
# Components that use etcd
|
||||
ETCD_COMPONENTS = ['api', 'scheduler', 'host', 'filescanner',
|
||||
'imagescanner', 'metadata', 'configure', 'hack']
|
||||
|
||||
ALL_COMPONENTS = ETCD_COMPONENTS.copy()
|
||||
ALL_COMPONENTS.append('oneshot')
|
||||
#ALL_COMPONENTS.append('cli')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
arg_parser = argparse.ArgumentParser()
|
||||
subparsers = arg_parser.add_subparsers(dest='command')
|
||||
|
||||
parent_parser = argparse.ArgumentParser(add_help=False)
|
||||
parent_parser.add_argument('--debug', '-d', action='store_true', default=False,
|
||||
help='More verbose logging')
|
||||
parent_parser.add_argument('--conf-dir', '-c', help='Configuration directory',
|
||||
default=os.path.expanduser('~/uncloud'))
|
||||
|
||||
etcd_parser = argparse.ArgumentParser(add_help=False)
|
||||
etcd_parser.add_argument('--etcd-host')
|
||||
etcd_parser.add_argument('--etcd-port')
|
||||
etcd_parser.add_argument('--etcd-ca-cert', help='CA that signed the etcd certificate')
|
||||
etcd_parser.add_argument('--etcd-cert-cert', help='Path to client certificate')
|
||||
etcd_parser.add_argument('--etcd-cert-key', help='Path to client certificate key')
|
||||
|
||||
for component in ALL_COMPONENTS:
|
||||
mod = importlib.import_module('uncloud.{}.main'.format(component))
|
||||
parser = getattr(mod, 'arg_parser')
|
||||
|
||||
if component in ETCD_COMPONENTS:
|
||||
subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser, etcd_parser])
|
||||
else:
|
||||
subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser])
|
||||
|
||||
arguments = vars(arg_parser.parse_args())
|
||||
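# Collect the etcd_* flags that were actually given and nest them under an
# 'etcd' key; this matches the seed_value layout used by the (currently
# commented-out) Settings initialisation below.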
etcd_arguments = [key for key, value in arguments.items() if key.startswith('etcd_') and value]
|
||||
etcd_arguments = {
|
||||
'etcd': {
|
||||
key.replace('etcd_', ''): arguments[key]
|
||||
for key in etcd_arguments
|
||||
}
|
||||
}
|
||||
if not arguments['command']:
|
||||
arg_parser.print_help()
|
||||
else:
|
||||
# Initializing Settings and resolving otp_credentials
|
||||
# It is necessary to resolve_otp_credentials after argument parsing is done
|
||||
# because previously we read a config file fixed to ~/uncloud/uncloud.conf and
|
||||
# used its values as defaults for the --name, --realm and --seed arguments.
|
||||
# Now the user tells us where the config file lives, so those defaults cannot
|
||||
# be provided before the arguments have been parsed; we resolve them afterwards.
|
||||
# settings.settings = settings.Settings(arguments['conf_dir'], seed_value=etcd_arguments)
|
||||
# resolve_otp_credentials(arguments)
|
||||
|
||||
name = arguments.pop('command')
|
||||
mod = importlib.import_module('uncloud.{}.main'.format(name))
|
||||
main = getattr(mod, 'main')
|
||||
|
||||
if arguments['debug']:
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
else:
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
log = logging.getLogger()
|
||||
|
||||
try:
|
||||
main(arguments)
|
||||
except UncloudException as err:
|
||||
log.error(err)
|
||||
sys.exit(1)
|
||||
# except ConnectionFailedError as err:
|
||||
# log.error('Cannot connect to etcd: {}'.format(err))
|
||||
except Exception as err:
|
||||
log.exception(err)
|
|
@ -1,37 +0,0 @@
|
|||
import unittest
|
||||
from unittest.mock import Mock
|
||||
|
||||
from uncloud.hack.mac import MAC
|
||||
from uncloud import UncloudException
|
||||
|
||||
class TestMacLocal(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.config = Mock()
|
||||
self.config.arguments = {"no_db":True}
|
||||
self.mac = MAC(self.config)
|
||||
self.mac.create()
|
||||
|
||||
def testMacInt(self):
|
||||
self.assertEqual(self.mac.__int__(), int("0x420000000001",0), "wrong first MAC index")
|
||||
|
||||
def testMacRepr(self):
|
||||
self.assertEqual(self.mac.__repr__(), '420000000001', "wrong first MAC index")
|
||||
|
||||
def testMacStr(self):
|
||||
self.assertEqual(self.mac.__str__(), '42:00:00:00:00:01', "wrong first MAC index")
|
||||
|
||||
def testValidationRaise(self):
|
||||
with self.assertRaises(UncloudException):
|
||||
self.mac.validate_mac("2")
|
||||
|
||||
def testValidation(self):
|
||||
self.assertTrue(self.mac.validate_mac("42:00:00:00:00:01"), "Validation of a given MAC not working properly")
|
||||
|
||||
def testNextMAC(self):
|
||||
self.mac.create()
|
||||
self.assertEqual(self.mac.__repr__(), '420000000001', "wrong second MAC index")
|
||||
self.assertEqual(self.mac.__int__(), int("0x420000000001",0), "wrong second MAC index")
|
||||
self.assertEqual(self.mac.__str__(), '42:00:00:00:00:01', "wrong second MAC index")
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
|
@ -1,2 +0,0 @@
|
|||
class UncloudException(Exception):
|
||||
pass
|
|
@ -1,59 +0,0 @@
|
|||
import os
|
||||
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
|
||||
class Optional:
|
||||
pass
|
||||
|
||||
|
||||
class Field:
|
||||
def __init__(self, _name, _type, _value=None):
|
||||
self.name = _name
|
||||
self.value = _value
|
||||
self.type = _type
|
||||
self.__errors = []
|
||||
|
||||
def validation(self):
|
||||
return True
|
||||
|
||||
def is_valid(self):
|
||||
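# The KeyError *class* is used as a "missing" sentinel: callers construct
# fields with data.get(<name>, KeyError), so equality with KeyError means
# the field was absent from the request.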
if self.value == KeyError:
|
||||
self.add_error(
|
||||
"'{}' field is a required field".format(self.name)
|
||||
)
|
||||
else:
|
||||
if isinstance(self.value, Optional):
|
||||
pass
|
||||
elif not isinstance(self.value, self.type):
|
||||
self.add_error(
|
||||
"Incorrect Type for '{}' field".format(self.name)
|
||||
)
|
||||
else:
|
||||
self.validation()
|
||||
|
||||
if self.__errors:
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_errors(self):
|
||||
return self.__errors
|
||||
|
||||
def add_error(self, error):
|
||||
self.__errors.append(error)
|
||||
|
||||
|
||||
class VmUUIDField(Field):
|
||||
def __init__(self, data):
|
||||
self.uuid = data.get("uuid", KeyError)
|
||||
|
||||
super().__init__("uuid", str, self.uuid)
|
||||
|
||||
self.validation = self.vm_uuid_validation
|
||||
|
||||
def vm_uuid_validation(self):
|
||||
r = shared.etcd_client.get(
|
||||
os.path.join(shared.settings["etcd"]["vm_prefix"], self.uuid)
|
||||
)
|
||||
if not r:
|
||||
self.add_error("VM with uuid {} does not exists".format(self.uuid))
|
|
@ -1,148 +0,0 @@
|
|||
import binascii
|
||||
import ipaddress
|
||||
import random
|
||||
import logging
|
||||
import requests
|
||||
|
||||
from pyotp import TOTP
|
||||
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def check_otp(name, realm, token):
|
||||
try:
|
||||
data = {
|
||||
"auth_name": shared.settings["otp"]["auth_name"],
|
||||
"auth_token": TOTP(shared.settings["otp"]["auth_seed"]).now(),
|
||||
"auth_realm": shared.settings["otp"]["auth_realm"],
|
||||
"name": name,
|
||||
"realm": realm,
|
||||
"token": token,
|
||||
}
|
||||
except binascii.Error as err:
|
||||
logger.error(
|
||||
"Cannot compute OTP for seed: {}".format(
|
||||
shared.settings["otp"]["auth_seed"]
|
||||
)
|
||||
)
|
||||
return 400
|
||||
|
||||
response = requests.post(
|
||||
shared.settings["otp"]["verification_controller_url"], json=data
|
||||
)
|
||||
return response.status_code
|
||||
|
||||
|
||||
def resolve_vm_name(name, owner):
|
||||
"""Return UUID of Virtual Machine of name == name and owner == owner
|
||||
|
||||
Input: name of vm, owner of vm.
|
||||
Output: uuid of vm if found otherwise None
|
||||
"""
|
||||
result = next(
|
||||
filter(
|
||||
lambda vm: vm.value["owner"] == owner
|
||||
and vm.value["name"] == name,
|
||||
shared.vm_pool.vms,
|
||||
),
|
||||
None,
|
||||
)
|
||||
if result:
|
||||
return result.key.split("/")[-1]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def resolve_image_name(name, etcd_client):
|
||||
"""Return image uuid given its name and its store
|
||||
|
||||
* If the provided name is not in correct format
|
||||
i.e. {store_name}:{image_name}, raise ValueError
|
||||
* If no such image is found, raise KeyError
|
||||
|
||||
"""
|
||||
|
||||
separator = ":"
|
||||
|
||||
# Ensure the user/program passed a valid name of type string
|
||||
try:
|
||||
store_name_and_image_name = name.split(separator)
|
||||
|
||||
"""
|
||||
Examples, where it would work and where it would raise exception
|
||||
"images:alpine" --> ["images", "alpine"]
|
||||
|
||||
"images" --> ["images"] it would raise Exception as non enough value to unpack
|
||||
|
||||
"images:alpine:meow" --> ["images", "alpine", "meow"] it would raise Exception
|
||||
as too many values to unpack
|
||||
"""
|
||||
store_name, image_name = store_name_and_image_name
|
||||
except Exception:
|
||||
raise ValueError(
|
||||
"Image name not in correct format i.e {store_name}:{image_name}"
|
||||
)
|
||||
|
||||
images = etcd_client.get_prefix(
|
||||
shared.settings["etcd"]["image_prefix"], value_in_json=True
|
||||
)
|
||||
|
||||
# Try to find image with name == image_name and store_name == store_name
|
||||
try:
|
||||
image = next(
|
||||
filter(
|
||||
lambda im: im.value["name"] == image_name
|
||||
and im.value["store_name"] == store_name,
|
||||
images,
|
||||
)
|
||||
)
|
||||
except StopIteration:
|
||||
raise KeyError("No image with name {} found.".format(name))
|
||||
else:
|
||||
image_uuid = image.key.split("/")[-1]
|
||||
|
||||
return image_uuid
|
||||
|
||||
|
||||
def random_bytes(num=6):
|
||||
return [random.randrange(256) for _ in range(num)]
|
||||
|
||||
|
||||
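# Generate a random MAC address. Without arguments the result is a unicast,
# locally administered address; uaa=True clears the locally-administered bit,
# multicast=True sets the multicast bit, and passing oui pins the leading
# octets as given (bit handling is then skipped).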
def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"):
|
||||
mac = random_bytes()
|
||||
if oui:
|
||||
if type(oui) == str:
|
||||
oui = [int(chunk) for chunk in oui.split(separator)]
|
||||
mac = oui + random_bytes(num=6 - len(oui))
|
||||
else:
|
||||
if multicast:
|
||||
mac[0] |= 1 # set bit 0
|
||||
else:
|
||||
mac[0] &= ~1 # clear bit 0
|
||||
if uaa:
|
||||
mac[0] &= ~(1 << 1) # clear bit 1
|
||||
else:
|
||||
mac[0] |= 1 << 1 # set bit 1
|
||||
return separator.join(byte_fmt % b for b in mac)
|
||||
|
||||
|
||||
def mac2ipv6(mac, prefix):
|
||||
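# Build an EUI-64 style address: flip the universal/local bit of the first
# octet, insert ff:fe in the middle and add the result to the given prefix.
# Worked example (hypothetical values):
#   mac2ipv6("42:00:00:00:00:01", "2001:db8::") -> "2001:db8::4000:ff:fe00:1"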
# only accept MACs separated by a colon
|
||||
parts = mac.split(":")
|
||||
|
||||
# modify parts to match IPv6 value
|
||||
parts.insert(3, "ff")
|
||||
parts.insert(4, "fe")
|
||||
parts[0] = "%x" % (int(parts[0], 16) ^ 2)
|
||||
|
||||
# format output
|
||||
ipv6_parts = [str(0)] * 4
|
||||
for i in range(0, len(parts), 2):
|
||||
ipv6_parts.append("".join(parts[i : i + 2]))
|
||||
|
||||
lower_part = ipaddress.IPv6Address(":".join(ipv6_parts))
|
||||
prefix = ipaddress.IPv6Address(prefix)
|
||||
return str(prefix + int(lower_part))
|
||||
|
|
@ -1,600 +0,0 @@
|
|||
import json
|
||||
import pynetbox
|
||||
import logging
|
||||
import argparse
|
||||
|
||||
from uuid import uuid4
|
||||
from os.path import join as join_path
|
||||
|
||||
from flask import Flask, request
|
||||
from flask_restful import Resource, Api
|
||||
from werkzeug.exceptions import HTTPException
|
||||
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
from uncloud.common import counters
|
||||
from uncloud.common.vm import VMStatus
|
||||
from uncloud.common.request import RequestEntry, RequestType
|
||||
from uncloud.api import schemas
|
||||
from uncloud.api.helper import generate_mac, mac2ipv6
|
||||
from uncloud import UncloudException
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
app = Flask(__name__)
|
||||
api = Api(app)
|
||||
app.logger.handlers.clear()
|
||||
|
||||
arg_parser = argparse.ArgumentParser('api', add_help=False)
|
||||
arg_parser.add_argument('--port', '-p')
|
||||
|
||||
|
||||
@app.errorhandler(Exception)
|
||||
def handle_exception(e):
|
||||
app.logger.error(e)
|
||||
# pass through HTTP errors
|
||||
if isinstance(e, HTTPException):
|
||||
return e
|
||||
|
||||
# now you're handling non-HTTP exceptions only
|
||||
return {'message': 'Server Error'}, 500
|
||||
|
||||
|
||||
class CreateVM(Resource):
|
||||
"""API Request to Handle Creation of VM"""
|
||||
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.CreateVMSchema(data)
|
||||
if validator.is_valid():
|
||||
vm_uuid = uuid4().hex
|
||||
vm_key = join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
|
||||
specs = {
|
||||
'cpu': validator.specs['cpu'],
|
||||
'ram': validator.specs['ram'],
|
||||
'os-ssd': validator.specs['os-ssd'],
|
||||
'hdd': validator.specs['hdd'],
|
||||
}
|
||||
macs = [generate_mac() for _ in range(len(data['network']))]
|
||||
tap_ids = [
|
||||
counters.increment_etcd_counter(
|
||||
shared.etcd_client, shared.settings['etcd']['tap_counter']
|
||||
)
|
||||
for _ in range(len(data['network']))
|
||||
]
|
||||
vm_entry = {
|
||||
'name': data['vm_name'],
|
||||
'owner': data['name'],
|
||||
'owner_realm': data['realm'],
|
||||
'specs': specs,
|
||||
'hostname': '',
|
||||
'status': VMStatus.stopped,
|
||||
'image_uuid': validator.image_uuid,
|
||||
'log': [],
|
||||
'vnc_socket': '',
|
||||
'network': list(zip(data['network'], macs, tap_ids)),
|
||||
'metadata': {'ssh-keys': []},
|
||||
'in_migration': False,
|
||||
}
|
||||
shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)
|
||||
|
||||
# Create ScheduleVM Request
|
||||
r = RequestEntry.from_scratch(
|
||||
type=RequestType.ScheduleVM,
|
||||
uuid=vm_uuid,
|
||||
request_prefix=shared.settings['etcd']['request_prefix'],
|
||||
)
|
||||
shared.request_pool.put(r)
|
||||
|
||||
return {'message': 'VM Creation Queued'}, 200
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class VmStatus(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.VMStatusSchema(data)
|
||||
if validator.is_valid():
|
||||
vm = shared.vm_pool.get(
|
||||
join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
|
||||
)
|
||||
vm_value = vm.value.copy()
|
||||
vm_value['ip'] = []
|
||||
for network_mac_and_tap in vm.network:
|
||||
network_name, mac, tap = network_mac_and_tap
|
||||
network = shared.etcd_client.get(
|
||||
join_path(
|
||||
shared.settings['etcd']['network_prefix'],
|
||||
data['name'],
|
||||
network_name,
|
||||
),
|
||||
value_in_json=True,
|
||||
)
|
||||
ipv6_addr = (
|
||||
network.value.get('ipv6').split('::')[0] + '::'
|
||||
)
|
||||
vm_value['ip'].append(mac2ipv6(mac, ipv6_addr))
|
||||
vm.value = vm_value
|
||||
return vm.value
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class CreateImage(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.CreateImageSchema(data)
|
||||
if validator.is_valid():
|
||||
file_entry = shared.etcd_client.get(
|
||||
join_path(shared.settings['etcd']['file_prefix'], data['uuid'])
|
||||
)
|
||||
file_entry_value = json.loads(file_entry.value)
|
||||
|
||||
image_entry_json = {
|
||||
'status': 'TO_BE_CREATED',
|
||||
'owner': file_entry_value['owner'],
|
||||
'filename': file_entry_value['filename'],
|
||||
'name': data['name'],
|
||||
'store_name': data['image_store'],
|
||||
'visibility': 'public',
|
||||
}
|
||||
shared.etcd_client.put(
|
||||
join_path(
|
||||
shared.settings['etcd']['image_prefix'], data['uuid']
|
||||
),
|
||||
json.dumps(image_entry_json),
|
||||
)
|
||||
|
||||
return {'message': 'Image queued for creation.'}
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListPublicImages(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
images = shared.etcd_client.get_prefix(
|
||||
shared.settings['etcd']['image_prefix'], value_in_json=True
|
||||
)
|
||||
r = {'images': []}
|
||||
for image in images:
|
||||
image_key = '{}:{}'.format(
|
||||
image.value['store_name'], image.value['name']
|
||||
)
|
||||
r['images'].append(
|
||||
{'name': image_key, 'status': image.value['status']}
|
||||
)
|
||||
return r, 200
|
||||
|
||||
|
||||
class VMAction(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.VmActionSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
vm_entry = shared.vm_pool.get(
|
||||
join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
|
||||
)
|
||||
action = data['action']
|
||||
|
||||
if action == 'start':
|
||||
action = 'schedule'
|
||||
|
||||
if action == 'delete' and vm_entry.hostname == '':
|
||||
if shared.storage_handler.is_vm_image_exists(
|
||||
vm_entry.uuid
|
||||
):
|
||||
r_status = shared.storage_handler.delete_vm_image(
|
||||
vm_entry.uuid
|
||||
)
|
||||
if r_status:
|
||||
shared.etcd_client.client.delete(vm_entry.key)
|
||||
return {'message': 'VM successfully deleted'}
|
||||
else:
|
||||
logger.error(
|
||||
'Some error occurred while deleting VM'
|
||||
)
|
||||
return {'message': 'VM deletion unsuccessful'}
|
||||
else:
|
||||
shared.etcd_client.client.delete(vm_entry.key)
|
||||
return {'message': 'VM successfully deleted'}
|
||||
|
||||
r = RequestEntry.from_scratch(
|
||||
type='{}VM'.format(action.title()),
|
||||
uuid=data['uuid'],
|
||||
hostname=vm_entry.hostname,
|
||||
request_prefix=shared.settings['etcd']['request_prefix'],
|
||||
)
|
||||
shared.request_pool.put(r)
|
||||
return (
|
||||
{'message': 'VM {} Queued'.format(action.title())},
|
||||
200,
|
||||
)
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class VMMigration(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.VmMigrationSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
vm = shared.vm_pool.get(data['uuid'])
|
||||
r = RequestEntry.from_scratch(
|
||||
type=RequestType.InitVMMigration,
|
||||
uuid=vm.uuid,
|
||||
hostname=join_path(
|
||||
shared.settings['etcd']['host_prefix'],
|
||||
validator.destination.value,
|
||||
),
|
||||
request_prefix=shared.settings['etcd']['request_prefix'],
|
||||
)
|
||||
|
||||
shared.request_pool.put(r)
|
||||
return (
|
||||
{'message': 'VM Migration Initialization Queued'},
|
||||
200,
|
||||
)
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListUserVM(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.OTPSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
vms = shared.etcd_client.get_prefix(
|
||||
shared.settings['etcd']['vm_prefix'], value_in_json=True
|
||||
)
|
||||
return_vms = []
|
||||
user_vms = filter(
|
||||
lambda v: v.value['owner'] == data['name'], vms
|
||||
)
|
||||
for vm in user_vms:
|
||||
return_vms.append(
|
||||
{
|
||||
'name': vm.value['name'],
|
||||
'vm_uuid': vm.key.split('/')[-1],
|
||||
'specs': vm.value['specs'],
|
||||
'status': vm.value['status'],
|
||||
'hostname': vm.value['hostname'],
|
||||
'vnc_socket': vm.value.get('vnc_socket', None),
|
||||
}
|
||||
)
|
||||
if return_vms:
|
||||
return {'message': return_vms}, 200
|
||||
return {'message': 'No VM found'}, 404
|
||||
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListUserFiles(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.OTPSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
files = shared.etcd_client.get_prefix(
|
||||
shared.settings['etcd']['file_prefix'], value_in_json=True
|
||||
)
|
||||
return_files = []
|
||||
user_files = [f for f in files if f.value['owner'] == data['name']]
|
||||
for file in user_files:
|
||||
file_uuid = file.key.split('/')[-1]
|
||||
file = file.value
|
||||
file['uuid'] = file_uuid
|
||||
|
||||
file.pop('sha512sum', None)
|
||||
file.pop('owner', None)
|
||||
|
||||
return_files.append(file)
|
||||
return {'message': return_files}, 200
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class CreateHost(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.CreateHostSchema(data)
|
||||
if validator.is_valid():
|
||||
host_key = join_path(
|
||||
shared.settings['etcd']['host_prefix'], uuid4().hex
|
||||
)
|
||||
host_entry = {
|
||||
'specs': data['specs'],
|
||||
'hostname': data['hostname'],
|
||||
'status': 'DEAD',
|
||||
'last_heartbeat': '',
|
||||
}
|
||||
shared.etcd_client.put(
|
||||
host_key, host_entry, value_in_json=True
|
||||
)
|
||||
|
||||
return {'message': 'Host Created'}, 200
|
||||
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListHost(Resource):
|
||||
@staticmethod
|
||||
def get():
|
||||
hosts = shared.host_pool.hosts
|
||||
r = {
|
||||
host.key: {
|
||||
'status': host.status,
|
||||
'specs': host.specs,
|
||||
'hostname': host.hostname,
|
||||
}
|
||||
for host in hosts
|
||||
}
|
||||
return r, 200
|
||||
|
||||
|
||||
class GetSSHKeys(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.GetSSHSchema(data)
|
||||
if validator.is_valid():
|
||||
if not validator.key_name.value:
|
||||
|
||||
# {user_prefix}/{realm}/{name}/key/
|
||||
etcd_key = join_path(
|
||||
shared.settings['etcd']['user_prefix'],
|
||||
data['realm'],
|
||||
data['name'],
|
||||
'key',
|
||||
)
|
||||
etcd_entry = shared.etcd_client.get_prefix(
|
||||
etcd_key, value_in_json=True
|
||||
)
|
||||
|
||||
keys = {
|
||||
key.key.split('/')[-1]: key.value
|
||||
for key in etcd_entry
|
||||
}
|
||||
return {'keys': keys}
|
||||
else:
|
||||
|
||||
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||
etcd_key = join_path(
|
||||
shared.settings['etcd']['user_prefix'],
|
||||
data['realm'],
|
||||
data['name'],
|
||||
'key',
|
||||
data['key_name'],
|
||||
)
|
||||
etcd_entry = shared.etcd_client.get(
|
||||
etcd_key, value_in_json=True
|
||||
)
|
||||
|
||||
if etcd_entry:
|
||||
return {
|
||||
'keys': {
|
||||
etcd_entry.key.split('/')[
|
||||
-1
|
||||
]: etcd_entry.value
|
||||
}
|
||||
}
|
||||
else:
|
||||
return {'keys': {}}
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class AddSSHKey(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.AddSSHSchema(data)
|
||||
if validator.is_valid():
|
||||
|
||||
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||
etcd_key = join_path(
|
||||
shared.settings['etcd']['user_prefix'],
|
||||
data['realm'],
|
||||
data['name'],
|
||||
'key',
|
||||
data['key_name'],
|
||||
)
|
||||
etcd_entry = shared.etcd_client.get(
|
||||
etcd_key, value_in_json=True
|
||||
)
|
||||
if etcd_entry:
|
||||
return {
|
||||
'message': 'Key with name "{}" already exists'.format(
|
||||
data['key_name']
|
||||
)
|
||||
}
|
||||
else:
|
||||
# Key not found. It implies the user hasn't added any key yet.
|
||||
shared.etcd_client.put(
|
||||
etcd_key, data['key'], value_in_json=True
|
||||
)
|
||||
return {'message': 'Key added successfully'}
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class RemoveSSHKey(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.RemoveSSHSchema(data)
|
||||
if validator.is_valid():
|
||||
|
||||
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||
etcd_key = join_path(
|
||||
shared.settings['etcd']['user_prefix'],
|
||||
data['realm'],
|
||||
data['name'],
|
||||
'key',
|
||||
data['key_name'],
|
||||
)
|
||||
etcd_entry = shared.etcd_client.get(
|
||||
etcd_key, value_in_json=True
|
||||
)
|
||||
if etcd_entry:
|
||||
shared.etcd_client.client.delete(etcd_key)
|
||||
return {'message': 'Key successfully removed.'}
|
||||
else:
|
||||
return {
|
||||
'message': 'No key with name "{}" exists.'.format(
|
||||
data['key_name']
|
||||
)
|
||||
}
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class CreateNetwork(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.CreateNetwork(data)
|
||||
|
||||
if validator.is_valid():
|
||||
|
||||
network_entry = {
|
||||
'id': counters.increment_etcd_counter(
|
||||
shared.etcd_client, shared.settings['etcd']['vxlan_counter']
|
||||
),
|
||||
'type': data['type'],
|
||||
}
|
||||
if validator.user.value:
|
||||
try:
|
||||
nb = pynetbox.api(
|
||||
url=shared.settings['netbox']['url'],
|
||||
token=shared.settings['netbox']['token'],
|
||||
)
|
||||
nb_prefix = nb.ipam.prefixes.get(
|
||||
prefix=shared.settings['network']['prefix']
|
||||
)
|
||||
prefix = nb_prefix.available_prefixes.create(
|
||||
data={
|
||||
'prefix_length': int(
|
||||
shared.settings['network']['prefix_length']
|
||||
),
|
||||
'description': '{}\'s network "{}"'.format(
|
||||
data['name'], data['network_name']
|
||||
),
|
||||
'is_pool': True,
|
||||
}
|
||||
)
|
||||
except Exception as err:
|
||||
app.logger.error(err)
|
||||
return {
|
||||
'message': 'Error occurred while creating network.'
|
||||
}
|
||||
else:
|
||||
network_entry['ipv6'] = prefix['prefix']
|
||||
else:
|
||||
network_entry['ipv6'] = 'fd00::/64'
|
||||
|
||||
network_key = join_path(
|
||||
shared.settings['etcd']['network_prefix'],
|
||||
data['name'],
|
||||
data['network_name'],
|
||||
)
|
||||
shared.etcd_client.put(
|
||||
network_key, network_entry, value_in_json=True
|
||||
)
|
||||
return {'message': 'Network successfully added.'}
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListUserNetwork(Resource):
|
||||
@staticmethod
|
||||
def post():
|
||||
data = request.json
|
||||
validator = schemas.OTPSchema(data)
|
||||
|
||||
if validator.is_valid():
|
||||
prefix = join_path(
|
||||
shared.settings['etcd']['network_prefix'], data['name']
|
||||
)
|
||||
networks = shared.etcd_client.get_prefix(
|
||||
prefix, value_in_json=True
|
||||
)
|
||||
user_networks = []
|
||||
for net in networks:
|
||||
net.value['name'] = net.key.split('/')[-1]
|
||||
user_networks.append(net.value)
|
||||
return {'networks': user_networks}, 200
|
||||
else:
|
||||
return validator.get_errors(), 400
|
||||
|
||||
|
||||
api.add_resource(CreateVM, '/vm/create')
|
||||
api.add_resource(VmStatus, '/vm/status')
|
||||
|
||||
api.add_resource(VMAction, '/vm/action')
|
||||
api.add_resource(VMMigration, '/vm/migrate')
|
||||
|
||||
api.add_resource(CreateImage, '/image/create')
|
||||
api.add_resource(ListPublicImages, '/image/list-public')
|
||||
|
||||
api.add_resource(ListUserVM, '/user/vms')
|
||||
api.add_resource(ListUserFiles, '/user/files')
|
||||
api.add_resource(ListUserNetwork, '/user/networks')
|
||||
|
||||
api.add_resource(AddSSHKey, '/user/add-ssh')
|
||||
api.add_resource(RemoveSSHKey, '/user/remove-ssh')
|
||||
api.add_resource(GetSSHKeys, '/user/get-ssh')
|
||||
|
||||
api.add_resource(CreateHost, '/host/create')
|
||||
api.add_resource(ListHost, '/host/list')
|
||||
|
||||
api.add_resource(CreateNetwork, '/network/create')
|
||||
|
||||
|
||||
def main(arguments):
|
||||
debug = arguments['debug']
|
||||
port = arguments['port']
|
||||
|
||||
try:
|
||||
image_stores = list(
|
||||
shared.etcd_client.get_prefix(
|
||||
shared.settings['etcd']['image_store_prefix'], value_in_json=True
|
||||
)
|
||||
)
|
||||
except KeyError:
|
||||
image_stores = False
|
||||
|
||||
# Do not inject default values that might be very wrong
|
||||
# fail when required, not before
|
||||
#
|
||||
# if not image_stores:
|
||||
# data = {
|
||||
# 'is_public': True,
|
||||
# 'type': 'ceph',
|
||||
# 'name': 'images',
|
||||
# 'description': 'first ever public image-store',
|
||||
# 'attributes': {'list': [], 'key': [], 'pool': 'images'},
|
||||
# }
|
||||
|
||||
# shared.etcd_client.put(
|
||||
# join_path(
|
||||
# shared.settings['etcd']['image_store_prefix'], uuid4().hex
|
||||
# ),
|
||||
# json.dumps(data),
|
||||
# )
|
||||
|
||||
try:
|
||||
app.run(host='::', port=port, debug=debug)
|
||||
except OSError as e:
|
||||
raise UncloudException('Failed to start Flask: {}'.format(e))
|
|
@ -1,557 +0,0 @@
|
|||
"""
|
||||
This module contains classes that validate and intercept/modify
|
||||
data coming from uncloud-cli (the user).
|
||||
|
||||
It was primarily developed as an alternative to the argument parser
|
||||
of Flask-RESTful, which is going to be deprecated. I also tried
|
||||
marshmallow for this purpose, but it was overkill (it does
|
||||
validation + serialization + deserialization) and a little
|
||||
inflexible for our purpose.
|
||||
"""
|
||||
|
||||
# TODO: Fix the error message when the VM the user mentioned (referred to by name)
|
||||
# does not exist.
|
||||
#
|
||||
# Currently, it says uuid is a required field.
|
||||
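# Typical usage, mirroring uncloud/api/main.py:
#
#     validator = schemas.CreateVMSchema(request.json)
#     if validator.is_valid():
#         ...  # cleaned values are exposed as attributes, e.g. validator.specs
#     else:
#         return validator.get_errors(), 400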
|
||||
import json
|
||||
import os
|
||||
|
||||
import bitmath
|
||||
|
||||
from uncloud.common.host import HostStatus
|
||||
from uncloud.common.vm import VMStatus
|
||||
from uncloud.common.shared import shared
|
||||
from . import helper, logger
|
||||
from .common_fields import Field, VmUUIDField
|
||||
from .helper import check_otp, resolve_vm_name
|
||||
|
||||
|
||||
class BaseSchema:
|
||||
def __init__(self, data, fields=None):
|
||||
_ = data # suppress linter warning
|
||||
self.__errors = []
|
||||
if fields is None:
|
||||
self.fields = []
|
||||
else:
|
||||
self.fields = fields
|
||||
|
||||
def validation(self):
|
||||
# custom validation is optional
|
||||
return True
|
||||
|
||||
def is_valid(self):
|
||||
for field in self.fields:
|
||||
field.is_valid()
|
||||
self.add_field_errors(field)
|
||||
|
||||
for parent in self.__class__.__bases__:
|
||||
try:
|
||||
parent.validation(self)
|
||||
except AttributeError:
|
||||
pass
|
||||
if not self.__errors:
|
||||
self.validation()
|
||||
|
||||
if self.__errors:
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_errors(self):
|
||||
return {"message": self.__errors}
|
||||
|
||||
def add_field_errors(self, field: Field):
|
||||
self.__errors += field.get_errors()
|
||||
|
||||
def add_error(self, error):
|
||||
self.__errors.append(error)
|
||||
|
||||
|
||||
class OTPSchema(BaseSchema):
|
||||
def __init__(self, data: dict, fields=None):
|
||||
self.name = Field("name", str, data.get("name", KeyError))
|
||||
self.realm = Field("realm", str, data.get("realm", KeyError))
|
||||
self.token = Field("token", str, data.get("token", KeyError))
|
||||
|
||||
_fields = [self.name, self.realm, self.token]
|
||||
if fields:
|
||||
_fields += fields
|
||||
super().__init__(data=data, fields=_fields)
|
||||
|
||||
def validation(self):
|
||||
if (
|
||||
check_otp(
|
||||
self.name.value, self.realm.value, self.token.value
|
||||
)
|
||||
!= 200
|
||||
):
|
||||
self.add_error("Wrong Credentials")
|
||||
|
||||
|
||||
########################## Image Operations ###############################################
|
||||
|
||||
|
||||
class CreateImageSchema(BaseSchema):
|
||||
def __init__(self, data):
|
||||
# Fields
|
||||
self.uuid = Field("uuid", str, data.get("uuid", KeyError))
|
||||
self.name = Field("name", str, data.get("name", KeyError))
|
||||
self.image_store = Field(
|
||||
"image_store", str, data.get("image_store", KeyError)
|
||||
)
|
||||
|
||||
# Validations
|
||||
self.uuid.validation = self.file_uuid_validation
|
||||
self.image_store.validation = self.image_store_name_validation
|
||||
|
||||
# All Fields
|
||||
fields = [self.uuid, self.name, self.image_store]
|
||||
super().__init__(data, fields)
|
||||
|
||||
def file_uuid_validation(self):
|
||||
file_entry = shared.etcd_client.get(
|
||||
os.path.join(
|
||||
shared.settings["etcd"]["file_prefix"], self.uuid.value
|
||||
)
|
||||
)
|
||||
if file_entry is None:
|
||||
self.add_error(
|
||||
"Image File with uuid '{}' Not Found".format(
|
||||
self.uuid.value
|
||||
)
|
||||
)
|
||||
|
||||
def image_store_name_validation(self):
|
||||
image_stores = list(
|
||||
shared.etcd_client.get_prefix(
|
||||
shared.settings["etcd"]["image_store_prefix"]
|
||||
)
|
||||
)
|
||||
|
||||
image_store = next(
|
||||
filter(
|
||||
lambda s: json.loads(s.value)["name"]
|
||||
== self.image_store.value,
|
||||
image_stores,
|
||||
),
|
||||
None,
|
||||
)
|
||||
if not image_store:
|
||||
self.add_error(
|
||||
"Store '{}' does not exists".format(
|
||||
self.image_store.value
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# Host Operations
|
||||
|
||||
|
||||
class CreateHostSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
# Fields
|
||||
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
||||
self.hostname = Field(
|
||||
"hostname", str, data.get("hostname", KeyError)
|
||||
)
|
||||
|
||||
# Validation
|
||||
self.specs.validation = self.specs_validation
|
||||
|
||||
fields = [self.hostname, self.specs]
|
||||
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
def specs_validation(self):
|
||||
ALLOWED_BASE = 10
|
||||
|
||||
_cpu = self.specs.value.get("cpu", KeyError)
|
||||
_ram = self.specs.value.get("ram", KeyError)
|
||||
_os_ssd = self.specs.value.get("os-ssd", KeyError)
|
||||
_hdd = self.specs.value.get("hdd", KeyError)
|
||||
|
||||
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
||||
self.add_error(
|
||||
"You must specify CPU, RAM and OS-SSD in your specs"
|
||||
)
|
||||
return None
|
||||
try:
|
||||
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
||||
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
||||
|
||||
if parsed_ram.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified RAM is not in correct units"
|
||||
)
|
||||
if parsed_os_ssd.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified OS-SSD is not in correct units"
|
||||
)
|
||||
|
||||
if _cpu < 1:
|
||||
self.add_error("CPU must be atleast 1")
|
||||
|
||||
if parsed_ram < bitmath.GB(1):
|
||||
self.add_error("RAM must be atleast 1 GB")
|
||||
|
||||
if parsed_os_ssd < bitmath.GB(10):
|
||||
self.add_error("OS-SSD must be atleast 10 GB")
|
||||
|
||||
parsed_hdd = []
|
||||
for hdd in _hdd:
|
||||
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
||||
if _parsed_hdd.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified HDD is not in correct units"
|
||||
)
|
||||
break
|
||||
else:
|
||||
parsed_hdd.append(str(_parsed_hdd))
|
||||
|
||||
except ValueError:
|
||||
# TODO: Find some good error message
|
||||
self.add_error("Specs are not correct.")
|
||||
else:
|
||||
if self.get_errors():
|
||||
self.specs = {
|
||||
"cpu": _cpu,
|
||||
"ram": str(parsed_ram),
|
||||
"os-ssd": str(parsed_os_ssd),
|
||||
"hdd": parsed_hdd,
|
||||
}
|
||||
|
||||
def validation(self):
|
||||
if self.realm.value != "ungleich-admin":
|
||||
self.add_error(
|
||||
"Invalid Credentials/Insufficient Permission"
|
||||
)
|
||||
|
||||
|
||||
# VM Operations
|
||||
|
||||
|
||||
class CreateVMSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
# Fields
|
||||
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
||||
self.vm_name = Field(
|
||||
"vm_name", str, data.get("vm_name", KeyError)
|
||||
)
|
||||
self.image = Field("image", str, data.get("image", KeyError))
|
||||
self.network = Field(
|
||||
"network", list, data.get("network", KeyError)
|
||||
)
|
||||
|
||||
# Validation
|
||||
self.image.validation = self.image_validation
|
||||
self.vm_name.validation = self.vm_name_validation
|
||||
self.specs.validation = self.specs_validation
|
||||
self.network.validation = self.network_validation
|
||||
|
||||
fields = [self.vm_name, self.image, self.specs, self.network]
|
||||
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
def image_validation(self):
|
||||
try:
|
||||
image_uuid = helper.resolve_image_name(
|
||||
self.image.value, shared.etcd_client
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
"Cannot resolve image name = %s", self.image.value
|
||||
)
|
||||
self.add_error(str(e))
|
||||
else:
|
||||
self.image_uuid = image_uuid
|
||||
|
||||
def vm_name_validation(self):
|
||||
if resolve_vm_name(
|
||||
name=self.vm_name.value, owner=self.name.value
|
||||
):
|
||||
self.add_error(
|
||||
'VM with same name "{}" already exists'.format(
|
||||
self.vm_name.value
|
||||
)
|
||||
)
|
||||
|
||||
def network_validation(self):
|
||||
_network = self.network.value
|
||||
|
||||
if _network:
|
||||
for net in _network:
|
||||
network = shared.etcd_client.get(
|
||||
os.path.join(
|
||||
shared.settings["etcd"]["network_prefix"],
|
||||
self.name.value,
|
||||
net,
|
||||
),
|
||||
value_in_json=True,
|
||||
)
|
||||
if not network:
|
||||
self.add_error(
|
||||
"Network with name {} does not exists".format(
|
||||
net
|
||||
)
|
||||
)
|
||||
|
||||
def specs_validation(self):
|
||||
ALLOWED_BASE = 10
|
||||
|
||||
_cpu = self.specs.value.get("cpu", KeyError)
|
||||
_ram = self.specs.value.get("ram", KeyError)
|
||||
_os_ssd = self.specs.value.get("os-ssd", KeyError)
|
||||
_hdd = self.specs.value.get("hdd", KeyError)
|
||||
|
||||
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
||||
self.add_error(
|
||||
"You must specify CPU, RAM and OS-SSD in your specs"
|
||||
)
|
||||
return None
|
||||
try:
|
||||
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
||||
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
||||
|
||||
if parsed_ram.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified RAM is not in correct units"
|
||||
)
|
||||
if parsed_os_ssd.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified OS-SSD is not in correct units"
|
||||
)
|
||||
|
||||
if int(_cpu) < 1:
|
||||
self.add_error("CPU must be atleast 1")
|
||||
|
||||
if parsed_ram < bitmath.GB(1):
|
||||
self.add_error("RAM must be atleast 1 GB")
|
||||
|
||||
if parsed_os_ssd < bitmath.GB(1):
|
||||
self.add_error("OS-SSD must be atleast 1 GB")
|
||||
|
||||
parsed_hdd = []
|
||||
for hdd in _hdd:
|
||||
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
||||
if _parsed_hdd.base != ALLOWED_BASE:
|
||||
self.add_error(
|
||||
"Your specified HDD is not in correct units"
|
||||
)
|
||||
break
|
||||
else:
|
||||
parsed_hdd.append(str(_parsed_hdd))
|
||||
|
||||
except ValueError:
|
||||
# TODO: Find some good error message
|
||||
self.add_error("Specs are not correct.")
|
||||
else:
|
||||
if self.get_errors():
|
||||
self.specs = {
|
||||
"cpu": _cpu,
|
||||
"ram": str(parsed_ram),
|
||||
"os-ssd": str(parsed_os_ssd),
|
||||
"hdd": parsed_hdd,
|
||||
}
|
||||
|
||||
|
||||
class VMStatusSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
data["uuid"] = (
|
||||
resolve_vm_name(
|
||||
name=data.get("vm_name", None),
|
||||
owner=(
|
||||
data.get("in_support_of", None)
|
||||
or data.get("name", None)
|
||||
),
|
||||
)
|
||||
or KeyError
|
||||
)
|
||||
self.uuid = VmUUIDField(data)
|
||||
|
||||
fields = [self.uuid]
|
||||
|
||||
super().__init__(data, fields)
|
||||
|
||||
def validation(self):
|
||||
vm = shared.vm_pool.get(self.uuid.value)
|
||||
if not (
|
||||
vm.value["owner"] == self.name.value
|
||||
or self.realm.value == "ungleich-admin"
|
||||
):
|
||||
self.add_error("Invalid User")
|
||||
|
||||
|
||||
class VmActionSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
data["uuid"] = (
|
||||
resolve_vm_name(
|
||||
name=data.get("vm_name", None),
|
||||
owner=(
|
||||
data.get("in_support_of", None)
|
||||
or data.get("name", None)
|
||||
),
|
||||
)
|
||||
or KeyError
|
||||
)
|
||||
self.uuid = VmUUIDField(data)
|
||||
self.action = Field("action", str, data.get("action", KeyError))
|
||||
|
||||
self.action.validation = self.action_validation
|
||||
|
||||
_fields = [self.uuid, self.action]
|
||||
|
||||
super().__init__(data=data, fields=_fields)
|
||||
|
||||
def action_validation(self):
|
||||
allowed_actions = ["start", "stop", "delete"]
|
||||
if self.action.value not in allowed_actions:
|
||||
self.add_error(
|
||||
"Invalid Action. Allowed Actions are {}".format(
|
||||
allowed_actions
|
||||
)
|
||||
)
|
||||
|
||||
def validation(self):
|
||||
vm = shared.vm_pool.get(self.uuid.value)
|
||||
if not (
|
||||
vm.value["owner"] == self.name.value
|
||||
or self.realm.value == "ungleich-admin"
|
||||
):
|
||||
self.add_error("Invalid User")
|
||||
|
||||
if (
|
||||
self.action.value == "start"
|
||||
and vm.status == VMStatus.running
|
||||
and vm.hostname != ""
|
||||
):
|
||||
self.add_error("VM Already Running")
|
||||
|
||||
if self.action.value == "stop":
|
||||
if vm.status == VMStatus.stopped:
|
||||
self.add_error("VM Already Stopped")
|
||||
elif vm.status != VMStatus.running:
|
||||
self.add_error("Cannot stop non-running VM")
|
||||
|
||||
|
||||
class VmMigrationSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
data["uuid"] = (
|
||||
resolve_vm_name(
|
||||
name=data.get("vm_name", None),
|
||||
owner=(
|
||||
data.get("in_support_of", None)
|
||||
or data.get("name", None)
|
||||
),
|
||||
)
|
||||
or KeyError
|
||||
)
|
||||
|
||||
self.uuid = VmUUIDField(data)
|
||||
self.destination = Field(
|
||||
"destination", str, data.get("destination", KeyError)
|
||||
)
|
||||
|
||||
self.destination.validation = self.destination_validation
|
||||
|
||||
fields = [self.destination]
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
def destination_validation(self):
|
||||
hostname = self.destination.value
|
||||
host = next(
|
||||
filter(
|
||||
lambda h: h.hostname == hostname, shared.host_pool.hosts
|
||||
),
|
||||
None,
|
||||
)
|
||||
if not host:
|
||||
self.add_error(
|
||||
"No Such Host ({}) exists".format(
|
||||
self.destination.value
|
||||
)
|
||||
)
|
||||
elif host.status != HostStatus.alive:
|
||||
self.add_error("Destination Host is dead")
|
||||
else:
|
||||
self.destination.value = host.key
|
||||
|
||||
def validation(self):
|
||||
vm = shared.vm_pool.get(self.uuid.value)
|
||||
if not (
|
||||
vm.value["owner"] == self.name.value
|
||||
or self.realm.value == "ungleich-admin"
|
||||
):
|
||||
self.add_error("Invalid User")
|
||||
|
||||
if vm.status != VMStatus.running:
|
||||
self.add_error("Can't migrate non-running VM")
|
||||
|
||||
if vm.hostname == os.path.join(
|
||||
shared.settings["etcd"]["host_prefix"], self.destination.value
|
||||
):
|
||||
self.add_error(
|
||||
"Destination host couldn't be same as Source Host"
|
||||
)
|
||||
|
||||
|
||||
class AddSSHSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
self.key_name = Field(
|
||||
"key_name", str, data.get("key_name", KeyError)
|
||||
)
|
||||
self.key = Field("key", str, data.get("key_name", KeyError))
|
||||
|
||||
fields = [self.key_name, self.key]
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
|
||||
class RemoveSSHSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
self.key_name = Field(
|
||||
"key_name", str, data.get("key_name", KeyError)
|
||||
)
|
||||
|
||||
fields = [self.key_name]
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
|
||||
class GetSSHSchema(OTPSchema):
|
||||
def __init__(self, data):
|
||||
self.key_name = Field(
|
||||
"key_name", str, data.get("key_name", None)
|
||||
)
|
||||
|
||||
fields = [self.key_name]
|
||||
super().__init__(data=data, fields=fields)
|
||||
|
||||
|
||||
class CreateNetwork(OTPSchema):
|
||||
def __init__(self, data):
|
||||
self.network_name = Field("network_name", str, data.get("network_name", KeyError))
|
||||
self.type = Field("type", str, data.get("type", KeyError))
|
||||
self.user = Field("user", bool, bool(data.get("user", False)))
|
||||
|
||||
self.network_name.validation = self.network_name_validation
|
||||
self.type.validation = self.network_type_validation
|
||||
|
||||
fields = [self.network_name, self.type, self.user]
|
||||
super().__init__(data, fields=fields)
|
||||
|
||||
def network_name_validation(self):
|
||||
key = os.path.join(shared.settings["etcd"]["network_prefix"], self.name.value, self.network_name.value)
|
||||
network = shared.etcd_client.get(key, value_in_json=True)
|
||||
if network:
|
||||
self.add_error(
|
||||
"Network with name {} already exists".format(
|
||||
self.network_name.value
|
||||
)
|
||||
)
|
||||
|
||||
def network_type_validation(self):
|
||||
supported_network_types = ["vxlan"]
|
||||
if self.type.value not in supported_network_types:
|
||||
self.add_error(
|
||||
"Unsupported Network Type. Supported network types are {}".format(
|
||||
supported_network_types
|
||||
)
|
||||
)
|
|
@ -1,23 +0,0 @@
|
|||
import argparse
|
||||
import etcd3
|
||||
from uncloud.common.etcd_wrapper import Etcd3Wrapper
|
||||
|
||||
arg_parser = argparse.ArgumentParser('client', add_help=False)
|
||||
arg_parser.add_argument('--dump-etcd-contents-prefix', help="Dump contents below the given prefix")
|
||||
|
||||
def dump_etcd_contents(prefix):
|
||||
etcd = Etcd3Wrapper()
|
||||
for k,v in etcd.get_prefix_raw(prefix):
|
||||
k = k.decode('utf-8')
|
||||
v = v.decode('utf-8')
|
||||
print("{} = {}".format(k,v))
|
||||
# print("{} = {}".format(k,v))
|
||||
|
||||
# for k,v in etcd.get_prefix(prefix):
|
||||
#
|
||||
print("done")
|
||||
|
||||
|
||||
def main(arguments):
|
||||
if 'dump_etcd_contents_prefix' in arguments:
|
||||
dump_etcd_contents(prefix=arguments['dump_etcd_contents_prefix'])
|
|
@ -1,26 +0,0 @@
|
|||
from uncloud.common.shared import shared
|
||||
from pyotp import TOTP
|
||||
|
||||
|
||||
def get_token(seed):
|
||||
if seed is not None:
|
||||
try:
|
||||
token = TOTP(seed).now()
|
||||
except Exception:
|
||||
raise Exception('Invalid seed')
|
||||
else:
|
||||
return token
|
||||
|
||||
|
||||
def resolve_otp_credentials(kwargs):
|
||||
d = {
|
||||
'name': shared.settings['client']['name'],
|
||||
'realm': shared.settings['client']['realm'],
|
||||
'token': get_token(shared.settings['client']['seed'])
|
||||
}
|
||||
|
||||
for k, v in d.items():
|
||||
if k in kwargs and kwargs[k] is None:
|
||||
kwargs.update({k: v})
|
||||
|
||||
return d
|
|
@ -1,21 +0,0 @@
|
|||
from .etcd_wrapper import Etcd3Wrapper
|
||||
|
||||
|
||||
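# Example, as called from uncloud/api/main.py to allocate tap interface ids:
#     counters.increment_etcd_counter(shared.etcd_client,
#                                     shared.settings['etcd']['tap_counter'])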
def increment_etcd_counter(etcd_client: Etcd3Wrapper, key):
|
||||
kv = etcd_client.get(key)
|
||||
|
||||
if kv:
|
||||
counter = int(kv.value)
|
||||
counter = counter + 1
|
||||
else:
|
||||
counter = 1
|
||||
|
||||
etcd_client.put(key, str(counter))
|
||||
return counter
|
||||
|
||||
|
||||
def get_etcd_counter(etcd_client: Etcd3Wrapper, key):
|
||||
kv = etcd_client.get(key)
|
||||
if kv:
|
||||
return int(kv.value)
|
||||
return None
|
|
@ -1,34 +0,0 @@
|
|||
from uncloud.common.settings import get_settings
|
||||
from uncloud.common.vm import VmPool
|
||||
from uncloud.common.host import HostPool
|
||||
from uncloud.common.request import RequestPool
|
||||
import uncloud.common.storage_handlers as storage_handlers
|
||||
|
||||
|
||||
class Shared:
|
||||
@property
|
||||
def settings(self):
|
||||
return get_settings()
|
||||
|
||||
@property
|
||||
def etcd_client(self):
|
||||
return self.settings.get_etcd_client()
|
||||
|
||||
@property
|
||||
def host_pool(self):
|
||||
return HostPool(self.etcd_client, self.settings["etcd"]["host_prefix"])
|
||||
|
||||
@property
|
||||
def vm_pool(self):
|
||||
return VmPool(self.etcd_client, self.settings["etcd"]["vm_prefix"])
|
||||
|
||||
@property
|
||||
def request_pool(self):
|
||||
return RequestPool(self.etcd_client, self.settings["etcd"]["request_prefix"])
|
||||
|
||||
@property
|
||||
def storage_handler(self):
|
||||
return storage_handlers.get_storage_handler()
|
||||
|
||||
|
||||
shared = Shared()
|
|
@ -1 +0,0 @@
|
|||
|
|
@ -1,39 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
||||
#
|
||||
# This file is part of uncloud.
|
||||
#
|
||||
# uncloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# uncloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
#
|
||||
|
||||
class Config(object):
|
||||
def __init__(self, arguments):
|
||||
""" read arguments dicts as a base """
|
||||
|
||||
self.arguments = arguments
|
||||
|
||||
# Split them so *etcd_args can be used and we can
|
||||
# iterate over etcd_hosts
|
||||
self.etcd_hosts = [ arguments['etcd_host'] ]
|
||||
self.etcd_args = {
|
||||
'ca_cert': arguments['etcd_ca_cert'],
|
||||
'cert_cert': arguments['etcd_cert_cert'],
|
||||
'cert_key': arguments['etcd_cert_key'],
|
||||
# 'user': None,
|
||||
# 'password': None
|
||||
}
|
||||
self.etcd_prefix = '/nicohack/'
|
|
@ -1,149 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
||||
#
|
||||
# This file is part of uncloud.
|
||||
#
|
||||
# uncloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# uncloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
#
|
||||
|
||||
import etcd3
|
||||
import json
|
||||
import logging
|
||||
import datetime
|
||||
import re
|
||||
|
||||
from functools import wraps
|
||||
from uncloud import UncloudException
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def db_logentry(message):
|
||||
timestamp = datetime.datetime.now()
|
||||
return {
|
||||
"timestamp": str(timestamp),
|
||||
"message": message
|
||||
}
|
||||
|
||||
|
||||
def readable_errors(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except etcd3.exceptions.ConnectionFailedError as e:
|
||||
raise UncloudException('Cannot connect to etcd: is etcd running and reachable? {}'.format(e))
|
||||
except etcd3.exceptions.ConnectionTimeoutError as e:
|
||||
raise UncloudException('etcd connection timeout. {}'.format(e))
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class DB(object):
|
||||
def __init__(self, config, prefix="/"):
|
||||
self.config = config
|
||||
|
||||
# Root for everything
|
||||
self.base_prefix= '/nicohack'
|
||||
|
||||
# Can be set from outside
|
||||
self.prefix = prefix
|
||||
|
||||
try:
|
||||
self.connect()
|
||||
except FileNotFoundError as e:
|
||||
raise UncloudException("Is the path to the etcd certs correct? {}".format(e))
|
||||
|
||||
@readable_errors
|
||||
def connect(self):
|
||||
self._db_clients = []
|
||||
for endpoint in self.config.etcd_hosts:
|
||||
client = etcd3.client(host=endpoint, **self.config.etcd_args)
|
||||
self._db_clients.append(client)
|
||||
|
||||
def realkey(self, key):
|
||||
return "{}{}/{}".format(self.base_prefix,
|
||||
self.prefix,
|
||||
key)
|
||||
|
||||
@readable_errors
|
||||
def get(self, key, as_json=False, **kwargs):
|
||||
value, _ = self._db_clients[0].get(self.realkey(key), **kwargs)
|
||||
|
||||
if as_json:
|
||||
value = json.loads(value)
|
||||
|
||||
return value
|
||||
|
||||
@readable_errors
|
||||
def get_prefix(self, key, as_json=False, **kwargs):
|
||||
for value, meta in self._db_clients[0].get_prefix(self.realkey(key), **kwargs):
|
||||
k = meta.key.decode("utf-8")
|
||||
value = value.decode("utf-8")
|
||||
if as_json:
|
||||
value = json.loads(value)
|
||||
|
||||
yield (k, value)
|
||||
|
||||
|
||||
@readable_errors
|
||||
def set(self, key, value, as_json=False, **kwargs):
|
||||
if as_json:
|
||||
value = json.dumps(value)
|
||||
|
||||
log.debug("Setting {} = {}".format(self.realkey(key), value))
|
||||
# FIXME: iterate over clients in case of failure ?
|
||||
return self._db_clients[0].put(self.realkey(key), value, **kwargs)
|
||||
|
||||
|
||||
@readable_errors
|
||||
def list_and_filter(self, key, filter_key=None, filter_regexp=None):
|
||||
for k,v in self.get_prefix(key, as_json=True):
|
||||
|
||||
if filter_key and filter_regexp:
|
||||
if filter_key in v:
|
||||
if re.match(filter_regexp, v[filter_key]):
|
||||
yield v
|
||||
else:
|
||||
yield v
|
||||
|
||||
|
||||
@readable_errors
|
||||
def increment(self, key, **kwargs):
|
||||
print(self.realkey(key))
|
||||
|
||||
|
||||
print("prelock")
|
||||
lock = self._db_clients[0].lock('/nicohack/foo')
|
||||
print("prelockacq")
|
||||
lock.acquire()
|
||||
print("prelockrelease")
|
||||
lock.release()
|
||||
|
||||
with self._db_clients[0].lock("/nicohack/mac/last_used_index") as lock:
|
||||
print("in lock")
|
||||
pass
|
||||
|
||||
# with self._db_clients[0].lock(self.realkey(key)) as lock:# value = int(self.get(self.realkey(key), **kwargs))
|
||||
# self.set(self.realkey(key), str(value + 1), **kwargs)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
endpoints = [ "https://etcd1.ungleich.ch:2379",
|
||||
"https://etcd2.ungleich.ch:2379",
|
||||
"https://etcd3.ungleich.ch:2379" ]
|
||||
|
||||
# DB() takes a Config object (see uncloud.hack.config), not a url keyword
from uncloud.hack.config import Config
db = DB(Config({'etcd_host': endpoints[0],
'etcd_ca_cert': None,
'etcd_cert_cert': None,
'etcd_cert_key': None}))
|
|
@ -1,3 +0,0 @@
|
|||
*.iso
|
||||
radvdpid
|
||||
foo
|
|
@ -1,6 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
etcdctl --cert=$HOME/vcs/ungleich-dot-cdist/files/etcd/nico.pem \
|
||||
--key=/home/nico/vcs/ungleich-dot-cdist/files/etcd/nico-key.pem \
|
||||
--cacert=$HOME/vcs/ungleich-dot-cdist/files/etcd/ca.pem \
|
||||
--endpoints https://etcd1.ungleich.ch:2379,https://etcd2.ungleich.ch:2379,https://etcd3.ungleich.ch:2379 "$@"
|
|
@ -1,3 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
echo $@
|
|
@ -1,7 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
dev=$1; shift
|
||||
|
||||
# bridge is setup from outside
|
||||
ip link set dev "$dev" master ${bridge}
|
||||
ip link set dev "$dev" up
|
|
@ -1 +0,0 @@
|
|||
000000000252
|
|
@ -1 +0,0 @@
|
|||
02:00
|
|
@ -1,29 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
set -x
|
||||
|
||||
netid=100
|
||||
dev=wlp2s0
|
||||
dev=wlp0s20f3
|
||||
#dev=wlan0
|
||||
|
||||
ip=2a0a:e5c1:111:888::48/64
|
||||
vxlandev=vxlan${netid}
|
||||
bridgedev=br${netid}
|
||||
|
||||
ip -6 link add ${vxlandev} type vxlan \
|
||||
id ${netid} \
|
||||
dstport 4789 \
|
||||
group ff05::${netid} \
|
||||
dev ${dev} \
|
||||
ttl 5
|
||||
|
||||
ip link set ${vxlandev} up
|
||||
|
||||
|
||||
ip link add ${bridgedev} type bridge
|
||||
ip link set ${bridgedev} up
|
||||
|
||||
ip link set ${vxlandev} master ${bridgedev} up
|
||||
|
||||
ip addr add ${ip} dev ${bridgedev}
|
|
@ -1,31 +0,0 @@
|
|||
flush ruleset
|
||||
|
||||
table bridge filter {
|
||||
chain prerouting {
|
||||
type filter hook prerouting priority 0;
|
||||
policy accept;
|
||||
|
||||
ibrname br100 jump br100
|
||||
}
|
||||
|
||||
chain br100 {
|
||||
# Allow all incoming traffic from outside
|
||||
iifname vxlan100 accept
|
||||
|
||||
# Default blocks: router advertisements, dhcpv6, dhcpv4
|
||||
icmpv6 type nd-router-advert drop
|
||||
ip6 version 6 udp sport 547 drop
|
||||
ip version 4 udp sport 67 drop
|
||||
|
||||
jump br100_vmlist
|
||||
drop
|
||||
}
|
||||
chain br100_vmlist {
|
||||
# VM1
|
||||
iifname tap1 ether saddr 02:00:f0:a9:c4:4e ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44e accept
|
||||
|
||||
# VM2
|
||||
iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44f accept
|
||||
iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:1234::/64 accept
|
||||
}
|
||||
}
|
|
@ -1,64 +0,0 @@
|
|||
flush ruleset
|
||||
|
||||
table bridge filter {
|
||||
chain prerouting {
|
||||
type filter hook prerouting priority 0;
|
||||
policy accept;
|
||||
|
||||
ibrname br100 jump netpublic
|
||||
}
|
||||
|
||||
chain netpublic {
|
||||
iifname vxlan100 jump from_uncloud
|
||||
|
||||
# Default blocks: router advertisements, dhcpv6, dhcpv4
|
||||
icmpv6 type nd-router-advert drop
|
||||
ip6 version 6 udp sport 547 drop
|
||||
ip version 4 udp sport 67 drop
|
||||
|
||||
# Individual blocks
|
||||
# iifname tap1 jump vm1
|
||||
}
|
||||
|
||||
chain vm1 {
|
||||
ether saddr != 02:00:f0:a9:c4:4e drop
|
||||
ip6 saddr != 2a0a:e5c1:111:888:0:f0ff:fea9:c44e drop
|
||||
}
|
||||
|
||||
chain from_uncloud {
|
||||
accept
|
||||
}
|
||||
}
|
||||
|
||||
# table ip6 filter {
|
||||
# chain forward {
|
||||
# type filter hook forward priority 0;
|
||||
|
||||
# # policy drop;
|
||||
|
||||
# ct state established,related accept;
|
||||
|
||||
# }
|
||||
|
||||
# }
|
||||
|
||||
# table ip filter {
|
||||
# chain input {
|
||||
# type filter hook input priority filter; policy drop;
|
||||
# iif "lo" accept
|
||||
# icmp type { echo-reply, destination-unreachable, source-quench, redirect, echo-request, router-advertisement, router-solicitation, time-exceeded, parameter-problem, timestamp-request, timestamp-reply, info-request, info-reply, address-mask-request, address-mask-reply } accept
|
||||
# ct state established,related accept
|
||||
# tcp dport { 22 } accept
|
||||
# log prefix "firewall-ipv4: "
|
||||
# udp sport 67 drop
|
||||
# }
|
||||
|
||||
# chain forward {
|
||||
# type filter hook forward priority filter; policy drop;
|
||||
# log prefix "firewall-ipv4: "
|
||||
# }
|
||||
|
||||
# chain output {
|
||||
# type filter hook output priority filter; policy accept;
|
||||
# }
|
||||
# }
|
|
@ -1,13 +0,0 @@
|
|||
interface br100
|
||||
{
|
||||
AdvSendAdvert on;
|
||||
MinRtrAdvInterval 3;
|
||||
MaxRtrAdvInterval 5;
|
||||
AdvDefaultLifetime 3600;
|
||||
|
||||
prefix 2a0a:e5c1:111:888::/64 {
|
||||
};
|
||||
|
||||
RDNSS 2a0a:e5c0::3 2a0a:e5c0::4 { AdvRDNSSLifetime 6000; };
|
||||
DNSSL place7.ungleich.ch { AdvDNSSLLifetime 6000; } ;
|
||||
};
|
|
@ -1,3 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
radvd -C ./radvd.conf -n -p ./radvdpid
|
|
@ -1,24 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
vmid=$1; shift
|
||||
|
||||
qemu=/usr/bin/qemu-system-x86_64
|
||||
|
||||
accel=kvm
|
||||
#accel=tcg
|
||||
|
||||
memory=1024
|
||||
cores=2
|
||||
uuid=732e08c7-84f8-4d43-9571-263db4f80080
|
||||
|
||||
export bridge=br100
|
||||
|
||||
$qemu -name uc${vmid} \
|
||||
-machine pc,accel=${accel} \
|
||||
-m ${memory} \
|
||||
-smp ${cores} \
|
||||
-uuid ${uuid} \
|
||||
-drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
|
||||
-netdev tap,id=netmain,script=./ifup.sh \
|
||||
-device virtio-net-pci,netdev=netmain,id=net0,mac=02:00:f0:a9:c4:4e
|
|
@ -1,29 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# if [ $# -ne 1 ]; then
|
||||
# echo "$0: owner"
|
||||
# exit 1
|
||||
# fi
|
||||
|
||||
qemu=/usr/bin/qemu-system-x86_64
|
||||
|
||||
accel=kvm
|
||||
#accel=tcg
|
||||
|
||||
memory=1024
|
||||
cores=2
|
||||
uuid=$(uuidgen)
|
||||
mac=$(./mac-gen.py)
|
||||
owner=nico
|
||||
|
||||
export bridge=br100
|
||||
|
||||
set -x
|
||||
$qemu -name "uncloud-${uuid}" \
|
||||
-machine pc,accel=${accel} \
|
||||
-m ${memory} \
|
||||
-smp ${cores} \
|
||||
-uuid ${uuid} \
|
||||
-drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
|
||||
-netdev tap,id=netmain,script=./ifup.sh,downscript=./ifdown.sh \
|
||||
-device virtio-net-pci,netdev=netmain,id=net0,mac=${mac}
|
|
@ -1,75 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
||||
#
|
||||
# This file is part of uncloud.
|
||||
#
|
||||
# uncloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# uncloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import uuid
|
||||
|
||||
from uncloud.hack.db import DB
|
||||
from uncloud import UncloudException
|
||||
|
||||
class Host(object):
|
||||
def __init__(self, config, db_entry=None):
|
||||
self.config = config
|
||||
self.db = DB(self.config, prefix="/hosts")
|
||||
|
||||
if db_entry:
|
||||
self.db_entry = db_entry
|
||||
|
||||
|
||||
def list_hosts(self, filter_key=None, filter_regexp=None):
|
||||
""" Return list of all hosts """
|
||||
for entry in self.db.list_and_filter("", filter_key, filter_regexp):
|
||||
yield self.__class__(self.config, db_entry=entry)
|
||||
|
||||
def cmdline_add_host(self):
|
||||
""" FIXME: make this a bit smarter and less redundant """
|
||||
|
||||
for required_arg in [
|
||||
'add_vm_host',
|
||||
'max_cores_per_vm',
|
||||
'max_cores_total',
|
||||
'max_memory_in_gb' ]:
|
||||
if not required_arg in self.config.arguments:
|
||||
raise UncloudException("Missing argument: {}".format(required_arg))
|
||||
|
||||
return self.add_host(
|
||||
self.config.arguments['add_vm_host'],
|
||||
self.config.arguments['max_cores_per_vm'],
|
||||
self.config.arguments['max_cores_total'],
|
||||
self.config.arguments['max_memory_in_gb'])
|
||||
|
||||
|
||||
def add_host(self,
|
||||
hostname,
|
||||
max_cores_per_vm,
|
||||
max_cores_total,
|
||||
max_memory_in_gb):
|
||||
|
||||
db_entry = {}
|
||||
db_entry['uuid'] = str(uuid.uuid4())
|
||||
db_entry['hostname'] = hostname
|
||||
db_entry['max_cores_per_vm'] = max_cores_per_vm
|
||||
db_entry['max_cores_total'] = max_cores_total
|
||||
db_entry['max_memory_in_gb'] = max_memory_in_gb
|
||||
db_entry["db_version"] = 1
|
||||
db_entry["log"] = []
|
||||
|
||||
self.db.set(db_entry['uuid'], db_entry, as_json=True)
|
||||
|
||||
return self.__class__(self.config, db_entry)
|
|
@ -1,104 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2012 Nico Schottelius (nico-cinv at schottelius.org)
|
||||
#
|
||||
# This file is part of cinv.
|
||||
#
|
||||
# cinv is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# cinv is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with cinv. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
#
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os.path
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
|
||||
from uncloud import UncloudException
|
||||
from uncloud.hack.db import DB
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MAC(object):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.no_db = self.config.arguments['no_db']
|
||||
if not self.no_db:
|
||||
self.db = DB(config, prefix="/mac")
|
||||
|
||||
self.prefix = 0x420000000000
|
||||
self._number = 0 # Not set by default
|
||||
|
||||
@staticmethod
|
||||
def validate_mac(mac):
|
||||
if not re.match(r'([0-9A-F]{2}[-:]){5}[0-9A-F]{2}$', mac, re.I):
|
||||
raise UncloudException("Not a valid mac address: %s" % mac)
|
||||
else:
|
||||
return True
|
||||
|
||||
def last_used_index(self):
|
||||
if not self.no_db:
|
||||
value = self.db.get("last_used_index")
|
||||
if not value:
|
||||
self.db.set("last_used_index", "0")
|
||||
value = self.db.get("last_used_index")
|
||||
|
||||
else:
|
||||
value = "0"
|
||||
|
||||
return int(value)
|
||||
|
||||
def last_used_mac(self):
|
||||
return self.int_to_mac(self.prefix + self.last_used_index())
|
||||
|
||||
def to_colon_format(self):
|
||||
b = self._number.to_bytes(6, byteorder="big")
|
||||
return ':'.join(format(s, '02x') for s in b)
|
||||
|
||||
def to_str_format(self):
|
||||
b = self._number.to_bytes(6, byteorder="big")
|
||||
return ''.join(format(s, '02x') for s in b)
|
||||
|
||||
def create(self):
|
||||
last_number = self.last_used_index()
|
||||
|
||||
if last_number == int('0xffffffff', 16):
|
||||
raise UncloudException("Exhausted all possible mac addresses - try to free some")
|
||||
|
||||
next_number = last_number + 1
|
||||
self._number = self.prefix + next_number
|
||||
|
||||
#next_number_string = "{:012x}".format(next_number)
|
||||
#next_mac = self.int_to_mac(next_mac_number)
|
||||
# db_entry = {}
|
||||
# db_entry['vm_uuid'] = vmuuid
|
||||
# db_entry['index'] = next_number
|
||||
# db_entry['mac_address'] = next_mac
|
||||
|
||||
# should be one transaction
|
||||
# self.db.increment("last_used_index")
|
||||
# self.db.set("used/{}".format(next_mac),
|
||||
# db_entry, as_json=True)
|
||||
|
||||
def __int__(self):
|
||||
return self._number
|
||||
|
||||
def __repr__(self):
|
||||
return self.to_str_format()
|
||||
|
||||
def __str__(self):
|
||||
return self.to_colon_format()
|
|
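The MAC addresses handled above are simply the fixed prefix 0x420000000000 plus the last-used index stored in etcd. As a rough self-contained sketch (not part of the repository, no etcd needed), this reproduces what to_colon_format() yields for index 1:

prefix = 0x420000000000   # same prefix the MAC class hardcodes
index = 1                 # next value of /mac/last_used_index
number = prefix + index
as_bytes = number.to_bytes(6, byteorder="big")
print(':'.join(format(b, '02x') for b in as_bytes))   # -> 42:00:00:00:00:01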
@ -1,186 +0,0 @@
|
|||
import argparse
|
||||
import logging
|
||||
import re
|
||||
|
||||
import ldap3
|
||||
|
||||
|
||||
from uncloud.hack.vm import VM
|
||||
from uncloud.hack.host import Host
|
||||
from uncloud.hack.config import Config
|
||||
from uncloud.hack.mac import MAC
|
||||
from uncloud.hack.net import VXLANBridge, DNSRA
|
||||
|
||||
from uncloud import UncloudException
|
||||
from uncloud.hack.product import ProductOrder
|
||||
|
||||
arg_parser = argparse.ArgumentParser('hack', add_help=False)
|
||||
#description="Commands that are unfinished - use at own risk")
|
||||
arg_parser.add_argument('--last-used-mac', action='store_true')
|
||||
arg_parser.add_argument('--get-new-mac', action='store_true')
|
||||
|
||||
arg_parser.add_argument('--init-network', help="Initialise networking", action='store_true')
|
||||
arg_parser.add_argument('--create-vxlan', help="Initialise networking", action='store_true')
|
||||
arg_parser.add_argument('--network', help="/64 IPv6 network")
|
||||
arg_parser.add_argument('--vxlan-uplink-device', help="The VXLAN underlay device, i.e. eth0")
|
||||
arg_parser.add_argument('--vni', help="VXLAN ID (decimal)", type=int)
|
||||
arg_parser.add_argument('--run-dns-ra', action='store_true',
|
||||
help="Provide router advertisements and DNS resolution via dnsmasq")
|
||||
arg_parser.add_argument('--use-sudo', help="Use sudo for command requiring root!", action='store_true')
|
||||
|
||||
arg_parser.add_argument('--create-vm', action='store_true')
|
||||
arg_parser.add_argument('--destroy-vm', action='store_true')
|
||||
arg_parser.add_argument('--get-vm-status', action='store_true')
|
||||
arg_parser.add_argument('--get-vm-vnc', action='store_true')
|
||||
arg_parser.add_argument('--list-vms', action='store_true')
|
||||
arg_parser.add_argument('--memory', help="Size of memory (GB)", type=int, default=2)
|
||||
arg_parser.add_argument('--cores', help="Amount of CPU cores", type=int, default=1)
|
||||
arg_parser.add_argument('--image', help="Path (under hackprefix) to OS image")
|
||||
|
||||
arg_parser.add_argument('--image-format', help="Image format: qcow2 or raw", choices=['raw', 'qcow2'])
|
||||
arg_parser.add_argument('--uuid', help="VM UUID")
|
||||
|
||||
arg_parser.add_argument('--no-db', help="Disable connection to etcd. For local testing only!", action='store_true')
|
||||
arg_parser.add_argument('--hackprefix', help="hackprefix, if you need it you know it (it's where the iso and ifup/down.sh are located)")
|
||||
|
||||
# order based commands => later to be shifted below "order"
|
||||
arg_parser.add_argument('--order', action='store_true')
|
||||
arg_parser.add_argument('--list-orders', help="List all orders", action='store_true')
|
||||
arg_parser.add_argument('--filter-order-key', help="Which key to filter on")
|
||||
arg_parser.add_argument('--filter-order-regexp', help="Which regexp the value should match")
|
||||
|
||||
arg_parser.add_argument('--process-orders', help="Process all (pending) orders", action='store_true')
|
||||
|
||||
arg_parser.add_argument('--product', choices=["dualstack-vm"])
|
||||
arg_parser.add_argument('--os-image-name', help="Name of OS image (successor to --image)")
|
||||
arg_parser.add_argument('--os-image-size', help="Size of OS image in GB", type=int, default=10)
|
||||
|
||||
arg_parser.add_argument('--username')
|
||||
arg_parser.add_argument('--password')
|
||||
|
||||
arg_parser.add_argument('--api', help="Run the API")
|
||||
arg_parser.add_argument('--mode',
|
||||
choices=["direct", "api", "client"],
|
||||
default="client",
|
||||
help="Directly manipulate etcd, spawn the API server or behave as a client")
|
||||
|
||||
|
||||
arg_parser.add_argument('--add-vm-host', help="Add a host that can run VMs")
|
||||
arg_parser.add_argument('--list-vm-hosts', action='store_true')
|
||||
|
||||
arg_parser.add_argument('--max-cores-per-vm')
|
||||
arg_parser.add_argument('--max-cores-total')
|
||||
arg_parser.add_argument('--max-memory-in-gb')
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def authenticate(username, password, totp_token=None):
|
||||
server = ldap3.Server("ldaps://ldap1.ungleich.ch")
|
||||
dn = "uid={},ou=customer,dc=ungleich,dc=ch".format(username)
|
||||
|
||||
log.debug("LDAP: connecting to {} as {}".format(server, dn))
|
||||
|
||||
try:
|
||||
conn = ldap3.Connection(server, dn, password, auto_bind=True)
|
||||
except ldap3.core.exceptions.LDAPBindError as e:
|
||||
raise UncloudException("Credentials not verified by LDAP server: {}".format(e))
|
||||
|
||||
|
||||
|
||||
def order(config):
|
||||
for required_arg in [ 'product', 'username', 'password' ]:
|
||||
if not config.arguments[required_arg]:
|
||||
raise UncloudException("Missing required argument: {}".format(required_arg))
|
||||
|
||||
if config.arguments['product'] == 'dualstack-vm':
|
||||
for required_arg in [ 'cores', 'memory', 'os_image_name', 'os_image_size' ]:
|
||||
if not config.arguments[required_arg]:
|
||||
raise UncloudException("Missing required argument: {}".format(required_arg))
|
||||
|
||||
log.debug(config.arguments)
|
||||
authenticate(config.arguments['username'], config.arguments['password'])
|
||||
|
||||
# create DB entry for VM
|
||||
vm = VM(config)
|
||||
return vm.product.place_order(owner=config.arguments['username'])
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def main(arguments):
|
||||
config = Config(arguments)
|
||||
|
||||
if arguments['add_vm_host']:
|
||||
h = Host(config)
|
||||
h.cmdline_add_host()
|
||||
|
||||
if arguments['list_vm_hosts']:
|
||||
h = Host(config)
|
||||
|
||||
for host in h.list_hosts(filter_key=arguments['filter_order_key'],
|
||||
filter_regexp=arguments['filter_order_regexp']):
|
||||
print("Host {}: {}".format(host.db_entry['uuid'], host.db_entry))
|
||||
|
||||
if arguments['order']:
|
||||
print("Created order: {}".format(order(config)))
|
||||
|
||||
if arguments['list_orders']:
|
||||
p = ProductOrder(config)
|
||||
for product_order in p.list_orders(filter_key=arguments['filter_order_key'],
|
||||
filter_regexp=arguments['filter_order_regexp']):
|
||||
print("Order {}: {}".format(product_order.db_entry['uuid'], product_order.db_entry))
|
||||
|
||||
if arguments['process_orders']:
|
||||
p = ProductOrder(config)
|
||||
p.process_orders()
|
||||
|
||||
if arguments['create_vm']:
|
||||
vm = VM(config)
|
||||
vm.create()
|
||||
|
||||
if arguments['destroy_vm']:
|
||||
vm = VM(config)
|
||||
vm.stop()
|
||||
|
||||
if arguments['get_vm_status']:
|
||||
vm = VM(config)
|
||||
vm.status()
|
||||
|
||||
if arguments['get_vm_vnc']:
|
||||
vm = VM(config)
|
||||
vm.vnc_addr()
|
||||
|
||||
if arguments['list_vms']:
|
||||
vm = VM(config)
|
||||
vm.list()
|
||||
|
||||
if arguments['last_used_mac']:
|
||||
m = MAC(config)
|
||||
print(m.last_used_mac())
|
||||
|
||||
if arguments['get_new_mac']:
|
||||
m = MAC(config)
m.create()
print(m)
|
||||
|
||||
#if arguments['init_network']:
|
||||
if arguments['create_vxlan']:
|
||||
if not arguments['network'] or not arguments['vni'] or not arguments['vxlan_uplink_device']:
|
||||
raise UncloudException("Initialising the network requires an IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
|
||||
vb = VXLANBridge(vni=arguments['vni'],
|
||||
route=arguments['network'],
|
||||
uplinkdev=arguments['vxlan_uplink_device'],
|
||||
use_sudo=arguments['use_sudo'])
|
||||
vb._setup_vxlan()
|
||||
vb._setup_bridge()
|
||||
vb._add_vxlan_to_bridge()
|
||||
vb._route_network()
|
||||
|
||||
if arguments['run_dns_ra']:
|
||||
if not arguments['network'] or not arguments['vni']:
|
||||
raise UncloudException("Providing DNS/RAs requires a /64 IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
|
||||
|
||||
dnsra = DNSRA(route=arguments['network'],
|
||||
vni=arguments['vni'],
|
||||
use_sudo=arguments['use_sudo'])
|
||||
dnsra._setup_dnsmasq()
|
|
@ -1,116 +0,0 @@
|
|||
import subprocess
|
||||
import ipaddress
|
||||
import logging
|
||||
|
||||
|
||||
from uncloud import UncloudException
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VXLANBridge(object):
|
||||
cmd_create_vxlan = "{sudo}ip -6 link add {vxlandev} type vxlan id {vni_dec} dstport 4789 group {multicast_address} dev {uplinkdev} ttl 5"
|
||||
cmd_up_dev = "{sudo}ip link set {dev} up"
|
||||
cmd_create_bridge="{sudo}ip link add {bridgedev} type bridge"
|
||||
cmd_add_to_bridge="{sudo}ip link set {vxlandev} master {bridgedev} up"
|
||||
cmd_add_addr="{sudo}ip addr add {ip} dev {bridgedev}"
|
||||
cmd_add_route_dev="{sudo}ip route add {route} dev {bridgedev}"
|
||||
|
||||
# VXLAN ids are at maximum 24 bit - use a /104
|
||||
multicast_network = ipaddress.IPv6Network("ff05::/104")
|
||||
max_vni = (2**24)-1
|
||||
|
||||
def __init__(self,
|
||||
vni,
|
||||
uplinkdev,
|
||||
route=None,
|
||||
use_sudo=False):
|
||||
self.config = {}
|
||||
|
||||
if vni > self.max_vni:
|
||||
raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))
|
||||
|
||||
if use_sudo:
|
||||
self.config['sudo'] = 'sudo '
|
||||
else:
|
||||
self.config['sudo'] = ''
|
||||
|
||||
self.config['vni_dec'] = vni
|
||||
self.config['vni_hex'] = "{:x}".format(vni)
|
||||
self.config['multicast_address'] = self.multicast_network[vni]
|
||||
|
||||
self.config['route_network'] = ipaddress.IPv6Network(route)
|
||||
self.config['route'] = route
|
||||
|
||||
self.config['uplinkdev'] = uplinkdev
|
||||
self.config['vxlandev'] = "vx{}".format(self.config['vni_hex'])
|
||||
self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])
|
||||
|
||||
|
||||
def setup_networking(self):
|
||||
pass
|
||||
|
||||
def _setup_vxlan(self):
|
||||
self._execute_cmd(self.cmd_create_vxlan)
|
||||
self._execute_cmd(self.cmd_up_dev, dev=self.config['vxlandev'])
|
||||
|
||||
def _setup_bridge(self):
|
||||
self._execute_cmd(self.cmd_create_bridge)
|
||||
self._execute_cmd(self.cmd_up_dev, dev=self.config['bridgedev'])
|
||||
|
||||
def _route_network(self):
|
||||
self._execute_cmd(self.cmd_add_route_dev)
|
||||
|
||||
def _add_vxlan_to_bridge(self):
|
||||
self._execute_cmd(self.cmd_add_to_bridge)
|
||||
|
||||
def _execute_cmd(self, cmd_string, **kwargs):
|
||||
cmd = cmd_string.format(**self.config, **kwargs)
|
||||
log.info("Executing: {}".format(cmd))
|
||||
subprocess.run(cmd.split())
|
||||
|
||||
class ManagementBridge(VXLANBridge):
|
||||
pass
|
||||
|
||||
|
||||
class DNSRA(object):
|
||||
# VXLAN ids are at maximum 24 bit
|
||||
max_vni = (2**24)-1
|
||||
|
||||
|
||||
# Command to start dnsmasq
|
||||
cmd_start_dnsmasq="{sudo}dnsmasq --interface={bridgedev} --bind-interfaces --dhcp-range={route},ra-only,infinite --enable-ra --no-daemon"
|
||||
|
||||
def __init__(self,
|
||||
vni,
|
||||
route=None,
|
||||
use_sudo=False):
|
||||
self.config = {}
|
||||
|
||||
if vni > self.max_vni:
|
||||
raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))
|
||||
|
||||
if use_sudo:
|
||||
self.config['sudo'] = 'sudo '
|
||||
else:
|
||||
self.config['sudo'] = ''
|
||||
|
||||
#TODO: remove if not needed
|
||||
#self.config['vni_dec'] = vni
|
||||
self.config['vni_hex'] = "{:x}".format(vni)
|
||||
|
||||
# dnsmasq only wants the network without the prefix, therefore, cut it off
|
||||
self.config['route'] = ipaddress.IPv6Network(route).network_address
|
||||
self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])
|
||||
|
||||
def _setup_dnsmasq(self):
|
||||
self._execute_cmd(self.cmd_start_dnsmasq)
|
||||
|
||||
def _execute_cmd(self, cmd_string, **kwargs):
|
||||
cmd = cmd_string.format(**self.config, **kwargs)
|
||||
log.info("Executing: {}".format(cmd))
|
||||
print("Executing: {}".format(cmd))
|
||||
subprocess.run(cmd.split())
|
||||
|
||||
class Firewall(object):
|
||||
pass
|
|
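As a rough usage sketch of the (removed) uncloud.hack.net module above - assuming a host where ip may be run via sudo and the fd00::/64 test prefix suggested by the CLI help - vni=1 yields the devices vx1/br1 and the multicast group ff05::1:

from uncloud.hack.net import VXLANBridge

bridge = VXLANBridge(vni=1,
                     uplinkdev="eth0",
                     route="fd00::/64",
                     use_sudo=True)

bridge._setup_vxlan()          # ip -6 link add vx1 type vxlan id 1 ... group ff05::1
bridge._setup_bridge()         # ip link add br1 type bridge
bridge._add_vxlan_to_bridge()  # ip link set vx1 master br1 up
bridge._route_network()        # ip route add fd00::/64 dev br1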
@ -1,206 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
||||
#
|
||||
# This file is part of uncloud.
|
||||
#
|
||||
# uncloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# uncloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import json
|
||||
import uuid
|
||||
import logging
|
||||
import re
|
||||
import importlib
|
||||
|
||||
from uncloud import UncloudException
|
||||
from uncloud.hack.db import DB, db_logentry
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
class ProductOrder(object):
|
||||
def __init__(self, config, product_entry=None, db_entry=None):
|
||||
self.config = config
|
||||
self.db = DB(self.config, prefix="/orders")
|
||||
self.db_entry = {}
|
||||
self.db_entry["product"] = product_entry
|
||||
|
||||
# Overwrite if we are loading an existing product order
|
||||
if db_entry:
|
||||
self.db_entry = db_entry
|
||||
|
||||
# FIXME: this should return a list of our class!
|
||||
def list_orders(self, filter_key=None, filter_regexp=None):
|
||||
for entry in self.db.list_and_filter("", filter_key, filter_regexp):
|
||||
yield self.__class__(self.config, db_entry=entry)
|
||||
|
||||
|
||||
def set_required_values(self):
|
||||
"""Set values that are required to make the db entry valid"""
|
||||
if not "uuid" in self.db_entry:
|
||||
self.db_entry["uuid"] = str(uuid.uuid4())
|
||||
if not "status" in self.db_entry:
|
||||
self.db_entry["status"] = "NEW"
|
||||
if not "owner" in self.db_entry:
|
||||
self.db_entry["owner"] = "UNKNOWN"
|
||||
if not "log" in self.db_entry:
|
||||
self.db_entry["log"] = []
|
||||
if not "db_version" in self.db_entry:
|
||||
self.db_entry["db_version"] = 1
|
||||
|
||||
def validate_status(self):
|
||||
if "status" in self.db_entry:
|
||||
if self.db_entry["status"] in [ "NEW",
|
||||
"SCHEDULED",
|
||||
"CREATED_ACTIVE",
|
||||
"CANCELLED",
|
||||
"REJECTED" ]:
|
||||
return False
|
||||
return True
|
||||
|
||||
def order(self):
|
||||
self.set_required_values()
|
||||
if not self.db_entry["status"] == "NEW":
|
||||
raise UncloudException("Cannot re-order same order. Status: {}".format(self.db_entry["status"]))
|
||||
self.db.set(self.db_entry["uuid"], self.db_entry, as_json=True)
|
||||
|
||||
return self.db_entry["uuid"]
|
||||
|
||||
def process_orders(self):
|
||||
"""processing orders can be done stand alone on server side"""
|
||||
for order in self.list_orders():
|
||||
if order.db_entry["status"] == "NEW":
|
||||
log.info("Handling new order: {}".format(order))
|
||||
|
||||
# FIXME: these all should be a transactions! -> fix concurrent access! !
|
||||
if not "log" in order.db_entry:
|
||||
order.db_entry['log'] = []
|
||||
|
||||
is_valid = True
|
||||
# Verify the order entry
|
||||
for must_attribute in [ "owner", "product" ]:
|
||||
if not must_attribute in order.db_entry:
|
||||
message = "Missing {} entry in order, rejecting order".format(must_attribute)
|
||||
log.info("Rejecting order {}: {}".format(order.db_entry["uuid"], message))
|
||||
|
||||
order.db_entry['log'].append(db_logentry(message))
|
||||
order.db_entry['status'] = "REJECTED"
|
||||
self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)
|
||||
|
||||
is_valid = False
|
||||
|
||||
# Rejected the order
|
||||
if not is_valid:
|
||||
continue
|
||||
|
||||
# Verify the product entry
|
||||
for must_attribute in [ "python_product_class", "python_product_module" ]:
|
||||
if not must_attribute in order.db_entry['product']:
|
||||
message = "Missing {} entry in product of order, rejecting order".format(must_attribute)
|
||||
log.info("Rejecting order {}: {}".format(order.db_entry["uuid"], message))
|
||||
|
||||
order.db_entry['log'].append(db_logentry(message))
|
||||
order.db_entry['status'] = "REJECTED"
|
||||
self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)
|
||||
|
||||
is_valid = False
|
||||
|
||||
# Rejected the order
|
||||
if not is_valid:
|
||||
continue
|
||||
|
||||
print(order.db_entry["product"]["python_product_class"])
|
||||
|
||||
# Create the product
|
||||
m = importlib.import_module(order.db_entry["product"]["python_product_module"])
|
||||
c = getattr(m, order.db_entry["product"]["python_product_class"])
|
||||
|
||||
product = c(self.config, db_entry=order.db_entry["product"])
|
||||
|
||||
# STOPPED
|
||||
product.create_product()
|
||||
|
||||
order.db_entry['status'] = "SCHEDULED"
|
||||
self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)
|
||||
|
||||
|
||||
|
||||
def __str__(self):
|
||||
return str(self.db_entry)
|
||||
|
||||
class Product(object):
|
||||
def __init__(self,
|
||||
config,
|
||||
product_name,
|
||||
product_class,
|
||||
db_entry=None):
|
||||
self.config = config
|
||||
self.db = DB(self.config, prefix="/orders")
|
||||
|
||||
self.db_entry = {}
|
||||
self.db_entry["product_name"] = product_name
|
||||
self.db_entry["python_product_class"] = product_class.__qualname__
|
||||
self.db_entry["python_product_module"] = product_class.__module__
|
||||
self.db_entry["db_version"] = 1
|
||||
self.db_entry["log"] = []
|
||||
self.db_entry["features"] = {}
|
||||
|
||||
# Existing product? Read in db_entry
|
||||
if db_entry:
|
||||
self.db_entry = db_entry
|
||||
|
||||
self.valid_periods = [ "per_year", "per_month", "per_week",
|
||||
"per_day", "per_hour",
|
||||
"per_minute", "per_second" ]
|
||||
|
||||
def define_feature(self,
|
||||
name,
|
||||
one_time_price,
|
||||
recurring_price,
|
||||
recurring_period,
|
||||
minimum_period):
|
||||
|
||||
self.db_entry['features'][name] = {}
|
||||
self.db_entry['features'][name]['one_time_price'] = one_time_price
|
||||
self.db_entry['features'][name]['recurring_price'] = recurring_price
|
||||
|
||||
if not recurring_period in self.valid_periods:
|
||||
raise UncloudException("Invalid recurring period: {}".format(recurring_period))
|
||||
|
||||
self.db_entry['features'][name]['recurring_period'] = recurring_period
|
||||
|
||||
if not minimum_period in self.valid_periods:
|
||||
raise UncloudException("Invalid recurring period: {}".format(recurring_period))
|
||||
|
||||
recurring_index = self.valid_periods.index(recurring_period)
|
||||
minimum_index = self.valid_periods.index(minimum_period)
|
||||
|
||||
if minimum_index < recurring_index:
|
||||
raise UncloudException("Minimum period for product '{}' feature '{}' must be shorter or equal than/as recurring period: {} > {}".format(self.db_entry['product_name'], name, minimum_period, recurring_period))
|
||||
|
||||
self.db_entry['features'][name]['minimum_period'] = minimum_period
|
||||
|
||||
|
||||
def validate_product(self):
|
||||
for feature in self.db_entry['features']:
|
||||
pass
|
||||
|
||||
def place_order(self, owner):
|
||||
""" Schedule creating the product in etcd """
|
||||
order = ProductOrder(self.config, product_entry=self.db_entry)
|
||||
order.db_entry["owner"] = owner
|
||||
return order.order()
|
||||
|
||||
def __str__(self):
|
||||
return json.dumps(self.db_entry)
|
|
@ -1,26 +0,0 @@
|
|||
id=100
|
||||
rawdev=eth0
|
||||
|
||||
# create vxlan
|
||||
ip -6 link add vxlan${id} type vxlan \
|
||||
id ${id} \
|
||||
dstport 4789 \
|
||||
group ff05::${id} \
|
||||
dev ${rawdev} \
|
||||
ttl 5
|
||||
|
||||
ip link set vxlan${id} up
|
||||
|
||||
# create bridge
|
||||
ip link add br${id} type bridge
|
||||
ip link set br${id} up
|
||||
|
||||
# Add vxlan into bridge
|
||||
ip link set vxlan${id} master br${id}
|
||||
|
||||
|
||||
# useradd -m uncloud
|
||||
# [18:05] tablett.place10:~# id uncloud
|
||||
# uid=1000(uncloud) gid=1000(uncloud) groups=1000(uncloud),34(kvm),36(qemu)
|
||||
# apk add qemu-system-x86_64
|
||||
# also needs group netdev
|
|
@ -1,25 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
echo $0 vmid
|
||||
exit 1
|
||||
fi
|
||||
|
||||
id=$1; shift
|
||||
|
||||
memory=512
|
||||
macaddress=02:00:b9:cb:70:${id}
|
||||
netname=net${id}-1
|
||||
|
||||
qemu-system-x86_64 \
|
||||
-name uncloud-${id} \
|
||||
-accel kvm \
|
||||
-m ${memory} \
|
||||
-smp 2,sockets=2,cores=1,threads=1 \
|
||||
-device virtio-net-pci,netdev=net0,mac=$macaddress \
|
||||
-netdev tap,id=net0,ifname=${netname},script=no,downscript=no \
|
||||
-vnc [::]:0
|
||||
|
||||
# To be changed:
|
||||
# -vnc to unix path
|
||||
# or -spice
|
|
@ -1,193 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
||||
#
|
||||
# This file is part of uncloud.
|
||||
#
|
||||
# uncloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# uncloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This module is directly called from the hack module, and can be used as follow:
|
||||
#
|
||||
# Create a new VM with default CPU/Memory. The path of the image file is relative to $hackprefix.
|
||||
# `uncloud hack --hackprefix /tmp/hackcloud --create-vm --image mysuperimage.qcow2`
|
||||
#
|
||||
# List running VMs (returns a list of UUIDs).
|
||||
# `uncloud hack --hackprefix /tmp/hackcloud --list-vms
|
||||
#
|
||||
# Get VM status:
|
||||
# `uncloud hack --hackprefix /tmp/hackcloud --get-vm-status --uuid my-vm-uuid`
|
||||
#
|
||||
# Stop a VM:
|
||||
# `uncloud hack --hackprefix /tmp/hackcloud --destroy-vm --uuid my-vm-uuid`
|
||||
# ``
|
||||
|
||||
import subprocess
|
||||
import uuid
|
||||
import os
|
||||
import logging
|
||||
|
||||
from uncloud.hack.db import DB
|
||||
from uncloud.hack.mac import MAC
|
||||
from uncloud.vmm import VMM
|
||||
from uncloud.hack.product import Product
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
log.setLevel(logging.DEBUG)
|
||||
|
||||
class VM(object):
|
||||
def __init__(self, config, db_entry=None):
|
||||
self.config = config
|
||||
|
||||
#TODO: Enable etcd lookup
|
||||
self.no_db = self.config.arguments['no_db']
|
||||
if not self.no_db:
|
||||
self.db = DB(self.config, prefix="/vm")
|
||||
|
||||
if db_entry:
|
||||
self.db_entry = db_entry
|
||||
|
||||
# General CLI arguments.
|
||||
self.hackprefix = self.config.arguments['hackprefix']
|
||||
self.uuid = self.config.arguments['uuid']
|
||||
self.memory = self.config.arguments['memory'] or '1024M'
|
||||
self.cores = self.config.arguments['cores'] or 1
|
||||
|
||||
if self.config.arguments['image']:
|
||||
self.image = os.path.join(self.hackprefix, self.config.arguments['image'])
|
||||
else:
|
||||
self.image = None
|
||||
|
||||
if self.config.arguments['image_format']:
|
||||
self.image_format=self.config.arguments['image_format']
|
||||
else:
|
||||
self.image_format='qcow2'
|
||||
|
||||
# External components.
|
||||
|
||||
# This one is broken:
|
||||
# TypeError: expected str, bytes or os.PathLike object, not NoneType
|
||||
# Fix before re-enabling
|
||||
# self.vmm = VMM(vmm_backend=self.hackprefix)
|
||||
self.mac = MAC(self.config)
|
||||
|
||||
# Harcoded & generated values.
|
||||
self.owner = 'uncloud'
|
||||
self.accel = 'kvm'
|
||||
self.threads = 1
|
||||
self.ifup = os.path.join(self.hackprefix, "ifup.sh")
|
||||
self.ifdown = os.path.join(self.hackprefix, "ifdown.sh")
|
||||
self.ifname = "uc{}".format(self.mac.to_str_format())
|
||||
|
||||
self.vm = {}
|
||||
|
||||
self.product = Product(config, product_name="dualstack-vm",
|
||||
product_class=self.__class__)
|
||||
self.product.define_feature(name="base",
|
||||
one_time_price=0,
|
||||
recurring_price=9,
|
||||
recurring_period="per_month",
|
||||
minimum_period="per_hour")
|
||||
|
||||
|
||||
self.features = []
|
||||
|
||||
|
||||
def get_qemu_args(self):
|
||||
command = (
|
||||
"-name {owner}-{name}"
|
||||
" -machine pc,accel={accel}"
|
||||
" -drive file={image},format={image_format},if=virtio"
|
||||
" -device virtio-rng-pci"
|
||||
" -m {memory} -smp cores={cores},threads={threads}"
|
||||
" -netdev tap,id=netmain,script={ifup},downscript={ifdown},ifname={ifname}"
|
||||
" -device virtio-net-pci,netdev=netmain,id=net0,mac={mac}"
|
||||
).format(
|
||||
owner=self.owner, name=self.uuid,
|
||||
accel=self.accel,
|
||||
image=self.image, image_format=self.image_format,
|
||||
memory=self.memory, cores=self.cores, threads=self.threads,
|
||||
ifup=self.ifup, ifdown=self.ifdown, ifname=self.ifname,
|
||||
mac=self.mac
|
||||
)
|
||||
|
||||
return command.split(" ")
|
||||
|
||||
def create_product(self):
|
||||
"""Find a VM host and schedule on it"""
|
||||
pass
|
||||
|
||||
def create(self):
|
||||
# New VM: new UUID, new MAC.
|
||||
self.uuid = str(uuid.uuid4())
|
||||
self.mac=MAC(self.config)
|
||||
self.mac.create()
|
||||
|
||||
qemu_args = self.get_qemu_args()
|
||||
log.debug("QEMU args passed to VMM: {}".format(qemu_args))
|
||||
self.vmm.start(
|
||||
uuid=self.uuid,
|
||||
migration=False,
|
||||
*qemu_args
|
||||
)
|
||||
|
||||
|
||||
|
||||
self.vm['mac'] = self.mac
|
||||
self.vm['ifname'] = "uc{}".format(self.mac.__repr__())
|
||||
|
||||
# FIXME: TODO: turn this into a string and THEN
|
||||
# .split() it later -- easier for using .format()
|
||||
#self.vm['commandline'] = [ "{}".format(self.sudo),
|
||||
self.vm['commandline'] = "{sudo}{qemu} -name uncloud-{uuid} -machine pc,accel={accel} -m {memory} -smp {cores} -uuid {uuid} -drive file={os_image},media=cdrom -netdev tap,id=netmain,script={ifup},downscript={ifdown},ifname={ifname} -device virtio-net-pci,netdev=netmain,id=net0,mac={mac}"
|
||||
# self.vm['commandline'] = [ "{}".format(self.sudo),
|
||||
# "{}".format(self.qemu),
|
||||
# "-name", "uncloud-{}".format(self.vm['uuid']),
|
||||
# "-machine", "pc,accel={}".format(self.accel),
|
||||
# "-m", "{}".format(self.vm['memory']),
|
||||
# "-smp", "{}".format(self.vm['cores']),
|
||||
# "-uuid", "{}".format(self.vm['uuid']),
|
||||
# "-drive", "file={},media=cdrom".format(self.vm['os_image']),
|
||||
# "-netdev", "tap,id=netmain,script={},downscript={},ifname={}".format(self.ifup, self.ifdown, self.vm['ifname']),
|
||||
# "-device", "virtio-net-pci,netdev=netmain,id=net0,mac={}".format(self.vm['mac'])
|
||||
# ]
|
||||
|
||||
def _execute_cmd(self, cmd_string, **kwargs):
|
||||
cmd = cmd_string.format(**self.vm, **kwargs)
|
||||
log.info("Executing: {}".format(cmd))
|
||||
subprocess.run(cmd.split())
|
||||
|
||||
def stop(self):
|
||||
if not self.uuid:
|
||||
print("Please specific an UUID with the --uuid flag.")
|
||||
exit(1)
|
||||
|
||||
self.vmm.stop(self.uuid)
|
||||
|
||||
def status(self):
|
||||
if not self.uuid:
|
||||
print("Please specific an UUID with the --uuid flag.")
|
||||
exit(1)
|
||||
|
||||
print(self.vmm.get_status(self.uuid))
|
||||
|
||||
def vnc_addr(self):
|
||||
if not self.uuid:
|
||||
print("Please specific an UUID with the --uuid flag.")
|
||||
exit(1)
|
||||
|
||||
print(self.vmm.get_vnc(self.uuid))
|
||||
|
||||
def list(self):
|
||||
print(self.vmm.discover())
|
|
@ -1,123 +0,0 @@
|
|||
import argparse
|
||||
import os
|
||||
|
||||
|
||||
from pathlib import Path
|
||||
from uncloud.vmm import VMM
|
||||
from uncloud.host.virtualmachine import update_radvd_conf, create_vxlan_br_tap
|
||||
|
||||
from . import virtualmachine, logger
|
||||
|
||||
###
|
||||
# Argument parser loaded by scripts/uncloud.
|
||||
arg_parser = argparse.ArgumentParser('oneshot', add_help=False)
|
||||
|
||||
# Actions.
|
||||
arg_parser.add_argument('--list', action='store_true',
|
||||
help='list UUID and name of running VMs')
|
||||
arg_parser.add_argument('--start', nargs=4,
|
||||
metavar=('NAME', 'IMAGE', 'UPSTREAM_INTERFACE', 'NETWORK'),
|
||||
help='start a VM using the OS IMAGE (full path), configuring networking on NETWORK IPv6 prefix')
|
||||
arg_parser.add_argument('--stop', metavar='UUID',
|
||||
help='stop a VM')
|
||||
arg_parser.add_argument('--get-status', metavar='UUID',
|
||||
help='return the status of the VM')
|
||||
arg_parser.add_argument('--get-vnc', metavar='UUID',
|
||||
help='return the path of the VNC socket of the VM')
|
||||
arg_parser.add_argument('--reconfigure-radvd', metavar='NETWORK',
|
||||
help='regenerate and reload RADVD configuration for NETWORK IPv6 prefix')
|
||||
|
||||
# Arguments.
|
||||
arg_parser.add_argument('--workdir', default=Path.home(),
|
||||
help='Working directory, defaulting to $HOME')
|
||||
arg_parser.add_argument('--mac',
|
||||
help='MAC address of the VM to create (--start)')
|
||||
arg_parser.add_argument('--memory', type=int,
|
||||
help='Memory (MB) to allocate (--start)')
|
||||
arg_parser.add_argument('--cores', type=int,
|
||||
help='Number of cores to allocate (--start)')
|
||||
arg_parser.add_argument('--threads', type=int,
|
||||
help='Number of threads to allocate (--start)')
|
||||
arg_parser.add_argument('--image-format', choices=['raw', 'qcow2'],
|
||||
help='Format of OS image (--start)')
|
||||
arg_parser.add_argument('--accel', choices=['kvm', 'tcg'], default='kvm',
|
||||
help='QEMU acceleration to use (--start)')
|
||||
arg_parser.add_argument('--upstream-interface', default='eth0',
|
||||
help='Name of upstream interface (--start)')
|
||||
|
||||
###
|
||||
# Helpers.
|
||||
|
||||
# XXX: check if it is possible to use the type returned by ETCD queries.
|
||||
class UncloudEntryWrapper:
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
|
||||
def value(self):
|
||||
return self.value
|
||||
|
||||
def status_line(vm):
|
||||
return "VM: {} {} {}".format(vm.get_uuid(), vm.get_name(), vm.get_status())
|
||||
|
||||
###
|
||||
# Entrypoint.
|
||||
|
||||
def main(arguments):
|
||||
# Initialize VMM.
|
||||
workdir = arguments['workdir']
|
||||
vmm = VMM(vmm_backend=workdir)
|
||||
|
||||
# Harcoded debug values.
|
||||
net_id = 0
|
||||
|
||||
# Build VM configuration.
|
||||
vm_config = {}
|
||||
vm_options = [
|
||||
'mac', 'memory', 'cores', 'threads', 'image', 'image_format',
|
||||
'upstream_interface', 'network', 'accel'
|
||||
]
|
||||
for option in vm_options:
|
||||
if arguments.get(option):
|
||||
vm_config[option] = arguments[option]
|
||||
|
||||
vm_config['net_id'] = net_id
|
||||
|
||||
# Execute requested VM action.
|
||||
if arguments['reconfigure_radvd']:
|
||||
# TODO: check that RADVD is available.
|
||||
prefix = arguments['reconfigure_radvd']
|
||||
network = UncloudEntryWrapper({
|
||||
'id': net_id,
|
||||
'ipv6': prefix
|
||||
})
|
||||
|
||||
# Make use of uncloud.host.virtualmachine for network configuration.
|
||||
update_radvd_conf([network])
|
||||
elif arguments['start']:
|
||||
# Extract from --start positional arguments. Quite fragile.
|
||||
vm_config['name'] = arguments['start'][0]
|
||||
vm_config['image'] = arguments['start'][1]
|
||||
vm_config['network'] = arguments['start'][2]
|
||||
vm_config['upstream_interface'] = arguments['start'][3]
|
||||
|
||||
vm_config['tap_interface'] = "uc{}".format(len(vmm.discover()))
|
||||
vm = virtualmachine.VM(vmm, vm_config)
|
||||
vm.start()
|
||||
elif arguments['stop']:
|
||||
vm = virtualmachine.VM(vmm, {'uuid': arguments['stop']})
|
||||
vm.stop()
|
||||
elif arguments['get_status']:
|
||||
vm = virtualmachine.VM(vmm, {'uuid': arguments['get_status']})
|
||||
print(status_line(vm))
|
||||
elif arguments['get_vnc']:
|
||||
vm = virtualmachine.VM(vmm, {'uuid': arguments['get_vnc']})
|
||||
print(vm.get_vnc_addr())
|
||||
elif arguments['list']:
|
||||
vms = vmm.discover()
|
||||
print("Found {} VMs.".format(len(vms)))
|
||||
for uuid in vms:
|
||||
vm = virtualmachine.VM(vmm, {'uuid': uuid})
|
||||
print(status_line(vm))
|
||||
else:
|
||||
print('Please specify an action: --start, --stop, --list, '
'--get-status, --get-vnc, --reconfigure-radvd')
|
|
@ -1,81 +0,0 @@
|
|||
import uuid
|
||||
import os
|
||||
|
||||
from uncloud.host.virtualmachine import create_vxlan_br_tap
|
||||
from uncloud.oneshot import logger
|
||||
|
||||
class VM(object):
|
||||
def __init__(self, vmm, config):
|
||||
self.config = config
|
||||
self.vmm = vmm
|
||||
|
||||
# Extract VM specs/metadata from configuration.
|
||||
self.name = config.get('name', 'no-name')
|
||||
self.memory = config.get('memory', 1024)
|
||||
self.cores = config.get('cores', 1)
|
||||
self.threads = config.get('threads', 1)
|
||||
self.image_format = config.get('image_format', 'qcow2')
|
||||
self.image = config.get('image')
|
||||
self.uuid = config.get('uuid', str(uuid.uuid4()))
|
||||
self.mac = config.get('mac')
|
||||
self.accel = config.get('accel', 'kvm')
|
||||
|
||||
self.net_id = config.get('net_id', 0)
|
||||
self.upstream_interface = config.get('upstream_interface', 'eth0')
|
||||
self.tap_interface = config.get('tap_interface', 'uc0')
|
||||
self.network = config.get('network')
|
||||
|
||||
def get_qemu_args(self):
|
||||
command = (
|
||||
"-uuid {uuid} -name {name} -machine pc,accel={accel}"
|
||||
" -drive file={image},format={image_format},if=virtio"
|
||||
" -device virtio-rng-pci"
|
||||
" -m {memory} -smp cores={cores},threads={threads}"
|
||||
" -netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
|
||||
" -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}"
|
||||
).format(
|
||||
uuid=self.uuid, name=self.name, accel=self.accel,
|
||||
image=self.image, image_format=self.image_format,
|
||||
memory=self.memory, cores=self.cores, threads=self.threads,
|
||||
net_id=self.net_id, tap=self.tap_interface, mac=self.mac
|
||||
)
|
||||
|
||||
return command.split(" ")
|
||||
|
||||
def start(self):
|
||||
# Check that VM image is available.
|
||||
if not os.path.isfile(self.image):
|
||||
logger.error("Image {} does not exist. Aborting.".format(self.image))
|
||||
|
||||
# Create Bridge, VXLAN and tap interface for VM.
|
||||
create_vxlan_br_tap(
|
||||
self.net_id, self.upstream_interface, self.tap_interface, self.network
|
||||
)
|
||||
|
||||
# Generate config for and run QEMU.
|
||||
qemu_args = self.get_qemu_args()
|
||||
logger.debug("QEMU args for VM {}: {}".format(self.uuid, qemu_args))
|
||||
self.vmm.start(
|
||||
uuid=self.uuid,
|
||||
migration=False,
|
||||
*qemu_args
|
||||
)
|
||||
|
||||
def stop(self):
|
||||
self.vmm.stop(self.uuid)
|
||||
|
||||
def get_status(self):
|
||||
return self.vmm.get_status(self.uuid)
|
||||
|
||||
def get_vnc_addr(self):
|
||||
return self.vmm.get_vnc(self.uuid)
|
||||
|
||||
def get_uuid(self):
|
||||
return self.uuid
|
||||
|
||||
def get_name(self):
|
||||
success, json = self.vmm.execute_command(self.uuid, 'query-name')
|
||||
if success:
|
||||
return json['return']['name']
|
||||
|
||||
return None
|
|
@ -1,3 +0,0 @@
|
|||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
|
@ -1 +0,0 @@
|
|||
VERSION = "0.0.5-30-ge91fd9e"
|
|
@ -1,39 +0,0 @@
|
|||
#!/bin/sh
|
||||
# Nico Schottelius, 2021-01-17
|
||||
|
||||
set -e
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "$0 target-host"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
target_host=$1; shift
|
||||
user=app
|
||||
|
||||
dir=${0%/*}
|
||||
uncloud_base=$(cd ${dir}/.. && pwd -P)
|
||||
conf_name=local_settings-${target_host}.py
|
||||
conf_file=${uncloud_base}/uncloud/${conf_name}
|
||||
|
||||
if [ ! -e ${conf_file} ]; then
|
||||
echo "No settings for ${target_host}."
|
||||
echo "Create ${conf_file} before using this script."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Deploy
|
||||
rsync -av \
|
||||
--exclude venv/ \
|
||||
--exclude '*.pyc' \
|
||||
--exclude uncloud/local_settings.py \
|
||||
--delete \
|
||||
${uncloud_base}/ ${user}@${target_host}:app/
|
||||
|
||||
ssh "${user}@${target_host}" ". ~/pyvenv/bin/activate; cd ~/app; pip install -r requirements.txt"
|
||||
|
||||
# Config
|
||||
ssh "${user}@${target_host}" "cd ~/app/uncloud; ln -sf ${conf_name} local_settings.py"
|
||||
|
||||
# Restart / Apply
|
||||
ssh "${user}@${target_host}" "sudo /etc/init.d/uwsgi restart"
|
|
@@ -1,22 +1,22 @@
 #!/bin/sh
 # -*- coding: utf-8 -*-
 #
-# 2019-2020 Nico Schottelius (nico-uncloud at schottelius.org)
+# 2019 Nico Schottelius (nico-ucloud at schottelius.org)
 #
-# This file is part of uncloud.
+# This file is part of ucloud.
 #
-# uncloud is free software: you can redistribute it and/or modify
+# ucloud is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
-# uncloud is distributed in the hope that it will be useful,
+# ucloud is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
-# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
+# along with ucloud. If not, see <http://www.gnu.org/licenses/>.
 #
 #
@@ -26,4 +26,4 @@ dir=${0%/*}

 # Ensure version is present - the bundled/shipped version contains a static version,
 # the git version contains a dynamic version
-printf "VERSION = \"%s\"\n" "$(git describe --tags --abbrev=0)" > ${dir}/../uncloud/version.py
+printf "VERSION = \"%s\"\n" "$(git describe)" > ${dir}/../uncloud/version.py
@@ -1,7 +0,0 @@
#!/bin/sh

# For undoing/redoing everything
# Needed in special cases and needs to be avoided as soon as
# uncloud.version >= 1
for a in */migrations; do rm ${a}/*.py; done
for a in */migrations; do python manage.py makemigrations ${a%%/migrations}; done
@@ -24,6 +24,6 @@
 dir=${0%/*}

 ${dir}/gen-version;
-pip uninstall -y uncloud >/dev/null
-python setup.py install >/dev/null
+pip uninstall -y uncloud
+python setup.py install
 ${dir}/uncloud "$@"
2 doc/.gitignore vendored
@@ -1,2 +0,0 @@
*.pdf
*.tex
@@ -1,85 +0,0 @@
* How to handle billing in general
** Manual test flow / setting up bills
- Needs orders
-
** Orders
- Orders are the heart of uncloud billing
- Have a starting date
- Have an ending date
- Orders are immutable
- Can usually not be cancelled / cancellation is not a refund
- Customer/user commits on a certain period -> gets a discount
  based on it
- Can be upgraded
  - Create a new order
  - We link the new order to the old order and say this one
    replaces it
  - If the price of the new order is HIGHER than the OLD order,
    then we charge the difference until the end of the order period
  - In the next billing run we set the OLD order not to bill anymore
  - And only the NEW order will be billed afterwards
- Can be downgraded in the next period (but not for this period)
  - We create a new order, same as for an upgrade
  - The new order starts directly after the OLD order
  - As the amount is LOWER than the OLD order, no additional charge is done
    during this order period
- We might need to have an activate datetime
  - When to implement this
- Order periods can be
*** Statuses
- CREATING/PREPARING
- INACTIVE (?)
- TO_BILL
- NOT_TO_BILL: we use this to accelerate queries to the DB
*** Updating status of orders
- If it has a succeeding order and the billing date is last month -> set inactive
** Bills
- Are always for a month
- Can be preliminary
*** Which orders to include
- Not the cancelled ones / not active ones
** Flows / Approach
*** Finding all orders for a bill
- Get all orders, state != NOT_TO_BILL; for each order do:
  - is it a one time order?
    - has it a bill assigned?
      - yes: set to NOT_TO_BILL
      - no:
        - get_or_create_bill_for_this_month
        - assign bill to this order
        - set to NOT_TO_BILL
  - is it a recurring order?
    - if it has a REPLACING order:
      -
- First of month
- Last of month
*** Handling replacement of orders
- The OLD order will appear on the bill in the month that it was
  cancelled
- The OLD order needs to be set to NOT_TO_BILL after it was billed
  the last time
- The NEW order will be added pro rata if the amount is higher in
  the same month
- The NEW order will be used next month
**** Disabling the old order
- On billing run
  - If order.replacement_order (naming!) is set
    - if the order.replacement_order starts during THIS_MONTH
      - add order to bill
    - if NOT:
      - the order was already replaced in a previous billing period
      - set the order to NOT_TO_BILL
**** Billing the new order
- If order.previous_order
*** Handling multiple times a recurring order
- For each recurring order check the order.period
- Find out when it was billed last
  - lookup the latest bill
- Calculate how many times it has been used until 23:59, last day
  of the month
  - For a preliminary bill: until datetime.now()
  - Call this the bill_end_datetime
- Getting the duration: bill_end_datetime - order.last_billed
  - Amount in seconds; duration_in_seconds
- Divide duration_in_seconds by order.period; amount_used:
  - If >= 1: add amount_used * order.recurring_amount to the bill
    (see the sketch below)
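A minimal sketch of the recurring-order accounting described in the notes
above (illustrative only: order.period, order.recurring_amount and
order.last_billed are names taken from the notes; everything else, including
the helper itself, is an assumption):

#+BEGIN_SRC python
def recurring_charge(order, bill_end_datetime):
    """Pro-rata charge for one recurring order up to bill_end_datetime.

    For a final bill, bill_end_datetime is 23:59 on the last day of the
    month; for a preliminary bill it would be datetime.now(), as noted above.
    """
    duration_in_seconds = (bill_end_datetime - order.last_billed).total_seconds()
    amount_used = duration_in_seconds / order.period   # period assumed in seconds
    if amount_used >= 1:
        return amount_used * order.recurring_amount
    return 0
#+END_SRC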
@@ -1,28 +0,0 @@
* What is a remote uncloud client?
** Systems that configure themselves for use with uncloud
** Examples are VMHosts, VPN Servers, cdist control server, etc.
* Which access do these clients need?
** They need read / write access to the database
* Possible methods
** Overview
| Method                   | pros                                                      | cons                                 |
|--------------------------+-----------------------------------------------------------+--------------------------------------|
| SSL based                | Once setup, can access all django parts natively, locally | X.509 infrastructure                 |
| SSH -L tunnel            | All nodes can use [::1]:5432                               | SSH setup can be fragile             |
| ssh djangohost manage.py | All DB ops locally                                         | Code is only executed on django host |
| https + token            | REST-alike / consistent access                             | Code is only executed on django host |
| from_django              | Everything is on the django host                           | main host can become a bottleneck    |
** remote vs. local Django code execution
- If manage.py is executed locally (= on the client), it can
  check/modify local configs
- However local execution requires a pyvenv + packages + db access
- Local execution also *could* make use of postgresql notify for
  triggering actions (which is quite neat)
- Remote execution (= on the primary django host) can access the db
  via unix socket
- However remote execution cannot check local state
** from_django
- might reuse existing methods like celery
- reduces the amount of things to be installed on the client to
  almost zero
- follows the opennebula model
- has a single point of failure
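As a concrete illustration of the "SSH -L tunnel" row above (the hostname is
the placeholder used in the Database section later in this document; the port
and forwarding scheme follow that section):

#+BEGIN_SRC sh
# Forward the remote postgres socket so the client can use [::1]:5432 locally.
ssh -L 5432:localhost:5432 uncloud-database-host
# From then on, local code can reach postgresql://uncloud@[::1]:5432/uncloud
#+END_SRC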
@ -1,485 +0,0 @@
|
|||
* Bootstrap / Installation / Deployment
|
||||
** Pre-requisites by operating system
|
||||
*** General
|
||||
To run uncloud you need:
|
||||
- ldap development libraries
|
||||
- libxml2-dev libxslt-dev
|
||||
- gcc / libc headers: for compiling things
|
||||
- python3-dev
|
||||
- wireguard: wg (for checking keys)
|
||||
*** Alpine
|
||||
#+BEGIN_SRC sh
|
||||
apk add openldap-dev postgresql-dev libxml2-dev libxslt-dev gcc python3-dev musl-dev wireguard-tools-wg
|
||||
#+END_SRC
|
||||
*** Debian/Devuan:
|
||||
#+BEGIN_SRC sh
|
||||
apt install postgresql-server-dev-all
|
||||
#+END_SRC
|
||||
** Creating a virtual environment / installing python requirements
|
||||
*** Virtual env
|
||||
To separate uncloud requirements, you can use a python virtual
|
||||
env as follows:
|
||||
#+BEGIN_SRC sh
|
||||
python3 -m venv venv
|
||||
. ./venv/bin/activate
|
||||
#+END_SRC
|
||||
Then install the requirements
|
||||
#+BEGIN_SRC sh
|
||||
pip install -r requirements.txt
|
||||
#+END_SRC
|
||||
** Setting up the database
|
||||
*** Install the database service
|
||||
The database can run on the same host as uncloud, but can also run
|
||||
on a different server. Consult the usual postgresql documentation for
|
||||
a secure configuration.
|
||||
|
||||
The database needs to be accessible from all worker nodes.
|
||||
**** Alpine
|
||||
#+BEGIN_SRC sh
|
||||
apk add postgresql-server
|
||||
rc-update add postgresql
|
||||
rc-service postgresql start
|
||||
#+END_SRC
|
||||
|
||||
**** Debian/Devuan:
|
||||
#+BEGIN_SRC sh
|
||||
apt install postgresql
|
||||
#+END_SRC
|
||||
*** Create the database
|
||||
Due to the use of the JSONField, postgresql is required.
|
||||
To get started,
|
||||
create a database and have it owned by the user that runs uncloud
|
||||
(usually "uncloud"):
|
||||
|
||||
#+BEGIN_SRC sh
|
||||
bridge:~# su - postgres
|
||||
bridge:~$ psql
|
||||
postgres=# create role uncloud login;
|
||||
postgres=# create database uncloud owner uncloud;
|
||||
#+END_SRC
|
||||
*** Creating the schema
|
||||
#+BEGIN_SRC sh
|
||||
python manage.py migrate
|
||||
#+END_SRC
|
||||
|
||||
*** Configuring remote access
|
||||
- Get a letsencrypt certificate
|
||||
- Expose SSL ports
|
||||
- Create a user
|
||||
|
||||
#+BEGIN_SRC sh
|
||||
certbot certonly --standalone \
|
||||
-d <yourdbhostname> -m your@email.com \
|
||||
--agree-tos --no-eff-email
|
||||
#+END_SRC
|
||||
|
||||
- Configuring postgresql.conf:
|
||||
#+BEGIN_SRC sh
|
||||
listen_addresses = '*' # what IP address(es) to listen on;
|
||||
ssl = on
|
||||
ssl_cert_file = '/etc/postgresql/server.crt'
|
||||
ssl_key_file = '/etc/postgresql/server.key'
|
||||
|
||||
#+END_SRC
|
||||
|
||||
- Cannot load directly due to permission error:
|
||||
2020-12-26 13:01:55.235 CET [27805] FATAL: could not load server
|
||||
certificate file
|
||||
"/etc/letsencrypt/live/2a0a-e5c0-0013-0000-9f4b-e619-efe5-a4ac.has-a.name/fullchain.pem":
|
||||
Permission denied
|
||||
|
||||
- hook
|
||||
#+BEGIN_SRC sh
|
||||
bridge:/etc/letsencrypt/renewal-hooks/deploy# cat /etc/letsencrypt/renewal-hooks/deploy/postgresql
|
||||
#!/bin/sh
|
||||
|
||||
umask 0177
|
||||
export DOMAIN=2a0a-e5c0-0013-0000-9f4b-e619-efe5-a4ac.has-a.name
|
||||
export DATA_DIR=/etc/postgresql
|
||||
|
||||
cp /etc/letsencrypt/live/$DOMAIN/fullchain.pem $DATA_DIR/server.crt
|
||||
cp /etc/letsencrypt/live/$DOMAIN/privkey.pem $DATA_DIR/server.key
|
||||
chown postgres:postgres $DATA_DIR/server.crt $DATA_DIR/server.key
|
||||
#+END_SRC
|
||||
|
||||
- Allowing access with md5-hashed passwords, encrypted via TLS
|
||||
#+BEGIN_SRC sh
|
||||
hostssl all all ::/0 md5
|
||||
#+END_SRC
|
||||
|
||||
#+BEGIN_SRC sh
|
||||
|
||||
postgres=# create role uncloud password '...';
|
||||
CREATE ROLE
|
||||
postgres=# alter role uncloud login ;
|
||||
ALTER ROLE
|
||||
#+END_SRC
|
||||
|
||||
Testing the connection:
|
||||
|
||||
#+BEGIN_SRC sh
|
||||
psql "postgresql://uncloud@2a0a-e5c0-0013-0000-9f4b-e619-efe5-a4ac.has-a.name/uncloud?sslmode=require"
#+END_SRC
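For completeness, the matching Django side could look roughly like this in a
per-host settings file (a sketch only: the option names are standard Django
postgres settings, the values mirror the psql test above, and the password
stays elided):

#+BEGIN_SRC python
# Hypothetical fragment of uncloud/local_settings-<host>.py
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'uncloud',
        'USER': 'uncloud',
        'PASSWORD': '...',
        'HOST': '2a0a-e5c0-0013-0000-9f4b-e619-efe5-a4ac.has-a.name',
        'PORT': '5432',
        'OPTIONS': {'sslmode': 'require'},
    }
}
#+END_SRC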
|
||||
|
||||
** Bootstrap
|
||||
- Login via a user so that the user object gets created
|
||||
- Run the following (replace nicocustomer with the username)
|
||||
#+BEGIN_SRC sh
|
||||
python manage.py bootstrap-user --username nicocustomer
|
||||
#+END_SRC
|
||||
|
||||
** Initialise the database
|
||||
While it is not strictly required to add default values to the
|
||||
database, it might significantly reduce the starting time with
|
||||
uncloud.
|
||||
|
||||
To add the default database values run:
|
||||
|
||||
#+BEGIN_SRC shell
|
||||
# Add local objects
|
||||
python manage.py db-add-defaults
|
||||
|
||||
# Import VAT rates
|
||||
python manage.py import-vat-rates
|
||||
#+END_SRC
|
||||
|
||||
** Worker nodes
|
||||
Nodes that realise services (VMHosts, VPNHosts, etc.) need to be
|
||||
accessible from the main node and also need access to the database.
|
||||
|
||||
Workers usually should have an "uncloud" user account, even though
|
||||
strictly speaking the username can be anything.
|
||||
*** WireGuardVPN Server
|
||||
- Allow write access to /etc/wireguard for uncloud user
|
||||
- Allow sudo access to "ip" and "wg"
|
||||
|
||||
#+BEGIN_SRC sh
|
||||
chown uncloud /etc/wireguard/
|
||||
[14:30] vpn-2a0ae5c1200:/etc/sudoers.d# cat uncloud
|
||||
app ALL=(ALL) NOPASSWD:/sbin/ip
|
||||
app ALL=(ALL) NOPASSWD:/usr/bin/wg
|
||||
#+END_SRC
|
||||
** Typical source code based deployment
|
||||
- Deploy using bin/deploy.sh on a remote server
|
||||
- Remote server should have
|
||||
- postgresql running, accessible via TLS from outside
|
||||
- rabbitmq configured [in progress]
|
||||
|
||||
* Testing / CLI Access
|
||||
Access via the commandline (CLI) can be done using curl or
|
||||
httpie. In our examples we will use httpie.
|
||||
** Checking out the API
|
||||
#+BEGIN_SRC sh
|
||||
http localhost:8000/api/
|
||||
#+END_SRC
|
||||
** Authenticate via ldap user in password store
|
||||
#+BEGIN_SRC sh
|
||||
http --auth nicocustomer:$(pass ldap/nicocustomer) localhost:8000/api/
|
||||
#+END_SRC
|
||||
* Database
|
||||
** uncloud clients access the data base from a variety of outside hosts
|
||||
** So the postgresql data base needs to be remotely accessible
|
||||
** Instead of exposing the tcp socket, we make postgresql bind to localhost via IPv6
|
||||
*** ::1, port 5432
|
||||
** Then we remotely connect to the database server with ssh tunneling
|
||||
*** ssh -L5432:localhost:5432 uncloud-database-host
|
||||
** Configuring your database for SSH based remote access
|
||||
*** host all all ::1/128 trust
|
||||
|
||||
* URLs
|
||||
- api/ - the rest API
|
||||
* uncloud Products
|
||||
** Product features
|
||||
- Dependencies on other products
|
||||
- Minimum parameters (min cpu, min ram, etc).
|
||||
- Can also realise the dcl vm
|
||||
- dualstack vm = VM + IPv4 + SSD
|
||||
- Need to have a non-misleading name for the "bare VM"
|
||||
- Should support network boot (?)
|
||||
|
||||
** VPN
|
||||
*** How to add a new VPN Host
|
||||
**** Install wireguard to the host
|
||||
**** Install uncloud to the host
|
||||
**** Add `python manage.py vpn --hostname fqdn-of-this-host` to the crontab
|
||||
**** Use the CLI to configure one or more VPN Networks for this host
|
||||
*** Example of adding a VPN host at ungleich
|
||||
**** Create a new dual stack alpine VM
|
||||
**** Add it to DNS as vpn-XXX.ungleich.ch
|
||||
**** Route a /40 network to its IPv6 address
|
||||
**** Install wireguard on it
|
||||
**** TODO [#C] Enable wireguard on boot
|
||||
**** TODO [#C] Create a new VPNPool on uncloud with
|
||||
***** the network address (selecting from our existing pool)
|
||||
***** the network size (/...)
|
||||
***** the vpn host that provides the network (selecting the created VM)
|
||||
***** the wireguard private key of the vpn host (using wg genkey)
|
||||
***** http command
|
||||
#+BEGIN_SRC sh
http -a nicoschottelius:$(pass ungleich.ch/nico.schottelius@ungleich.ch) \
     http://localhost:8000/admin/vpnpool/ network=2a0a:e5c1:200:: \
     network_size=40 subnetwork_size=48 \
     vpn_hostname=vpn-2a0ae5c1200.ungleich.ch \
     wireguard_private_key=...
#+END_SRC
|
||||
*** Example http commands / REST calls
|
||||
**** creating a new vpn pool
|
||||
http -a nicoschottelius:$(pass ungleich.ch/nico.schottelius@ungleich.ch) \
     http://localhost:8000/admin/vpnpool/ network_size=40 \
     subnetwork_size=48 network=2a0a:e5c1:200:: \
     vpn_hostname=vpn-2a0ae5c1200.ungleich.ch wireguard_private_key=$(wg genkey)
|
||||
**** Creating a new vpn network
|
||||
*** Creating a VPN pool
|
||||
|
||||
#+BEGIN_SRC sh
|
||||
http -a uncloudadmin:$(pass uncloudadmin) https://localhost:8000/v1/admin/vpnpool/ \
|
||||
network=2a0a:e5c1:200:: network_size=40 subnetwork_size=48 \
|
||||
vpn_hostname=vpn-2a0ae5c1200.ungleich.ch wireguard_private_key=$(wg genkey)
|
||||
#+END_SRC
|
||||
|
||||
This will create the VPNPool 2a0a:e5c1:200::/40 from which /48
|
||||
networks will be used for clients.
|
||||
|
||||
VPNPools can only be managed by staff.
|
||||
|
||||
*** Managing VPNNetworks
|
||||
|
||||
To request a network as a client, use the following call:
|
||||
|
||||
#+BEGIN_SRC sh
|
||||
http -a user:$(pass user) https://localhost:8000/v1/net/vpn/ \
|
||||
network_size=48 \
|
||||
wireguard_public_key=$(wg genkey | tee privatekey | wg pubkey)
|
||||
#+END_SRC
|
||||
|
||||
VPNNetworks can be managed by all authenticated users.
|
||||
|
||||
* Developer Handbook
|
||||
The following sections describe decisions / architecture of
|
||||
uncloud. These chapters are intended to be read by developers.
|
||||
** This Documentation
|
||||
This documentation is written in org-mode. To compile it to
|
||||
html/pdf, just open emacs and press *C-c C-e l p*.
|
||||
** Models
|
||||
*** Bill
|
||||
Bills summarise usage in a specific timeframe. Bills usually
span one month.
|
||||
*** BillRecord
|
||||
Bill records are used to model the usage of one order during the
|
||||
timeframe.
|
||||
*** Order
|
||||
Orders register the intent of a user to buy something. They might
|
||||
refer to a product. (???)
|
||||
Orders register the one-time price and the recurring price. These
|
||||
fields should be treated as immutable. If they need to be modified,
|
||||
a new order that replaces the current order should be created.
|
||||
**** Replacing orders
|
||||
If an order is updated, a new order is created and points to the
|
||||
old order. The old order stops one second before the new order
|
||||
starts.
|
||||
|
||||
Whether an order has been replaced can be seen from its replaced_by count:
|
||||
#+BEGIN_SRC sh
|
||||
>>> Order.objects.get(id=1).replaced_by.count()
|
||||
1
|
||||
#+END_SRC
|
||||
|
||||
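A sketch of the replacement rule described above (illustrative only:
replaced_by is the reverse accessor used in the example above, the one-second
rule comes from the text, and the remaining field names are assumptions):

#+BEGIN_SRC python
from datetime import timedelta
from django.utils import timezone

def replace_order(old_order, **changes):
    # Hypothetical helper: create the successor and close the old order
    # one second before the new one starts.
    new_order = old_order.replaced_by.create(
        owner=old_order.owner,
        starting_date=timezone.now(),
        **changes)
    old_order.ending_date = new_order.starting_date - timedelta(seconds=1)
    old_order.save()
    return new_order
#+END_SRC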
*** Product and Product Children
|
||||
- A product describes something a user can buy
|
||||
- A product inherits from the uncloud_pay.models.Product model to
|
||||
get basic attributes
|
||||
** Identifiers
|
||||
*** Problem description
|
||||
Identifiers can be integers, strings or other objects. They should
|
||||
be unique.
|
||||
*** Approach 1: integers
|
||||
Integers are somewhat easy to remember, but they also grow
predictably, which might allow guessing of identifiers
(obviously proper permissions should prevent this).
|
||||
*** Approach 2: random uuids
|
||||
UUIDs are 128 bit integers. Python supports uuid.uuid4() for random
|
||||
uuids.
|
||||
*** Approach 3: IPv6 addresses
|
||||
uncloud heavily depends on IPv6 in the first place. uncloud could
|
||||
use a /48 to identify all objects. Objects that have IPv6 addresses
|
||||
on their own, don't need to draw from the system /48.
|
||||
**** Possible Subnetworks
|
||||
Assuming uncloud uses a /48 to represent all resources.
|
||||
|
||||
| Network | Name | Description |
|
||||
|-----------------+-----------------+----------------------------------------------|
|
||||
| 2001:db8::/48 | uncloud network | All identifiers drawn from here |
|
||||
| 2001:db8:1::/64 | VM network | Every VM has an IPv6 address in this network |
|
||||
| 2001:db8:2::/64 | Bill network | Every bill has an IPv6 address |
|
||||
| 2001:db8:3::/64 | Order network | Every order has an IPv6 address |
|
||||
| 2001:db8:5::/64 | Product network | Every product (?) has an IPv6 address |
|
||||
| 2001:db8:4::/64 | Disk network | Every disk is identified |
|
||||
|
||||
**** Tests
|
||||
[15:47:37] black3.place6:~# rbd create -s 10G ssd/2a0a:e5c0:1::8
|
||||
|
||||
*** Decision
|
||||
We use integers, because they are easy.
|
||||
|
||||
** Distributing/Dispatching/Orchestrating
|
||||
*** Variant 1: using cdist
|
||||
- The uncloud server can git commit things
|
||||
- The uncloud server loads cdist and configures the server
|
||||
- Advantages
|
||||
- Fully integrated into normal flow
|
||||
- Disadvantage
|
||||
- web frontend has access to more data than it needs
|
||||
- On compromise of the machine, more data leaks
|
||||
- Some of the usual cdist delay
|
||||
*** Variant 2: via celery
|
||||
- The uncloud server dispatches via celery
|
||||
- Every decentral node also runs celery/connects to the broker
|
||||
- Summary brokers:
|
||||
- If celery is only used locally -> good to use redis as the broker
|
||||
- If remote: probably better to use rabbitmq
|
||||
- redis
|
||||
- simpler
|
||||
- rabbitmq
|
||||
- more versatile
|
||||
- made for remote connections
|
||||
- quorum queues would be nice, but not clear if supported
|
||||
- https://github.com/celery/py-amqp/issues/302
|
||||
- https://github.com/celery/celery/issues/6067
|
||||
- Cannot be installed on alpine Linux at the moment
|
||||
- Advantage
|
||||
- Very python / django integrated
|
||||
- Rather instant
|
||||
- Disadvantages
|
||||
- Every decentral node needs to have the uncloud code available
|
||||
- Decentral nodes *might* need to access the database
|
||||
- Tasks can probably be written to work without that
|
||||
(i.e. only strings/bytes)
|
||||
|
||||
**** log/tests
|
||||
(venv) [19:54] vpn-2a0ae5c1200:~/uncloud$ celery -A uncloud -b redis://bridge.place7.ungleich.ch worker -n worker1@%h --logfile ~/celery.log -Q vpn-2a0ae5c1200.ungleich.ch
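The command above starts a worker that only consumes the queue named after its
own host. A sketch of the dispatch side (the task name and body are invented;
the broker URL and the queue name are the ones from the log above):

#+BEGIN_SRC python
from celery import Celery

app = Celery('uncloud', broker='redis://bridge.place7.ungleich.ch')

@app.task
def configure_wireguard(network):
    # would run on the VPN host that consumes this queue
    ...

configure_wireguard.apply_async(
    args=['2a0a:e5c1:200:1::/48'],
    queue='vpn-2a0ae5c1200.ungleich.ch')
#+END_SRC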
|
||||
|
||||
|
||||
*** Variant 3: dedicated cdist instance via message broker
|
||||
- A separate VM/machine
|
||||
- Has Checkout of ~/.cdist
|
||||
- Has cdist checkout
|
||||
- Tiny API for management
|
||||
- Not directly web accessible
|
||||
- "cdist" queue
|
||||
|
||||
** Milestones :uncloud:
|
||||
*** 1.1 (cleanup 1)
|
||||
**** TODO [#C] Unify ValidationError, FieldError - define proper Exception
|
||||
- What do we use for model errors
|
||||
**** TODO [#C] Cleanup the results handling in celery
|
||||
- Remove the results broker?
|
||||
- Setup app to ignore results?
|
||||
- Actually use results?
|
||||
*** 1.0 (initial release)
|
||||
**** TODO [#C] Initial Generic product support
|
||||
- Product
|
||||
***** TODO [#C] Recurring product support
|
||||
****** TODO [#C] Support replacing orders for updates
|
||||
****** DONE [#A] Finish split of bill creation
|
||||
CLOSED: [2020-09-11 Fri 23:19]
|
||||
****** TODO [#C] Test the new functions in the Order class
|
||||
****** Define the correct order replacement logic
|
||||
Assumption:
|
||||
- recurring periods are 30 days
|
||||
******* Case 1: downgrading
|
||||
- User commits to 10 CHF for 30 days
|
||||
- Wants to downgrade after 15 days to 5 CHF product
|
||||
- Expected result:
|
||||
- order 1: 10 CHF until +30days
|
||||
- order 2: 5 CHF starting 30days + 1s
|
||||
- Sum of the two orders is 15 CHF
|
||||
- Question is
|
||||
- when is the VM shutdown?
|
||||
- a) instantly
|
||||
- b) at the end of the cycle
|
||||
- best solution
|
||||
- user can choose between a ... b any time
|
||||
******* Duration
|
||||
- You cannot cancel the duration
|
||||
- You can upgrade and with that cancel the duration
|
||||
- The idea of a duration is that you commit for it
|
||||
- If you want to commit lower (daily basis for instance) you
|
||||
have higher per period prices
|
||||
******* Case X
|
||||
- User has VM with 2 Core / 2 GB RAM
|
||||
- User modifies it to 1 core / 3 GB RAM
|
||||
- We treat it as down/upgrade independent of the modifications
|
||||
|
||||
******* Case 2: upgrading after 1 day
|
||||
- committed for 30 days
|
||||
- upgrade after 1 day
|
||||
- so the first order will be charged for 1/30th
|
||||
|
||||
******* Case 2: upgrading
|
||||
- User commits to 10 CHF for 30 days
|
||||
- Wants to upgrade after 15 days to 20 CHF product
|
||||
- Order 1 : 1 VM with 2 Core / 2 GB / 10 SSD -- 10 CHF
|
||||
- 30days period, stopped after 15, so quantity is 0.5 = 5 CHF
|
||||
- Order 2 : 1 VM with 2 Core / 6 GB / 10 SSD -- 20 CHF
|
||||
- after 15 days
|
||||
- VM is upgraded instantly
|
||||
- Expected result:
|
||||
- order 1: 10 CHF until +15days = 0.5 units = 5 CHF
|
||||
- order 2: 20 CHF starting 15days + 1s ... +30 days after
|
||||
the 15 days -> 45 days = 1 unit = 20 CHF
|
||||
- Total on bill: 25 CHF
|
||||
|
||||
******* Case 2: upgrading
|
||||
- User commits to 10 CHF for 30 days
|
||||
- Wants to upgrade after 15 days to 20 CHF product
|
||||
- Expected result:
|
||||
- order 1: 10 CHF until +30days = 1 unit = 10 CHF
|
||||
|
||||
- order 2: 20 CHF starting 15days + 1s = 1 unit = 20 CHF
|
||||
- Total on bill: 30 CHF
|
||||
|
||||
|
||||
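The arithmetic behind the two upgrade variants above, spelled out (plain
numbers, not project code):

#+BEGIN_SRC python
old_price, new_price = 10, 20          # CHF per 30-day period

# Variant 1: the old order is billed pro rata for 15 of 30 days.
assert 15 / 30 * old_price + new_price == 25   # "Total on bill: 25 CHF"

# Variant 2: the old order is billed for its full period anyway.
assert old_price + new_price == 30             # "Total on bill: 30 CHF"
#+END_SRC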
****** TODO [#C] Note: ending date not set if replaced by default (implicit!)
|
||||
- Should the new order modify the old order on save()?
|
||||
****** DONE Fix totally wrong bill dates in our test case
|
||||
CLOSED: [2020-09-09 Wed 01:00]
|
||||
- 2020 used instead of 2019
|
||||
- Was due to existing test data ...
|
||||
***** DONE Bill logic is still wrong
|
||||
CLOSED: [2020-11-05 Thu 18:58]
|
||||
- Bill starting_date is the date of the first order
|
||||
- However first encountered order does not have to be the
|
||||
earliest in the bill!
|
||||
- Bills should not have a duration
|
||||
- Bills should only have a (unique) issue date
|
||||
- We charge based on bill_records
|
||||
- Last time charged is the issue date of the bill OR the earliest date
  after that
|
||||
- Every bill generation checks all (relevant) orders
|
||||
- add a flag "not_for_billing" or "closed"
|
||||
- query on that flag
|
||||
- verify it every time
|
||||
|
||||
***** TODO Generating bill for admins/staff
|
||||
-
|
||||
|
||||
|
||||
|
||||
|
||||
**** Bill fixes needed
|
||||
***** TODO Double bill in bill id
|
||||
***** TODO Name the currency
|
||||
***** TODO Maybe remove the chromium pdf rendering artefacts
|
||||
- date on the top
|
||||
- title on the top
|
||||
- filename bottom left
|
||||
- page number could even stay
|
||||
***** TODO Try to shorten the timestamp (remove time zone?)
|
||||
***** TODO Bill date might be required
|
||||
***** TODO Total and VAT are empty
|
||||
***** TODO Line below detail/ heading
|
21 manage.py
|
@ -1,21 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
"""Django's command-line utility for administrative tasks."""
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
def main():
|
||||
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uncloud.settings')
|
||||
try:
|
||||
from django.core.management import execute_from_command_line
|
||||
except ImportError as exc:
|
||||
raise ImportError(
|
||||
"Couldn't import Django. Are you sure it's installed and "
|
||||
"available on your PYTHONPATH environment variable? Did you "
|
||||
"forget to activate a virtual environment?"
|
||||
) from exc
|
||||
execute_from_command_line(sys.argv)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
@ -1,4 +0,0 @@
|
|||
from django.contrib import admin
|
||||
from .models import VMInstance
|
||||
|
||||
admin.site.register(VMInstance)
|
|
@ -1,9 +0,0 @@
|
|||
from django.apps import AppConfig
|
||||
|
||||
|
||||
class MatrixhostingConfig(AppConfig):
|
||||
default_auto_field = 'django.db.models.BigAutoField'
|
||||
name = 'matrixhosting'
|
||||
|
||||
def ready(self):
|
||||
from . import signals
|
|
@ -1,31 +0,0 @@
|
|||
from django import forms
|
||||
from django.utils.translation import get_language, ugettext_lazy as _
|
||||
from django.core.exceptions import ValidationError
|
||||
from .models import VMInstance
|
||||
from uncloud.forms import MainForm, MainModelForm, DomainNameField
|
||||
|
||||
|
||||
class InitialRequestForm(MainForm):
|
||||
cores = forms.IntegerField(label='CPU', min_value=1, max_value=48, initial=1)
|
||||
memory = forms.IntegerField(label='RAM', min_value=2, max_value=200, initial=2)
|
||||
storage = forms.IntegerField(label='Storage', min_value=100, max_value=10000, initial=100)
|
||||
pricing_name = forms.CharField(required=True)
|
||||
|
||||
class RequestDomainsNamesForm(MainForm):
|
||||
homeserver_name = forms.CharField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Homeserver Name *'}))
|
||||
webclient_name = forms.CharField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Webclient Name *'}))
|
||||
is_open_registration = forms.BooleanField(required=False, initial=False)
|
||||
|
||||
def clean_homeserver_name(self):
|
||||
homeserver_name = self.cleaned_data['homeserver_name']
|
||||
if VMInstance.objects.filter(homeserver_domain=f"{homeserver_name}.matrix.ungleich.cloud").exists():
|
||||
raise ValidationError("homeserver name already exists")
|
||||
return homeserver_name
|
||||
|
||||
def clean_webclient_name(self):
|
||||
webclient_name = self.cleaned_data['webclient_name']
|
||||
if VMInstance.objects.filter(webclient_domain=f"{webclient_name}.matrix.0co2.cloud").exists():
|
||||
raise ValidationError("webclient name already exists")
|
||||
return webclient_name
|
||||
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-06-30 07:42
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
initial = True
|
||||
|
||||
dependencies = [
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='VMPricing',
|
||||
fields=[
|
||||
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('name', models.CharField(max_length=255, unique=True)),
|
||||
('vat_inclusive', models.BooleanField(default=True)),
|
||||
('vat_percentage', models.DecimalField(blank=True, decimal_places=5, default=0, max_digits=7)),
|
||||
('set_up_fees', models.DecimalField(decimal_places=5, default=0, max_digits=7)),
|
||||
('cores_unit_price', models.DecimalField(decimal_places=5, default=0, max_digits=7)),
|
||||
('ram_unit_price', models.DecimalField(decimal_places=5, default=0, max_digits=7)),
|
||||
('storage_unit_price', models.DecimalField(decimal_places=5, default=0, max_digits=7)),
|
||||
('discount_name', models.CharField(blank=True, max_length=255, null=True)),
|
||||
('discount_amount', models.DecimalField(decimal_places=2, default=0, max_digits=6)),
|
||||
('stripe_coupon_id', models.CharField(blank=True, max_length=255, null=True)),
|
||||
],
|
||||
),
|
||||
]
|
|
@ -1,17 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-07-01 08:48
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0001_initial'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RenameModel(
|
||||
old_name='VMPricing',
|
||||
new_name='MatrixVMPricing',
|
||||
),
|
||||
]
|
|
@ -1,33 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-07-03 15:23
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0002_rename_vmpricing_matrixvmpricing'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='matrixvmpricing',
|
||||
name='cores_unit_price',
|
||||
field=models.DecimalField(decimal_places=2, default=0, max_digits=7),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='matrixvmpricing',
|
||||
name='ram_unit_price',
|
||||
field=models.DecimalField(decimal_places=2, default=0, max_digits=7),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='matrixvmpricing',
|
||||
name='set_up_fees',
|
||||
field=models.DecimalField(decimal_places=2, default=0, max_digits=7),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='matrixvmpricing',
|
||||
name='storage_unit_price',
|
||||
field=models.DecimalField(decimal_places=2, default=0, max_digits=7),
|
||||
),
|
||||
]
|
|
@ -1,43 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-07-05 06:52
|
||||
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('uncloud_pay', '0014_auto_20210703_1747'),
|
||||
('matrixhosting', '0003_auto_20210703_1523'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='VMSpecs',
|
||||
fields=[
|
||||
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('cores', models.IntegerField(default=1)),
|
||||
('memory', models.IntegerField(default=2)),
|
||||
('storage', models.IntegerField(default=100)),
|
||||
('matrix_domain', models.CharField(max_length=255)),
|
||||
('homeserver_domain', models.CharField(max_length=255)),
|
||||
('webclient_domain', models.CharField(max_length=255)),
|
||||
('is_open_registration', models.BooleanField(default=False, null=True)),
|
||||
],
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='MatrixHostingOrder',
|
||||
fields=[
|
||||
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('vm_id', models.IntegerField(default=0)),
|
||||
('created_at', models.DateTimeField(auto_now_add=True)),
|
||||
('status', models.CharField(choices=[('draft', 'Draft'), ('declined', 'Declined'), ('approved', 'Approved')], default='draft', max_length=100)),
|
||||
('stripe_charge_id', models.CharField(max_length=100, null=True)),
|
||||
('price', models.FloatField()),
|
||||
('billing_address', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='uncloud_pay.billingaddress')),
|
||||
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='uncloud_pay.stripecustomer')),
|
||||
('specs', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='matrixhosting.vmspecs')),
|
||||
('vm_pricing', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='matrixhosting.matrixvmpricing')),
|
||||
],
|
||||
),
|
||||
]
|
|
@ -1,19 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-07-05 08:49
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0004_matrixhostingorder_vmspecs'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.DeleteModel(
|
||||
name='MatrixHostingOrder',
|
||||
),
|
||||
migrations.DeleteModel(
|
||||
name='VMSpecs',
|
||||
),
|
||||
]
|
|
@ -1,16 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-07-06 13:21
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0005_auto_20210705_0849'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.DeleteModel(
|
||||
name='MatrixVMPricing',
|
||||
),
|
||||
]
|
|
@ -1,31 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-07-09 09:14
|
||||
|
||||
from django.conf import settings
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
initial = True
|
||||
|
||||
dependencies = [
|
||||
('uncloud_pay', '0021_auto_20210709_0914'),
|
||||
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
|
||||
('matrixhosting', '0006_delete_matrixvmpricing'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='VMInstance',
|
||||
fields=[
|
||||
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
|
||||
('ip', models.TextField(default='')),
|
||||
('config', models.JSONField()),
|
||||
('creation_date', models.DateTimeField(auto_now_add=True)),
|
||||
('termination_date', models.DateTimeField(blank=True, null=True)),
|
||||
('order', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='instance_id', to='uncloud_pay.order')),
|
||||
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
|
||||
],
|
||||
),
|
||||
]
|
|
@ -1,17 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-07-10 14:29
|
||||
|
||||
from django.db import migrations
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0007_vminstance'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='vminstance',
|
||||
name='ip',
|
||||
),
|
||||
]
|
|
@ -1,19 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-07-13 10:20
|
||||
|
||||
from django.db import migrations, models
|
||||
import uuid
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0008_remove_vminstance_ip'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='vminstance',
|
||||
name='vm_id',
|
||||
field=models.UUIDField(default=uuid.uuid4, editable=False, unique=True),
|
||||
),
|
||||
]
|
|
@ -1,22 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-08-06 15:11
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0009_vminstance_vm_id'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RemoveField(
|
||||
model_name='vminstance',
|
||||
name='vm_id',
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='vminstance',
|
||||
name='vm_name',
|
||||
field=models.CharField(blank=True, max_length=256, null=True),
|
||||
),
|
||||
]
|
|
@ -1,18 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-08-06 15:14
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0010_auto_20210806_1511'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='vminstance',
|
||||
name='vm_name',
|
||||
field=models.CharField(editable=False, max_length=253, unique=True),
|
||||
),
|
||||
]
|
|
@ -1,23 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-08-08 16:51
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0011_alter_vminstance_vm_name'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='vminstance',
|
||||
name='homeserver_domain',
|
||||
field=models.CharField(blank=True, max_length=253, null=True, unique=True),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='vminstance',
|
||||
name='webclient_domain',
|
||||
field=models.CharField(blank=True, max_length=253, null=True, unique=True),
|
||||
),
|
||||
]
|
|
@ -1,23 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-08-08 16:52
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('matrixhosting', '0012_auto_20210808_1651'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='vminstance',
|
||||
name='homeserver_domain',
|
||||
field=models.CharField(blank=True, max_length=253, unique=True),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='vminstance',
|
||||
name='webclient_domain',
|
||||
field=models.CharField(blank=True, max_length=253, unique=True),
|
||||
),
|
||||
]
|
|
@ -1,20 +0,0 @@
|
|||
# Generated by Django 3.2.4 on 2021-09-06 08:06
|
||||
|
||||
from django.db import migrations, models
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('uncloud_pay', '0031_auto_20210819_1304'),
|
||||
('matrixhosting', '0013_auto_20210808_1652'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='vminstance',
|
||||
name='order',
|
||||
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='matrix_instance_id', to='uncloud_pay.order'),
|
||||
),
|
||||
]
|