Compare commits
No commits in common. "master" and "0.2" have entirely different histories.
309 changed files with 2204 additions and 18177 deletions
18
.gitignore
vendored
18
.gitignore
vendored
|
|
@ -7,21 +7,3 @@ log.txt
|
||||||
test.py
|
test.py
|
||||||
STRIPE
|
STRIPE
|
||||||
venv/
|
venv/
|
||||||
|
|
||||||
uncloud/docs/build
|
|
||||||
logs.txt
|
|
||||||
|
|
||||||
uncloud.egg-info
|
|
||||||
|
|
||||||
# run artefacts
|
|
||||||
default.etcd
|
|
||||||
__pycache__
|
|
||||||
|
|
||||||
# build artefacts
|
|
||||||
uncloud/version.py
|
|
||||||
build/
|
|
||||||
venv/
|
|
||||||
dist/
|
|
||||||
|
|
||||||
*.iso
|
|
||||||
*.sqlite3
|
|
||||||
|
|
|
||||||
|
|
@ -1,18 +0,0 @@
|
||||||
stages:
|
|
||||||
- lint
|
|
||||||
- test
|
|
||||||
|
|
||||||
run-tests:
|
|
||||||
stage: test
|
|
||||||
image: code.ungleich.ch:5050/uncloud/uncloud/uncloud-ci:latest
|
|
||||||
services:
|
|
||||||
- postgres:latest
|
|
||||||
variables:
|
|
||||||
DATABASE_HOST: postgres
|
|
||||||
DATABASE_USER: postgres
|
|
||||||
POSTGRES_HOST_AUTH_METHOD: trust
|
|
||||||
coverage: /^TOTAL.+?(\d+\%)$/
|
|
||||||
script:
|
|
||||||
- pip install -r requirements.txt
|
|
||||||
- coverage run --source='.' ./manage.py test
|
|
||||||
- coverage report
|
|
||||||
674
LICENSE
674
LICENSE
|
|
@ -1,674 +0,0 @@
|
||||||
GNU GENERAL PUBLIC LICENSE
|
|
||||||
Version 3, 29 June 2007
|
|
||||||
|
|
||||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies
|
|
||||||
of this license document, but changing it is not allowed.
|
|
||||||
|
|
||||||
Preamble
|
|
||||||
|
|
||||||
The GNU General Public License is a free, copyleft license for
|
|
||||||
software and other kinds of works.
|
|
||||||
|
|
||||||
The licenses for most software and other practical works are designed
|
|
||||||
to take away your freedom to share and change the works. By contrast,
|
|
||||||
the GNU General Public License is intended to guarantee your freedom to
|
|
||||||
share and change all versions of a program--to make sure it remains free
|
|
||||||
software for all its users. We, the Free Software Foundation, use the
|
|
||||||
GNU General Public License for most of our software; it applies also to
|
|
||||||
any other work released this way by its authors. You can apply it to
|
|
||||||
your programs, too.
|
|
||||||
|
|
||||||
When we speak of free software, we are referring to freedom, not
|
|
||||||
price. Our General Public Licenses are designed to make sure that you
|
|
||||||
have the freedom to distribute copies of free software (and charge for
|
|
||||||
them if you wish), that you receive source code or can get it if you
|
|
||||||
want it, that you can change the software or use pieces of it in new
|
|
||||||
free programs, and that you know you can do these things.
|
|
||||||
|
|
||||||
To protect your rights, we need to prevent others from denying you
|
|
||||||
these rights or asking you to surrender the rights. Therefore, you have
|
|
||||||
certain responsibilities if you distribute copies of the software, or if
|
|
||||||
you modify it: responsibilities to respect the freedom of others.
|
|
||||||
|
|
||||||
For example, if you distribute copies of such a program, whether
|
|
||||||
gratis or for a fee, you must pass on to the recipients the same
|
|
||||||
freedoms that you received. You must make sure that they, too, receive
|
|
||||||
or can get the source code. And you must show them these terms so they
|
|
||||||
know their rights.
|
|
||||||
|
|
||||||
Developers that use the GNU GPL protect your rights with two steps:
|
|
||||||
(1) assert copyright on the software, and (2) offer you this License
|
|
||||||
giving you legal permission to copy, distribute and/or modify it.
|
|
||||||
|
|
||||||
For the developers' and authors' protection, the GPL clearly explains
|
|
||||||
that there is no warranty for this free software. For both users' and
|
|
||||||
authors' sake, the GPL requires that modified versions be marked as
|
|
||||||
changed, so that their problems will not be attributed erroneously to
|
|
||||||
authors of previous versions.
|
|
||||||
|
|
||||||
Some devices are designed to deny users access to install or run
|
|
||||||
modified versions of the software inside them, although the manufacturer
|
|
||||||
can do so. This is fundamentally incompatible with the aim of
|
|
||||||
protecting users' freedom to change the software. The systematic
|
|
||||||
pattern of such abuse occurs in the area of products for individuals to
|
|
||||||
use, which is precisely where it is most unacceptable. Therefore, we
|
|
||||||
have designed this version of the GPL to prohibit the practice for those
|
|
||||||
products. If such problems arise substantially in other domains, we
|
|
||||||
stand ready to extend this provision to those domains in future versions
|
|
||||||
of the GPL, as needed to protect the freedom of users.
|
|
||||||
|
|
||||||
Finally, every program is threatened constantly by software patents.
|
|
||||||
States should not allow patents to restrict development and use of
|
|
||||||
software on general-purpose computers, but in those that do, we wish to
|
|
||||||
avoid the special danger that patents applied to a free program could
|
|
||||||
make it effectively proprietary. To prevent this, the GPL assures that
|
|
||||||
patents cannot be used to render the program non-free.
|
|
||||||
|
|
||||||
The precise terms and conditions for copying, distribution and
|
|
||||||
modification follow.
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
0. Definitions.
|
|
||||||
|
|
||||||
"This License" refers to version 3 of the GNU General Public License.
|
|
||||||
|
|
||||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
|
||||||
works, such as semiconductor masks.
|
|
||||||
|
|
||||||
"The Program" refers to any copyrightable work licensed under this
|
|
||||||
License. Each licensee is addressed as "you". "Licensees" and
|
|
||||||
"recipients" may be individuals or organizations.
|
|
||||||
|
|
||||||
To "modify" a work means to copy from or adapt all or part of the work
|
|
||||||
in a fashion requiring copyright permission, other than the making of an
|
|
||||||
exact copy. The resulting work is called a "modified version" of the
|
|
||||||
earlier work or a work "based on" the earlier work.
|
|
||||||
|
|
||||||
A "covered work" means either the unmodified Program or a work based
|
|
||||||
on the Program.
|
|
||||||
|
|
||||||
To "propagate" a work means to do anything with it that, without
|
|
||||||
permission, would make you directly or secondarily liable for
|
|
||||||
infringement under applicable copyright law, except executing it on a
|
|
||||||
computer or modifying a private copy. Propagation includes copying,
|
|
||||||
distribution (with or without modification), making available to the
|
|
||||||
public, and in some countries other activities as well.
|
|
||||||
|
|
||||||
To "convey" a work means any kind of propagation that enables other
|
|
||||||
parties to make or receive copies. Mere interaction with a user through
|
|
||||||
a computer network, with no transfer of a copy, is not conveying.
|
|
||||||
|
|
||||||
An interactive user interface displays "Appropriate Legal Notices"
|
|
||||||
to the extent that it includes a convenient and prominently visible
|
|
||||||
feature that (1) displays an appropriate copyright notice, and (2)
|
|
||||||
tells the user that there is no warranty for the work (except to the
|
|
||||||
extent that warranties are provided), that licensees may convey the
|
|
||||||
work under this License, and how to view a copy of this License. If
|
|
||||||
the interface presents a list of user commands or options, such as a
|
|
||||||
menu, a prominent item in the list meets this criterion.
|
|
||||||
|
|
||||||
1. Source Code.
|
|
||||||
|
|
||||||
The "source code" for a work means the preferred form of the work
|
|
||||||
for making modifications to it. "Object code" means any non-source
|
|
||||||
form of a work.
|
|
||||||
|
|
||||||
A "Standard Interface" means an interface that either is an official
|
|
||||||
standard defined by a recognized standards body, or, in the case of
|
|
||||||
interfaces specified for a particular programming language, one that
|
|
||||||
is widely used among developers working in that language.
|
|
||||||
|
|
||||||
The "System Libraries" of an executable work include anything, other
|
|
||||||
than the work as a whole, that (a) is included in the normal form of
|
|
||||||
packaging a Major Component, but which is not part of that Major
|
|
||||||
Component, and (b) serves only to enable use of the work with that
|
|
||||||
Major Component, or to implement a Standard Interface for which an
|
|
||||||
implementation is available to the public in source code form. A
|
|
||||||
"Major Component", in this context, means a major essential component
|
|
||||||
(kernel, window system, and so on) of the specific operating system
|
|
||||||
(if any) on which the executable work runs, or a compiler used to
|
|
||||||
produce the work, or an object code interpreter used to run it.
|
|
||||||
|
|
||||||
The "Corresponding Source" for a work in object code form means all
|
|
||||||
the source code needed to generate, install, and (for an executable
|
|
||||||
work) run the object code and to modify the work, including scripts to
|
|
||||||
control those activities. However, it does not include the work's
|
|
||||||
System Libraries, or general-purpose tools or generally available free
|
|
||||||
programs which are used unmodified in performing those activities but
|
|
||||||
which are not part of the work. For example, Corresponding Source
|
|
||||||
includes interface definition files associated with source files for
|
|
||||||
the work, and the source code for shared libraries and dynamically
|
|
||||||
linked subprograms that the work is specifically designed to require,
|
|
||||||
such as by intimate data communication or control flow between those
|
|
||||||
subprograms and other parts of the work.
|
|
||||||
|
|
||||||
The Corresponding Source need not include anything that users
|
|
||||||
can regenerate automatically from other parts of the Corresponding
|
|
||||||
Source.
|
|
||||||
|
|
||||||
The Corresponding Source for a work in source code form is that
|
|
||||||
same work.
|
|
||||||
|
|
||||||
2. Basic Permissions.
|
|
||||||
|
|
||||||
All rights granted under this License are granted for the term of
|
|
||||||
copyright on the Program, and are irrevocable provided the stated
|
|
||||||
conditions are met. This License explicitly affirms your unlimited
|
|
||||||
permission to run the unmodified Program. The output from running a
|
|
||||||
covered work is covered by this License only if the output, given its
|
|
||||||
content, constitutes a covered work. This License acknowledges your
|
|
||||||
rights of fair use or other equivalent, as provided by copyright law.
|
|
||||||
|
|
||||||
You may make, run and propagate covered works that you do not
|
|
||||||
convey, without conditions so long as your license otherwise remains
|
|
||||||
in force. You may convey covered works to others for the sole purpose
|
|
||||||
of having them make modifications exclusively for you, or provide you
|
|
||||||
with facilities for running those works, provided that you comply with
|
|
||||||
the terms of this License in conveying all material for which you do
|
|
||||||
not control copyright. Those thus making or running the covered works
|
|
||||||
for you must do so exclusively on your behalf, under your direction
|
|
||||||
and control, on terms that prohibit them from making any copies of
|
|
||||||
your copyrighted material outside their relationship with you.
|
|
||||||
|
|
||||||
Conveying under any other circumstances is permitted solely under
|
|
||||||
the conditions stated below. Sublicensing is not allowed; section 10
|
|
||||||
makes it unnecessary.
|
|
||||||
|
|
||||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
|
||||||
|
|
||||||
No covered work shall be deemed part of an effective technological
|
|
||||||
measure under any applicable law fulfilling obligations under article
|
|
||||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
|
||||||
similar laws prohibiting or restricting circumvention of such
|
|
||||||
measures.
|
|
||||||
|
|
||||||
When you convey a covered work, you waive any legal power to forbid
|
|
||||||
circumvention of technological measures to the extent such circumvention
|
|
||||||
is effected by exercising rights under this License with respect to
|
|
||||||
the covered work, and you disclaim any intention to limit operation or
|
|
||||||
modification of the work as a means of enforcing, against the work's
|
|
||||||
users, your or third parties' legal rights to forbid circumvention of
|
|
||||||
technological measures.
|
|
||||||
|
|
||||||
4. Conveying Verbatim Copies.
|
|
||||||
|
|
||||||
You may convey verbatim copies of the Program's source code as you
|
|
||||||
receive it, in any medium, provided that you conspicuously and
|
|
||||||
appropriately publish on each copy an appropriate copyright notice;
|
|
||||||
keep intact all notices stating that this License and any
|
|
||||||
non-permissive terms added in accord with section 7 apply to the code;
|
|
||||||
keep intact all notices of the absence of any warranty; and give all
|
|
||||||
recipients a copy of this License along with the Program.
|
|
||||||
|
|
||||||
You may charge any price or no price for each copy that you convey,
|
|
||||||
and you may offer support or warranty protection for a fee.
|
|
||||||
|
|
||||||
5. Conveying Modified Source Versions.
|
|
||||||
|
|
||||||
You may convey a work based on the Program, or the modifications to
|
|
||||||
produce it from the Program, in the form of source code under the
|
|
||||||
terms of section 4, provided that you also meet all of these conditions:
|
|
||||||
|
|
||||||
a) The work must carry prominent notices stating that you modified
|
|
||||||
it, and giving a relevant date.
|
|
||||||
|
|
||||||
b) The work must carry prominent notices stating that it is
|
|
||||||
released under this License and any conditions added under section
|
|
||||||
7. This requirement modifies the requirement in section 4 to
|
|
||||||
"keep intact all notices".
|
|
||||||
|
|
||||||
c) You must license the entire work, as a whole, under this
|
|
||||||
License to anyone who comes into possession of a copy. This
|
|
||||||
License will therefore apply, along with any applicable section 7
|
|
||||||
additional terms, to the whole of the work, and all its parts,
|
|
||||||
regardless of how they are packaged. This License gives no
|
|
||||||
permission to license the work in any other way, but it does not
|
|
||||||
invalidate such permission if you have separately received it.
|
|
||||||
|
|
||||||
d) If the work has interactive user interfaces, each must display
|
|
||||||
Appropriate Legal Notices; however, if the Program has interactive
|
|
||||||
interfaces that do not display Appropriate Legal Notices, your
|
|
||||||
work need not make them do so.
|
|
||||||
|
|
||||||
A compilation of a covered work with other separate and independent
|
|
||||||
works, which are not by their nature extensions of the covered work,
|
|
||||||
and which are not combined with it such as to form a larger program,
|
|
||||||
in or on a volume of a storage or distribution medium, is called an
|
|
||||||
"aggregate" if the compilation and its resulting copyright are not
|
|
||||||
used to limit the access or legal rights of the compilation's users
|
|
||||||
beyond what the individual works permit. Inclusion of a covered work
|
|
||||||
in an aggregate does not cause this License to apply to the other
|
|
||||||
parts of the aggregate.
|
|
||||||
|
|
||||||
6. Conveying Non-Source Forms.
|
|
||||||
|
|
||||||
You may convey a covered work in object code form under the terms
|
|
||||||
of sections 4 and 5, provided that you also convey the
|
|
||||||
machine-readable Corresponding Source under the terms of this License,
|
|
||||||
in one of these ways:
|
|
||||||
|
|
||||||
a) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by the
|
|
||||||
Corresponding Source fixed on a durable physical medium
|
|
||||||
customarily used for software interchange.
|
|
||||||
|
|
||||||
b) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by a
|
|
||||||
written offer, valid for at least three years and valid for as
|
|
||||||
long as you offer spare parts or customer support for that product
|
|
||||||
model, to give anyone who possesses the object code either (1) a
|
|
||||||
copy of the Corresponding Source for all the software in the
|
|
||||||
product that is covered by this License, on a durable physical
|
|
||||||
medium customarily used for software interchange, for a price no
|
|
||||||
more than your reasonable cost of physically performing this
|
|
||||||
conveying of source, or (2) access to copy the
|
|
||||||
Corresponding Source from a network server at no charge.
|
|
||||||
|
|
||||||
c) Convey individual copies of the object code with a copy of the
|
|
||||||
written offer to provide the Corresponding Source. This
|
|
||||||
alternative is allowed only occasionally and noncommercially, and
|
|
||||||
only if you received the object code with such an offer, in accord
|
|
||||||
with subsection 6b.
|
|
||||||
|
|
||||||
d) Convey the object code by offering access from a designated
|
|
||||||
place (gratis or for a charge), and offer equivalent access to the
|
|
||||||
Corresponding Source in the same way through the same place at no
|
|
||||||
further charge. You need not require recipients to copy the
|
|
||||||
Corresponding Source along with the object code. If the place to
|
|
||||||
copy the object code is a network server, the Corresponding Source
|
|
||||||
may be on a different server (operated by you or a third party)
|
|
||||||
that supports equivalent copying facilities, provided you maintain
|
|
||||||
clear directions next to the object code saying where to find the
|
|
||||||
Corresponding Source. Regardless of what server hosts the
|
|
||||||
Corresponding Source, you remain obligated to ensure that it is
|
|
||||||
available for as long as needed to satisfy these requirements.
|
|
||||||
|
|
||||||
e) Convey the object code using peer-to-peer transmission, provided
|
|
||||||
you inform other peers where the object code and Corresponding
|
|
||||||
Source of the work are being offered to the general public at no
|
|
||||||
charge under subsection 6d.
|
|
||||||
|
|
||||||
A separable portion of the object code, whose source code is excluded
|
|
||||||
from the Corresponding Source as a System Library, need not be
|
|
||||||
included in conveying the object code work.
|
|
||||||
|
|
||||||
A "User Product" is either (1) a "consumer product", which means any
|
|
||||||
tangible personal property which is normally used for personal, family,
|
|
||||||
or household purposes, or (2) anything designed or sold for incorporation
|
|
||||||
into a dwelling. In determining whether a product is a consumer product,
|
|
||||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
|
||||||
product received by a particular user, "normally used" refers to a
|
|
||||||
typical or common use of that class of product, regardless of the status
|
|
||||||
of the particular user or of the way in which the particular user
|
|
||||||
actually uses, or expects or is expected to use, the product. A product
|
|
||||||
is a consumer product regardless of whether the product has substantial
|
|
||||||
commercial, industrial or non-consumer uses, unless such uses represent
|
|
||||||
the only significant mode of use of the product.
|
|
||||||
|
|
||||||
"Installation Information" for a User Product means any methods,
|
|
||||||
procedures, authorization keys, or other information required to install
|
|
||||||
and execute modified versions of a covered work in that User Product from
|
|
||||||
a modified version of its Corresponding Source. The information must
|
|
||||||
suffice to ensure that the continued functioning of the modified object
|
|
||||||
code is in no case prevented or interfered with solely because
|
|
||||||
modification has been made.
|
|
||||||
|
|
||||||
If you convey an object code work under this section in, or with, or
|
|
||||||
specifically for use in, a User Product, and the conveying occurs as
|
|
||||||
part of a transaction in which the right of possession and use of the
|
|
||||||
User Product is transferred to the recipient in perpetuity or for a
|
|
||||||
fixed term (regardless of how the transaction is characterized), the
|
|
||||||
Corresponding Source conveyed under this section must be accompanied
|
|
||||||
by the Installation Information. But this requirement does not apply
|
|
||||||
if neither you nor any third party retains the ability to install
|
|
||||||
modified object code on the User Product (for example, the work has
|
|
||||||
been installed in ROM).
|
|
||||||
|
|
||||||
The requirement to provide Installation Information does not include a
|
|
||||||
requirement to continue to provide support service, warranty, or updates
|
|
||||||
for a work that has been modified or installed by the recipient, or for
|
|
||||||
the User Product in which it has been modified or installed. Access to a
|
|
||||||
network may be denied when the modification itself materially and
|
|
||||||
adversely affects the operation of the network or violates the rules and
|
|
||||||
protocols for communication across the network.
|
|
||||||
|
|
||||||
Corresponding Source conveyed, and Installation Information provided,
|
|
||||||
in accord with this section must be in a format that is publicly
|
|
||||||
documented (and with an implementation available to the public in
|
|
||||||
source code form), and must require no special password or key for
|
|
||||||
unpacking, reading or copying.
|
|
||||||
|
|
||||||
7. Additional Terms.
|
|
||||||
|
|
||||||
"Additional permissions" are terms that supplement the terms of this
|
|
||||||
License by making exceptions from one or more of its conditions.
|
|
||||||
Additional permissions that are applicable to the entire Program shall
|
|
||||||
be treated as though they were included in this License, to the extent
|
|
||||||
that they are valid under applicable law. If additional permissions
|
|
||||||
apply only to part of the Program, that part may be used separately
|
|
||||||
under those permissions, but the entire Program remains governed by
|
|
||||||
this License without regard to the additional permissions.
|
|
||||||
|
|
||||||
When you convey a copy of a covered work, you may at your option
|
|
||||||
remove any additional permissions from that copy, or from any part of
|
|
||||||
it. (Additional permissions may be written to require their own
|
|
||||||
removal in certain cases when you modify the work.) You may place
|
|
||||||
additional permissions on material, added by you to a covered work,
|
|
||||||
for which you have or can give appropriate copyright permission.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, for material you
|
|
||||||
add to a covered work, you may (if authorized by the copyright holders of
|
|
||||||
that material) supplement the terms of this License with terms:
|
|
||||||
|
|
||||||
a) Disclaiming warranty or limiting liability differently from the
|
|
||||||
terms of sections 15 and 16 of this License; or
|
|
||||||
|
|
||||||
b) Requiring preservation of specified reasonable legal notices or
|
|
||||||
author attributions in that material or in the Appropriate Legal
|
|
||||||
Notices displayed by works containing it; or
|
|
||||||
|
|
||||||
c) Prohibiting misrepresentation of the origin of that material, or
|
|
||||||
requiring that modified versions of such material be marked in
|
|
||||||
reasonable ways as different from the original version; or
|
|
||||||
|
|
||||||
d) Limiting the use for publicity purposes of names of licensors or
|
|
||||||
authors of the material; or
|
|
||||||
|
|
||||||
e) Declining to grant rights under trademark law for use of some
|
|
||||||
trade names, trademarks, or service marks; or
|
|
||||||
|
|
||||||
f) Requiring indemnification of licensors and authors of that
|
|
||||||
material by anyone who conveys the material (or modified versions of
|
|
||||||
it) with contractual assumptions of liability to the recipient, for
|
|
||||||
any liability that these contractual assumptions directly impose on
|
|
||||||
those licensors and authors.
|
|
||||||
|
|
||||||
All other non-permissive additional terms are considered "further
|
|
||||||
restrictions" within the meaning of section 10. If the Program as you
|
|
||||||
received it, or any part of it, contains a notice stating that it is
|
|
||||||
governed by this License along with a term that is a further
|
|
||||||
restriction, you may remove that term. If a license document contains
|
|
||||||
a further restriction but permits relicensing or conveying under this
|
|
||||||
License, you may add to a covered work material governed by the terms
|
|
||||||
of that license document, provided that the further restriction does
|
|
||||||
not survive such relicensing or conveying.
|
|
||||||
|
|
||||||
If you add terms to a covered work in accord with this section, you
|
|
||||||
must place, in the relevant source files, a statement of the
|
|
||||||
additional terms that apply to those files, or a notice indicating
|
|
||||||
where to find the applicable terms.
|
|
||||||
|
|
||||||
Additional terms, permissive or non-permissive, may be stated in the
|
|
||||||
form of a separately written license, or stated as exceptions;
|
|
||||||
the above requirements apply either way.
|
|
||||||
|
|
||||||
8. Termination.
|
|
||||||
|
|
||||||
You may not propagate or modify a covered work except as expressly
|
|
||||||
provided under this License. Any attempt otherwise to propagate or
|
|
||||||
modify it is void, and will automatically terminate your rights under
|
|
||||||
this License (including any patent licenses granted under the third
|
|
||||||
paragraph of section 11).
|
|
||||||
|
|
||||||
However, if you cease all violation of this License, then your
|
|
||||||
license from a particular copyright holder is reinstated (a)
|
|
||||||
provisionally, unless and until the copyright holder explicitly and
|
|
||||||
finally terminates your license, and (b) permanently, if the copyright
|
|
||||||
holder fails to notify you of the violation by some reasonable means
|
|
||||||
prior to 60 days after the cessation.
|
|
||||||
|
|
||||||
Moreover, your license from a particular copyright holder is
|
|
||||||
reinstated permanently if the copyright holder notifies you of the
|
|
||||||
violation by some reasonable means, this is the first time you have
|
|
||||||
received notice of violation of this License (for any work) from that
|
|
||||||
copyright holder, and you cure the violation prior to 30 days after
|
|
||||||
your receipt of the notice.
|
|
||||||
|
|
||||||
Termination of your rights under this section does not terminate the
|
|
||||||
licenses of parties who have received copies or rights from you under
|
|
||||||
this License. If your rights have been terminated and not permanently
|
|
||||||
reinstated, you do not qualify to receive new licenses for the same
|
|
||||||
material under section 10.
|
|
||||||
|
|
||||||
9. Acceptance Not Required for Having Copies.
|
|
||||||
|
|
||||||
You are not required to accept this License in order to receive or
|
|
||||||
run a copy of the Program. Ancillary propagation of a covered work
|
|
||||||
occurring solely as a consequence of using peer-to-peer transmission
|
|
||||||
to receive a copy likewise does not require acceptance. However,
|
|
||||||
nothing other than this License grants you permission to propagate or
|
|
||||||
modify any covered work. These actions infringe copyright if you do
|
|
||||||
not accept this License. Therefore, by modifying or propagating a
|
|
||||||
covered work, you indicate your acceptance of this License to do so.
|
|
||||||
|
|
||||||
10. Automatic Licensing of Downstream Recipients.
|
|
||||||
|
|
||||||
Each time you convey a covered work, the recipient automatically
|
|
||||||
receives a license from the original licensors, to run, modify and
|
|
||||||
propagate that work, subject to this License. You are not responsible
|
|
||||||
for enforcing compliance by third parties with this License.
|
|
||||||
|
|
||||||
An "entity transaction" is a transaction transferring control of an
|
|
||||||
organization, or substantially all assets of one, or subdividing an
|
|
||||||
organization, or merging organizations. If propagation of a covered
|
|
||||||
work results from an entity transaction, each party to that
|
|
||||||
transaction who receives a copy of the work also receives whatever
|
|
||||||
licenses to the work the party's predecessor in interest had or could
|
|
||||||
give under the previous paragraph, plus a right to possession of the
|
|
||||||
Corresponding Source of the work from the predecessor in interest, if
|
|
||||||
the predecessor has it or can get it with reasonable efforts.
|
|
||||||
|
|
||||||
You may not impose any further restrictions on the exercise of the
|
|
||||||
rights granted or affirmed under this License. For example, you may
|
|
||||||
not impose a license fee, royalty, or other charge for exercise of
|
|
||||||
rights granted under this License, and you may not initiate litigation
|
|
||||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
|
||||||
any patent claim is infringed by making, using, selling, offering for
|
|
||||||
sale, or importing the Program or any portion of it.
|
|
||||||
|
|
||||||
11. Patents.
|
|
||||||
|
|
||||||
A "contributor" is a copyright holder who authorizes use under this
|
|
||||||
License of the Program or a work on which the Program is based. The
|
|
||||||
work thus licensed is called the contributor's "contributor version".
|
|
||||||
|
|
||||||
A contributor's "essential patent claims" are all patent claims
|
|
||||||
owned or controlled by the contributor, whether already acquired or
|
|
||||||
hereafter acquired, that would be infringed by some manner, permitted
|
|
||||||
by this License, of making, using, or selling its contributor version,
|
|
||||||
but do not include claims that would be infringed only as a
|
|
||||||
consequence of further modification of the contributor version. For
|
|
||||||
purposes of this definition, "control" includes the right to grant
|
|
||||||
patent sublicenses in a manner consistent with the requirements of
|
|
||||||
this License.
|
|
||||||
|
|
||||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
|
||||||
patent license under the contributor's essential patent claims, to
|
|
||||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
|
||||||
propagate the contents of its contributor version.
|
|
||||||
|
|
||||||
In the following three paragraphs, a "patent license" is any express
|
|
||||||
agreement or commitment, however denominated, not to enforce a patent
|
|
||||||
(such as an express permission to practice a patent or covenant not to
|
|
||||||
sue for patent infringement). To "grant" such a patent license to a
|
|
||||||
party means to make such an agreement or commitment not to enforce a
|
|
||||||
patent against the party.
|
|
||||||
|
|
||||||
If you convey a covered work, knowingly relying on a patent license,
|
|
||||||
and the Corresponding Source of the work is not available for anyone
|
|
||||||
to copy, free of charge and under the terms of this License, through a
|
|
||||||
publicly available network server or other readily accessible means,
|
|
||||||
then you must either (1) cause the Corresponding Source to be so
|
|
||||||
available, or (2) arrange to deprive yourself of the benefit of the
|
|
||||||
patent license for this particular work, or (3) arrange, in a manner
|
|
||||||
consistent with the requirements of this License, to extend the patent
|
|
||||||
license to downstream recipients. "Knowingly relying" means you have
|
|
||||||
actual knowledge that, but for the patent license, your conveying the
|
|
||||||
covered work in a country, or your recipient's use of the covered work
|
|
||||||
in a country, would infringe one or more identifiable patents in that
|
|
||||||
country that you have reason to believe are valid.
|
|
||||||
|
|
||||||
If, pursuant to or in connection with a single transaction or
|
|
||||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
|
||||||
covered work, and grant a patent license to some of the parties
|
|
||||||
receiving the covered work authorizing them to use, propagate, modify
|
|
||||||
or convey a specific copy of the covered work, then the patent license
|
|
||||||
you grant is automatically extended to all recipients of the covered
|
|
||||||
work and works based on it.
|
|
||||||
|
|
||||||
A patent license is "discriminatory" if it does not include within
|
|
||||||
the scope of its coverage, prohibits the exercise of, or is
|
|
||||||
conditioned on the non-exercise of one or more of the rights that are
|
|
||||||
specifically granted under this License. You may not convey a covered
|
|
||||||
work if you are a party to an arrangement with a third party that is
|
|
||||||
in the business of distributing software, under which you make payment
|
|
||||||
to the third party based on the extent of your activity of conveying
|
|
||||||
the work, and under which the third party grants, to any of the
|
|
||||||
parties who would receive the covered work from you, a discriminatory
|
|
||||||
patent license (a) in connection with copies of the covered work
|
|
||||||
conveyed by you (or copies made from those copies), or (b) primarily
|
|
||||||
for and in connection with specific products or compilations that
|
|
||||||
contain the covered work, unless you entered into that arrangement,
|
|
||||||
or that patent license was granted, prior to 28 March 2007.
|
|
||||||
|
|
||||||
Nothing in this License shall be construed as excluding or limiting
|
|
||||||
any implied license or other defenses to infringement that may
|
|
||||||
otherwise be available to you under applicable patent law.
|
|
||||||
|
|
||||||
12. No Surrender of Others' Freedom.
|
|
||||||
|
|
||||||
If conditions are imposed on you (whether by court order, agreement or
|
|
||||||
otherwise) that contradict the conditions of this License, they do not
|
|
||||||
excuse you from the conditions of this License. If you cannot convey a
|
|
||||||
covered work so as to satisfy simultaneously your obligations under this
|
|
||||||
License and any other pertinent obligations, then as a consequence you may
|
|
||||||
not convey it at all. For example, if you agree to terms that obligate you
|
|
||||||
to collect a royalty for further conveying from those to whom you convey
|
|
||||||
the Program, the only way you could satisfy both those terms and this
|
|
||||||
License would be to refrain entirely from conveying the Program.
|
|
||||||
|
|
||||||
13. Use with the GNU Affero General Public License.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, you have
|
|
||||||
permission to link or combine any covered work with a work licensed
|
|
||||||
under version 3 of the GNU Affero General Public License into a single
|
|
||||||
combined work, and to convey the resulting work. The terms of this
|
|
||||||
License will continue to apply to the part which is the covered work,
|
|
||||||
but the special requirements of the GNU Affero General Public License,
|
|
||||||
section 13, concerning interaction through a network will apply to the
|
|
||||||
combination as such.
|
|
||||||
|
|
||||||
14. Revised Versions of this License.
|
|
||||||
|
|
||||||
The Free Software Foundation may publish revised and/or new versions of
|
|
||||||
the GNU General Public License from time to time. Such new versions will
|
|
||||||
be similar in spirit to the present version, but may differ in detail to
|
|
||||||
address new problems or concerns.
|
|
||||||
|
|
||||||
Each version is given a distinguishing version number. If the
|
|
||||||
Program specifies that a certain numbered version of the GNU General
|
|
||||||
Public License "or any later version" applies to it, you have the
|
|
||||||
option of following the terms and conditions either of that numbered
|
|
||||||
version or of any later version published by the Free Software
|
|
||||||
Foundation. If the Program does not specify a version number of the
|
|
||||||
GNU General Public License, you may choose any version ever published
|
|
||||||
by the Free Software Foundation.
|
|
||||||
|
|
||||||
If the Program specifies that a proxy can decide which future
|
|
||||||
versions of the GNU General Public License can be used, that proxy's
|
|
||||||
public statement of acceptance of a version permanently authorizes you
|
|
||||||
to choose that version for the Program.
|
|
||||||
|
|
||||||
Later license versions may give you additional or different
|
|
||||||
permissions. However, no additional obligations are imposed on any
|
|
||||||
author or copyright holder as a result of your choosing to follow a
|
|
||||||
later version.
|
|
||||||
|
|
||||||
15. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
|
||||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
|
||||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
|
||||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
||||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
|
||||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
|
||||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
|
||||||
|
|
||||||
16. Limitation of Liability.
|
|
||||||
|
|
||||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
|
||||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
|
||||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
|
||||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
|
||||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
|
||||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
|
||||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
|
||||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
|
||||||
SUCH DAMAGES.
|
|
||||||
|
|
||||||
17. Interpretation of Sections 15 and 16.
|
|
||||||
|
|
||||||
If the disclaimer of warranty and limitation of liability provided
|
|
||||||
above cannot be given local legal effect according to their terms,
|
|
||||||
reviewing courts shall apply local law that most closely approximates
|
|
||||||
an absolute waiver of all civil liability in connection with the
|
|
||||||
Program, unless a warranty or assumption of liability accompanies a
|
|
||||||
copy of the Program in return for a fee.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
How to Apply These Terms to Your New Programs
|
|
||||||
|
|
||||||
If you develop a new program, and you want it to be of the greatest
|
|
||||||
possible use to the public, the best way to achieve this is to make it
|
|
||||||
free software which everyone can redistribute and change under these terms.
|
|
||||||
|
|
||||||
To do so, attach the following notices to the program. It is safest
|
|
||||||
to attach them to the start of each source file to most effectively
|
|
||||||
state the exclusion of warranty; and each file should have at least
|
|
||||||
the "copyright" line and a pointer to where the full notice is found.
|
|
||||||
|
|
||||||
<one line to give the program's name and a brief idea of what it does.>
|
|
||||||
Copyright (C) <year> <name of author>
|
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation, either version 3 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
Also add information on how to contact you by electronic and paper mail.
|
|
||||||
|
|
||||||
If the program does terminal interaction, make it output a short
|
|
||||||
notice like this when it starts in an interactive mode:
|
|
||||||
|
|
||||||
<program> Copyright (C) <year> <name of author>
|
|
||||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
|
||||||
This is free software, and you are welcome to redistribute it
|
|
||||||
under certain conditions; type `show c' for details.
|
|
||||||
|
|
||||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
|
||||||
parts of the General Public License. Of course, your program's commands
|
|
||||||
might be different; for a GUI interface, you would use an "about box".
|
|
||||||
|
|
||||||
You should also get your employer (if you work as a programmer) or school,
|
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
|
||||||
For more information on this, and how to apply and follow the GNU GPL, see
|
|
||||||
<https://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
The GNU General Public License does not permit incorporating your program
|
|
||||||
into proprietary programs. If your program is a subroutine library, you
|
|
||||||
may consider it more useful to permit linking proprietary applications with
|
|
||||||
the library. If this is what you want to do, use the GNU Lesser General
|
|
||||||
Public License instead of this License. But first, please read
|
|
||||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
|
||||||
62
README.md
62
README.md
|
|
@ -1,62 +0,0 @@
|
||||||
# Uncloud
|
|
||||||
|
|
||||||
Cloud management platform, the ungleich way.
|
|
||||||
|
|
||||||
|
|
||||||
[](https://code.ungleich.ch/uncloud/uncloud/commits/master)
|
|
||||||
[](https://code.ungleich.ch/uncloud/uncloud/commits/master)
|
|
||||||
|
|
||||||
## Useful commands
|
|
||||||
|
|
||||||
* `./manage.py import-vat-rates path/to/csv`
|
|
||||||
* `./manage.py make-admin username`
|
|
||||||
|
|
||||||
## Development setup
|
|
||||||
|
|
||||||
Install system dependencies:
|
|
||||||
|
|
||||||
* On Fedora, you will need the following packages: `python3-virtualenv python3-devel openldap-devel gcc chromium`
|
|
||||||
|
|
||||||
NOTE: you will need to configure a LDAP server and credentials for authentication. See `uncloud/settings.py`.
|
|
||||||
|
|
||||||
```
|
|
||||||
# Initialize virtualenv.
|
|
||||||
» virtualenv .venv
|
|
||||||
Using base prefix '/usr'
|
|
||||||
New python executable in /home/fnux/Workspace/ungleich/uncloud/uncloud/.venv/bin/python3
|
|
||||||
Also creating executable in /home/fnux/Workspace/ungleich/uncloud/uncloud/.venv/bin/python
|
|
||||||
Installing setuptools, pip, wheel...
|
|
||||||
done.
|
|
||||||
|
|
||||||
# Enter virtualenv.
|
|
||||||
» source .venv/bin/activate
|
|
||||||
|
|
||||||
# Install dependencies.
|
|
||||||
» pip install -r requirements.txt
|
|
||||||
[...]
|
|
||||||
|
|
||||||
# Run migrations.
|
|
||||||
» ./manage.py migrate
|
|
||||||
Operations to perform:
|
|
||||||
Apply all migrations: admin, auth, contenttypes, opennebula, sessions, uncloud_auth, uncloud_net, uncloud_pay, uncloud_service, uncloud_vm
|
|
||||||
Running migrations:
|
|
||||||
[...]
|
|
||||||
|
|
||||||
# Run webserver.
|
|
||||||
» ./manage.py runserver
|
|
||||||
Watching for file changes with StatReloader
|
|
||||||
Performing system checks...
|
|
||||||
|
|
||||||
System check identified no issues (0 silenced).
|
|
||||||
May 07, 2020 - 10:17:08
|
|
||||||
Django version 3.0.6, using settings 'uncloud.settings'
|
|
||||||
Starting development server at http://127.0.0.1:8000/
|
|
||||||
Quit the server with CONTROL-C.
|
|
||||||
```
|
|
||||||
|
|
||||||
### Note on PGSQL
|
|
||||||
|
|
||||||
If you want to use Postgres:
|
|
||||||
|
|
||||||
* Install on configure PGSQL on your base system.
|
|
||||||
* OR use a container! `podman run --rm -p 5432:5432 -e POSTGRES_HOST_AUTH_METHOD=trust -it postgres:latest`
|
|
||||||
|
|
@ -1,6 +0,0 @@
|
||||||
* Intro
|
|
||||||
This file lists issues that should be handled, are small and likely
|
|
||||||
not yet high prio.
|
|
||||||
* Issues
|
|
||||||
** TODO Register prefered address in User model
|
|
||||||
** TODO Allow to specify different recurring periods
|
|
||||||
|
|
@ -1,18 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
dbhost=$1; shift
|
|
||||||
|
|
||||||
ssh -L5432:localhost:5432 "$dbhost" &
|
|
||||||
|
|
||||||
python manage.py "$@"
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# command only needs to be active while manage command is running
|
|
||||||
|
|
||||||
# -T no pseudo terminal
|
|
||||||
|
|
||||||
|
|
||||||
# alternatively: commands output shell code
|
|
||||||
|
|
||||||
# ssh uncloud@dbhost "python manage.py --hostname xxx ..."
|
|
||||||
|
|
@ -1,4 +0,0 @@
|
||||||
db.sqlite3
|
|
||||||
uncloud/secrets.py
|
|
||||||
debug.log
|
|
||||||
uncloud/local_settings.py
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# 2019-2020 Nico Schottelius (nico-uncloud at schottelius.org)
|
|
||||||
#
|
|
||||||
# This file is part of uncloud.
|
|
||||||
#
|
|
||||||
# uncloud is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# uncloud is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
|
|
||||||
# Wrapper for real script to allow execution from checkout
|
|
||||||
dir=${0%/*}
|
|
||||||
|
|
||||||
# Ensure version is present - the bundled/shipped version contains a static version,
|
|
||||||
# the git version contains a dynamic version
|
|
||||||
printf "VERSION = \"%s\"\n" "$(git describe --tags --abbrev=0)" > ${dir}/../uncloud/version.py
|
|
||||||
|
|
@ -1,33 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# 2012-2019 Nico Schottelius (nico-ucloud at schottelius.org)
|
|
||||||
#
|
|
||||||
# This file is part of ucloud.
|
|
||||||
#
|
|
||||||
# ucloud is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# ucloud is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with ucloud. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
# Wrapper for real script to allow execution from checkout
|
|
||||||
dir=${0%/*}
|
|
||||||
|
|
||||||
# Ensure version is present - the bundled/shipped version contains a static version,
|
|
||||||
# the git version contains a dynamic version
|
|
||||||
${dir}/gen-version
|
|
||||||
|
|
||||||
libdir=$(cd "${dir}/../" && pwd -P)
|
|
||||||
export PYTHONPATH="${libdir}"
|
|
||||||
|
|
||||||
"$dir/../scripts/uncloud" "$@"
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# 2012-2019 Nico Schottelius (nico-ucloud at schottelius.org)
|
|
||||||
#
|
|
||||||
# This file is part of ucloud.
|
|
||||||
#
|
|
||||||
# ucloud is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# ucloud is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with ucloud. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
# Wrapper for real script to allow execution from checkout
|
|
||||||
dir=${0%/*}
|
|
||||||
|
|
||||||
${dir}/gen-version;
|
|
||||||
pip uninstall -y uncloud >/dev/null
|
|
||||||
python setup.py install >/dev/null
|
|
||||||
${dir}/uncloud "$@"
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
[etcd]
|
|
||||||
url = localhost
|
|
||||||
port = 2379
|
|
||||||
base_prefix = /
|
|
||||||
ca_cert
|
|
||||||
cert_cert
|
|
||||||
cert_key
|
|
||||||
|
|
||||||
[client]
|
|
||||||
name = replace_me
|
|
||||||
realm = replace_me
|
|
||||||
seed = replace_me
|
|
||||||
api_server = http://localhost:5000
|
|
||||||
|
|
@ -1,25 +0,0 @@
|
||||||
# Minimal makefile for Sphinx documentation
|
|
||||||
#
|
|
||||||
|
|
||||||
# You can set these variables from the command line, and also
|
|
||||||
# from the environment for the first two.
|
|
||||||
SPHINXOPTS ?=
|
|
||||||
SPHINXBUILD ?= sphinx-build
|
|
||||||
SOURCEDIR = source/
|
|
||||||
BUILDDIR = build/
|
|
||||||
DESTINATION=root@staticweb.ungleich.ch:/home/services/www/ungleichstatic/staticcms.ungleich.ch/www/uncloud/
|
|
||||||
|
|
||||||
.PHONY: all build clean
|
|
||||||
|
|
||||||
publish: build permissions
|
|
||||||
rsync -av $(BUILDDIR) $(DESTINATION)
|
|
||||||
|
|
||||||
permissions: build
|
|
||||||
find $(BUILDDIR) -type f -exec chmod 0644 {} \;
|
|
||||||
find $(BUILDDIR) -type d -exec chmod 0755 {} \;
|
|
||||||
|
|
||||||
build:
|
|
||||||
$(SPHINXBUILD) "$(SOURCEDIR)" "$(BUILDDIR)"
|
|
||||||
|
|
||||||
clean:
|
|
||||||
rm -rf $(BUILDDIR)
|
|
||||||
|
|
@ -1,12 +0,0 @@
|
||||||
# uncloud docs
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
1. Python3
|
|
||||||
2. Sphinx
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
Run `make build` to build docs.
|
|
||||||
|
|
||||||
Run `make clean` to remove build directory.
|
|
||||||
|
|
||||||
Run `make publish` to push build dir to https://ungleich.ch/ucloud/
|
|
||||||
|
|
@ -1,131 +0,0 @@
|
||||||
.. _admin-guide:
|
|
||||||
|
|
||||||
|
|
||||||
Usage Guide For Administrators
|
|
||||||
==============================
|
|
||||||
|
|
||||||
Start API
|
|
||||||
----------
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud api
|
|
||||||
|
|
||||||
Host Creation
|
|
||||||
-------------
|
|
||||||
|
|
||||||
Currently, we don't have any host (that runs virtual machines).
|
|
||||||
So, we need to create it by executing the following command
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud-cli host create --hostname ungleich.ch --cpu 32 --ram '32GB' --os-ssd '32GB'
|
|
||||||
|
|
||||||
You should see something like the following
|
|
||||||
|
|
||||||
.. code-block:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"message": "Host Created"
|
|
||||||
}
|
|
||||||
|
|
||||||
Start Scheduler
|
|
||||||
---------------
|
|
||||||
Scheduler is responsible for scheduling VMs on appropriate host.
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud scheduler
|
|
||||||
|
|
||||||
Start Host
|
|
||||||
----------
|
|
||||||
Host is responsible for handling the following actions
|
|
||||||
|
|
||||||
* Start VM.
|
|
||||||
* Stop VM.
|
|
||||||
* Create VM.
|
|
||||||
* Delete VM.
|
|
||||||
* Migrate VM.
|
|
||||||
* Manage Network Resources needed by VMs.
|
|
||||||
|
|
||||||
It uses a hypervisor such as QEMU to perform these actions.
|
|
||||||
|
|
||||||
To start host we created earlier, execute the following command
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud host ungleich.ch
|
|
||||||
|
|
||||||
File & image scanners
|
|
||||||
--------------------------
|
|
||||||
|
|
||||||
Let's assume we have uploaded an *alpine-uploaded.qcow2* disk images to our
|
|
||||||
uncloud server. Currently, our *alpine-untouched.qcow2* is not tracked by
|
|
||||||
ucloud. We can only make images from tracked files. So, we need to track the
|
|
||||||
file by running File Scanner
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud filescanner
|
|
||||||
|
|
||||||
File Scanner would run, scan your uploaded image and track it. You can check whether your image
|
|
||||||
is successfully tracked by executing the :code:`ucloud-cli user files`, It will return something like the following
|
|
||||||
|
|
||||||
.. _list-user-files:
|
|
||||||
|
|
||||||
.. code-block:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"message": [
|
|
||||||
{
|
|
||||||
"filename": "alpine-untouched.qcow2",
|
|
||||||
"uuid": "3f75bd20-45d6-4013-89c4-7fceaedc8dda"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
Our file is now being tracked by ucloud. Lets create an OS image using the uploaded file.
|
|
||||||
|
|
||||||
An image belongs to an image store. There are two types of store
|
|
||||||
|
|
||||||
* Public Image Store
|
|
||||||
* Private Image Store (Not Implemented Yet)
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
**Quick Quiz** Have we created an image store yet?
|
|
||||||
|
|
||||||
The answer is **No, we haven't**. Creating a sample image store is very easy.
|
|
||||||
Just execute the following command
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
(cd ~/ucloud && pipenv run python api/create_image_store.py)
|
|
||||||
|
|
||||||
An image store (with name = "images") would be created. Now, we are fully ready for creating our
|
|
||||||
very own image. Executing the following command to create image using the file uploaded earlier
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud-cli image create-from-file --name alpine --uuid 3f75bd20-45d6-4013-89c4-7fceaedc8dda --image-store-name images
|
|
||||||
|
|
||||||
Please note that your **uuid** would be different. See :ref:`List of user files <list-user-files>`.
|
|
||||||
|
|
||||||
Now, ucloud have received our request to create an image from file. We have to run Image Scanner to make the image.
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud imagescanner
|
|
||||||
|
|
||||||
To make sure, that our image is create run :code:`ucloud-cli image list --public`. You would get
|
|
||||||
output something like the following
|
|
||||||
|
|
||||||
.. code-block:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"images": [
|
|
||||||
{
|
|
||||||
"name": "images:alpine",
|
|
||||||
"status": "CREATED"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
@ -1,53 +0,0 @@
|
||||||
# Configuration file for the Sphinx documentation builder.
|
|
||||||
#
|
|
||||||
# This file only contains a selection of the most common options. For a full
|
|
||||||
# list see the documentation:
|
|
||||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
|
||||||
|
|
||||||
# -- Path setup --------------------------------------------------------------
|
|
||||||
|
|
||||||
# If extensions (or modules to document with autodoc) are in another directory,
|
|
||||||
# add these directories to sys.path here. If the directory is relative to the
|
|
||||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
|
||||||
#
|
|
||||||
# import os
|
|
||||||
# import sys
|
|
||||||
# sys.path.insert(0, os.path.abspath('.'))
|
|
||||||
|
|
||||||
|
|
||||||
# -- Project information -----------------------------------------------------
|
|
||||||
|
|
||||||
project = "uncloud"
|
|
||||||
copyright = "2019, ungleich"
|
|
||||||
author = "ungleich"
|
|
||||||
|
|
||||||
# -- General configuration ---------------------------------------------------
|
|
||||||
|
|
||||||
# Add any Sphinx extension module names here, as strings. They can be
|
|
||||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
|
||||||
# ones.
|
|
||||||
extensions = [
|
|
||||||
"sphinx.ext.autodoc",
|
|
||||||
"sphinx_rtd_theme",
|
|
||||||
]
|
|
||||||
|
|
||||||
# Add any paths that contain templates here, relative to this directory.
|
|
||||||
templates_path = ["_templates"]
|
|
||||||
|
|
||||||
# List of patterns, relative to source directory, that match files and
|
|
||||||
# directories to ignore when looking for source files.
|
|
||||||
# This pattern also affects html_static_path and html_extra_path.
|
|
||||||
exclude_patterns = []
|
|
||||||
|
|
||||||
# -- Options for HTML output -------------------------------------------------
|
|
||||||
|
|
||||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
|
||||||
# a list of builtin themes.
|
|
||||||
#
|
|
||||||
|
|
||||||
html_theme = "sphinx_rtd_theme"
|
|
||||||
|
|
||||||
# Add any paths that contain custom static files (such as style sheets) here,
|
|
||||||
# relative to this directory. They are copied after the builtin static files,
|
|
||||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
|
||||||
html_static_path = ["_static"]
|
|
||||||
|
|
@ -1,44 +0,0 @@
|
||||||
graph LR
|
|
||||||
style ucloud fill:#FFD2FC
|
|
||||||
style cron fill:#FFF696
|
|
||||||
style infrastructure fill:#BDF0FF
|
|
||||||
subgraph ucloud[ucloud]
|
|
||||||
ucloud-cli[CLI]-->ucloud-api[API]
|
|
||||||
ucloud-api-->ucloud-scheduler[Scheduler]
|
|
||||||
ucloud-api-->ucloud-imagescanner[Image Scanner]
|
|
||||||
ucloud-api-->ucloud-host[Host]
|
|
||||||
ucloud-scheduler-->ucloud-host
|
|
||||||
|
|
||||||
ucloud-host-->need-networking{VM need Networking}
|
|
||||||
need-networking-->|Yes| networking-scripts
|
|
||||||
need-networking-->|No| VM[Virtual Machine]
|
|
||||||
need-networking-->|SLAAC?| radvd
|
|
||||||
networking-scripts-->VM
|
|
||||||
networking-scripts--Create Networks Devices-->networking-scripts
|
|
||||||
subgraph cron[Cron Jobs]
|
|
||||||
ucloud-imagescanner
|
|
||||||
ucloud-filescanner[File Scanner]
|
|
||||||
ucloud-filescanner--Track User files-->ucloud-filescanner
|
|
||||||
end
|
|
||||||
subgraph infrastructure[Infrastructure]
|
|
||||||
radvd
|
|
||||||
etcd
|
|
||||||
networking-scripts[Networking Scripts]
|
|
||||||
ucloud-imagescanner-->image-store
|
|
||||||
image-store{Image Store}
|
|
||||||
image-store-->|CEPH| ceph
|
|
||||||
image-store-->|FILE| file-system
|
|
||||||
ceph[CEPH]
|
|
||||||
file-system[File System]
|
|
||||||
end
|
|
||||||
subgraph virtual-machine[Virtual Machine]
|
|
||||||
VM
|
|
||||||
VM-->ucloud-init
|
|
||||||
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph metadata-group[Metadata Server]
|
|
||||||
metadata-->ucloud-init
|
|
||||||
ucloud-init<-->metadata
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
@ -1,36 +0,0 @@
|
||||||
Hacking
|
|
||||||
=======
|
|
||||||
Using uncloud in hacking (aka development) mode.
|
|
||||||
|
|
||||||
|
|
||||||
Get the code
|
|
||||||
------------
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
git clone https://code.ungleich.ch/uncloud/uncloud.git
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Install python requirements
|
|
||||||
---------------------------
|
|
||||||
You need to have python3 installed.
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
cd uncloud!
|
|
||||||
python -m venv venv
|
|
||||||
. ./venv/bin/activate
|
|
||||||
./bin/uncloud-run-reinstall
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Install os requirements
|
|
||||||
-----------------------
|
|
||||||
Install the following software packages: **dnsmasq**.
|
|
||||||
|
|
||||||
If you already have a working IPv6 SLAAC and DNS setup,
|
|
||||||
this step can be skipped.
|
|
||||||
|
|
||||||
Note that you need at least one /64 IPv6 network to run uncloud.
|
|
||||||
File diff suppressed because one or more lines are too long
|
Before Width: | Height: | Size: 37 KiB |
|
|
@ -1,26 +0,0 @@
|
||||||
.. ucloud documentation master file, created by
|
|
||||||
sphinx-quickstart on Mon Nov 11 19:08:16 2019.
|
|
||||||
You can adapt this file completely to your liking, but it should at least
|
|
||||||
contain the root `toctree` directive.
|
|
||||||
|
|
||||||
Welcome to ucloud's documentation!
|
|
||||||
==================================
|
|
||||||
|
|
||||||
.. toctree::
|
|
||||||
:maxdepth: 2
|
|
||||||
:caption: Contents:
|
|
||||||
|
|
||||||
introduction
|
|
||||||
setup-install
|
|
||||||
vm-images
|
|
||||||
user-guide
|
|
||||||
admin-guide
|
|
||||||
troubleshooting
|
|
||||||
hacking
|
|
||||||
|
|
||||||
Indices and tables
|
|
||||||
==================
|
|
||||||
|
|
||||||
* :ref:`genindex`
|
|
||||||
* :ref:`modindex`
|
|
||||||
* :ref:`search`
|
|
||||||
|
|
@ -1,58 +0,0 @@
|
||||||
Introduction
|
|
||||||
============
|
|
||||||
|
|
||||||
ucloud is a modern, IPv6 first virtual machine management system.
|
|
||||||
It is an alternative to `OpenNebula <https://opennebula.org/>`_,
|
|
||||||
`OpenStack <https://www.openstack.org/>`_ or
|
|
||||||
`Cloudstack <https://cloudstack.apache.org/>`_.
|
|
||||||
|
|
||||||
ucloud is the first cloud management system that puts IPv6
|
|
||||||
first. ucloud also has an integral ordering process that we missed in
|
|
||||||
existing solutions.
|
|
||||||
|
|
||||||
The ucloud documentation is separated into various sections for the
|
|
||||||
different use cases:
|
|
||||||
|
|
||||||
* :ref:`The user guide <user-guide>` describes how to use an existing
|
|
||||||
ucloud installation
|
|
||||||
* There are :ref:`setup instructions <setup-install>` which describe on how to setup a new
|
|
||||||
ucloud instance
|
|
||||||
* :ref:`The admin guide <admin-guide>` describe on how to
|
|
||||||
administrate ucloud
|
|
||||||
|
|
||||||
|
|
||||||
Architecture
|
|
||||||
------------
|
|
||||||
We try to reuse existing components for ucloud. Generally speaking,
|
|
||||||
ucloud consist of a variety of daemons who handle specific tasks and
|
|
||||||
connect to a shared database.
|
|
||||||
|
|
||||||
All interactions with the clients are done through an API.
|
|
||||||
|
|
||||||
ucloud consists of the following components:
|
|
||||||
|
|
||||||
* API
|
|
||||||
* Scheduler
|
|
||||||
* Host
|
|
||||||
* File Scanner
|
|
||||||
* Image Scanner
|
|
||||||
* Metadata Server
|
|
||||||
* VM Init Scripts (dubbed as ucloud-init)How does ucloud work?
|
|
||||||
|
|
||||||
|
|
||||||
Tech Stack
|
|
||||||
----------
|
|
||||||
The following technologies are utilised:
|
|
||||||
|
|
||||||
* Python 3
|
|
||||||
* Flask
|
|
||||||
* QEMU as hypervisor
|
|
||||||
* etcd (key/value store)
|
|
||||||
* radvd for Router Advertisement
|
|
||||||
|
|
||||||
|
|
||||||
Optional components:
|
|
||||||
|
|
||||||
* CEPH for distributed image storage
|
|
||||||
* uotp for user authentication
|
|
||||||
* netbox for IPAM
|
|
||||||
|
|
@ -1,32 +0,0 @@
|
||||||
TODO
|
|
||||||
====
|
|
||||||
|
|
||||||
Security
|
|
||||||
--------
|
|
||||||
|
|
||||||
* **Check Authentication:** Nico reported that some endpoints
|
|
||||||
even work without providing token. (e.g ListUserVM)
|
|
||||||
|
|
||||||
Refactoring/Feature
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
* Put overrides for **IMAGE_BASE**, **VM_BASE** in **ImageStorageHandler**.
|
|
||||||
* Expose more details in ListUserFiles.
|
|
||||||
* Throw KeyError instead of returning None when some key is not found in etcd.
|
|
||||||
* Create Network Manager
|
|
||||||
* That would handle tasks like up/down an interface
|
|
||||||
* Create VXLANs, Bridges, TAPs.
|
|
||||||
* Remove them when they are no longer used.
|
|
||||||
|
|
||||||
Reliability
|
|
||||||
-----------
|
|
||||||
|
|
||||||
* What to do if some command hangs forever? e.g CEPH commands
|
|
||||||
:code:`rbd ls ssd` etc. hangs forever if CEPH isn't running
|
|
||||||
or not responding.
|
|
||||||
* What to do if etcd goes down?
|
|
||||||
|
|
||||||
Misc.
|
|
||||||
-----
|
|
||||||
|
|
||||||
* Put "Always use only one StorageHandler"
|
|
||||||
|
|
@ -1,323 +0,0 @@
|
||||||
.. _setup-install:
|
|
||||||
|
|
||||||
Installation of ucloud
|
|
||||||
======================
|
|
||||||
To install ucloud, you will first need to install the requirements and
|
|
||||||
then ucloud itself.
|
|
||||||
|
|
||||||
We describe the installation in the following sections:
|
|
||||||
|
|
||||||
* Installation overview
|
|
||||||
* Requirements on Alpine
|
|
||||||
* Installation on Arch Linux
|
|
||||||
|
|
||||||
|
|
||||||
Installation overview
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
ucloud requires the following components to run:
|
|
||||||
|
|
||||||
* python3
|
|
||||||
* an etcd cluster
|
|
||||||
|
|
||||||
|
|
||||||
Installation on Arch Linux
|
|
||||||
--------------------------
|
|
||||||
|
|
||||||
In Arch Linux, some packages can be installed from the regular
|
|
||||||
repositories, some packages need to be installed from AUR.
|
|
||||||
|
|
||||||
|
|
||||||
System packages
|
|
||||||
~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
pacman -Syu qemu
|
|
||||||
|
|
||||||
|
|
||||||
AUR packages
|
|
||||||
~~~~~~~~~~~~
|
|
||||||
Use your favorite AUR manager to install the following packages:
|
|
||||||
|
|
||||||
* etcd
|
|
||||||
|
|
||||||
|
|
||||||
Alpine
|
|
||||||
------
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
Python Wheel (Binary) Packages do not support Alpine Linux as it is
|
|
||||||
using musl libc instead of glibc. Therefore, expect longer installation
|
|
||||||
times than other linux distributions.
|
|
||||||
|
|
||||||
Enable Edge Repos, Update and Upgrade
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
.. warning::
|
|
||||||
The below commands would overwrite your repositories sources and
|
|
||||||
upgrade all packages and their dependencies to match those available
|
|
||||||
in edge repos. **So, be warned**
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
cat > /etc/apk/repositories << EOF
|
|
||||||
http://dl-cdn.alpinelinux.org/alpine/edge/main
|
|
||||||
http://dl-cdn.alpinelinux.org/alpine/edge/community
|
|
||||||
http://dl-cdn.alpinelinux.org/alpine/edge/testing
|
|
||||||
EOF
|
|
||||||
|
|
||||||
apk update
|
|
||||||
apk upgrade
|
|
||||||
|
|
||||||
reboot
|
|
||||||
|
|
||||||
|
|
||||||
Install Dependencies
|
|
||||||
~~~~~~~~~~~~~~~~~~~~
|
|
||||||
.. note::
|
|
||||||
The installation and configuration of a production grade etcd cluster
|
|
||||||
is out of scope of this manual. So, we will install etcd with default
|
|
||||||
configuration.
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
apk add git python3 alpine-sdk python3-dev etcd etcd-ctl openntpd \
|
|
||||||
libffi-dev openssl-dev make py3-protobuf py3-tempita chrony
|
|
||||||
|
|
||||||
pip3 install pipenv
|
|
||||||
|
|
||||||
|
|
||||||
**Install QEMU (For Filesystem based Installation)**
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
apk add qemu qemu-system-x86_64 qemu-img
|
|
||||||
|
|
||||||
**Install QEMU/CEPH/radvd (For CEPH based Installation)**
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
$(git clone https://code.ungleich.ch/ahmedbilal/qemu-with-rbd-alpine.git && cd qemu-with-rbd-alpine && apk add apks/*.apk --allow-untrusted)
|
|
||||||
apk add ceph radvd
|
|
||||||
|
|
||||||
Synchronize Date/Time
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
service chronyd start
|
|
||||||
rc-update add chronyd
|
|
||||||
|
|
||||||
|
|
||||||
Start etcd and enable it
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
The following :command:`curl` statement shouldn't be run once
|
|
||||||
etcd is fixed in alpine repos.
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
curl https://raw.githubusercontent.com/etcd-io/etcd/release-3.4/etcd.conf.yml.sample -o /etc/etcd/conf.yml
|
|
||||||
service etcd start
|
|
||||||
rc-update add etcd
|
|
||||||
|
|
||||||
|
|
||||||
Install uotp
|
|
||||||
~~~~~~~~~~~~
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
git clone https://code.ungleich.ch/ungleich-public/uotp.git
|
|
||||||
cd uotp
|
|
||||||
mv .env.sample .env
|
|
||||||
|
|
||||||
pipenv --three --site-packages
|
|
||||||
pipenv install
|
|
||||||
pipenv run python app.py
|
|
||||||
|
|
||||||
Run :code:`$(cd scripts && pipenv run python get-admin.py)` to get
|
|
||||||
admin seed. A sample output
|
|
||||||
|
|
||||||
.. code-block:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"seed": "FYTVQ72A2CJJ4TB4",
|
|
||||||
"realm": ["ungleich-admin"]
|
|
||||||
}
|
|
||||||
|
|
||||||
Now, run :code:`pipenv run python scripts/create-auth.py FYTVQ72A2CJJ4TB4`
|
|
||||||
(Replace **FYTVQ72A2CJJ4TB4** with your admin seed obtained in previous step).
|
|
||||||
A sample output is as below. It shows seed of auth.
|
|
||||||
|
|
||||||
.. code-block:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"message": "Account Created",
|
|
||||||
"name": "auth",
|
|
||||||
"realm": ["ungleich-auth"],
|
|
||||||
"seed": "XZLTUMX26TRAZOXC"
|
|
||||||
}
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
Please note both **admin** and **auth** seeds as we would need them in setting up ucloud.
|
|
||||||
|
|
||||||
|
|
||||||
Install and configure ucloud
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
git clone https://code.ungleich.ch/ucloud/ucloud.git
|
|
||||||
cd ucloud
|
|
||||||
|
|
||||||
pipenv --three --site-packages
|
|
||||||
pipenv install
|
|
||||||
|
|
||||||
**Filesystem based Installation**
|
|
||||||
|
|
||||||
You just need to update **AUTH_SEED** in the below code to match your auth's seed.
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
mkdir /etc/ucloud
|
|
||||||
|
|
||||||
cat > /etc/ucloud/ucloud.conf << EOF
|
|
||||||
AUTH_NAME=auth
|
|
||||||
AUTH_SEED=XZLTUMX26TRAZOXC
|
|
||||||
AUTH_REALM=ungleich-auth
|
|
||||||
|
|
||||||
REALM_ALLOWED = ["ungleich-admin", "ungleich-user"]
|
|
||||||
|
|
||||||
OTP_SERVER="http://127.0.0.1:8000/"
|
|
||||||
|
|
||||||
ETCD_URL=localhost
|
|
||||||
|
|
||||||
STORAGE_BACKEND=filesystem
|
|
||||||
|
|
||||||
BASE_DIR=/var/www
|
|
||||||
IMAGE_DIR=/var/image
|
|
||||||
VM_DIR=/var/vm
|
|
||||||
|
|
||||||
VM_PREFIX=/v1/vm/
|
|
||||||
HOST_PREFIX=/v1/host/
|
|
||||||
REQUEST_PREFIX=/v1/request/
|
|
||||||
FILE_PREFIX=/v1/file/
|
|
||||||
IMAGE_PREFIX=/v1/image/
|
|
||||||
IMAGE_STORE_PREFIX=/v1/image_store/
|
|
||||||
USER_PREFIX=/v1/user/
|
|
||||||
NETWORK_PREFIX=/v1/network/
|
|
||||||
|
|
||||||
ssh_username=meow
|
|
||||||
ssh_pkey="~/.ssh/id_rsa"
|
|
||||||
|
|
||||||
VXLAN_PHY_DEV="eth0"
|
|
||||||
|
|
||||||
EOF
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
**CEPH based Installation**
|
|
||||||
You need to update the following
|
|
||||||
|
|
||||||
* **AUTH_SEED**
|
|
||||||
* **NETBOX_URL**
|
|
||||||
* **NETBOX_TOKEN**
|
|
||||||
* **PREFIX**
|
|
||||||
* **PREFIX_LENGTH**
|
|
||||||
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
mkdir /etc/ucloud
|
|
||||||
|
|
||||||
cat > /etc/ucloud/ucloud.conf << EOF
|
|
||||||
AUTH_NAME=auth
|
|
||||||
AUTH_SEED=XZLTUMX26TRAZOXC
|
|
||||||
AUTH_REALM=ungleich-auth
|
|
||||||
|
|
||||||
REALM_ALLOWED = ["ungleich-admin", "ungleich-user"]
|
|
||||||
|
|
||||||
OTP_SERVER="http://127.0.0.1:8000/"
|
|
||||||
|
|
||||||
ETCD_URL=localhost
|
|
||||||
|
|
||||||
STORAGE_BACKEND=ceph
|
|
||||||
|
|
||||||
BASE_DIR=/var/www
|
|
||||||
IMAGE_DIR=/var/image
|
|
||||||
VM_DIR=/var/vm
|
|
||||||
|
|
||||||
VM_PREFIX=/v1/vm/
|
|
||||||
HOST_PREFIX=/v1/host/
|
|
||||||
REQUEST_PREFIX=/v1/request/
|
|
||||||
FILE_PREFIX=/v1/file/
|
|
||||||
IMAGE_PREFIX=/v1/image/
|
|
||||||
IMAGE_STORE_PREFIX=/v1/image_store/
|
|
||||||
USER_PREFIX=/v1/user/
|
|
||||||
NETWORK_PREFIX=/v1/network/
|
|
||||||
|
|
||||||
ssh_username=meow
|
|
||||||
ssh_pkey="~/.ssh/id_rsa"
|
|
||||||
|
|
||||||
VXLAN_PHY_DEV="eth0"
|
|
||||||
|
|
||||||
NETBOX_URL="<url-for-your-netbox-installation>"
|
|
||||||
NETBOX_TOKEN="netbox-token"
|
|
||||||
PREFIX="your-prefix"
|
|
||||||
PREFIX_LENGTH="64"
|
|
||||||
EOF
|
|
||||||
|
|
||||||
|
|
||||||
Install and configure ucloud-cli
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
:linenos:
|
|
||||||
|
|
||||||
git clone https://code.ungleich.ch/ucloud/ucloud-cli.git
|
|
||||||
cd ucloud-cli
|
|
||||||
pipenv --three --site-packages
|
|
||||||
pipenv install
|
|
||||||
|
|
||||||
cat > ~/.ucloud.conf << EOF
|
|
||||||
UCLOUD_API_SERVER=http://localhost:5000
|
|
||||||
EOF
|
|
||||||
|
|
||||||
mkdir /var/www/
|
|
||||||
|
|
||||||
**Only for Filesystem Based Installation**
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
mkdir /var/image/
|
|
||||||
mkdir /var/vm/
|
|
||||||
|
|
||||||
|
|
||||||
Environment Variables and aliases
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
To ease usage of ucloud and its various components put the following in
|
|
||||||
your shell profile e.g *~/.profile*
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
export OTP_NAME=admin
|
|
||||||
export OTP_REALM=ungleich-admin
|
|
||||||
export OTP_SEED=FYTVQ72A2CJJ4TB4
|
|
||||||
|
|
||||||
alias ucloud='cd /root/ucloud/ && pipenv run python ucloud.py'
|
|
||||||
alias ucloud-cli='cd /root/ucloud-cli/ && pipenv run python ucloud-cli.py'
|
|
||||||
alias uotp='cd /root/uotp/ && pipenv run python app.py'
|
|
||||||
|
|
||||||
and run :code:`source ~/.profile`
|
|
||||||
|
|
@ -1,98 +0,0 @@
|
||||||
Summary
|
|
||||||
=======
|
|
||||||
|
|
||||||
.. image:: /images/ucloud.svg
|
|
||||||
|
|
||||||
.. code-block::
|
|
||||||
|
|
||||||
<cli>
|
|
||||||
|
|
|
||||||
|
|
|
||||||
|
|
|
||||||
+-------------------------<api>
|
|
||||||
| |
|
|
||||||
| |```````````````|```````````````|
|
|
||||||
| | | |
|
|
||||||
| <file_scanner> <scheduler> <image_scanner>
|
|
||||||
| |
|
|
||||||
| |
|
|
||||||
+-------------------------<host>
|
|
||||||
|
|
|
||||||
|
|
|
||||||
|
|
|
||||||
Virtual Machine------<init>------<metadata>
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
**ucloud-cli** interact with **ucloud-api** to do the following operations:
|
|
||||||
|
|
||||||
- Create/Delete/Start/Stop/Migrate/Probe (Status of) Virtual Machines
|
|
||||||
- Create/Delete Networks
|
|
||||||
- Add/Get/Delete SSH Keys
|
|
||||||
- Create OS Image out of a file (tracked by file_scanner)
|
|
||||||
- List User's files/networks/vms
|
|
||||||
- Add Host
|
|
||||||
|
|
||||||
ucloud currently stores OS-Images on
|
|
||||||
|
|
||||||
* File System
|
|
||||||
* `CEPH <https://ceph.io/>`_
|
|
||||||
|
|
||||||
|
|
||||||
**ucloud-api** in turns creates appropriate Requests which are taken
|
|
||||||
by suitable components of ucloud. For Example, if user uses ucloud-cli
|
|
||||||
to create a VM, **ucloud-api** would create a **ScheduleVMRequest** containing
|
|
||||||
things like pointer to VM's entry which have specs, networking
|
|
||||||
configuration of VMs.
|
|
||||||
|
|
||||||
**ucloud-scheduler** accepts requests for VM's scheduling and
|
|
||||||
migration. It finds a host from a list of available host on which
|
|
||||||
the incoming VM can run and schedules it on that host.
|
|
||||||
|
|
||||||
**ucloud-host** runs on host servers i.e servers that
|
|
||||||
actually runs virtual machines, accepts requests
|
|
||||||
intended only for them. It creates/delete/start/stop/migrate
|
|
||||||
virtual machines. It also arrange network resources needed for the
|
|
||||||
incoming VM.
|
|
||||||
|
|
||||||
**ucloud-filescanner** keep tracks of user's files which would be needed
|
|
||||||
later for creating OS Images.
|
|
||||||
|
|
||||||
**ucloud-imagescanner** converts images files from qcow2 format to raw
|
|
||||||
format which would then be imported into image store.
|
|
||||||
|
|
||||||
* In case of **File System**, the converted image would be copied to
|
|
||||||
:file:`/var/image/` or the path referred by :envvar:`IMAGE_PATH`
|
|
||||||
environement variable mentioned in :file:`/etc/ucloud/ucloud.conf`.
|
|
||||||
|
|
||||||
* In case of **CEPH**, the converted image would be imported into
|
|
||||||
specific pool (it depends on the image store in which the image
|
|
||||||
belongs) of CEPH Block Storage.
|
|
||||||
|
|
||||||
**ucloud-metadata** provides metadata which is used to contextualize
|
|
||||||
VMs. When the VM is created, it is just a clone (duplicate) of the OS
|
|
||||||
image from which it is created. So, to differentiate between my
|
|
||||||
VM and your VM, the VM needs to be contextualized. This works
|
|
||||||
like the following
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
Actually, ucloud-init makes the GET request. You can also try it
|
|
||||||
yourself using curl, but ucloud-init does that for you.
|
|
||||||
|
|
||||||
* The VM makes a GET request to http://metadata which resolves to the actual
|
|
||||||
address of metadata server. The metadata server looks at the IPv6
|
|
||||||
Address of the requester and extracts the MAC Address which is possible
|
|
||||||
because the IPv6 address is
|
|
||||||
`IPv6 EUI-64 <https://community.cisco.com/t5/networking-documents/understanding-ipv6-eui-64-bit-address/ta-p/3116953>`_.
|
|
||||||
Metadata use this MAC address to find the actual VM to which it belongs
|
|
||||||
and its owner, ssh-keys and much more. Then, metadata returns these
|
|
||||||
details back to the calling VM in JSON format. These details are
|
|
||||||
then used by the **ucloud-init**, which is explained next.
|
|
||||||
|
|
||||||
**ucloud-init** gets the metadata from **ucloud-metadata** to contextualize
|
|
||||||
the VM. Specifically, it gets owner's ssh keys (or any other keys the
|
|
||||||
owner of VM added to authorized keys for this VM) and put them to ssh
|
|
||||||
server's (installed on VM) authorized keys so that owner can access
|
|
||||||
the VM using ssh. It also installs software that is needed for correct
|
|
||||||
behavior of VM e.g rdnssd (needed for `SLAAC <https://en.wikipedia.org/wiki/IPv6#Stateless_address_autoconfiguration_(SLAAC)>`_).
|
|
||||||
|
|
||||||
|
|
@ -1,24 +0,0 @@
|
||||||
Installation Troubleshooting
|
|
||||||
============================
|
|
||||||
|
|
||||||
etcd doesn't start
|
|
||||||
------------------
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
[root@archlinux ~]# systemctl start etcd
|
|
||||||
Job for etcd.service failed because the control process exited with error code.
|
|
||||||
See "systemctl status etcd.service" and "journalctl -xe" for details
|
|
||||||
|
|
||||||
possible solution
|
|
||||||
~~~~~~~~~~~~~~~~~
|
|
||||||
Try :code:`cat /etc/hosts` if its output contain the following
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
127.0.0.1 localhost.localdomain localhost
|
|
||||||
::1 localhost localhost.localdomain
|
|
||||||
|
|
||||||
|
|
||||||
then unfortunately, we can't help you. But, if it doesn't contain the
|
|
||||||
above you can put the above in :file:`/etc/hosts` to fix the issue.
|
|
||||||
|
|
@ -1,121 +0,0 @@
|
||||||
.. _user-guide:
|
|
||||||
|
|
||||||
User Guide
|
|
||||||
==========
|
|
||||||
|
|
||||||
Create VM
|
|
||||||
---------
|
|
||||||
|
|
||||||
The following command would create a Virtual Machine (name: meow)
|
|
||||||
with following specs
|
|
||||||
|
|
||||||
* CPU: 1
|
|
||||||
* RAM: 1GB
|
|
||||||
* OS-SSD: 4GB
|
|
||||||
* OS: Alpine Linux
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud-cli vm create --vm-name meow --cpu 1 --ram '1gb' --os-ssd '4gb' --image images:alpine
|
|
||||||
|
|
||||||
|
|
||||||
.. _how-to-check-vm-status:
|
|
||||||
|
|
||||||
Check VM Status
|
|
||||||
---------------
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud-cli vm status --vm-name meow
|
|
||||||
|
|
||||||
.. code-block:: json
|
|
||||||
|
|
||||||
{
|
|
||||||
"hostname": "/v1/host/74c21c332f664972bf5078e8de080eea",
|
|
||||||
"image_uuid": "3f75bd20-45d6-4013-89c4-7fceaedc8dda",
|
|
||||||
"in_migration": null,
|
|
||||||
"log": [
|
|
||||||
"2019-11-12T09:11:09.800798 - Started successfully"
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"ssh-keys": []
|
|
||||||
},
|
|
||||||
"name": "meow",
|
|
||||||
"network": [],
|
|
||||||
"owner": "admin",
|
|
||||||
"owner_realm": "ungleich-admin",
|
|
||||||
"specs": {
|
|
||||||
"cpu": 1,
|
|
||||||
"hdd": [],
|
|
||||||
"os-ssd": "4.0 GB",
|
|
||||||
"ram": "1.0 GB"
|
|
||||||
},
|
|
||||||
"status": "RUNNING",
|
|
||||||
"vnc_socket": "/tmp/tmpj1k6sdo_"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Connect to VM using VNC
|
|
||||||
-----------------------
|
|
||||||
|
|
||||||
We would need **socat** utility and a remote desktop client
|
|
||||||
e.g Remmina, KRDC etc. We can get the vnc socket path by getting
|
|
||||||
its status, see :ref:`how-to-check-vm-status`.
|
|
||||||
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
socat TCP-LISTEN:1234,reuseaddr,fork UNIX-CLIENT:/tmp/tmpj1k6sdo_
|
|
||||||
|
|
||||||
|
|
||||||
Then, launch your remote desktop client and connect to vnc://localhost:1234.
|
|
||||||
|
|
||||||
Create Network
|
|
||||||
--------------
|
|
||||||
|
|
||||||
Layer 2 Network with sample IPv6 range fd00::/64 (without IPAM and routing)
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud-cli network create --network-name mynet --network-type vxlan
|
|
||||||
|
|
||||||
|
|
||||||
Layer 2 Network with /64 network with automatic IPAM
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud-cli network create --network-name mynet --network-type vxlan --user True
|
|
||||||
|
|
||||||
Attach Network to VM
|
|
||||||
--------------------
|
|
||||||
|
|
||||||
Currently, user can only attach network to his/her VM at
|
|
||||||
the time of creation. A sample command to create VM with
|
|
||||||
a network is as follow
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud-cli vm create --vm-name meow2 --cpu 1 --ram '1gb' --os-ssd '4gb' --image images:alpine --network mynet
|
|
||||||
|
|
||||||
.. _get-list-of-hosts:
|
|
||||||
|
|
||||||
Get List of Hosts
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud-cli host list
|
|
||||||
|
|
||||||
|
|
||||||
Migrate VM
|
|
||||||
----------
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
ucloud-cli vm migrate --vm-name meow --destination server1.place10
|
|
||||||
|
|
||||||
|
|
||||||
.. option:: --destination
|
|
||||||
|
|
||||||
The name of destination host. You can find a list of host
|
|
||||||
using :ref:`get-list-of-hosts`
|
|
||||||
|
|
@ -1,53 +0,0 @@
|
||||||
How to create VM images for ucloud
|
|
||||||
==================================
|
|
||||||
|
|
||||||
Overview
|
|
||||||
---------
|
|
||||||
|
|
||||||
ucloud tries to be least invasive towards VMs and only requires
|
|
||||||
strictly necessary changes for running in a virtualised
|
|
||||||
environment. This includes configurations for:
|
|
||||||
|
|
||||||
* Configuring the network
|
|
||||||
* Managing access via ssh keys
|
|
||||||
* Resizing the attached disk(s)
|
|
||||||
|
|
||||||
|
|
||||||
Network configuration
|
|
||||||
---------------------
|
|
||||||
All VMs in ucloud are required to support IPv6. The primary network
|
|
||||||
configuration is always done using SLAAC. A VM thus needs only to be
|
|
||||||
configured to
|
|
||||||
|
|
||||||
* accept router advertisements on all network interfaces
|
|
||||||
* use the router advertisements to configure the network interfaces
|
|
||||||
* accept the DNS entries from the router advertisements
|
|
||||||
|
|
||||||
|
|
||||||
Configuring SSH keys
|
|
||||||
--------------------
|
|
||||||
|
|
||||||
To be able to access the VM, ucloud support provisioning SSH keys.
|
|
||||||
|
|
||||||
To accept ssh keys in your VM, request the URL
|
|
||||||
*http://metadata/ssh_keys*. Add the content to the appropriate user's
|
|
||||||
**authorized_keys** file. Below you find sample code to accomplish
|
|
||||||
this task:
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
tmp=$(mktemp)
|
|
||||||
    curl -s http://metadata/ssh_keys > "$tmp"
|
|
||||||
touch ~/.ssh/authorized_keys # ensure it exists
|
|
||||||
cat ~/.ssh/authorized_keys >> "$tmp"
|
|
||||||
sort "$tmp" | uniq > ~/.ssh/authorized_keys
|
|
||||||
|
|
||||||
|
|
||||||
Disk resize
|
|
||||||
-----------
|
|
||||||
In virtualised environments, the disk sizes might grow. The operating
|
|
||||||
system should detect disks that are bigger than the existing partition
|
|
||||||
table and resize accordingly. This task is os specific.
|
|
||||||
|
|
||||||
ucloud does not support shrinking disks due to the complexity and
|
|
||||||
intra OS dependencies.
|
|
||||||
|
|
@ -1,66 +0,0 @@
|
||||||
VM images
|
|
||||||
==================================
|
|
||||||
|
|
||||||
Overview
|
|
||||||
---------
|
|
||||||
|
|
||||||
ucloud tries to be least invasive towards VMs and only requires
|
|
||||||
strictly necessary changes for running in a virtualised
|
|
||||||
environment. This includes configurations for:
|
|
||||||
|
|
||||||
* Configuring the network
|
|
||||||
* Managing access via ssh keys
|
|
||||||
* Resizing the attached disk(s)
|
|
||||||
|
|
||||||
Upstream images
|
|
||||||
---------------
|
|
||||||
|
|
||||||
The 'official' uncloud images are defined in the `uncloud/images
|
|
||||||
<https://code.ungleich.ch/uncloud/images>`_ repository.
|
|
||||||
|
|
||||||
How to make your own uncloud images
|
|
||||||
-----------------------------------
|
|
||||||
|
|
||||||
.. note::
|
|
||||||
It is fairly easy to create your own images for uncloud, as the common
|
|
||||||
operations (which are detailed below) can be automatically handled by the
|
|
||||||
`uncloud/uncloud-init <https://code.ungleich.ch/uncloud/uncloud-init>`_ tool.
|
|
||||||
|
|
||||||
Network configuration
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
All VMs in ucloud are required to support IPv6. The primary network
|
|
||||||
configuration is always done using SLAAC. A VM thus needs only to be
|
|
||||||
configured to
|
|
||||||
|
|
||||||
* accept router advertisements on all network interfaces
|
|
||||||
* use the router advertisements to configure the network interfaces
|
|
||||||
* accept the DNS entries from the router advertisements
|
|
||||||
|
|
||||||
|
|
||||||
Configuring SSH keys
|
|
||||||
~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
To be able to access the VM, ucloud support provisioning SSH keys.
|
|
||||||
|
|
||||||
To accept ssh keys in your VM, request the URL
|
|
||||||
*http://metadata/ssh_keys*. Add the content to the appropriate user's
|
|
||||||
**authorized_keys** file. Below you find sample code to accomplish
|
|
||||||
this task:
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
tmp=$(mktemp)
|
|
||||||
    curl -s http://metadata/ssh_keys > "$tmp"
|
|
||||||
touch ~/.ssh/authorized_keys # ensure it exists
|
|
||||||
cat ~/.ssh/authorized_keys >> "$tmp"
|
|
||||||
sort "$tmp" | uniq > ~/.ssh/authorized_keys
|
|
||||||
|
|
||||||
|
|
||||||
Disk resize
|
|
||||||
~~~~~~~~~~~
|
|
||||||
In virtualised environments, the disk sizes might grow. The operating
|
|
||||||
system should detect disks that are bigger than the existing partition
|
|
||||||
table and resize accordingly. This task is os specific.
|
|
||||||
|
|
||||||
ucloud does not support shrinking disks due to the complexity and
|
|
||||||
intra OS dependencies.
|
|
||||||
|
|
@ -1,89 +0,0 @@
|
||||||
#!/usr/bin/env python3
"""Command line entry point for uncloud.

Discovers the argument parser of every uncloud component, registers each
as a subcommand of one top-level parser, then dispatches to the selected
component's ``main`` function.
"""
import logging
import sys
import importlib
import argparse
import os

from etcd3.exceptions import ConnectionFailedError

from uncloud.common import settings
from uncloud import UncloudException
from uncloud.common.cli import resolve_otp_credentials

# Components that use etcd
ETCD_COMPONENTS = ['api', 'scheduler', 'host', 'filescanner',
                   'imagescanner', 'metadata', 'configure', 'hack']

ALL_COMPONENTS = ETCD_COMPONENTS.copy()
ALL_COMPONENTS.append('oneshot')
#ALL_COMPONENTS.append('cli')


if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    subparsers = arg_parser.add_subparsers(dest='command')

    # Options shared by every subcommand.
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument('--debug', '-d', action='store_true', default=False,
                               help='More verbose logging')
    parent_parser.add_argument('--conf-dir', '-c', help='Configuration directory',
                               default=os.path.expanduser('~/uncloud'))

    # Options shared only by the etcd-backed subcommands.
    etcd_parser = argparse.ArgumentParser(add_help=False)
    etcd_parser.add_argument('--etcd-host')
    etcd_parser.add_argument('--etcd-port')
    etcd_parser.add_argument('--etcd-ca-cert', help='CA that signed the etcd certificate')
    etcd_parser.add_argument('--etcd-cert-cert', help='Path to client certificate')
    etcd_parser.add_argument('--etcd-cert-key', help='Path to client certificate key')

    # Each component module is expected to expose an ``arg_parser``;
    # the etcd options are attached only where the component uses etcd.
    for component in ALL_COMPONENTS:
        mod = importlib.import_module('uncloud.{}.main'.format(component))
        parser = getattr(mod, 'arg_parser')

        if component in ETCD_COMPONENTS:
            subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser, etcd_parser])
        else:
            subparsers.add_parser(name=parser.prog, parents=[parser, parent_parser])

    arguments = vars(arg_parser.parse_args())
    # Collect the names of the --etcd-* options that were actually given...
    etcd_arguments = [key for key, value in arguments.items() if key.startswith('etcd_') and value]
    # ...and regroup them under a single 'etcd' key, stripping the prefix
    # (e.g. {'etcd': {'host': ..., 'port': ...}}).
    etcd_arguments = {
        'etcd': {
            key.replace('etcd_', ''): arguments[key]
            for key in etcd_arguments
        }
    }
    if not arguments['command']:
        arg_parser.print_help()
    else:
        # Initializing Settings and resolving otp_credentials.
        # It is necessary to resolve_otp_credentials after argument parsing is
        # done because previously we read a config file fixed to
        # ~/uncloud/uncloud.conf and used its values as defaults for the
        # --name, --realm and --seed arguments. Now the user tells us where
        # the config file lives, so defaults cannot be provided before the
        # arguments are parsed; it has to happen afterwards.
        # settings.settings = settings.Settings(arguments['conf_dir'], seed_value=etcd_arguments)
        # resolve_otp_credentials(arguments)

        name = arguments.pop('command')
        mod = importlib.import_module('uncloud.{}.main'.format(name))
        main = getattr(mod, 'main')

        if arguments['debug']:
            logging.basicConfig(level=logging.DEBUG)
        else:
            logging.basicConfig(level=logging.INFO)

        log = logging.getLogger()

        try:
            main(arguments)
        except UncloudException as err:
            # Expected, user-facing error: report it and exit non-zero.
            log.error(err)
            sys.exit(1)
        # except ConnectionFailedError as err:
        #     log.error('Cannot connect to etcd: {}'.format(err))
        except Exception as err:
            # Unexpected failure: log the full traceback.
            log.exception(err)
|
|
||||||
|
|
@ -1,51 +0,0 @@
|
||||||
"""Packaging configuration for uncloud.

The version is taken from the generated ``uncloud/version.py`` when it
exists (built distributions); in a plain git checkout it falls back to
``git describe``.
"""
import os

from setuptools import setup, find_packages

with open("README.md", "r") as fh:
    long_description = fh.read()

try:
    import uncloud.version

    version = uncloud.version.VERSION
# A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
# catch Exception so only real failures (missing/broken version module)
# trigger the git fallback.
except Exception:
    import subprocess

    # Running from a checkout: derive the version from the latest git tag.
    c = subprocess.check_output(["git", "describe"])
    version = c.decode("utf-8").strip()


setup(
    name="uncloud",
    version=version,
    description="uncloud cloud management",
    url="https://code.ungleich.ch/uncloud/uncloud",
    long_description=long_description,
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Programming Language :: Python :: 3",
    ],
    author="ungleich",
    author_email="technik@ungleich.ch",
    packages=find_packages(),
    install_requires=[
        "requests",
        "Flask>=1.1.1",
        "flask-restful",
        "bitmath",
        "pyotp",
        "pynetbox",
        "colorama",
        "etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3",
        "marshmallow",
        "ldap3"
    ],
    scripts=["scripts/uncloud"],
    data_files=[
        (os.path.expanduser("~/uncloud/"), ["conf/uncloud.conf"])
    ],
    zip_safe=False,
)
|
|
||||||
|
|
@ -1,37 +0,0 @@
|
||||||
import unittest
from unittest.mock import Mock

from uncloud.hack.mac import MAC
from uncloud import UncloudException

class TestMacLocal(unittest.TestCase):
    """Tests for the MAC address allocator running without a database."""

    def setUp(self):
        # Configure MAC in "no_db" mode so allocation is purely local.
        self.config = Mock()
        self.config.arguments = {"no_db":True}
        self.mac = MAC(self.config)
        self.mac.create()

    def testMacInt(self):
        # Integer form of the first allocated MAC.
        self.assertEqual(self.mac.__int__(), int("0x420000000001",0), "wrong first MAC index")

    def testMacRepr(self):
        # repr() is the bare 12-digit hex string without separators.
        self.assertEqual(self.mac.__repr__(), '420000000001', "wrong first MAC index")

    def testMacStr(self):
        # str() is the conventional colon-separated form.
        self.assertEqual(self.mac.__str__(), '42:00:00:00:00:01', "wrong first MAC index")

    def testValidationRaise(self):
        # A malformed MAC string must be rejected with UncloudException.
        with self.assertRaises(UncloudException):
            self.mac.validate_mac("2")

    def testValidation(self):
        self.assertTrue(self.mac.validate_mac("42:00:00:00:00:01"), "Validation of a given MAC not working properly")

    def testNextMAC(self):
        # NOTE(review): a second create() is still expected to yield ...01 —
        # presumably no_db mode does not persist/advance the counter; confirm
        # against uncloud.hack.mac.MAC.
        self.mac.create()
        self.assertEqual(self.mac.__repr__(), '420000000001', "wrong second MAC index")
        self.assertEqual(self.mac.__int__(), int("0x420000000001",0), "wrong second MAC index")
        self.assertEqual(self.mac.__str__(), '42:00:00:00:00:01', "wrong second MAC index")

if __name__ == '__main__':
    unittest.main()
|
|
||||||
|
|
@ -1,2 +0,0 @@
|
||||||
class UncloudException(Exception):
    """Base exception type for uncloud-specific errors."""
    pass
||||||
|
|
@ -1,12 +0,0 @@
|
||||||
# ucloud-api
|
|
||||||
[](https://www.repostatus.org/#wip)
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
**Make sure you have Python >= 3.5 and Pipenv installed.**
|
|
||||||
|
|
||||||
1. Clone the repository and `cd` into it.
|
|
||||||
2. Run the following commands
|
|
||||||
- `pipenv install`
|
|
||||||
- `pipenv shell`
|
|
||||||
- `python main.py`
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
||||||
import logging
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
@ -1,59 +0,0 @@
|
||||||
import os
|
|
||||||
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
|
|
||||||
|
|
||||||
class Optional:
    """Sentinel type: a Field whose value is an Optional instance skips
    the type check (the field may legitimately be absent)."""
    pass


class Field:
    """A single validated request field.

    ``_value`` defaults to ``None``. Callers pass the ``KeyError`` class
    itself as a sentinel meaning "this key was missing from the request
    payload", which makes the field fail as required.
    """

    def __init__(self, _name, _type, _value=None):
        self.name = _name
        self.value = _value
        self.type = _type
        self.__errors = []

    def validation(self):
        # Hook: subclasses / instances replace this with field-specific
        # checks that call add_error(); the return value is ignored.
        return True

    def is_valid(self):
        """Run all checks; return True when no errors were recorded.

        Errors accumulate across calls via add_error()/get_errors().
        """
        # Identity check: the sentinel is the KeyError class itself, so
        # `is` is the correct comparison (== relied on default identity
        # equality anyway, but was misleading).
        if self.value is KeyError:
            self.add_error(
                "'{}' field is a required field".format(self.name)
            )
        else:
            if isinstance(self.value, Optional):
                pass  # explicitly optional -> nothing to validate
            elif not isinstance(self.value, self.type):
                self.add_error(
                    "Incorrect Type for '{}' field".format(self.name)
                )
            else:
                self.validation()

        if self.__errors:
            return False
        return True

    def get_errors(self):
        return self.__errors

    def add_error(self, error):
        self.__errors.append(error)
|
|
||||||
|
|
||||||
class VmUUIDField(Field):
    """Field holding a VM UUID; validates that the VM exists in etcd."""

    def __init__(self, data):
        self.uuid = data.get("uuid", KeyError)
        super().__init__("uuid", str, self.uuid)
        # Swap the default no-op validation hook for the etcd lookup.
        self.validation = self.vm_uuid_validation

    def vm_uuid_validation(self):
        vm_key = os.path.join(
            shared.settings["etcd"]["vm_prefix"], self.uuid
        )
        entry = shared.etcd_client.get(vm_key)
        if not entry:
            self.add_error("VM with uuid {} does not exists".format(self.uuid))
|
@ -1,19 +0,0 @@
|
||||||
import json
|
|
||||||
import os
|
|
||||||
|
|
||||||
from uuid import uuid4
|
|
||||||
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
|
|
||||||
# One-shot seeding script: register a public ceph-backed image store
# named "images" under a fresh UUID in etcd.
data = {
    'is_public': True,
    'type': 'ceph',
    'name': 'images',
    'description': 'first ever public image-store',
    # 'pool' is the ceph pool backing this store; 'list'/'key' start empty.
    'attributes': {'list': [], 'key': [], 'pool': 'images'},
}

shared.etcd_client.put(
    os.path.join(shared.settings['etcd']['image_store_prefix'], uuid4().hex),
    json.dumps(data),
)
||||||
|
|
@ -1,148 +0,0 @@
|
||||||
import binascii
|
|
||||||
import ipaddress
|
|
||||||
import random
|
|
||||||
import logging
|
|
||||||
import requests
|
|
||||||
|
|
||||||
from pyotp import TOTP
|
|
||||||
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def check_otp(name, realm, token):
    """Verify a user's (name, realm, token) OTP triple.

    Asks the configured verification controller and returns its HTTP
    status code; returns 400 when the local auth seed cannot be turned
    into a TOTP token.
    """
    try:
        payload = {
            "auth_name": shared.settings["otp"]["auth_name"],
            "auth_token": TOTP(shared.settings["otp"]["auth_seed"]).now(),
            "auth_realm": shared.settings["otp"]["auth_realm"],
            "name": name,
            "realm": realm,
            "token": token,
        }
    except binascii.Error:
        # TOTP raises binascii.Error on an invalid base32 seed.
        logger.error(
            "Cannot compute OTP for seed: {}".format(
                shared.settings["otp"]["auth_seed"]
            )
        )
        return 400

    reply = requests.post(
        shared.settings["otp"]["verification_controller_url"], json=payload
    )
    return reply.status_code
|
|
||||||
|
|
||||||
def resolve_vm_name(name, owner):
    """Return UUID of Virtual Machine of name == name and owner == owner

    Input: name of vm, owner of vm.
    Output: uuid of vm if found otherwise None
    """
    for vm in shared.vm_pool.vms:
        if vm.value["owner"] == owner and vm.value["name"] == name:
            # The UUID is the last path component of the etcd key.
            return vm.key.split("/")[-1]
    return None
|
||||||
|
|
||||||
def resolve_image_name(name, etcd_client):
    """Return image uuid given its name and its store

    * If the provided name is not in correct format
      i.e {store_name}:{image_name} return ValueError
    * If no such image found then return KeyError
    """
    try:
        # "images:alpine"      -> ("images", "alpine")
        # "images"             -> not enough values to unpack -> rejected
        # "images:alpine:meow" -> too many values to unpack  -> rejected
        # (a non-string raises here too)
        store_name, image_name = name.split(":")
    except Exception:
        raise ValueError(
            "Image name not in correct format i.e {store_name}:{image_name}"
        )

    entries = etcd_client.get_prefix(
        shared.settings["etcd"]["image_prefix"], value_in_json=True
    )

    # Find the image whose name and store both match.
    for entry in entries:
        if (
            entry.value["name"] == image_name
            and entry.value["store_name"] == store_name
        ):
            # UUID is the final path component of the etcd key.
            return entry.key.split("/")[-1]

    raise KeyError("No image with name {} found.".format(name))
|
||||||
|
|
||||||
def random_bytes(num=6):
    """Return a list of *num* random octet values (0-255)."""
    return [random.randrange(256) for _ in range(num)]


def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"):
    """Generate a random MAC address string.

    uaa:       True -> universally administered (clear bit 1 of octet 0);
               False -> locally administered (set bit 1).
    multicast: True -> set the multicast bit (bit 0); False -> unicast.
    oui:       optional organisation prefix as a list of ints or a
               *separator*-joined string; when given, it replaces the
               leading octets and the uaa/multicast bits are NOT touched.
    Returns the *separator*-joined address formatted with *byte_fmt*.
    """
    mac = random_bytes()
    if oui:
        if isinstance(oui, str):
            # NOTE(review): string OUI chunks are parsed base-10, while
            # the output below is hex -- a hex chunk like "ff" would
            # raise ValueError. Confirm whether base 16 was intended.
            oui = [int(chunk) for chunk in oui.split(separator)]
        mac = oui + random_bytes(num=6 - len(oui))
    else:
        if multicast:
            mac[0] |= 1  # set bit 0
        else:
            mac[0] &= ~1  # clear bit 0
        if uaa:
            mac[0] &= ~(1 << 1)  # clear bit 1
        else:
            mac[0] |= 1 << 1  # set bit 1
    return separator.join(byte_fmt % b for b in mac)
|
||||||
|
|
||||||
def mac2ipv6(mac, prefix):
    """Derive an EUI-64-style IPv6 address for *mac* inside *prefix*.

    The MAC must use ':' separators. The universal/local bit of the
    first octet is flipped and ff:fe spliced into the middle, then the
    resulting 64-bit host part is added to the given prefix address.
    """
    octets = mac.split(":")

    # Build the modified EUI-64 interface identifier.
    octets.insert(3, "ff")
    octets.insert(4, "fe")
    octets[0] = "%x" % (int(octets[0], 16) ^ 2)

    # Four zero groups, then the four 16-bit groups of the EUI-64.
    groups = ["0", "0", "0", "0"]
    groups.extend(hi + lo for hi, lo in zip(octets[::2], octets[1::2]))

    host_part = ipaddress.IPv6Address(":".join(groups))
    return str(ipaddress.IPv6Address(prefix) + int(host_part))
|
|
||||||
|
|
@ -1,600 +0,0 @@
|
||||||
import json
|
|
||||||
import pynetbox
|
|
||||||
import logging
|
|
||||||
import argparse
|
|
||||||
|
|
||||||
from uuid import uuid4
|
|
||||||
from os.path import join as join_path
|
|
||||||
|
|
||||||
from flask import Flask, request
|
|
||||||
from flask_restful import Resource, Api
|
|
||||||
from werkzeug.exceptions import HTTPException
|
|
||||||
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
|
|
||||||
from uncloud.common import counters
|
|
||||||
from uncloud.common.vm import VMStatus
|
|
||||||
from uncloud.common.request import RequestEntry, RequestType
|
|
||||||
from uncloud.api import schemas
|
|
||||||
from uncloud.api.helper import generate_mac, mac2ipv6
|
|
||||||
from uncloud import UncloudException
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
app = Flask(__name__)
|
|
||||||
api = Api(app)
|
|
||||||
app.logger.handlers.clear()
|
|
||||||
|
|
||||||
arg_parser = argparse.ArgumentParser('api', add_help=False)
|
|
||||||
arg_parser.add_argument('--port', '-p')
|
|
||||||
|
|
||||||
|
|
||||||
@app.errorhandler(Exception)
def handle_exception(e):
    """Global Flask error handler: log, then hide internals from clients."""
    app.logger.error(e)
    # pass through HTTP errors
    if isinstance(e, HTTPException):
        return e

    # now you're handling non-HTTP exceptions only
    return {'message': 'Server Error'}, 500
|
|
||||||
|
|
||||||
class CreateVM(Resource):
    """API Request to Handle Creation of VM"""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.CreateVMSchema(data)
        if validator.is_valid():
            vm_uuid = uuid4().hex
            vm_key = join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
            specs = {
                'cpu': validator.specs['cpu'],
                'ram': validator.specs['ram'],
                'os-ssd': validator.specs['os-ssd'],
                'hdd': validator.specs['hdd'],
            }
            # One MAC and one tap-interface id per requested network.
            macs = [generate_mac() for _ in range(len(data['network']))]
            tap_ids = [
                counters.increment_etcd_counter(
                    shared.etcd_client, shared.settings['etcd']['tap_counter']
                )
                for _ in range(len(data['network']))
            ]
            # Initial VM record: stopped, unscheduled (empty hostname).
            vm_entry = {
                'name': data['vm_name'],
                'owner': data['name'],
                'owner_realm': data['realm'],
                'specs': specs,
                'hostname': '',
                'status': VMStatus.stopped,
                'image_uuid': validator.image_uuid,
                'log': [],
                'vnc_socket': '',
                'network': list(zip(data['network'], macs, tap_ids)),
                'metadata': {'ssh-keys': []},
                'in_migration': False,
            }
            shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)

            # Create ScheduleVM Request so the scheduler places the VM.
            r = RequestEntry.from_scratch(
                type=RequestType.ScheduleVM,
                uuid=vm_uuid,
                request_prefix=shared.settings['etcd']['request_prefix'],
            )
            shared.request_pool.put(r)

            return {'message': 'VM Creation Queued'}, 200
        return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class VmStatus(Resource):
    """Return a VM's etcd record augmented with its derived IPv6 addresses."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.VMStatusSchema(data)
        if validator.is_valid():
            vm = shared.vm_pool.get(
                join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
            )
            vm_value = vm.value.copy()
            vm_value['ip'] = []
            # vm.network entries are (network_name, mac, tap_id) triples
            # as stored by CreateVM.
            for network_mac_and_tap in vm.network:
                network_name, mac, tap = network_mac_and_tap
                # NOTE(review): networks are looked up under the caller's
                # name (data['name']), i.e. the requester is assumed to
                # own the network -- confirm.
                network = shared.etcd_client.get(
                    join_path(
                        shared.settings['etcd']['network_prefix'],
                        data['name'],
                        network_name,
                    ),
                    value_in_json=True,
                )
                # Reduce the stored prefix (e.g. "fd00::/64") to its
                # "<prefix>::" base before deriving the address.
                ipv6_addr = (
                    network.value.get('ipv6').split('::')[0] + '::'
                )
                vm_value['ip'].append(mac2ipv6(mac, ipv6_addr))
            vm.value = vm_value
            return vm.value
        else:
            return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class CreateImage(Resource):
    """Queue creation of an image from a previously uploaded file."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.CreateImageSchema(data)
        if validator.is_valid():
            # The image inherits owner and filename from the file entry.
            file_entry = shared.etcd_client.get(
                join_path(shared.settings['etcd']['file_prefix'], data['uuid'])
            )
            file_entry_value = json.loads(file_entry.value)

            # 'TO_BE_CREATED' marks the image for pickup by the image worker.
            image_entry_json = {
                'status': 'TO_BE_CREATED',
                'owner': file_entry_value['owner'],
                'filename': file_entry_value['filename'],
                'name': data['name'],
                'store_name': data['image_store'],
                'visibility': 'public',
            }
            shared.etcd_client.put(
                join_path(
                    shared.settings['etcd']['image_prefix'], data['uuid']
                ),
                json.dumps(image_entry_json),
            )

            return {'message': 'Image queued for creation.'}
        return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class ListPublicImages(Resource):
    """List all images as "store:name" entries with their status."""

    @staticmethod
    def get():
        images = shared.etcd_client.get_prefix(
            shared.settings['etcd']['image_prefix'], value_in_json=True
        )
        r = {'images': []}
        for image in images:
            # Images are addressed as "{store_name}:{name}" elsewhere
            # (see resolve_image_name), so present them the same way.
            image_key = '{}:{}'.format(
                image.value['store_name'], image.value['name']
            )
            r['images'].append(
                {'name': image_key, 'status': image.value['status']}
            )
        return r, 200
|
|
||||||
|
|
||||||
class VMAction(Resource):
    """Perform an action (start/stop/delete/...) on an existing VM."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.VmActionSchema(data)

        if validator.is_valid():
            vm_entry = shared.vm_pool.get(
                join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
            )
            action = data['action']

            # 'start' is implemented by (re)scheduling the VM on a host.
            if action == 'start':
                action = 'schedule'

            if action == 'delete' and vm_entry.hostname == '':
                # VM is not placed on any host: delete its image and etcd
                # entry directly instead of queueing a request.
                if shared.storage_handler.is_vm_image_exists(
                    vm_entry.uuid
                ):
                    r_status = shared.storage_handler.delete_vm_image(
                        vm_entry.uuid
                    )
                    if r_status:
                        shared.etcd_client.client.delete(vm_entry.key)
                        return {'message': 'VM successfully deleted'}
                    else:
                        logger.error(
                            'Some Error Occurred while deleting VM'
                        )
                        # fixed typo in user-facing message ("unsuccessfull")
                        return {'message': 'VM deletion unsuccessful'}
                else:
                    shared.etcd_client.client.delete(vm_entry.key)
                    return {'message': 'VM successfully deleted'}

            # Every other case is queued as a "<Action>VM" request for the
            # host currently owning the VM.
            r = RequestEntry.from_scratch(
                type='{}VM'.format(action.title()),
                uuid=data['uuid'],
                hostname=vm_entry.hostname,
                request_prefix=shared.settings['etcd']['request_prefix'],
            )
            shared.request_pool.put(r)
            return (
                {'message': 'VM {} Queued'.format(action.title())},
                200,
            )
        else:
            return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class VMMigration(Resource):
    """Queue migration of a running VM to another host."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.VmMigrationSchema(data)

        if validator.is_valid():
            vm = shared.vm_pool.get(data['uuid'])
            # NOTE(review): 'hostname' here carries the *destination*
            # host's etcd key, not the current host -- consumers of
            # InitVMMigration appear to expect that; confirm.
            r = RequestEntry.from_scratch(
                type=RequestType.InitVMMigration,
                uuid=vm.uuid,
                hostname=join_path(
                    shared.settings['etcd']['host_prefix'],
                    validator.destination.value,
                ),
                request_prefix=shared.settings['etcd']['request_prefix'],
            )

            shared.request_pool.put(r)
            return (
                {'message': 'VM Migration Initialization Queued'},
                200,
            )
        else:
            return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class ListUserVM(Resource):
    """List all VMs owned by the authenticated user."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.OTPSchema(data)

        if validator.is_valid():
            vms = shared.etcd_client.get_prefix(
                shared.settings['etcd']['vm_prefix'], value_in_json=True
            )
            return_vms = []
            # Ownership is matched on the authenticated user's name.
            user_vms = filter(
                lambda v: v.value['owner'] == data['name'], vms
            )
            for vm in user_vms:
                return_vms.append(
                    {
                        'name': vm.value['name'],
                        'vm_uuid': vm.key.split('/')[-1],
                        'specs': vm.value['specs'],
                        'status': vm.value['status'],
                        'hostname': vm.value['hostname'],
                        'vnc_socket': vm.value.get('vnc_socket', None),
                    }
                )
            if return_vms:
                return {'message': return_vms}, 200
            return {'message': 'No VM found'}, 404

        else:
            return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class ListUserFiles(Resource):
    """List the authenticated user's uploaded files (sanitized)."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.OTPSchema(data)

        if validator.is_valid():
            files = shared.etcd_client.get_prefix(
                shared.settings['etcd']['file_prefix'], value_in_json=True
            )
            return_files = []
            user_files = [f for f in files if f.value['owner'] == data['name']]
            for file in user_files:
                file_uuid = file.key.split('/')[-1]
                file = file.value
                file['uuid'] = file_uuid

                # Strip internal/owner-only details before returning.
                file.pop('sha512sum', None)
                file.pop('owner', None)

                return_files.append(file)
            return {'message': return_files}, 200
        else:
            return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class CreateHost(Resource):
    """Register a new host; it stays DEAD until it starts heartbeating."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.CreateHostSchema(data)
        if validator.is_valid():
            host_key = join_path(
                shared.settings['etcd']['host_prefix'], uuid4().hex
            )
            # New hosts start as DEAD with no heartbeat recorded.
            host_entry = {
                'specs': data['specs'],
                'hostname': data['hostname'],
                'status': 'DEAD',
                'last_heartbeat': '',
            }
            shared.etcd_client.put(
                host_key, host_entry, value_in_json=True
            )

            return {'message': 'Host Created'}, 200

        return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class ListHost(Resource):
    """List every known host with its status, specs and hostname."""

    @staticmethod
    def get():
        listing = {}
        for host in shared.host_pool.hosts:
            listing[host.key] = {
                'status': host.status,
                'specs': host.specs,
                'hostname': host.hostname,
            }
        return listing, 200
|
|
||||||
|
|
||||||
class GetSSHKeys(Resource):
    """Fetch one named SSH key, or all keys, for the authenticated user."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.GetSSHSchema(data)
        if validator.is_valid():
            if not validator.key_name.value:
                # No key name given: return every key under the user.

                # {user_prefix}/{realm}/{name}/key/
                etcd_key = join_path(
                    shared.settings['etcd']['user_prefix'],
                    data['realm'],
                    data['name'],
                    'key',
                )
                etcd_entry = shared.etcd_client.get_prefix(
                    etcd_key, value_in_json=True
                )

                # Map key-name (last path component) -> key material.
                keys = {
                    key.key.split('/')[-1]: key.value
                    for key in etcd_entry
                }
                return {'keys': keys}
            else:

                # {user_prefix}/{realm}/{name}/key/{key_name}
                etcd_key = join_path(
                    shared.settings['etcd']['user_prefix'],
                    data['realm'],
                    data['name'],
                    'key',
                    data['key_name'],
                )
                etcd_entry = shared.etcd_client.get(
                    etcd_key, value_in_json=True
                )

                if etcd_entry:
                    return {
                        'keys': {
                            etcd_entry.key.split('/')[
                                -1
                            ]: etcd_entry.value
                        }
                    }
                else:
                    # Unknown key name: empty mapping, not an error.
                    return {'keys': {}}
        else:
            return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class AddSSHKey(Resource):
    """Store a named SSH key for the authenticated user (no overwrite)."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.AddSSHSchema(data)
        if validator.is_valid():

            # {user_prefix}/{realm}/{name}/key/{key_name}
            etcd_key = join_path(
                shared.settings['etcd']['user_prefix'],
                data['realm'],
                data['name'],
                'key',
                data['key_name'],
            )
            etcd_entry = shared.etcd_client.get(
                etcd_key, value_in_json=True
            )
            if etcd_entry:
                # Refuse to silently overwrite an existing key.
                return {
                    'message': 'Key with name "{}" already exists'.format(
                        data['key_name']
                    )
                }
            else:
                # Key Not Found. It implies user' haven't added any key yet.
                shared.etcd_client.put(
                    etcd_key, data['key'], value_in_json=True
                )
                return {'message': 'Key added successfully'}
        else:
            return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class RemoveSSHKey(Resource):
    """Delete a named SSH key belonging to the authenticated user."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.RemoveSSHSchema(data)
        if validator.is_valid():

            # {user_prefix}/{realm}/{name}/key/{key_name}
            etcd_key = join_path(
                shared.settings['etcd']['user_prefix'],
                data['realm'],
                data['name'],
                'key',
                data['key_name'],
            )
            # Check existence first so the caller gets a precise message.
            etcd_entry = shared.etcd_client.get(
                etcd_key, value_in_json=True
            )
            if etcd_entry:
                shared.etcd_client.client.delete(etcd_key)
                return {'message': 'Key successfully removed.'}
            else:
                return {
                    'message': 'No Key with name "{}" Exists at all.'.format(
                        data['key_name']
                    )
                }
        else:
            return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class CreateNetwork(Resource):
    """Create a user network, allocating an IPv6 prefix from netbox
    for real users and a fixed fd00::/64 otherwise."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.CreateNetwork(data)

        if validator.is_valid():

            # Each network gets a fresh VXLAN id from the etcd counter.
            network_entry = {
                'id': counters.increment_etcd_counter(
                    shared.etcd_client, shared.settings['etcd']['vxlan_counter']
                ),
                'type': data['type'],
            }
            if validator.user.value:
                # Real user: carve a sub-prefix out of the configured
                # parent prefix via netbox IPAM.
                try:
                    nb = pynetbox.api(
                        url=shared.settings['netbox']['url'],
                        token=shared.settings['netbox']['token'],
                    )
                    nb_prefix = nb.ipam.prefixes.get(
                        prefix=shared.settings['network']['prefix']
                    )
                    prefix = nb_prefix.available_prefixes.create(
                        data={
                            'prefix_length': int(
                                shared.settings['network']['prefix_length']
                            ),
                            'description': '{}\'s network "{}"'.format(
                                data['name'], data['network_name']
                            ),
                            'is_pool': True,
                        }
                    )
                except Exception as err:
                    app.logger.error(err)
                    return {
                        'message': 'Error occured while creating network.'
                    }
                else:
                    network_entry['ipv6'] = prefix['prefix']
            else:
                # Fallback prefix for non-user (internal/test) networks.
                network_entry['ipv6'] = 'fd00::/64'

            network_key = join_path(
                shared.settings['etcd']['network_prefix'],
                data['name'],
                data['network_name'],
            )
            shared.etcd_client.put(
                network_key, network_entry, value_in_json=True
            )
            return {'message': 'Network successfully added.'}
        else:
            return validator.get_errors(), 400
|
|
||||||
|
|
||||||
class ListUserNetwork(Resource):
    """List the authenticated user's networks."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.OTPSchema(data)

        if not validator.is_valid():
            return validator.get_errors(), 400

        user_prefix = join_path(
            shared.settings['etcd']['network_prefix'], data['name']
        )
        entries = shared.etcd_client.get_prefix(
            user_prefix, value_in_json=True
        )
        user_networks = []
        for entry in entries:
            # Expose the etcd key's last component as the network name.
            entry.value['name'] = entry.key.split('/')[-1]
            user_networks.append(entry.value)
        return {'networks': user_networks}, 200
|
|
||||||
|
|
||||||
api.add_resource(CreateVM, '/vm/create')
|
|
||||||
api.add_resource(VmStatus, '/vm/status')
|
|
||||||
|
|
||||||
api.add_resource(VMAction, '/vm/action')
|
|
||||||
api.add_resource(VMMigration, '/vm/migrate')
|
|
||||||
|
|
||||||
api.add_resource(CreateImage, '/image/create')
|
|
||||||
api.add_resource(ListPublicImages, '/image/list-public')
|
|
||||||
|
|
||||||
api.add_resource(ListUserVM, '/user/vms')
|
|
||||||
api.add_resource(ListUserFiles, '/user/files')
|
|
||||||
api.add_resource(ListUserNetwork, '/user/networks')
|
|
||||||
|
|
||||||
api.add_resource(AddSSHKey, '/user/add-ssh')
|
|
||||||
api.add_resource(RemoveSSHKey, '/user/remove-ssh')
|
|
||||||
api.add_resource(GetSSHKeys, '/user/get-ssh')
|
|
||||||
|
|
||||||
api.add_resource(CreateHost, '/host/create')
|
|
||||||
api.add_resource(ListHost, '/host/list')
|
|
||||||
|
|
||||||
api.add_resource(CreateNetwork, '/network/create')
|
|
||||||
|
|
||||||
|
|
||||||
def main(arguments):
    """Start the API's Flask server on '::' using the parsed arguments.

    Raises UncloudException when the server fails to bind/start.
    """
    debug = arguments['debug']
    port = arguments['port']

    # NOTE(review): image_stores is currently unused; it is kept for the
    # disabled default-store seeding below.
    try:
        image_stores = list(
            shared.etcd_client.get_prefix(
                shared.settings['etcd']['image_store_prefix'], value_in_json=True
            )
        )
    except KeyError:
        image_stores = False

    # Do not inject default values that might be very wrong
    # fail when required, not before
    #
    # if not image_stores:
    #     data = {
    #         'is_public': True,
    #         'type': 'ceph',
    #         'name': 'images',
    #         'description': 'first ever public image-store',
    #         'attributes': {'list': [], 'key': [], 'pool': 'images'},
    #     }

    #     shared.etcd_client.put(
    #         join_path(
    #             shared.settings['etcd']['image_store_prefix'], uuid4().hex
    #         ),
    #         json.dumps(data),
    #     )

    try:
        app.run(host='::', port=port, debug=debug)
    except OSError as e:
        raise UncloudException('Failed to start Flask: {}'.format(e))
|
|
@ -1,557 +0,0 @@
|
||||||
"""
|
|
||||||
This module contain classes thats validates and intercept/modify
|
|
||||||
data coming from uncloud-cli (user)
|
|
||||||
|
|
||||||
It was primarily developed as an alternative to argument parser
|
|
||||||
of Flask_Restful which is going to be deprecated. I also tried
|
|
||||||
marshmallow for that purpose but it was an overkill (because it
|
|
||||||
do validation + serialization + deserialization) and little
|
|
||||||
inflexible for our purpose.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# TODO: Fix error message when user's mentioned VM (referred by name)
|
|
||||||
# does not exists.
|
|
||||||
#
|
|
||||||
# Currently, it says uuid is a required field.
|
|
||||||
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
|
|
||||||
import bitmath
|
|
||||||
|
|
||||||
from uncloud.common.host import HostStatus
|
|
||||||
from uncloud.common.vm import VMStatus
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
from . import helper, logger
|
|
||||||
from .common_fields import Field, VmUUIDField
|
|
||||||
from .helper import check_otp, resolve_vm_name
|
|
||||||
|
|
||||||
|
|
||||||
class BaseSchema:
    """Base class for request schemas: a list of Fields plus optional
    schema-level validation()."""

    def __init__(self, data, fields=None):
        _ = data  # suppress linter warning
        self.__errors = []
        if fields is None:
            self.fields = []
        else:
            self.fields = fields

    def validation(self):
        # custom validation is optional
        return True

    def is_valid(self):
        """Validate all fields, then parent-class and own validation().

        Returns True when no errors accumulated. Errors from fields and
        hooks are collected via add_error()/add_field_errors().
        """
        for field in self.fields:
            field.is_valid()
            self.add_field_errors(field)

        # Run each direct base class's validation() against this
        # instance (e.g. OTPSchema's credential check) before our own.
        for parent in self.__class__.__bases__:
            try:
                parent.validation(self)
            except AttributeError:
                pass
        # Own validation() only runs when everything so far passed.
        if not self.__errors:
            self.validation()

        if self.__errors:
            return False
        return True

    def get_errors(self):
        # Shaped as a JSON-able API error response body.
        return {"message": self.__errors}

    def add_field_errors(self, field: Field):
        self.__errors += field.get_errors()

    def add_error(self, error):
        self.__errors.append(error)
|
|
||||||
|
|
||||||
class OTPSchema(BaseSchema):
    """Schema base for OTP-authenticated endpoints.

    Adds required (name, realm, token) credential fields and checks
    them against the OTP verification controller.
    """

    def __init__(self, data: dict, fields=None):
        self.name = Field("name", str, data.get("name", KeyError))
        self.realm = Field("realm", str, data.get("realm", KeyError))
        self.token = Field("token", str, data.get("token", KeyError))

        _fields = [self.name, self.realm, self.token]
        if fields:
            _fields += fields
        super().__init__(data=data, fields=_fields)

    def validation(self):
        # Any non-200 answer from the verification controller means the
        # supplied credentials were rejected.
        if (
            check_otp(
                self.name.value, self.realm.value, self.token.value
            )
            != 200
        ):
            self.add_error("Wrong Credentials")
|
|
||||||
|
|
||||||
########################## Image Operations ###############################################
|
|
||||||
|
|
||||||
|
|
||||||
class CreateImageSchema(BaseSchema):
    """Validates requests to create an image from an uploaded file.

    Checks that the referenced file uuid exists and that the named
    image store exists.
    """

    def __init__(self, data):
        # Fields
        self.uuid = Field("uuid", str, data.get("uuid", KeyError))
        self.name = Field("name", str, data.get("name", KeyError))
        self.image_store = Field(
            "image_store", str, data.get("image_store", KeyError)
        )

        # Validations
        self.uuid.validation = self.file_uuid_validation
        self.image_store.validation = self.image_store_name_validation

        # All Fields
        fields = [self.uuid, self.name, self.image_store]
        super().__init__(data, fields)

    def file_uuid_validation(self):
        """Ensure the uploaded file referenced by uuid exists in etcd."""
        # Fixed corrupted attribute chain (`shared.shared...settings`):
        # every other schema in this module uses `shared.settings`.
        file_entry = shared.etcd_client.get(
            os.path.join(
                shared.settings["etcd"]["file_prefix"], self.uuid.value
            )
        )
        if file_entry is None:
            self.add_error(
                "Image File with uuid '{}' Not Found".format(
                    self.uuid.value
                )
            )

    def image_store_name_validation(self):
        """Ensure an image store with the requested name exists."""
        image_stores = list(
            shared.etcd_client.get_prefix(
                shared.settings["etcd"]["image_store_prefix"]
            )
        )

        image_store = next(
            filter(
                lambda s: json.loads(s.value)["name"]
                == self.image_store.value,
                image_stores,
            ),
            None,
        )
        if not image_store:
            self.add_error(
                "Store '{}' does not exists".format(
                    self.image_store.value
                )
            )
|
|
||||||
|
|
||||||
# Host Operations
|
|
||||||
|
|
||||||
|
|
||||||
class CreateHostSchema(OTPSchema):
    """Schema validating a request to register a new uncloud host (admin only)."""

    def __init__(self, data):
        # Fields
        self.specs = Field("specs", dict, data.get("specs", KeyError))
        self.hostname = Field(
            "hostname", str, data.get("hostname", KeyError)
        )

        # Validation
        self.specs.validation = self.specs_validation

        fields = [self.hostname, self.specs]

        super().__init__(data=data, fields=fields)

    def specs_validation(self):
        """Check units and minimums of the host specs, then normalize them."""
        ALLOWED_BASE = 10  # only decimal (SI) units such as GB are accepted

        _cpu = self.specs.value.get("cpu", KeyError)
        _ram = self.specs.value.get("ram", KeyError)
        _os_ssd = self.specs.value.get("os-ssd", KeyError)
        _hdd = self.specs.value.get("hdd", KeyError)

        if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
            self.add_error(
                "You must specify CPU, RAM and OS-SSD in your specs"
            )
            return None
        try:
            parsed_ram = bitmath.parse_string_unsafe(_ram)
            parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)

            if parsed_ram.base != ALLOWED_BASE:
                self.add_error(
                    "Your specified RAM is not in correct units"
                )
            if parsed_os_ssd.base != ALLOWED_BASE:
                self.add_error(
                    "Your specified OS-SSD is not in correct units"
                )

            # BUG FIX: was `if _cpu < 1:` — a string cpu raised an
            # uncaught TypeError; int() raises ValueError, which the
            # handler below converts into a proper validation error
            # (and matches CreateVMSchema.specs_validation).
            if int(_cpu) < 1:
                self.add_error("CPU must be atleast 1")

            if parsed_ram < bitmath.GB(1):
                self.add_error("RAM must be atleast 1 GB")

            if parsed_os_ssd < bitmath.GB(10):
                self.add_error("OS-SSD must be atleast 10 GB")

            parsed_hdd = []
            for hdd in _hdd:
                _parsed_hdd = bitmath.parse_string_unsafe(hdd)
                if _parsed_hdd.base != ALLOWED_BASE:
                    self.add_error(
                        "Your specified HDD is not in correct units"
                    )
                    break
                else:
                    parsed_hdd.append(str(_parsed_hdd))

        except ValueError:
            # TODO: Find some good error message
            self.add_error("Specs are not correct.")
        else:
            # BUG FIX: was `if self.get_errors():` — get_errors() returns
            # {"message": [...]}, a dict that is truthy even with zero
            # errors, so the specs were replaced even when validation had
            # failed. Normalize only on success.
            if not self.get_errors()["message"]:
                self.specs = {
                    "cpu": _cpu,
                    "ram": str(parsed_ram),
                    "os-ssd": str(parsed_os_ssd),
                    "hdd": parsed_hdd,
                }

    def validation(self):
        """Only members of the admin realm may register hosts."""
        if self.realm.value != "ungleich-admin":
            self.add_error(
                "Invalid Credentials/Insufficient Permission"
            )
|
|
||||||
|
|
||||||
|
|
||||||
# VM Operations
|
|
||||||
|
|
||||||
|
|
||||||
class CreateVMSchema(OTPSchema):
    """Schema validating a request to create a new VM."""

    def __init__(self, data):
        # Fields
        self.specs = Field("specs", dict, data.get("specs", KeyError))
        self.vm_name = Field(
            "vm_name", str, data.get("vm_name", KeyError)
        )
        self.image = Field("image", str, data.get("image", KeyError))
        self.network = Field(
            "network", list, data.get("network", KeyError)
        )

        # Validation
        self.image.validation = self.image_validation
        self.vm_name.validation = self.vm_name_validation
        self.specs.validation = self.specs_validation
        self.network.validation = self.network_validation

        fields = [self.vm_name, self.image, self.specs, self.network]

        super().__init__(data=data, fields=fields)

    def image_validation(self):
        """Resolve the image name to a UUID; error if it cannot be resolved."""
        try:
            image_uuid = helper.resolve_image_name(
                self.image.value, shared.etcd_client
            )
        except Exception as e:
            logger.exception(
                "Cannot resolve image name = %s", self.image.value
            )
            self.add_error(str(e))
        else:
            self.image_uuid = image_uuid

    def vm_name_validation(self):
        """Reject a VM name the owner already uses."""
        if resolve_vm_name(
            name=self.vm_name.value, owner=self.name.value
        ):
            self.add_error(
                'VM with same name "{}" already exists'.format(
                    self.vm_name.value
                )
            )

    def network_validation(self):
        """Every requested network must already exist for this user."""
        _network = self.network.value

        if _network:
            for net in _network:
                # BUG FIX: was the garbled
                # `shared.shared.shared.shared.shared.settings`
                network = shared.etcd_client.get(
                    os.path.join(
                        shared.settings["etcd"]["network_prefix"],
                        self.name.value,
                        net,
                    ),
                    value_in_json=True,
                )
                if not network:
                    self.add_error(
                        "Network with name {} does not exists".format(
                            net
                        )
                    )

    def specs_validation(self):
        """Check units and minimums of the VM specs, then normalize them."""
        ALLOWED_BASE = 10  # only decimal (SI) units such as GB are accepted

        _cpu = self.specs.value.get("cpu", KeyError)
        _ram = self.specs.value.get("ram", KeyError)
        _os_ssd = self.specs.value.get("os-ssd", KeyError)
        _hdd = self.specs.value.get("hdd", KeyError)

        if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
            self.add_error(
                "You must specify CPU, RAM and OS-SSD in your specs"
            )
            return None
        try:
            parsed_ram = bitmath.parse_string_unsafe(_ram)
            parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)

            if parsed_ram.base != ALLOWED_BASE:
                self.add_error(
                    "Your specified RAM is not in correct units"
                )
            if parsed_os_ssd.base != ALLOWED_BASE:
                self.add_error(
                    "Your specified OS-SSD is not in correct units"
                )

            if int(_cpu) < 1:
                self.add_error("CPU must be atleast 1")

            if parsed_ram < bitmath.GB(1):
                self.add_error("RAM must be atleast 1 GB")

            if parsed_os_ssd < bitmath.GB(1):
                self.add_error("OS-SSD must be atleast 1 GB")

            parsed_hdd = []
            for hdd in _hdd:
                _parsed_hdd = bitmath.parse_string_unsafe(hdd)
                if _parsed_hdd.base != ALLOWED_BASE:
                    self.add_error(
                        "Your specified HDD is not in correct units"
                    )
                    break
                else:
                    parsed_hdd.append(str(_parsed_hdd))

        except ValueError:
            # TODO: Find some good error message
            self.add_error("Specs are not correct.")
        else:
            # BUG FIX: was `if self.get_errors():` — get_errors() returns
            # {"message": [...]}, a dict that is truthy even with zero
            # errors, so the specs were replaced even when validation had
            # failed. Normalize only on success.
            if not self.get_errors()["message"]:
                self.specs = {
                    "cpu": _cpu,
                    "ram": str(parsed_ram),
                    "os-ssd": str(parsed_os_ssd),
                    "hdd": parsed_hdd,
                }
|
|
||||||
|
|
||||||
|
|
||||||
class VMStatusSchema(OTPSchema):
    """Schema guarding VM status queries: only the owner or an admin."""

    def __init__(self, data):
        # Resolve the human-readable VM name to its UUID; fall back to
        # KeyError so the Field machinery reports a missing value.
        owner = data.get("in_support_of", None) or data.get("name", None)
        data["uuid"] = (
            resolve_vm_name(name=data.get("vm_name", None), owner=owner)
            or KeyError
        )
        self.uuid = VmUUIDField(data)

        super().__init__(data, [self.uuid])

    def validation(self):
        """Reject callers that neither own the VM nor belong to the admin realm."""
        vm = shared.vm_pool.get(self.uuid.value)
        is_owner = vm.value["owner"] == self.name.value
        is_admin = self.realm.value == "ungleich-admin"
        if not (is_owner or is_admin):
            self.add_error("Invalid User")
|
|
||||||
|
|
||||||
|
|
||||||
class VmActionSchema(OTPSchema):
    """Schema validating start/stop/delete actions on a VM."""

    def __init__(self, data):
        # Resolve the human-readable VM name to its UUID; fall back to
        # KeyError so the Field machinery reports a missing value.
        owner = data.get("in_support_of", None) or data.get("name", None)
        data["uuid"] = (
            resolve_vm_name(name=data.get("vm_name", None), owner=owner)
            or KeyError
        )

        self.uuid = VmUUIDField(data)
        self.action = Field("action", str, data.get("action", KeyError))
        self.action.validation = self.action_validation

        super().__init__(data=data, fields=[self.uuid, self.action])

    def action_validation(self):
        """Only a fixed whitelist of actions is accepted."""
        allowed_actions = ["start", "stop", "delete"]
        if self.action.value not in allowed_actions:
            self.add_error(
                "Invalid Action. Allowed Actions are {}".format(
                    allowed_actions
                )
            )

    def validation(self):
        """Ownership check plus state checks for the requested action."""
        vm = shared.vm_pool.get(self.uuid.value)

        is_owner = vm.value["owner"] == self.name.value
        is_admin = self.realm.value == "ungleich-admin"
        if not (is_owner or is_admin):
            self.add_error("Invalid User")

        action = self.action.value
        already_running = (
            vm.status == VMStatus.running and vm.hostname != ""
        )
        if action == "start" and already_running:
            self.add_error("VM Already Running")

        if action == "stop":
            if vm.status == VMStatus.stopped:
                self.add_error("VM Already Stopped")
            elif vm.status != VMStatus.running:
                self.add_error("Cannot stop non-running VM")
|
|
||||||
|
|
||||||
|
|
||||||
class VmMigrationSchema(OTPSchema):
    """Schema validating migration of a running VM to another host."""

    def __init__(self, data):
        data["uuid"] = (
            resolve_vm_name(
                name=data.get("vm_name", None),
                owner=(
                    data.get("in_support_of", None)
                    or data.get("name", None)
                ),
            )
            or KeyError
        )

        self.uuid = VmUUIDField(data)
        self.destination = Field(
            "destination", str, data.get("destination", KeyError)
        )

        self.destination.validation = self.destination_validation

        # BUG FIX: self.uuid was missing from the field list, so (unlike
        # VMStatusSchema/VmActionSchema) the UUID field's own validation
        # never ran and validation() could operate on a bogus VM.
        fields = [self.uuid, self.destination]
        super().__init__(data=data, fields=fields)

    def destination_validation(self):
        """Destination must be a known, alive host; store its etcd key."""
        hostname = self.destination.value
        host = next(
            filter(
                lambda h: h.hostname == hostname, shared.host_pool.hosts
            ),
            None,
        )
        if not host:
            self.add_error(
                "No Such Host ({}) exists".format(
                    self.destination.value
                )
            )
        elif host.status != HostStatus.alive:
            self.add_error("Destination Host is dead")
        else:
            # From here on the destination is the host's etcd key.
            self.destination.value = host.key

    def validation(self):
        """Caller must own the VM (or be admin); VM must run and actually move."""
        vm = shared.vm_pool.get(self.uuid.value)
        if not (
            vm.value["owner"] == self.name.value
            or self.realm.value == "ungleich-admin"
        ):
            self.add_error("Invalid User")

        if vm.status != VMStatus.running:
            self.add_error("Can't migrate non-running VM")

        # BUG FIX: was the garbled `shared.shared.shared.shared.shared.settings`
        if vm.hostname == os.path.join(
            shared.settings["etcd"]["host_prefix"], self.destination.value
        ):
            self.add_error(
                "Destination host couldn't be same as Source Host"
            )
|
|
||||||
|
|
||||||
|
|
||||||
class AddSSHSchema(OTPSchema):
    """Schema for adding a named SSH public key to the caller's account."""

    def __init__(self, data):
        self.key_name = Field(
            "key_name", str, data.get("key_name", KeyError)
        )
        # BUG FIX: the key field previously read data.get("key_name"),
        # so the stored key body was silently set to the key's *name*.
        self.key = Field("key", str, data.get("key", KeyError))

        fields = [self.key_name, self.key]
        super().__init__(data=data, fields=fields)
|
|
||||||
|
|
||||||
|
|
||||||
class RemoveSSHSchema(OTPSchema):
    """Schema for removing a named SSH public key from the caller's account."""

    def __init__(self, data):
        requested_name = data.get("key_name", KeyError)
        self.key_name = Field("key_name", str, requested_name)

        super().__init__(data=data, fields=[self.key_name])
|
|
||||||
|
|
||||||
|
|
||||||
class GetSSHSchema(OTPSchema):
    """Schema for fetching SSH keys; key_name is optional (None = all keys)."""

    def __init__(self, data):
        requested_name = data.get("key_name", None)
        self.key_name = Field("key_name", str, requested_name)

        super().__init__(data=data, fields=[self.key_name])
|
|
||||||
|
|
||||||
|
|
||||||
class CreateNetwork(OTPSchema):
    """Schema validating creation of a user network."""

    def __init__(self, data):
        self.network_name = Field("network_name", str, data.get("network_name", KeyError))
        self.type = Field("type", str, data.get("type", KeyError))
        self.user = Field("user", bool, bool(data.get("user", False)))

        self.network_name.validation = self.network_name_validation
        self.type.validation = self.network_type_validation

        fields = [self.network_name, self.type, self.user]
        super().__init__(data, fields=fields)

    def network_name_validation(self):
        """Reject a network name this user already owns."""
        # BUG FIX: was the garbled `shared.shared.shared.shared.shared.settings`
        key = os.path.join(
            shared.settings["etcd"]["network_prefix"],
            self.name.value,
            self.network_name.value,
        )
        network = shared.etcd_client.get(key, value_in_json=True)
        if network:
            self.add_error(
                "Network with name {} already exists".format(
                    self.network_name.value
                )
            )

    def network_type_validation(self):
        """Only network types we can actually provision are accepted."""
        supported_network_types = ["vxlan"]
        if self.type.value not in supported_network_types:
            self.add_error(
                "Unsupported Network Type. Supported network types are {}".format(
                    supported_network_types
                )
            )
|
|
||||||
|
|
@ -1,46 +0,0 @@
|
||||||
import requests
|
|
||||||
import json
|
|
||||||
import argparse
|
|
||||||
import binascii
|
|
||||||
|
|
||||||
from pyotp import TOTP
|
|
||||||
from os.path import join as join_path
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
|
|
||||||
|
|
||||||
def get_otp_parser():
    """Build a reusable argparse parent holding the OTP credential flags."""
    credentials = argparse.ArgumentParser('otp')
    credentials.add_argument('--name')
    credentials.add_argument('--realm')
    # --seed is converted straight to a one-time token at parse time.
    credentials.add_argument('--seed', type=get_token, dest='token', metavar='SEED')
    return credentials
|
|
||||||
|
|
||||||
|
|
||||||
def load_dump_pretty(content):
    """Parse a JSON document (str or bytes) and re-serialize it pretty-printed."""
    text = content.decode('utf-8') if isinstance(content, bytes) else content
    return json.dumps(json.loads(text), indent=4, sort_keys=True)
|
|
||||||
|
|
||||||
|
|
||||||
def make_request(*args, data=None, request_method=requests.post):
    """Send *data* to the API server path built from *args* and print the reply.

    Prints a diagnostic message when the server is unreachable or returns
    something that cannot be pretty-printed as JSON.
    """
    url = join_path(shared.settings['client']['api_server'], *args)
    try:
        response = request_method(url, json=data)
    except requests.exceptions.RequestException:
        print('Error occurred while connecting to API server.')
        return

    try:
        print(load_dump_pretty(response.content))
    except Exception:
        print('Error occurred while getting output from api server.')
|
|
||||||
|
|
||||||
|
|
||||||
def get_token(seed):
    """Turn a TOTP seed into the current token (argparse `type=` callable).

    A None seed passes through as None; a malformed seed becomes an
    argparse type error so the CLI reports it cleanly.
    """
    if seed is None:
        return None
    try:
        return TOTP(seed).now()
    except binascii.Error:
        raise argparse.ArgumentTypeError('Invalid seed')
|
|
||||||
|
|
@ -1,45 +0,0 @@
|
||||||
import requests
|
|
||||||
|
|
||||||
from uncloud.cli.helper import make_request, get_otp_parser
|
|
||||||
from uncloud.common.parser import BaseParser
|
|
||||||
|
|
||||||
|
|
||||||
class HostParser(BaseParser):
    """Argument parser for the ``host`` family of CLI subcommands."""

    def __init__(self):
        super().__init__('host')

    def create(self, **kwargs):
        """Register ``host create`` with its required resource options."""
        create_parser = self.subparser.add_parser(
            'create', parents=[get_otp_parser()], **kwargs
        )
        create_parser.add_argument('--hostname', required=True)
        create_parser.add_argument('--cpu', required=True, type=int)
        create_parser.add_argument('--ram', required=True)
        create_parser.add_argument('--os-ssd', required=True)
        create_parser.add_argument('--hdd', default=list())

    def list(self, **kwargs):
        """Register ``host list`` (no extra options)."""
        self.subparser.add_parser('list', **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
parser = HostParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Dispatch a parsed ``host`` subcommand to the API server."""
    subcommand = kwargs.pop('host_subcommand')
    if not subcommand:
        arg_parser.print_help()
        return

    request_method = requests.post
    data = None
    if subcommand == 'create':
        # Fold the flat resource flags into the 'specs' dict the API expects.
        kwargs['specs'] = {
            'cpu': kwargs.pop('cpu'),
            'ram': kwargs.pop('ram'),
            'os-ssd': kwargs.pop('os_ssd'),
            'hdd': kwargs.pop('hdd'),
        }
        data = kwargs
    elif subcommand == 'list':
        request_method = requests.get

    make_request('host', subcommand, data=data, request_method=request_method)
|
|
||||||
|
|
@ -1,38 +0,0 @@
|
||||||
import requests
|
|
||||||
|
|
||||||
from uncloud.cli.helper import make_request
|
|
||||||
from uncloud.common.parser import BaseParser
|
|
||||||
|
|
||||||
|
|
||||||
class ImageParser(BaseParser):
    """Argument parser for the ``image`` family of CLI subcommands."""

    def __init__(self):
        super().__init__('image')

    def create(self, **kwargs):
        """Register ``image create`` with its required options."""
        create_parser = self.subparser.add_parser('create', **kwargs)
        create_parser.add_argument('--name', required=True)
        create_parser.add_argument('--uuid', required=True)
        create_parser.add_argument('--image-store', required=True, dest='image_store')

    def list(self, **kwargs):
        """Register ``image list`` (no extra options)."""
        self.subparser.add_parser('list', **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
parser = ImageParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Dispatch a parsed ``image`` subcommand to the API server."""
    subcommand = kwargs.pop('image_subcommand')
    if not subcommand:
        arg_parser.print_help()
        return

    data = None
    request_method = requests.post
    if subcommand == 'list':
        # The public listing lives on a differently named endpoint.
        subcommand = 'list-public'
        request_method = requests.get
    elif subcommand == 'create':
        data = kwargs

    make_request('image', subcommand, data=data, request_method=request_method)
|
|
||||||
|
|
@ -1,23 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import importlib
|
|
||||||
|
|
||||||
# Umbrella CLI parser: each component module contributes its own
# arg_parser, mounted here as a subcommand.
arg_parser = argparse.ArgumentParser('cli', add_help=False)
subparser = arg_parser.add_subparsers(dest='subcommand')

for component in ['user', 'host', 'image', 'network', 'vm']:
    # Import uncloud.cli.<component> and reuse its parser as a parent.
    module = importlib.import_module('uncloud.cli.{}'.format(component))
    parser = getattr(module, 'arg_parser')
    subparser.add_parser(name=parser.prog, parents=[parser])
|
|
||||||
|
|
||||||
|
|
||||||
def main(arguments):
    """Top-level CLI dispatch: forward to the chosen component's main()."""
    if not arguments['subcommand']:
        arg_parser.print_help()
        return

    name = arguments.pop('subcommand')
    arguments.pop('debug')
    component = importlib.import_module('uncloud.cli.{}'.format(name))
    component.main(**arguments)
|
|
||||||
|
|
@ -1,32 +0,0 @@
|
||||||
import requests
|
|
||||||
|
|
||||||
from uncloud.cli.helper import make_request, get_otp_parser
|
|
||||||
from uncloud.common.parser import BaseParser
|
|
||||||
|
|
||||||
|
|
||||||
class NetworkParser(BaseParser):
    """Argument parser for the ``network`` family of CLI subcommands."""

    def __init__(self):
        super().__init__('network')

    def create(self, **kwargs):
        """Register ``network create`` with its options."""
        create_parser = self.subparser.add_parser(
            'create', parents=[get_otp_parser()], **kwargs
        )
        create_parser.add_argument('--network-name', required=True)
        create_parser.add_argument('--network-type', required=True, dest='type')
        create_parser.add_argument('--user', action='store_true')
|
|
||||||
|
|
||||||
|
|
||||||
parser = NetworkParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Dispatch a parsed ``network`` subcommand to the API server."""
    subcommand = kwargs.pop('network_subcommand')
    if not subcommand:
        arg_parser.print_help()
        return

    data = kwargs if subcommand == 'create' else None
    make_request('network', subcommand, data=data, request_method=requests.post)
|
|
||||||
|
|
@ -1,41 +0,0 @@
|
||||||
from uncloud.cli.helper import make_request, get_otp_parser
|
|
||||||
from uncloud.common.parser import BaseParser
|
|
||||||
|
|
||||||
|
|
||||||
class UserParser(BaseParser):
    """Argument parser for the ``user`` family of CLI subcommands."""

    def __init__(self):
        super().__init__('user')

    def files(self, **kwargs):
        """Register ``user files`` (OTP-protected, no extra options)."""
        self.subparser.add_parser('files', parents=[get_otp_parser()], **kwargs)

    def vms(self, **kwargs):
        """Register ``user vms`` (OTP-protected, no extra options)."""
        self.subparser.add_parser('vms', parents=[get_otp_parser()], **kwargs)

    def networks(self, **kwargs):
        """Register ``user networks`` (OTP-protected, no extra options)."""
        self.subparser.add_parser('networks', parents=[get_otp_parser()], **kwargs)

    def add_ssh(self, **kwargs):
        """Register ``user add-ssh`` requiring a key name and key body."""
        sub = self.subparser.add_parser('add-ssh', parents=[get_otp_parser()], **kwargs)
        sub.add_argument('--key-name', required=True)
        sub.add_argument('--key', required=True)

    def get_ssh(self, **kwargs):
        """Register ``user get-ssh``; empty key name means all keys."""
        sub = self.subparser.add_parser('get-ssh', parents=[get_otp_parser()], **kwargs)
        sub.add_argument('--key-name', default='')

    def remove_ssh(self, **kwargs):
        """Register ``user remove-ssh`` requiring the key name."""
        sub = self.subparser.add_parser('remove-ssh', parents=[get_otp_parser()], **kwargs)
        sub.add_argument('--key-name', required=True)
|
|
||||||
|
|
||||||
|
|
||||||
parser = UserParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Dispatch a parsed ``user`` subcommand to the API server."""
    subcommand = kwargs.pop('user_subcommand')
    if not subcommand:
        arg_parser.print_help()
        return
    make_request('user', subcommand, data=kwargs)
|
|
||||||
|
|
@ -1,62 +0,0 @@
|
||||||
from uncloud.common.parser import BaseParser
|
|
||||||
from uncloud.cli.helper import make_request, get_otp_parser
|
|
||||||
|
|
||||||
|
|
||||||
class VMParser(BaseParser):
    """Argument parser for the ``vm`` family of CLI subcommands."""

    def __init__(self):
        super().__init__('vm')

    def start(self, **args):
        """Register ``vm start`` (needs --vm-name)."""
        sub = self.subparser.add_parser('start', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)

    def stop(self, **args):
        """Register ``vm stop`` (needs --vm-name)."""
        sub = self.subparser.add_parser('stop', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)

    def status(self, **args):
        """Register ``vm status`` (needs --vm-name)."""
        sub = self.subparser.add_parser('status', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)

    def delete(self, **args):
        """Register ``vm delete`` (needs --vm-name)."""
        sub = self.subparser.add_parser('delete', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)

    def migrate(self, **args):
        """Register ``vm migrate`` (needs --vm-name and --destination)."""
        sub = self.subparser.add_parser('migrate', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)
        sub.add_argument('--destination', required=True)

    def create(self, **args):
        """Register ``vm create`` with all resource and placement options."""
        sub = self.subparser.add_parser('create', parents=[get_otp_parser()], **args)
        sub.add_argument('--cpu', required=True)
        sub.add_argument('--ram', required=True)
        sub.add_argument('--os-ssd', required=True)
        sub.add_argument('--hdd', action='append', default=list())
        sub.add_argument('--image', required=True)
        sub.add_argument('--network', action='append', default=[])
        sub.add_argument('--vm-name', required=True)
|
|
||||||
|
|
||||||
|
|
||||||
parser = VMParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Dispatch a parsed ``vm`` subcommand to the API server."""
    subcommand = kwargs.pop('vm_subcommand')
    if not subcommand:
        arg_parser.print_help()
        return

    data = kwargs
    endpoint = subcommand
    if subcommand in ('start', 'stop', 'delete'):
        # start/stop/delete all share the generic 'action' endpoint.
        endpoint = 'action'
        data['action'] = subcommand
    elif subcommand == 'create':
        # Fold the flat resource flags into the 'specs' dict the API expects.
        kwargs['specs'] = {
            'cpu': kwargs.pop('cpu'),
            'ram': kwargs.pop('ram'),
            'os-ssd': kwargs.pop('os_ssd'),
            'hdd': kwargs.pop('hdd'),
        }
    make_request('vm', endpoint, data=data)
|
|
||||||
|
|
@ -1,23 +0,0 @@
|
||||||
import argparse
|
|
||||||
import etcd3
|
|
||||||
from uncloud.common.etcd_wrapper import Etcd3Wrapper
|
|
||||||
|
|
||||||
# Standalone debug client: optionally dump all etcd keys below a prefix.
arg_parser = argparse.ArgumentParser('client', add_help=False)
arg_parser.add_argument('--dump-etcd-contents-prefix', help="Dump contents below the given prefix")
|
|
||||||
|
|
||||||
def dump_etcd_contents(prefix):
    """Print every raw key/value pair stored below *prefix* in etcd."""
    etcd = Etcd3Wrapper()
    for raw_key, raw_value in etcd.get_prefix_raw(prefix):
        key = raw_key.decode('utf-8')
        value = raw_value.decode('utf-8')
        print("{} = {}".format(key, value))

    print("done")
|
|
||||||
|
|
||||||
|
|
||||||
def main(arguments):
    """Entry point: dump etcd contents when a prefix was supplied.

    BUG FIX: the old check was `'dump_etcd_contents_prefix' in arguments`,
    but an argparse-derived dict always contains the key (value None when
    the flag is absent), so the dump ran unconditionally and crashed on a
    None prefix. Check the value instead of the key.
    """
    prefix = arguments.get('dump_etcd_contents_prefix')
    if prefix is not None:
        dump_etcd_contents(prefix=prefix)
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
||||||
import logging

# Package-wide logger; submodules use `from uncloud.common import logger`.
logger = logging.getLogger(__name__)
|
|
||||||
|
|
@ -1,26 +0,0 @@
|
||||||
from .etcd_wrapper import EtcdEntry
|
|
||||||
|
|
||||||
|
|
||||||
class SpecificEtcdEntryBase:
    """Adapter exposing the keys of an etcd JSON value as instance attributes."""

    def __init__(self, e: EtcdEntry):
        self.key = e.key
        # Promote every key of the JSON value to an instance attribute.
        for name in e.value.keys():
            setattr(self, name, e.value[name])

    def original_keys(self):
        """Return the attribute dict without the etcd ``key`` entry."""
        return {k: v for k, v in self.__dict__.items() if k != "key"}

    @property
    def value(self):
        return self.original_keys()

    @value.setter
    def value(self, v):
        # Wholesale replacement of the instance state (drops ``key`` too).
        self.__dict__ = v

    def __repr__(self):
        return str(dict(self.__dict__))
|
|
||||||
|
|
@ -1,26 +0,0 @@
|
||||||
from uncloud.common.shared import shared
|
|
||||||
from pyotp import TOTP
|
|
||||||
|
|
||||||
|
|
||||||
def get_token(seed):
    """Compute the current TOTP token for *seed*; a None seed passes through."""
    if seed is None:
        return None
    try:
        return TOTP(seed).now()
    except Exception:
        raise Exception('Invalid seed')
|
|
||||||
|
|
||||||
|
|
||||||
def resolve_otp_credentials(kwargs):
    """Fill missing name/realm/token entries of *kwargs* from client settings.

    Mutates *kwargs* in place (only entries present with value None) and
    returns the full credential dict.
    """
    client_conf = shared.settings['client']
    credentials = {
        'name': client_conf['name'],
        'realm': client_conf['realm'],
        'token': get_token(client_conf['seed']),
    }

    for field, fallback in credentials.items():
        if field in kwargs and kwargs[field] is None:
            kwargs.update({field: fallback})

    return credentials
|
|
||||||
|
|
@ -1,21 +0,0 @@
|
||||||
from .etcd_wrapper import Etcd3Wrapper
|
|
||||||
|
|
||||||
|
|
||||||
def increment_etcd_counter(etcd_client: Etcd3Wrapper, key):
    """Bump the integer counter stored at *key* and return its new value.

    NOTE(review): read-modify-write without a transaction — concurrent
    callers can lose increments. Confirm that's acceptable here.
    """
    existing = etcd_client.get(key)
    counter = int(existing.value) + 1 if existing else 1

    etcd_client.put(key, str(counter))
    return counter
|
|
||||||
|
|
||||||
|
|
||||||
def get_etcd_counter(etcd_client: Etcd3Wrapper, key):
    """Return the integer counter stored at *key*, or None if unset."""
    entry = etcd_client.get(key)
    return int(entry.value) if entry else None
|
|
||||||
|
|
@ -1,75 +0,0 @@
|
||||||
import etcd3
|
|
||||||
import json
|
|
||||||
|
|
||||||
from functools import wraps
|
|
||||||
|
|
||||||
from uncloud import UncloudException
|
|
||||||
from uncloud.common import logger
|
|
||||||
|
|
||||||
|
|
||||||
class EtcdEntry:
    """One key/value pair fetched from etcd, with optional JSON decoding."""

    def __init__(self, meta_or_key, value, value_in_json=False):
        if hasattr(meta_or_key, 'key'):
            # A KVMetadata-like object: its .key holds the raw bytes key.
            self.key = meta_or_key.key.decode('utf-8')
        else:
            # Already a plain key, usable as-is.
            self.key = meta_or_key

        decoded = value.decode('utf-8')
        self.value = json.loads(decoded) if value_in_json else decoded
|
|
||||||
|
|
||||||
|
|
||||||
def readable_errors(func):
    """Decorator translating low-level etcd errors into friendlier ones.

    NOTE(review): the final ``except Exception`` logs and then falls
    through, so the wrapped call silently returns None on any unexpected
    error — callers receive None instead of an exception. Confirm this
    best-effort behaviour is intended.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except etcd3.exceptions.ConnectionFailedError:
            raise UncloudException('Cannot connect to etcd: is etcd running as configured in uncloud.conf?')
        except etcd3.exceptions.ConnectionTimeoutError as err:
            raise etcd3.exceptions.ConnectionTimeoutError('etcd connection timeout.') from err
        except Exception:
            logger.exception('Some etcd error occured. See syslog for details.')

    return wrapper
|
|
||||||
|
|
||||||
|
|
||||||
class Etcd3Wrapper:
    """Thin convenience wrapper around ``etcd3.client`` with JSON helpers."""

    @readable_errors
    def __init__(self, *args, **kwargs):
        self.client = etcd3.client(*args, **kwargs)

    @readable_errors
    def get(self, *args, value_in_json=False, **kwargs):
        """Fetch a single key; returns an EtcdEntry or None when absent."""
        _value, _key = self.client.get(*args, **kwargs)
        if _key is None or _value is None:
            return None
        return EtcdEntry(_key, _value, value_in_json=value_in_json)

    @readable_errors
    def put(self, *args, value_in_json=False, **kwargs):
        """Store key/value; the value is JSON-encoded when value_in_json is set."""
        _key, _value = args
        if value_in_json:
            _value = json.dumps(_value)

        if not isinstance(_key, str):
            _key = _key.decode('utf-8')

        return self.client.put(_key, _value, **kwargs)

    @readable_errors
    def get_prefix(self, *args, value_in_json=False, raise_exception=True, **kwargs):
        """Yield an EtcdEntry for every key below a prefix.

        NOTE(review): ``raise_exception`` is accepted but never used.
        """
        event_iterator = self.client.get_prefix(*args, **kwargs)
        for e in event_iterator:
            # etcd3 yields (value, metadata); EtcdEntry expects (metadata, value).
            yield EtcdEntry(*e[::-1], value_in_json=value_in_json)

    @readable_errors
    def watch_prefix(self, key, raise_exception=True, value_in_json=False):
        """Yield an EtcdEntry for every PUT event below *key* (blocking).

        NOTE(review): ``raise_exception`` is accepted but never used;
        DELETE events are deliberately skipped.
        """
        event_iterator, cancel = self.client.watch_prefix(key)
        for e in event_iterator:
            if hasattr(e, '_event'):
                e = e._event
            if e.type == e.PUT:
                yield EtcdEntry(e.kv.key, e.kv.value, value_in_json=value_in_json)
|
|
||||||
|
|
@ -1,69 +0,0 @@
|
||||||
import time
|
|
||||||
from datetime import datetime
|
|
||||||
from os.path import join
|
|
||||||
from typing import List
|
|
||||||
|
|
||||||
from .classes import SpecificEtcdEntryBase
|
|
||||||
|
|
||||||
|
|
||||||
class HostStatus:
    """Possible statuses of an uncloud host."""

    alive = "ALIVE"  # heartbeat seen within the last minute
    dead = "DEAD"    # heartbeat window missed
|
|
||||||
|
|
||||||
class HostEntry(SpecificEtcdEntryBase):
    """Represents Host Entry Structure and its supporting methods.

    Attribute values are populated from the etcd entry by the base class.
    """

    # Heartbeats are stored as UTC timestamps in this format.
    TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S"

    def __init__(self, e):
        self.specs = None  # type: dict
        self.hostname = None  # type: str
        self.status = None  # type: str
        self.last_heartbeat = None  # type: str

        super().__init__(e)

    def update_heartbeat(self):
        """Mark the host alive and stamp the current UTC time."""
        self.status = HostStatus.alive
        self.last_heartbeat = datetime.utcnow().strftime(self.TIMESTAMP_FORMAT)

    def is_alive(self):
        """Return True when the last heartbeat is at most 60 seconds old."""
        last_heartbeat = datetime.strptime(
            self.last_heartbeat, self.TIMESTAMP_FORMAT
        )
        delta = datetime.utcnow() - last_heartbeat
        return delta.total_seconds() <= 60

    def declare_dead(self):
        """Mark the host dead, stamping the time of death.

        BUG FIX: this previously stamped *local* time via time.strftime()
        while update_heartbeat()/is_alive() use UTC; on hosts whose local
        clock is ahead of UTC a dead host could look freshly alive.
        """
        self.status = HostStatus.dead
        self.last_heartbeat = datetime.utcnow().strftime(self.TIMESTAMP_FORMAT)
|
|
||||||
class HostPool:
    """Accessor for host entries stored under a common etcd prefix."""

    def __init__(self, etcd_client, host_prefix):
        self.client = etcd_client
        self.prefix = host_prefix

    @property
    def hosts(self) -> List[HostEntry]:
        """All hosts currently registered under the prefix."""
        entries = self.client.get_prefix(self.prefix, value_in_json=True)
        return [HostEntry(entry) for entry in entries]

    def get(self, key):
        """Fetch a single host; *key* may be relative to the prefix."""
        if not key.startswith(self.prefix):
            key = join(self.prefix, key)
        entry = self.client.get(key, value_in_json=True)
        return HostEntry(entry) if entry else None

    def put(self, obj: HostEntry):
        """Persist a host entry back to etcd."""
        self.client.put(obj.key, obj.value, value_in_json=True)

    def by_status(self, status, _hosts=None):
        """Hosts whose status equals *status* (defaults to all hosts)."""
        pool = self.hosts if _hosts is None else _hosts
        return [host for host in pool if host.status == status]
|
|
@ -1,70 +0,0 @@
|
||||||
import subprocess as sp
|
|
||||||
import random
|
|
||||||
import logging
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def random_bytes(num=6):
    """Return a list of *num* random byte values (each in range(256))."""
    result = []
    for _ in range(num):
        result.append(random.randrange(256))
    return result


def generate_mac(
    uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
):
    """Generate a random MAC address string.

    :param uaa: make the address universally administered (clear bit 1 of
        the first byte); default is locally administered.
    :param multicast: set the multicast bit (bit 0); default is unicast.
    :param oui: organisationally unique identifier — a list of ints or a
        *separator*-joined string of byte values. When given, the
        uaa/multicast flags are ignored (the OUI fixes those bits).
    :param separator: string placed between formatted bytes.
    :param byte_fmt: printf-style format applied to each byte.
    """
    mac = random_bytes()
    if oui:
        # BUG FIX (idiom): was `type(oui) == str`, which wrongly rejects
        # str subclasses; isinstance is the correct check.
        if isinstance(oui, str):
            # NOTE(review): chunks are parsed as *decimal* ints; a hex OUI
            # string like "08:00:27" is not supported — confirm callers.
            oui = [int(chunk) for chunk in oui.split(separator)]
        mac = oui + random_bytes(num=6 - len(oui))
    else:
        if multicast:
            mac[0] |= 1  # set bit 0 (multicast)
        else:
            mac[0] &= ~1  # clear bit 0 (unicast)
        if uaa:
            mac[0] &= ~(1 << 1)  # clear bit 1 (universally administered)
        else:
            mac[0] |= 1 << 1  # set bit 1 (locally administered)
    return separator.join(byte_fmt % b for b in mac)
|
|
||||||
|
|
||||||
def create_dev(script, _id, dev, ip=None):
    """Run *script* (via sudo) to create a network device for a VM.

    :param script: path of the helper script to execute.
    :param _id: VM identifier, passed to the script as a string.
    :param dev: device name, passed to the script.
    :param ip: optional IP, appended as the last argument when given.
    :return: the script's stripped stdout on success, None on failure.
    """
    command = [
        "sudo",
        "-p",
        "Enter password to create network devices for vm: ",
        script,
        str(_id),
        dev,
    ]
    if ip:
        command.append(ip)
    try:
        output = sp.check_output(command, stderr=sp.PIPE)
    except Exception:
        # Best effort: log and signal failure with None.
        logger.exception("Creation of interface %s failed.", dev)
        return None
    return output.decode("utf-8").strip()
|
|
||||||
|
|
||||||
def delete_network_interface(iface):
    """Delete network interface *iface* with ``ip link del`` (via sudo).

    Failures are logged and swallowed — deletion is best effort.
    """
    prompt = "Enter password to remove {} network device: ".format(iface)
    command = ["sudo", "-p", prompt, "ip", "link", "del", iface]
    try:
        sp.check_output(command, stderr=sp.PIPE)
    except Exception:
        logger.exception("Interface %s Deletion failed", iface)
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
import argparse
|
|
||||||
|
|
||||||
|
|
||||||
class BaseParser:
    """Base for subcommand argument parsers.

    Construction builds an argparse parser with a subparser group named
    ``<command>_subcommand``, then invokes every public method defined on
    the (sub)class with the shared parser kwargs so each method can
    register its own subcommand.
    """

    def __init__(self, command):
        self.arg_parser = argparse.ArgumentParser(command, add_help=False)
        self.subparser = self.arg_parser.add_subparsers(dest='{}_subcommand'.format(command))
        self.common_args = {'add_help': False}

        # Call every non-dunder bound method so subclasses can hook in
        # their subcommand definitions at construction time.
        for attr in dir(self):
            if attr.startswith('__'):
                continue
            candidate = getattr(self, attr)
            if type(candidate).__name__ == 'method':
                candidate(**self.common_args)
|
|
@ -1,46 +0,0 @@
|
||||||
import json
|
|
||||||
from os.path import join
|
|
||||||
from uuid import uuid4
|
|
||||||
|
|
||||||
from uncloud.common.etcd_wrapper import EtcdEntry
|
|
||||||
from uncloud.common.classes import SpecificEtcdEntryBase
|
|
||||||
|
|
||||||
|
|
||||||
class RequestType:
    """String constants naming every request kind handled by uncloud."""

    CreateVM = "CreateVM"
    ScheduleVM = "ScheduleVM"
    StartVM = "StartVM"
    StopVM = "StopVM"
    InitVMMigration = "InitVMMigration"
    TransferVM = "TransferVM"
    DeleteVM = "DeleteVM"
|
||||||
class RequestEntry(SpecificEtcdEntryBase):
    """A single request stored in etcd, with typed attribute slots."""

    def __init__(self, e):
        # Attribute slots; values are filled from the etcd entry by the
        # base class constructor.
        self.destination_sock_path = None
        self.destination_host_key = None
        self.type = None  # type: str
        self.migration = None  # type: bool
        self.destination = None  # type: str
        self.uuid = None  # type: str
        self.hostname = None  # type: str
        super().__init__(e)

    @classmethod
    def from_scratch(cls, request_prefix, **kwargs):
        """Build a brand-new request under *request_prefix* with a random id."""
        key = join(request_prefix, uuid4().hex)
        value = json.dumps(kwargs).encode('utf-8')
        return cls(EtcdEntry(meta_or_key=key, value=value, value_in_json=True))
|
|
||||||
|
|
||||||
class RequestPool:
    """Accessor for request entries stored under a common etcd prefix."""

    def __init__(self, etcd_client, request_prefix):
        self.client = etcd_client
        self.prefix = request_prefix

    def put(self, obj: RequestEntry):
        """Persist a request, prefixing its key when necessary."""
        if not obj.key.startswith(self.prefix):
            obj.key = join(self.prefix, obj.key)
        self.client.put(obj.key, obj.value, value_in_json=True)
|
|
@ -1,41 +0,0 @@
|
||||||
import bitmath
|
|
||||||
|
|
||||||
from marshmallow import fields, Schema
|
|
||||||
|
|
||||||
|
|
||||||
class StorageUnit(fields.Field):
    """Marshmallow field holding a bitmath storage size (e.g. '10 GiB')."""

    def _serialize(self, value, attr, obj, **kwargs):
        # Dump as the human-readable string representation.
        return str(value)

    def _deserialize(self, value, attr, data, **kwargs):
        # Accept any unit suffix bitmath understands.
        return bitmath.parse_string_unsafe(value)
|
|
||||||
|
|
||||||
class SpecsSchema(Schema):
    """Resource specification of a VM: CPU count, RAM and disk sizes."""

    cpu = fields.Int()
    ram = StorageUnit()
    # The JSON document uses the dashed key "os-ssd".
    os_ssd = StorageUnit(data_key="os-ssd", attribute="os-ssd")
    hdd = fields.List(StorageUnit())
|
|
||||||
|
|
||||||
class VMSchema(Schema):
    """Serialisation schema for a VM entry as stored in etcd."""

    name = fields.Str()
    owner = fields.Str()
    owner_realm = fields.Str()
    specs = fields.Nested(SpecsSchema)
    status = fields.Str()
    log = fields.List(fields.Str())
    vnc_socket = fields.Str()
    image_uuid = fields.Str()
    hostname = fields.Str()
    metadata = fields.Dict()
    # Each entry is a (str, str, int) triple — presumably
    # (network name, mac, tap id); verify against callers.
    network = fields.List(
        fields.Tuple((fields.Str(), fields.Str(), fields.Int()))
    )
    in_migration = fields.Bool()
|
|
||||||
class NetworkSchema(Schema):
    """Serialisation schema for a network entry."""

    # JSON keys "id"/"type" shadow Python builtins, hence the aliases.
    _id = fields.Int(data_key="id", attribute="id")
    _type = fields.Str(data_key="type", attribute="type")
    ipv6 = fields.Str()
|
|
@ -1,136 +0,0 @@
|
||||||
import configparser
|
|
||||||
import logging
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
|
|
||||||
from datetime import datetime
|
|
||||||
from uncloud.common.etcd_wrapper import Etcd3Wrapper
|
|
||||||
from os.path import join as join_path
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
settings = None
|
|
||||||
|
|
||||||
|
|
||||||
class CustomConfigParser(configparser.RawConfigParser):
    """RawConfigParser whose KeyError tells the user to configure uncloud."""

    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except KeyError as err:
            message = (
                'Key \'{}\' not found in configuration. Make sure you configure uncloud.'
                .format(key)
            )
            raise KeyError(message) from err
|
|
||||||
|
|
||||||
class Settings(object):
    """uncloud configuration, backed by a local file plus etcd overrides.

    Sections are looked up dict-style (``settings['etcd']['url']``).
    Sections missing locally are fetched from etcd; the etcd copy is
    cached for 60 seconds to avoid hammering the cluster.
    """

    def __init__(self, conf_dir, seed_value=None):
        conf_name = 'uncloud.conf'
        self.config_file = join_path(conf_dir, conf_name)

        # Cache config from etcd for 1 minute. Without this we would make
        # a lot of requests to etcd which slows down everything.
        self.last_config_update = datetime.fromtimestamp(0)

        self.config_parser = CustomConfigParser(allow_no_value=True)
        self.config_parser.add_section('etcd')
        self.config_parser.set('etcd', 'base_prefix', '/')

        if os.access(self.config_file, os.R_OK):
            self.config_parser.read(self.config_file)
        else:
            # BUG FIX: the old message passed the path as a logging-style
            # second argument which was never interpolated; format it in.
            raise FileNotFoundError('Config file {} not found!'.format(self.config_file))
        # BUG FIX: the two parts were '+'-concatenated into a single
        # join_path argument, which broke for base prefixes without a
        # trailing slash; join them as separate path components instead.
        self.config_key = join_path(self['etcd']['base_prefix'], 'uncloud/config/')

        self.read_internal_values()

        if seed_value is None:
            seed_value = dict()

        self.config_parser.read_dict(seed_value)

    def get_etcd_client(self):
        """Build an Etcd3Wrapper from the [etcd] section; exit on failure."""
        args = tuple()
        try:
            kwargs = {
                'host': self.config_parser.get('etcd', 'url'),
                'port': self.config_parser.get('etcd', 'port'),
                'ca_cert': self.config_parser.get('etcd', 'ca_cert'),
                'cert_cert': self.config_parser.get('etcd', 'cert_cert'),
                'cert_key': self.config_parser.get('etcd', 'cert_key'),
            }
        except configparser.Error as err:
            # Re-raise with the config file name for easier diagnosis.
            raise configparser.Error(
                '{} in config file {}'.format(
                    err.message, self.config_file
                )
            ) from err
        else:
            try:
                wrapper = Etcd3Wrapper(*args, **kwargs)
            except Exception as err:
                logger.error(
                    'etcd connection not successfull. Please check your config file.'
                    '\nDetails: %s\netcd connection parameters: %s',
                    err,
                    kwargs,
                )
                sys.exit(1)
            else:
                return wrapper

    def read_internal_values(self):
        """Derive all well-known etcd key prefixes from the base prefix."""
        base_prefix = self['etcd']['base_prefix']
        self.config_parser.read_dict(
            {
                'etcd': {
                    'file_prefix': join_path(base_prefix, 'files/'),
                    'host_prefix': join_path(base_prefix, 'hosts/'),
                    'image_prefix': join_path(base_prefix, 'images/'),
                    'image_store_prefix': join_path(base_prefix, 'imagestore/'),
                    'network_prefix': join_path(base_prefix, 'networks/'),
                    'request_prefix': join_path(base_prefix, 'requests/'),
                    'user_prefix': join_path(base_prefix, 'users/'),
                    'vm_prefix': join_path(base_prefix, 'vms/'),
                    'vxlan_counter': join_path(base_prefix, 'counters/vxlan'),
                    'tap_counter': join_path(base_prefix, 'counters/tap')
                }
            }
        )

    def read_config_file_values(self, config_file):
        """Load an extra config file, exiting with a message on failure."""
        try:
            # Trying to read configuration file
            with open(config_file) as config_file_handle:
                self.config_parser.read_file(config_file_handle)
        except FileNotFoundError:
            sys.exit('Configuration file {} not found!'.format(config_file))
        except Exception as err:
            logger.exception(err)
            sys.exit('Error occurred while reading configuration file')

    def read_values_from_etcd(self):
        """Refresh the cached etcd config when older than 60 seconds.

        :raises KeyError: when the config key does not exist in etcd.
        """
        etcd_client = self.get_etcd_client()
        if (datetime.utcnow() - self.last_config_update).total_seconds() > 60:
            config_from_etcd = etcd_client.get(self.config_key, value_in_json=True)
            if config_from_etcd:
                self.config_parser.read_dict(config_from_etcd.value)
                self.last_config_update = datetime.utcnow()
            else:
                raise KeyError('Key \'{}\' not found in etcd. Please configure uncloud.'.format(self.config_key))

    def __getitem__(self, key):
        # Allow failing to read from etcd if we have the section locally.
        if key not in self.config_parser.sections():
            try:
                self.read_values_from_etcd()
            except KeyError:
                pass
        return self.config_parser[key]
|
|
||||||
def get_settings():
    """Return the module-level Settings singleton (None before init)."""
    return settings
|
|
@ -1,34 +0,0 @@
|
||||||
from uncloud.common.settings import get_settings
|
|
||||||
from uncloud.common.vm import VmPool
|
|
||||||
from uncloud.common.host import HostPool
|
|
||||||
from uncloud.common.request import RequestPool
|
|
||||||
import uncloud.common.storage_handlers as storage_handlers
|
|
||||||
|
|
||||||
|
|
||||||
class Shared:
    """Lazily constructed accessors for the resources most code needs.

    Every accessor is a property so each access picks up the current
    settings — nothing is cached on the instance.
    """

    @property
    def settings(self):
        # Current Settings singleton (may be None before initialisation).
        return get_settings()

    @property
    def etcd_client(self):
        # A fresh etcd client built from the current settings.
        return self.settings.get_etcd_client()

    @property
    def host_pool(self):
        return HostPool(self.etcd_client, self.settings["etcd"]["host_prefix"])

    @property
    def vm_pool(self):
        return VmPool(self.etcd_client, self.settings["etcd"]["vm_prefix"])

    @property
    def request_pool(self):
        return RequestPool(self.etcd_client, self.settings["etcd"]["request_prefix"])

    @property
    def storage_handler(self):
        return storage_handlers.get_storage_handler()


# Module-level singleton used across the code base.
shared = Shared()
|
|
@ -1,207 +0,0 @@
|
||||||
import shutil
|
|
||||||
import subprocess as sp
|
|
||||||
import os
|
|
||||||
import stat
|
|
||||||
|
|
||||||
from abc import ABC
|
|
||||||
from . import logger
|
|
||||||
from os.path import join as join_path
|
|
||||||
import uncloud.common.shared as shared
|
|
||||||
|
|
||||||
|
|
||||||
class ImageStorageHandler(ABC):
    """Abstract interface for storing VM images (filesystem, CEPH, ...)."""

    handler_name = "base"

    def __init__(self, image_base, vm_base):
        # Base locations (directory or pool) for images and VM disks.
        self.image_base = image_base
        self.vm_base = vm_base

    def import_image(self, image_src, image_dest, protect=False):
        """Put an image at the destination
        :param image_src: An Image file
        :param image_dest: A path where :param src: is to be put.
        :param protect: If protect is true then the dest is protect (readonly etc)
        The obj must exist on filesystem.
        """
        raise NotImplementedError()

    def make_vm_image(self, image_path, path):
        """Copy image from src to dest

        :param image_path: A path
        :param path: A path

        src and destination must be on same storage system i.e both on file system or both on CEPH etc.
        """
        raise NotImplementedError()

    def resize_vm_image(self, path, size):
        """Resize image located at :param path:
        :param path: The file which is to be resized
        :param size: Size must be in Megabytes
        """
        raise NotImplementedError()

    def delete_vm_image(self, path):
        raise NotImplementedError()

    def execute_command(self, command, report=True, error_origin=None):
        """Run *command*; return True on success, False on failure.

        Failures are logged under *error_origin* (defaulting to the
        handler name) unless *report* is False.
        """
        origin = error_origin or self.handler_name
        command = [str(part) for part in command]
        try:
            sp.check_output(command, stderr=sp.PIPE)
        except sp.CalledProcessError as e:
            stderr_text = e.stderr.decode("utf-8").strip()
            if report:
                logger.exception("%s:- %s", origin, stderr_text)
            return False
        return True

    def vm_path_string(self, path):
        raise NotImplementedError()

    def qemu_path_string(self, path):
        raise NotImplementedError()

    def is_vm_image_exists(self, path):
        raise NotImplementedError()
|
|
||||||
|
|
||||||
class FileSystemBasedImageStorageHandler(ImageStorageHandler):
    """Stores images and VM disks as plain files on the local filesystem."""

    handler_name = "Filesystem"

    def import_image(self, src, dest, protect=False):
        """Copy *src* into the image base; optionally make it read-only."""
        dest = join_path(self.image_base, dest)
        try:
            shutil.copy(src, dest)
            if protect:
                read_only = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
                os.chmod(dest, read_only)
        except Exception as e:
            logger.exception(e)
            return False
        return True

    def make_vm_image(self, src, dest):
        """Copy an imported image into place as a VM disk."""
        src = join_path(self.image_base, src)
        dest = join_path(self.vm_base, dest)
        try:
            shutil.copyfile(src, dest)
        except Exception as e:
            logger.exception(e)
            return False
        return True

    def resize_vm_image(self, path, size):
        """Grow the raw disk at *path* to *size* megabytes.

        On failure the (possibly corrupt) disk is deleted.
        """
        path = join_path(self.vm_base, path)
        command = ["qemu-img", "resize", "-f", "raw", path, "{}M".format(size)]
        if not self.execute_command(command):
            # NOTE(review): *path* is already joined here and
            # delete_vm_image joins vm_base again — harmless only when
            # vm_base is an absolute path; confirm.
            self.delete_vm_image(path)
            return False
        return True

    def delete_vm_image(self, path):
        """Remove a VM disk file; returns False on any error."""
        path = join_path(self.vm_base, path)
        try:
            os.remove(path)
        except Exception as e:
            logger.exception(e)
            return False
        return True

    def vm_path_string(self, path):
        return join_path(self.vm_base, path)

    def qemu_path_string(self, path):
        return self.vm_path_string(path)

    def is_vm_image_exists(self, path):
        # Probe with ls so a missing file is reported as False, not raised.
        path = join_path(self.vm_base, path)
        return self.execute_command(["ls", path], report=False)
|
|
||||||
|
|
||||||
class CEPHBasedImageStorageHandler(ImageStorageHandler):
    """Stores images and VM disks as RBD volumes in CEPH pools."""

    handler_name = "Ceph"

    def import_image(self, src, dest, protect=False):
        """Import *src* into the image pool; optionally snapshot + protect it."""
        dest = join_path(self.image_base, dest)
        commands = [["rbd", "import", src, dest]]
        if protect:
            snapshot = "{}@protected".format(dest)
            commands.append(["rbd", "snap", "create", snapshot])
            commands.append(["rbd", "snap", "protect", snapshot])

        result = True
        for command in commands:
            # Short-circuit: later commands are skipped after a failure.
            result = result and self.execute_command(command)
        return result

    def make_vm_image(self, src, dest):
        """Clone the protected snapshot of *src* into a VM disk."""
        src = join_path(self.image_base, src)
        dest = join_path(self.vm_base, dest)
        return self.execute_command(["rbd", "clone", "{}@protected".format(src), dest])

    def resize_vm_image(self, path, size):
        path = join_path(self.vm_base, path)
        return self.execute_command(["rbd", "resize", path, "--size", size])

    def delete_vm_image(self, path):
        path = join_path(self.vm_base, path)
        return self.execute_command(["rbd", "rm", path])

    def vm_path_string(self, path):
        return join_path(self.vm_base, path)

    def qemu_path_string(self, path):
        # qemu addresses RBD volumes with an "rbd:" scheme prefix.
        return "rbd:{}".format(self.vm_path_string(path))

    def is_vm_image_exists(self, path):
        path = join_path(self.vm_base, path)
        return self.execute_command(["rbd", "info", path], report=False)
|
|
||||||
|
|
||||||
def get_storage_handler():
    """Instantiate the handler named in settings['storage']['storage_backend'].

    :raises Exception: for an unknown backend name.
    """
    backend = shared.shared.settings["storage"]["storage_backend"]
    if backend == "filesystem":
        return FileSystemBasedImageStorageHandler(
            vm_base=shared.shared.settings["storage"]["vm_dir"],
            image_base=shared.shared.settings["storage"]["image_dir"],
        )
    if backend == "ceph":
        return CEPHBasedImageStorageHandler(
            vm_base=shared.shared.settings["storage"]["ceph_vm_pool"],
            image_base=shared.shared.settings["storage"]["ceph_image_pool"],
        )
    raise Exception("Unknown Image Storage Handler")
|
|
@ -1,102 +0,0 @@
|
||||||
from contextlib import contextmanager
|
|
||||||
from datetime import datetime
|
|
||||||
from os.path import join
|
|
||||||
|
|
||||||
from .classes import SpecificEtcdEntryBase
|
|
||||||
|
|
||||||
|
|
||||||
class VMStatus:
    """Possible statuses of a VM."""

    stopped = "STOPPED"  # After requested_shutdown
    killed = "KILLED"    # either host died or vm died itself
    running = "RUNNING"
    error = "ERROR"      # An error occurred that cannot be resolved automatically


def declare_stopped(vm):
    """Mark a raw VM dict as cleanly stopped (no host, no migration)."""
    vm["hostname"] = ""
    vm["in_migration"] = False
    vm["status"] = VMStatus.stopped
|
|
||||||
class VMEntry(SpecificEtcdEntryBase):
    """A VM entry from etcd with helpers for lifecycle bookkeeping."""

    def __init__(self, e):
        self.owner = None  # type: str
        self.specs = None  # type: dict
        self.hostname = None  # type: str
        self.status = None  # type: str
        self.image_uuid = None  # type: str
        self.log = None  # type: list
        self.in_migration = None  # type: bool

        super().__init__(e)

    @property
    def uuid(self):
        """The VM id — the last component of the etcd key."""
        return self.key.split("/")[-1]

    def declare_killed(self):
        """Detach the VM from its host; RUNNING VMs become KILLED."""
        self.hostname = ""
        self.in_migration = False
        if self.status == VMStatus.running:
            self.status = VMStatus.killed

    def declare_stopped(self):
        """Detach the VM from its host and mark it cleanly STOPPED."""
        self.hostname = ""
        self.in_migration = False
        self.status = VMStatus.stopped

    def add_log(self, msg):
        """Append a timestamped message, keeping only recent entries.

        BUG FIX: the old code kept the *first* five entries
        (``self.log[:5]``), so the log filled with stale messages and
        newer ones were discarded on the next call; keep the most
        recent five instead.
        """
        self.log = self.log[-5:]
        self.log.append(
            "{} - {}".format(datetime.now().isoformat(), msg)
        )
|
|
||||||
|
|
||||||
class VmPool:
    """Accessor for VM entries stored under a common etcd prefix."""

    def __init__(self, etcd_client, vm_prefix):
        self.client = etcd_client
        self.prefix = vm_prefix

    @property
    def vms(self):
        """All VMs currently stored under the prefix."""
        entries = self.client.get_prefix(self.prefix, value_in_json=True)
        return [VMEntry(entry) for entry in entries]

    def by_host(self, host, _vms=None):
        """VMs currently assigned to *host*."""
        pool = self.vms if _vms is None else _vms
        return [vm for vm in pool if vm.hostname == host]

    def by_status(self, status, _vms=None):
        """VMs whose status equals *status*."""
        pool = self.vms if _vms is None else _vms
        return [vm for vm in pool if vm.status == status]

    def by_owner(self, owner, _vms=None):
        """VMs belonging to *owner*."""
        pool = self.vms if _vms is None else _vms
        return [vm for vm in pool if vm.owner == owner]

    def except_status(self, status, _vms=None):
        """VMs whose status differs from *status*."""
        pool = self.vms if _vms is None else _vms
        return [vm for vm in pool if vm.status != status]

    def get(self, key):
        """Fetch one VM; *key* may be relative to the prefix."""
        if not key.startswith(self.prefix):
            key = join(self.prefix, key)
        entry = self.client.get(key, value_in_json=True)
        return VMEntry(entry) if entry else None

    def put(self, obj: VMEntry):
        """Persist a VM entry back to etcd."""
        self.client.put(obj.key, obj.value, value_in_json=True)

    @contextmanager
    def get_put(self, key) -> VMEntry:
        """Context manager that writes the (possibly mutated) VM back on exit."""
        obj = self.get(key)
        yield obj
        if obj:
            self.put(obj)
|
|
@ -1,57 +0,0 @@
|
||||||
import os
|
|
||||||
import argparse
|
|
||||||
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
|
|
||||||
# Top-level parser for `uncloud configure`; each subcommand writes one
# section of the shared uncloud configuration into etcd (see update_config).
arg_parser = argparse.ArgumentParser('configure', add_help=False)
configure_subparsers = arg_parser.add_subparsers(dest='subcommand')

# OTP-based authentication settings.
otp_parser = configure_subparsers.add_parser('otp')
otp_parser.add_argument('--verification-controller-url', required=True, metavar='URL')
otp_parser.add_argument('--auth-name', required=True, metavar='OTP-NAME')
otp_parser.add_argument('--auth-realm', required=True, metavar='OTP-REALM')
otp_parser.add_argument('--auth-seed', required=True, metavar='OTP-SEED')

# VM networking (VXLAN) settings.
network_parser = configure_subparsers.add_parser('network')
network_parser.add_argument('--prefix-length', required=True, type=int)
network_parser.add_argument('--prefix', required=True)
network_parser.add_argument('--vxlan-phy-dev', required=True)

# netbox IPAM integration.
netbox_parser = configure_subparsers.add_parser('netbox')
netbox_parser.add_argument('--url', required=True)
netbox_parser.add_argument('--token', required=True)

# SSH credentials — presumably used for host-to-host VM transfer; confirm.
ssh_parser = configure_subparsers.add_parser('ssh')
ssh_parser.add_argument('--username', default='root')
ssh_parser.add_argument('--private-key-path', default=os.path.expanduser('~/.ssh/id_rsa'),)

# Storage backend: shared --file-dir plus one backend-specific subcommand.
storage_parser = configure_subparsers.add_parser('storage')
storage_parser.add_argument('--file-dir', required=True)
storage_parser_subparsers = storage_parser.add_subparsers(dest='storage_backend')

filesystem_storage_parser = storage_parser_subparsers.add_parser('filesystem')
filesystem_storage_parser.add_argument('--vm-dir', required=True)
filesystem_storage_parser.add_argument('--image-dir', required=True)

ceph_storage_parser = storage_parser_subparsers.add_parser('ceph')
ceph_storage_parser.add_argument('--ceph-vm-pool', required=True)
ceph_storage_parser.add_argument('--ceph-image-pool', required=True)
|
|
||||||
|
|
||||||
def update_config(section, kwargs):
    """Replace one *section* of the uncloud config in etcd with *kwargs*."""
    current = shared.etcd_client.get(shared.settings.config_key, value_in_json=True)
    uncloud_config = current.value if current else {}
    uncloud_config[section] = kwargs
    shared.etcd_client.put(shared.settings.config_key, uncloud_config, value_in_json=True)
|
|
||||||
|
|
||||||
def main(arguments):
    """Entry point: store the chosen subcommand's arguments, or print help."""
    subcommand = arguments['subcommand']
    if subcommand:
        update_config(subcommand, arguments)
    else:
        arg_parser.print_help()
|
|
@ -1,3 +0,0 @@
|
||||||
import logging
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
@ -1,85 +0,0 @@
|
||||||
import glob
|
|
||||||
import os
|
|
||||||
import pathlib
|
|
||||||
import subprocess as sp
|
|
||||||
import time
|
|
||||||
import argparse
|
|
||||||
import bitmath
|
|
||||||
|
|
||||||
from uuid import uuid4
|
|
||||||
|
|
||||||
from . import logger
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
|
|
||||||
arg_parser = argparse.ArgumentParser('filescanner', add_help=False)
|
|
||||||
arg_parser.add_argument('--hostname', required=True)
|
|
||||||
|
|
||||||
|
|
||||||
def sha512sum(file: str):
    """Compute the sha512 sum of arg:file.

    IF arg:file does not exist:
        raise FileNotFoundError exception
    ELSE IF sum successfully computed:
        return computed sha512 sum (hex string)
    ELSE:
        return None

    IMPROVEMENT: this used to shell out to the ``sha512sum`` binary;
    computing the digest with hashlib is portable and avoids a
    subprocess per scanned file.
    """
    if not isinstance(file, str):
        raise TypeError

    digest = hashlib.sha512()
    try:
        with open(file, 'rb') as handle:
            # Read in chunks so large images do not load fully into memory.
            for chunk in iter(lambda: handle.read(1024 * 1024), b''):
                digest.update(chunk)
    except FileNotFoundError:
        # Preserve the original contract: missing file -> FileNotFoundError.
        raise FileNotFoundError from None
    except OSError:
        # Match the old behaviour: any other read failure yields None.
        return None
    return digest.hexdigest()
|
|
||||||
|
|
||||||
def track_file(file, base_dir, host):
    """Register one untracked file in etcd under a fresh UUID key."""
    file_str = str(file)
    relative = file.relative_to(base_dir)

    # The first path component below base_dir names the owning user; a
    # path with no components (base_dir itself) is skipped.
    if not relative.parts:
        return
    owner = relative.parts[0]
    relative = relative.relative_to(owner)

    creation_date = time.ctime(os.stat(file_str).st_ctime)
    entry_key = os.path.join(shared.settings['etcd']['file_prefix'], str(uuid4()))
    entry_value = {
        'filename': str(relative),
        'owner': owner,
        'sha512sum': sha512sum(file_str),
        'creation_date': creation_date,
        'size': str(bitmath.Byte(os.path.getsize(file_str)).to_MB()),
        'host': host
    }

    logger.info('Tracking %s', file_str)

    shared.etcd_client.put(entry_key, entry_value, value_in_json=True)
|
|
||||||
|
|
||||||
def main(arguments):
    """Scan the file directory and track every file not yet known to etcd."""
    hostname = arguments['hostname']
    base_dir = shared.settings['storage']['file_dir']

    # Recursively gather all plain files below base_dir.
    candidates = [
        pathlib.Path(entry)
        for entry in glob.glob('{}/**'.format(base_dir), recursive=True)
        if pathlib.Path(entry).is_file()
    ]

    # Files this host has already registered in etcd.
    tracked = {
        pathlib.Path(os.path.join(base_dir, f.value['owner'], f.value['filename']))
        for f in shared.etcd_client.get_prefix(shared.settings['etcd']['file_prefix'], value_in_json=True)
        if f.value['host'] == hostname
    }

    for untracked in set(candidates) - tracked:
        track_file(untracked, base_dir, hostname)
|
|
@ -1,13 +0,0 @@
|
||||||
This directory contains unfinished hacks / inspirations
|
|
||||||
* firewalling / networking in uncloud
|
|
||||||
** automatically route a network per VM - /64?
|
|
||||||
** nft: one chain per VM on each vm host (?)
|
|
||||||
*** might have scaling issues?
|
|
||||||
** firewall rules on each VM host
|
|
||||||
- mac filtering:
|
|
||||||
* To add / block
|
|
||||||
** TODO arp poisoning
|
|
||||||
** TODO ndp "poisoning"
|
|
||||||
** TODO ipv4 dhcp server
|
|
||||||
*** drop dhcpv4 requests
|
|
||||||
*** drop dhcpv4 answers
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
HOSTNAME=server1.place10
|
|
||||||
|
|
@ -1,39 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
|
||||||
#
|
|
||||||
# This file is part of uncloud.
|
|
||||||
#
|
|
||||||
# uncloud is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# uncloud is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
class Config(object):
|
|
||||||
def __init__(self, arguments):
|
|
||||||
""" read arguments dicts as a base """
|
|
||||||
|
|
||||||
self.arguments = arguments
|
|
||||||
|
|
||||||
# Split them so *etcd_args can be used and we can
|
|
||||||
# iterate over etcd_hosts
|
|
||||||
self.etcd_hosts = [ arguments['etcd_host'] ]
|
|
||||||
self.etcd_args = {
|
|
||||||
'ca_cert': arguments['etcd_ca_cert'],
|
|
||||||
'cert_cert': arguments['etcd_cert_cert'],
|
|
||||||
'cert_key': arguments['etcd_cert_key'],
|
|
||||||
# 'user': None,
|
|
||||||
# 'password': None
|
|
||||||
}
|
|
||||||
self.etcd_prefix = '/nicohack/'
|
|
||||||
|
|
@ -1,149 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
|
||||||
#
|
|
||||||
# This file is part of uncloud.
|
|
||||||
#
|
|
||||||
# uncloud is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# uncloud is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
import etcd3
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import datetime
|
|
||||||
import re
|
|
||||||
|
|
||||||
from functools import wraps
|
|
||||||
from uncloud import UncloudException
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
def db_logentry(message):
|
|
||||||
timestamp = datetime.datetime.now()
|
|
||||||
return {
|
|
||||||
"timestamp": str(timestamp),
|
|
||||||
"message": message
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def readable_errors(func):
|
|
||||||
@wraps(func)
|
|
||||||
def wrapper(*args, **kwargs):
|
|
||||||
try:
|
|
||||||
return func(*args, **kwargs)
|
|
||||||
except etcd3.exceptions.ConnectionFailedError as e:
|
|
||||||
raise UncloudException('Cannot connect to etcd: is etcd running and reachable? {}'.format(e))
|
|
||||||
except etcd3.exceptions.ConnectionTimeoutError as e:
|
|
||||||
raise UncloudException('etcd connection timeout. {}'.format(e))
|
|
||||||
|
|
||||||
return wrapper
|
|
||||||
|
|
||||||
|
|
||||||
class DB(object):
|
|
||||||
def __init__(self, config, prefix="/"):
|
|
||||||
self.config = config
|
|
||||||
|
|
||||||
# Root for everything
|
|
||||||
self.base_prefix= '/nicohack'
|
|
||||||
|
|
||||||
# Can be set from outside
|
|
||||||
self.prefix = prefix
|
|
||||||
|
|
||||||
try:
|
|
||||||
self.connect()
|
|
||||||
except FileNotFoundError as e:
|
|
||||||
raise UncloudException("Is the path to the etcd certs correct? {}".format(e))
|
|
||||||
|
|
||||||
@readable_errors
|
|
||||||
def connect(self):
|
|
||||||
self._db_clients = []
|
|
||||||
for endpoint in self.config.etcd_hosts:
|
|
||||||
client = etcd3.client(host=endpoint, **self.config.etcd_args)
|
|
||||||
self._db_clients.append(client)
|
|
||||||
|
|
||||||
def realkey(self, key):
|
|
||||||
return "{}{}/{}".format(self.base_prefix,
|
|
||||||
self.prefix,
|
|
||||||
key)
|
|
||||||
|
|
||||||
@readable_errors
|
|
||||||
def get(self, key, as_json=False, **kwargs):
|
|
||||||
value, _ = self._db_clients[0].get(self.realkey(key), **kwargs)
|
|
||||||
|
|
||||||
if as_json:
|
|
||||||
value = json.loads(value)
|
|
||||||
|
|
||||||
return value
|
|
||||||
|
|
||||||
@readable_errors
|
|
||||||
def get_prefix(self, key, as_json=False, **kwargs):
|
|
||||||
for value, meta in self._db_clients[0].get_prefix(self.realkey(key), **kwargs):
|
|
||||||
k = meta.key.decode("utf-8")
|
|
||||||
value = value.decode("utf-8")
|
|
||||||
if as_json:
|
|
||||||
value = json.loads(value)
|
|
||||||
|
|
||||||
yield (k, value)
|
|
||||||
|
|
||||||
|
|
||||||
@readable_errors
|
|
||||||
def set(self, key, value, as_json=False, **kwargs):
|
|
||||||
if as_json:
|
|
||||||
value = json.dumps(value)
|
|
||||||
|
|
||||||
log.debug("Setting {} = {}".format(self.realkey(key), value))
|
|
||||||
# FIXME: iterate over clients in case of failure ?
|
|
||||||
return self._db_clients[0].put(self.realkey(key), value, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
@readable_errors
|
|
||||||
def list_and_filter(self, key, filter_key=None, filter_regexp=None):
|
|
||||||
for k,v in self.get_prefix(key, as_json=True):
|
|
||||||
|
|
||||||
if filter_key and filter_regexp:
|
|
||||||
if filter_key in v:
|
|
||||||
if re.match(filter_regexp, v[filter_key]):
|
|
||||||
yield v
|
|
||||||
else:
|
|
||||||
yield v
|
|
||||||
|
|
||||||
|
|
||||||
@readable_errors
|
|
||||||
def increment(self, key, **kwargs):
|
|
||||||
print(self.realkey(key))
|
|
||||||
|
|
||||||
|
|
||||||
print("prelock")
|
|
||||||
lock = self._db_clients[0].lock('/nicohack/foo')
|
|
||||||
print("prelockacq")
|
|
||||||
lock.acquire()
|
|
||||||
print("prelockrelease")
|
|
||||||
lock.release()
|
|
||||||
|
|
||||||
with self._db_clients[0].lock("/nicohack/mac/last_used_index") as lock:
|
|
||||||
print("in lock")
|
|
||||||
pass
|
|
||||||
|
|
||||||
# with self._db_clients[0].lock(self.realkey(key)) as lock:# value = int(self.get(self.realkey(key), **kwargs))
|
|
||||||
# self.set(self.realkey(key), str(value + 1), **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
endpoints = [ "https://etcd1.ungleich.ch:2379",
|
|
||||||
"https://etcd2.ungleich.ch:2379",
|
|
||||||
"https://etcd3.ungleich.ch:2379" ]
|
|
||||||
|
|
||||||
db = DB(url=endpoints)
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
||||||
*.iso
|
|
||||||
radvdpid
|
|
||||||
foo
|
|
||||||
|
|
@ -1,6 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
etcdctl --cert=$HOME/vcs/ungleich-dot-cdist/files/etcd/nico.pem \
|
|
||||||
--key=/home/nico/vcs/ungleich-dot-cdist/files/etcd/nico-key.pem \
|
|
||||||
--cacert=$HOME/vcs/ungleich-dot-cdist/files/etcd/ca.pem \
|
|
||||||
--endpoints https://etcd1.ungleich.ch:2379,https://etcd2.ungleich.ch:2379,https://etcd3.ungleich.ch:2379 "$@"
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
echo $@
|
|
||||||
|
|
@ -1,7 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
dev=$1; shift
|
|
||||||
|
|
||||||
# bridge is setup from outside
|
|
||||||
ip link set dev "$dev" master ${bridge}
|
|
||||||
ip link set dev "$dev" up
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
000000000252
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
02:00
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
set -x
|
|
||||||
|
|
||||||
netid=100
|
|
||||||
dev=wlp2s0
|
|
||||||
dev=wlp0s20f3
|
|
||||||
#dev=wlan0
|
|
||||||
|
|
||||||
ip=2a0a:e5c1:111:888::48/64
|
|
||||||
vxlandev=vxlan${netid}
|
|
||||||
bridgedev=br${netid}
|
|
||||||
|
|
||||||
ip -6 link add ${vxlandev} type vxlan \
|
|
||||||
id ${netid} \
|
|
||||||
dstport 4789 \
|
|
||||||
group ff05::${netid} \
|
|
||||||
dev ${dev} \
|
|
||||||
ttl 5
|
|
||||||
|
|
||||||
ip link set ${vxlandev} up
|
|
||||||
|
|
||||||
|
|
||||||
ip link add ${bridgedev} type bridge
|
|
||||||
ip link set ${bridgedev} up
|
|
||||||
|
|
||||||
ip link set ${vxlandev} master ${bridgedev} up
|
|
||||||
|
|
||||||
ip addr add ${ip} dev ${bridgedev}
|
|
||||||
|
|
@ -1,31 +0,0 @@
|
||||||
flush ruleset
|
|
||||||
|
|
||||||
table bridge filter {
|
|
||||||
chain prerouting {
|
|
||||||
type filter hook prerouting priority 0;
|
|
||||||
policy accept;
|
|
||||||
|
|
||||||
ibrname br100 jump br100
|
|
||||||
}
|
|
||||||
|
|
||||||
chain br100 {
|
|
||||||
# Allow all incoming traffic from outside
|
|
||||||
iifname vxlan100 accept
|
|
||||||
|
|
||||||
# Default blocks: router advertisements, dhcpv6, dhcpv4
|
|
||||||
icmpv6 type nd-router-advert drop
|
|
||||||
ip6 version 6 udp sport 547 drop
|
|
||||||
ip version 4 udp sport 67 drop
|
|
||||||
|
|
||||||
jump br100_vmlist
|
|
||||||
drop
|
|
||||||
}
|
|
||||||
chain br100_vmlist {
|
|
||||||
# VM1
|
|
||||||
iifname tap1 ether saddr 02:00:f0:a9:c4:4e ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44e accept
|
|
||||||
|
|
||||||
# VM2
|
|
||||||
iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:888:0:f0ff:fea9:c44f accept
|
|
||||||
iifname v343a-0 ether saddr 02:00:f0:a9:c4:4f ip6 saddr 2a0a:e5c1:111:1234::/64 accept
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,64 +0,0 @@
|
||||||
flush ruleset
|
|
||||||
|
|
||||||
table bridge filter {
|
|
||||||
chain prerouting {
|
|
||||||
type filter hook prerouting priority 0;
|
|
||||||
policy accept;
|
|
||||||
|
|
||||||
ibrname br100 jump netpublic
|
|
||||||
}
|
|
||||||
|
|
||||||
chain netpublic {
|
|
||||||
iifname vxlan100 jump from_uncloud
|
|
||||||
|
|
||||||
# Default blocks: router advertisements, dhcpv6, dhcpv4
|
|
||||||
icmpv6 type nd-router-advert drop
|
|
||||||
ip6 version 6 udp sport 547 drop
|
|
||||||
ip version 4 udp sport 67 drop
|
|
||||||
|
|
||||||
# Individual blocks
|
|
||||||
# iifname tap1 jump vm1
|
|
||||||
}
|
|
||||||
|
|
||||||
chain vm1 {
|
|
||||||
ether saddr != 02:00:f0:a9:c4:4e drop
|
|
||||||
ip6 saddr != 2a0a:e5c1:111:888:0:f0ff:fea9:c44e drop
|
|
||||||
}
|
|
||||||
|
|
||||||
chain from_uncloud {
|
|
||||||
accept
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# table ip6 filter {
|
|
||||||
# chain forward {
|
|
||||||
# type filter hook forward priority 0;
|
|
||||||
|
|
||||||
# # policy drop;
|
|
||||||
|
|
||||||
# ct state established,related accept;
|
|
||||||
|
|
||||||
# }
|
|
||||||
|
|
||||||
# }
|
|
||||||
|
|
||||||
# table ip filter {
|
|
||||||
# chain input {
|
|
||||||
# type filter hook input priority filter; policy drop;
|
|
||||||
# iif "lo" accept
|
|
||||||
# icmp type { echo-reply, destination-unreachable, source-quench, redirect, echo-request, router-advertisement, router-solicitation, time-exceeded, parameter-problem, timestamp-request, timestamp-reply, info-request, info-reply, address-mask-request, address-mask-reply } accept
|
|
||||||
# ct state established,related accept
|
|
||||||
# tcp dport { 22 } accept
|
|
||||||
# log prefix "firewall-ipv4: "
|
|
||||||
# udp sport 67 drop
|
|
||||||
# }
|
|
||||||
|
|
||||||
# chain forward {
|
|
||||||
# type filter hook forward priority filter; policy drop;
|
|
||||||
# log prefix "firewall-ipv4: "
|
|
||||||
# }
|
|
||||||
|
|
||||||
# chain output {
|
|
||||||
# type filter hook output priority filter; policy accept;
|
|
||||||
# }
|
|
||||||
# }
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
interface br100
|
|
||||||
{
|
|
||||||
AdvSendAdvert on;
|
|
||||||
MinRtrAdvInterval 3;
|
|
||||||
MaxRtrAdvInterval 5;
|
|
||||||
AdvDefaultLifetime 3600;
|
|
||||||
|
|
||||||
prefix 2a0a:e5c1:111:888::/64 {
|
|
||||||
};
|
|
||||||
|
|
||||||
RDNSS 2a0a:e5c0::3 2a0a:e5c0::4 { AdvRDNSSLifetime 6000; };
|
|
||||||
DNSSL place7.ungleich.ch { AdvDNSSLLifetime 6000; } ;
|
|
||||||
};
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
radvd -C ./radvd.conf -n -p ./radvdpid
|
|
||||||
|
|
@ -1,24 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
vmid=$1; shift
|
|
||||||
|
|
||||||
qemu=/usr/bin/qemu-system-x86_64
|
|
||||||
|
|
||||||
accel=kvm
|
|
||||||
#accel=tcg
|
|
||||||
|
|
||||||
memory=1024
|
|
||||||
cores=2
|
|
||||||
uuid=732e08c7-84f8-4d43-9571-263db4f80080
|
|
||||||
|
|
||||||
export bridge=br100
|
|
||||||
|
|
||||||
$qemu -name uc${vmid} \
|
|
||||||
-machine pc,accel=${accel} \
|
|
||||||
-m ${memory} \
|
|
||||||
-smp ${cores} \
|
|
||||||
-uuid ${uuid} \
|
|
||||||
-drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
|
|
||||||
-drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
|
|
||||||
-netdev tap,id=netmain,script=./ifup.sh \
|
|
||||||
-device virtio-net-pci,netdev=netmain,id=net0,mac=02:00:f0:a9:c4:4e
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# if [ $# -ne 1 ]; then
|
|
||||||
# echo "$0: owner"
|
|
||||||
# exit 1
|
|
||||||
# fi
|
|
||||||
|
|
||||||
qemu=/usr/bin/qemu-system-x86_64
|
|
||||||
|
|
||||||
accel=kvm
|
|
||||||
#accel=tcg
|
|
||||||
|
|
||||||
memory=1024
|
|
||||||
cores=2
|
|
||||||
uuid=$(uuidgen)
|
|
||||||
mac=$(./mac-gen.py)
|
|
||||||
owner=nico
|
|
||||||
|
|
||||||
export bridge=br100
|
|
||||||
|
|
||||||
set -x
|
|
||||||
$qemu -name "uncloud-${uuid}" \
|
|
||||||
-machine pc,accel=${accel} \
|
|
||||||
-m ${memory} \
|
|
||||||
-smp ${cores} \
|
|
||||||
-uuid ${uuid} \
|
|
||||||
-drive file=alpine-virt-3.11.2-x86_64.iso,media=cdrom \
|
|
||||||
-netdev tap,id=netmain,script=./ifup.sh,downscript=./ifdown.sh \
|
|
||||||
-device virtio-net-pci,netdev=netmain,id=net0,mac=${mac}
|
|
||||||
|
|
@ -1,75 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
|
||||||
#
|
|
||||||
# This file is part of uncloud.
|
|
||||||
#
|
|
||||||
# uncloud is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# uncloud is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from uncloud.hack.db import DB
|
|
||||||
from uncloud import UncloudException
|
|
||||||
|
|
||||||
class Host(object):
|
|
||||||
def __init__(self, config, db_entry=None):
|
|
||||||
self.config = config
|
|
||||||
self.db = DB(self.config, prefix="/hosts")
|
|
||||||
|
|
||||||
if db_entry:
|
|
||||||
self.db_entry = db_entry
|
|
||||||
|
|
||||||
|
|
||||||
def list_hosts(self, filter_key=None, filter_regexp=None):
|
|
||||||
""" Return list of all hosts """
|
|
||||||
for entry in self.db.list_and_filter("", filter_key, filter_regexp):
|
|
||||||
yield self.__class__(self.config, db_entry=entry)
|
|
||||||
|
|
||||||
def cmdline_add_host(self):
|
|
||||||
""" FIXME: make this a bit smarter and less redundant """
|
|
||||||
|
|
||||||
for required_arg in [
|
|
||||||
'add_vm_host',
|
|
||||||
'max_cores_per_vm',
|
|
||||||
'max_cores_total',
|
|
||||||
'max_memory_in_gb' ]:
|
|
||||||
if not required_arg in self.config.arguments:
|
|
||||||
raise UncloudException("Missing argument: {}".format(required_arg))
|
|
||||||
|
|
||||||
return self.add_host(
|
|
||||||
self.config.arguments['add_vm_host'],
|
|
||||||
self.config.arguments['max_cores_per_vm'],
|
|
||||||
self.config.arguments['max_cores_total'],
|
|
||||||
self.config.arguments['max_memory_in_gb'])
|
|
||||||
|
|
||||||
|
|
||||||
def add_host(self,
|
|
||||||
hostname,
|
|
||||||
max_cores_per_vm,
|
|
||||||
max_cores_total,
|
|
||||||
max_memory_in_gb):
|
|
||||||
|
|
||||||
db_entry = {}
|
|
||||||
db_entry['uuid'] = str(uuid.uuid4())
|
|
||||||
db_entry['hostname'] = hostname
|
|
||||||
db_entry['max_cores_per_vm'] = max_cores_per_vm
|
|
||||||
db_entry['max_cores_total'] = max_cores_total
|
|
||||||
db_entry['max_memory_in_gb'] = max_memory_in_gb
|
|
||||||
db_entry["db_version"] = 1
|
|
||||||
db_entry["log"] = []
|
|
||||||
|
|
||||||
self.db.set(db_entry['uuid'], db_entry, as_json=True)
|
|
||||||
|
|
||||||
return self.__class__(self.config, db_entry)
|
|
||||||
|
|
@ -1,104 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# 2012 Nico Schottelius (nico-cinv at schottelius.org)
|
|
||||||
#
|
|
||||||
# This file is part of cinv.
|
|
||||||
#
|
|
||||||
# cinv is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# cinv is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with cinv. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import logging
|
|
||||||
import os.path
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import json
|
|
||||||
|
|
||||||
from uncloud import UncloudException
|
|
||||||
from uncloud.hack.db import DB
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class MAC(object):
|
|
||||||
def __init__(self, config):
|
|
||||||
self.config = config
|
|
||||||
self.no_db = self.config.arguments['no_db']
|
|
||||||
if not self.no_db:
|
|
||||||
self.db = DB(config, prefix="/mac")
|
|
||||||
|
|
||||||
self.prefix = 0x420000000000
|
|
||||||
self._number = 0 # Not set by default
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def validate_mac(mac):
|
|
||||||
if not re.match(r'([0-9A-F]{2}[-:]){5}[0-9A-F]{2}$', mac, re.I):
|
|
||||||
raise UncloudException("Not a valid mac address: %s" % mac)
|
|
||||||
else:
|
|
||||||
return True
|
|
||||||
|
|
||||||
def last_used_index(self):
|
|
||||||
if not self.no_db:
|
|
||||||
value = self.db.get("last_used_index")
|
|
||||||
if not value:
|
|
||||||
self.db.set("last_used_index", "0")
|
|
||||||
value = self.db.get("last_used_index")
|
|
||||||
|
|
||||||
else:
|
|
||||||
value = "0"
|
|
||||||
|
|
||||||
return int(value)
|
|
||||||
|
|
||||||
def last_used_mac(self):
|
|
||||||
return self.int_to_mac(self.prefix + self.last_used_index())
|
|
||||||
|
|
||||||
def to_colon_format(self):
|
|
||||||
b = self._number.to_bytes(6, byteorder="big")
|
|
||||||
return ':'.join(format(s, '02x') for s in b)
|
|
||||||
|
|
||||||
def to_str_format(self):
|
|
||||||
b = self._number.to_bytes(6, byteorder="big")
|
|
||||||
return ''.join(format(s, '02x') for s in b)
|
|
||||||
|
|
||||||
def create(self):
|
|
||||||
last_number = self.last_used_index()
|
|
||||||
|
|
||||||
if last_number == int('0xffffffff', 16):
|
|
||||||
raise UncloudException("Exhausted all possible mac addresses - try to free some")
|
|
||||||
|
|
||||||
next_number = last_number + 1
|
|
||||||
self._number = self.prefix + next_number
|
|
||||||
|
|
||||||
#next_number_string = "{:012x}".format(next_number)
|
|
||||||
#next_mac = self.int_to_mac(next_mac_number)
|
|
||||||
# db_entry = {}
|
|
||||||
# db_entry['vm_uuid'] = vmuuid
|
|
||||||
# db_entry['index'] = next_number
|
|
||||||
# db_entry['mac_address'] = next_mac
|
|
||||||
|
|
||||||
# should be one transaction
|
|
||||||
# self.db.increment("last_used_index")
|
|
||||||
# self.db.set("used/{}".format(next_mac),
|
|
||||||
# db_entry, as_json=True)
|
|
||||||
|
|
||||||
def __int__(self):
|
|
||||||
return self._number
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
return self.to_str_format()
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return self.to_colon_format()
|
|
||||||
|
|
@ -1,186 +0,0 @@
|
||||||
import argparse
|
|
||||||
import logging
|
|
||||||
import re
|
|
||||||
|
|
||||||
import ldap3
|
|
||||||
|
|
||||||
|
|
||||||
from uncloud.hack.vm import VM
|
|
||||||
from uncloud.hack.host import Host
|
|
||||||
from uncloud.hack.config import Config
|
|
||||||
from uncloud.hack.mac import MAC
|
|
||||||
from uncloud.hack.net import VXLANBridge, DNSRA
|
|
||||||
|
|
||||||
from uncloud import UncloudException
|
|
||||||
from uncloud.hack.product import ProductOrder
|
|
||||||
|
|
||||||
arg_parser = argparse.ArgumentParser('hack', add_help=False)
|
|
||||||
#description="Commands that are unfinished - use at own risk")
|
|
||||||
arg_parser.add_argument('--last-used-mac', action='store_true')
|
|
||||||
arg_parser.add_argument('--get-new-mac', action='store_true')
|
|
||||||
|
|
||||||
arg_parser.add_argument('--init-network', help="Initialise networking", action='store_true')
|
|
||||||
arg_parser.add_argument('--create-vxlan', help="Initialise networking", action='store_true')
|
|
||||||
arg_parser.add_argument('--network', help="/64 IPv6 network")
|
|
||||||
arg_parser.add_argument('--vxlan-uplink-device', help="The VXLAN underlay device, i.e. eth0")
|
|
||||||
arg_parser.add_argument('--vni', help="VXLAN ID (decimal)", type=int)
|
|
||||||
arg_parser.add_argument('--run-dns-ra', action='store_true',
|
|
||||||
help="Provide router advertisements and DNS resolution via dnsmasq")
|
|
||||||
arg_parser.add_argument('--use-sudo', help="Use sudo for command requiring root!", action='store_true')
|
|
||||||
|
|
||||||
arg_parser.add_argument('--create-vm', action='store_true')
|
|
||||||
arg_parser.add_argument('--destroy-vm', action='store_true')
|
|
||||||
arg_parser.add_argument('--get-vm-status', action='store_true')
|
|
||||||
arg_parser.add_argument('--get-vm-vnc', action='store_true')
|
|
||||||
arg_parser.add_argument('--list-vms', action='store_true')
|
|
||||||
arg_parser.add_argument('--memory', help="Size of memory (GB)", type=int, default=2)
|
|
||||||
arg_parser.add_argument('--cores', help="Amount of CPU cores", type=int, default=1)
|
|
||||||
arg_parser.add_argument('--image', help="Path (under hackprefix) to OS image")
|
|
||||||
|
|
||||||
arg_parser.add_argument('--image-format', help="Image format: qcow2 or raw", choices=['raw', 'qcow2'])
|
|
||||||
arg_parser.add_argument('--uuid', help="VM UUID")
|
|
||||||
|
|
||||||
arg_parser.add_argument('--no-db', help="Disable connection to etcd. For local testing only!", action='store_true')
|
|
||||||
arg_parser.add_argument('--hackprefix', help="hackprefix, if you need it you know it (it's where the iso is located and ifup/down.sh")
|
|
||||||
|
|
||||||
# order based commands => later to be shifted below "order"
|
|
||||||
arg_parser.add_argument('--order', action='store_true')
|
|
||||||
arg_parser.add_argument('--list-orders', help="List all orders", action='store_true')
|
|
||||||
arg_parser.add_argument('--filter-order-key', help="Which key to filter on")
|
|
||||||
arg_parser.add_argument('--filter-order-regexp', help="Which regexp the value should match")
|
|
||||||
|
|
||||||
arg_parser.add_argument('--process-orders', help="Process all (pending) orders", action='store_true')
|
|
||||||
|
|
||||||
arg_parser.add_argument('--product', choices=["dualstack-vm"])
|
|
||||||
arg_parser.add_argument('--os-image-name', help="Name of OS image (successor to --image)")
|
|
||||||
arg_parser.add_argument('--os-image-size', help="Size of OS image in GB", type=int, default=10)
|
|
||||||
|
|
||||||
arg_parser.add_argument('--username')
|
|
||||||
arg_parser.add_argument('--password')
|
|
||||||
|
|
||||||
arg_parser.add_argument('--api', help="Run the API")
|
|
||||||
arg_parser.add_argument('--mode',
|
|
||||||
choices=["direct", "api", "client"],
|
|
||||||
default="client",
|
|
||||||
help="Directly manipulate etcd, spawn the API server or behave as a client")
|
|
||||||
|
|
||||||
|
|
||||||
arg_parser.add_argument('--add-vm-host', help="Add a host that can run VMs")
|
|
||||||
arg_parser.add_argument('--list-vm-hosts', action='store_true')
|
|
||||||
|
|
||||||
arg_parser.add_argument('--max-cores-per-vm')
|
|
||||||
arg_parser.add_argument('--max-cores-total')
|
|
||||||
arg_parser.add_argument('--max-memory-in-gb')
|
|
||||||
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
def authenticate(username, password, totp_token=None):
|
|
||||||
server = ldap3.Server("ldaps://ldap1.ungleich.ch")
|
|
||||||
dn = "uid={},ou=customer,dc=ungleich,dc=ch".format(username)
|
|
||||||
|
|
||||||
log.debug("LDAP: connecting to {} as {}".format(server, dn))
|
|
||||||
|
|
||||||
try:
|
|
||||||
conn = ldap3.Connection(server, dn, password, auto_bind=True)
|
|
||||||
except ldap3.core.exceptions.LDAPBindError as e:
|
|
||||||
raise UncloudException("Credentials not verified by LDAP server: {}".format(e))
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def order(config):
|
|
||||||
for required_arg in [ 'product', 'username', 'password' ]:
|
|
||||||
if not config.arguments[required_arg]:
|
|
||||||
raise UncloudException("Missing required argument: {}".format(required_arg))
|
|
||||||
|
|
||||||
if config.arguments['product'] == 'dualstack-vm':
|
|
||||||
for required_arg in [ 'cores', 'memory', 'os_image_name', 'os_image_size' ]:
|
|
||||||
if not config.arguments[required_arg]:
|
|
||||||
raise UncloudException("Missing required argument: {}".format(required_arg))
|
|
||||||
|
|
||||||
log.debug(config.arguments)
|
|
||||||
authenticate(config.arguments['username'], config.arguments['password'])
|
|
||||||
|
|
||||||
# create DB entry for VM
|
|
||||||
vm = VM(config)
|
|
||||||
return vm.product.place_order(owner=config.arguments['username'])
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def main(arguments):
|
|
||||||
config = Config(arguments)
|
|
||||||
|
|
||||||
if arguments['add_vm_host']:
|
|
||||||
h = Host(config)
|
|
||||||
h.cmdline_add_host()
|
|
||||||
|
|
||||||
if arguments['list_vm_hosts']:
|
|
||||||
h = Host(config)
|
|
||||||
|
|
||||||
for host in h.list_hosts(filter_key=arguments['filter_order_key'],
|
|
||||||
filter_regexp=arguments['filter_order_regexp']):
|
|
||||||
print("Host {}: {}".format(host.db_entry['uuid'], host.db_entry))
|
|
||||||
|
|
||||||
if arguments['order']:
|
|
||||||
print("Created order: {}".format(order(config)))
|
|
||||||
|
|
||||||
if arguments['list_orders']:
|
|
||||||
p = ProductOrder(config)
|
|
||||||
for product_order in p.list_orders(filter_key=arguments['filter_order_key'],
|
|
||||||
filter_regexp=arguments['filter_order_regexp']):
|
|
||||||
print("Order {}: {}".format(product_order.db_entry['uuid'], product_order.db_entry))
|
|
||||||
|
|
||||||
if arguments['process_orders']:
|
|
||||||
p = ProductOrder(config)
|
|
||||||
p.process_orders()
|
|
||||||
|
|
||||||
if arguments['create_vm']:
|
|
||||||
vm = VM(config)
|
|
||||||
vm.create()
|
|
||||||
|
|
||||||
if arguments['destroy_vm']:
|
|
||||||
vm = VM(config)
|
|
||||||
vm.stop()
|
|
||||||
|
|
||||||
if arguments['get_vm_status']:
|
|
||||||
vm = VM(config)
|
|
||||||
vm.status()
|
|
||||||
|
|
||||||
if arguments['get_vm_vnc']:
|
|
||||||
vm = VM(config)
|
|
||||||
vm.vnc_addr()
|
|
||||||
|
|
||||||
if arguments['list_vms']:
|
|
||||||
vm = VM(config)
|
|
||||||
vm.list()
|
|
||||||
|
|
||||||
if arguments['last_used_mac']:
|
|
||||||
m = MAC(config)
|
|
||||||
print(m.last_used_mac())
|
|
||||||
|
|
||||||
if arguments['get_new_mac']:
|
|
||||||
print(MAC(config).get_next())
|
|
||||||
|
|
||||||
#if arguments['init_network']:
|
|
||||||
if arguments['create_vxlan']:
|
|
||||||
if not arguments['network'] or not arguments['vni'] or not arguments['vxlan_uplink_device']:
|
|
||||||
raise UncloudException("Initialising the network requires an IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
|
|
||||||
vb = VXLANBridge(vni=arguments['vni'],
|
|
||||||
route=arguments['network'],
|
|
||||||
uplinkdev=arguments['vxlan_uplink_device'],
|
|
||||||
use_sudo=arguments['use_sudo'])
|
|
||||||
vb._setup_vxlan()
|
|
||||||
vb._setup_bridge()
|
|
||||||
vb._add_vxlan_to_bridge()
|
|
||||||
vb._route_network()
|
|
||||||
|
|
||||||
if arguments['run_dns_ra']:
|
|
||||||
if not arguments['network'] or not arguments['vni']:
|
|
||||||
raise UncloudException("Providing DNS/RAs requires a /64 IPv6 network and a VNI. You can use fd00::/64 and vni=1 for testing (non production!)")
|
|
||||||
|
|
||||||
dnsra = DNSRA(route=arguments['network'],
|
|
||||||
vni=arguments['vni'],
|
|
||||||
use_sudo=arguments['use_sudo'])
|
|
||||||
dnsra._setup_dnsmasq()
|
|
||||||
|
|
@ -1,116 +0,0 @@
|
||||||
import subprocess
|
|
||||||
import ipaddress
|
|
||||||
import logging
|
|
||||||
|
|
||||||
|
|
||||||
from uncloud import UncloudException
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class VXLANBridge(object):
|
|
||||||
cmd_create_vxlan = "{sudo}ip -6 link add {vxlandev} type vxlan id {vni_dec} dstport 4789 group {multicast_address} dev {uplinkdev} ttl 5"
|
|
||||||
cmd_up_dev = "{sudo}ip link set {dev} up"
|
|
||||||
cmd_create_bridge="{sudo}ip link add {bridgedev} type bridge"
|
|
||||||
cmd_add_to_bridge="{sudo}ip link set {vxlandev} master {bridgedev} up"
|
|
||||||
cmd_add_addr="{sudo}ip addr add {ip} dev {bridgedev}"
|
|
||||||
cmd_add_route_dev="{sudo}ip route add {route} dev {bridgedev}"
|
|
||||||
|
|
||||||
# VXLAN ids are at maximum 24 bit - use a /104
|
|
||||||
multicast_network = ipaddress.IPv6Network("ff05::/104")
|
|
||||||
max_vni = (2**24)-1
|
|
||||||
|
|
||||||
def __init__(self,
|
|
||||||
vni,
|
|
||||||
uplinkdev,
|
|
||||||
route=None,
|
|
||||||
use_sudo=False):
|
|
||||||
self.config = {}
|
|
||||||
|
|
||||||
if vni > self.max_vni:
|
|
||||||
raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))
|
|
||||||
|
|
||||||
if use_sudo:
|
|
||||||
self.config['sudo'] = 'sudo '
|
|
||||||
else:
|
|
||||||
self.config['sudo'] = ''
|
|
||||||
|
|
||||||
self.config['vni_dec'] = vni
|
|
||||||
self.config['vni_hex'] = "{:x}".format(vni)
|
|
||||||
self.config['multicast_address'] = self.multicast_network[vni]
|
|
||||||
|
|
||||||
self.config['route_network'] = ipaddress.IPv6Network(route)
|
|
||||||
self.config['route'] = route
|
|
||||||
|
|
||||||
self.config['uplinkdev'] = uplinkdev
|
|
||||||
self.config['vxlandev'] = "vx{}".format(self.config['vni_hex'])
|
|
||||||
self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])
|
|
||||||
|
|
||||||
|
|
||||||
def setup_networking(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def _setup_vxlan(self):
|
|
||||||
self._execute_cmd(self.cmd_create_vxlan)
|
|
||||||
self._execute_cmd(self.cmd_up_dev, dev=self.config['vxlandev'])
|
|
||||||
|
|
||||||
def _setup_bridge(self):
|
|
||||||
self._execute_cmd(self.cmd_create_bridge)
|
|
||||||
self._execute_cmd(self.cmd_up_dev, dev=self.config['bridgedev'])
|
|
||||||
|
|
||||||
def _route_network(self):
|
|
||||||
self._execute_cmd(self.cmd_add_route_dev)
|
|
||||||
|
|
||||||
def _add_vxlan_to_bridge(self):
    """Enslave the VXLAN device to the bridge and bring it up."""
    self._execute_cmd(self.cmd_add_to_bridge)
|
|
||||||
|
|
||||||
def _execute_cmd(self, cmd_string, **kwargs):
    """Format *cmd_string* with self.config (plus *kwargs*) and run it.

    NOTE(review): cmd.split() breaks arguments containing whitespace, and
    the return code of subprocess.run() is ignored — confirm both are
    intended before relying on this in production paths.
    """
    cmd = cmd_string.format(**self.config, **kwargs)
    log.info("Executing: {}".format(cmd))
    subprocess.run(cmd.split())
|
|
||||||
|
|
||||||
class ManagementBridge(VXLANBridge):
    """Marker subclass for the management network; behaves exactly like VXLANBridge."""
    pass
|
|
||||||
|
|
||||||
|
|
||||||
class DNSRA(object):
    """Run dnsmasq on a VXLAN bridge to serve IPv6 router advertisements."""

    # VXLAN ids are at maximum 24 bit
    max_vni = (2**24)-1

    # Command to start dnsmasq (blocking: --no-daemon).
    cmd_start_dnsmasq="{sudo}dnsmasq --interface={bridgedev} --bind-interfaces --dhcp-range={route},ra-only,infinite --enable-ra --no-daemon"

    def __init__(self,
                 vni,
                 route=None,
                 use_sudo=False):
        """Prepare the dnsmasq config for the bridge belonging to *vni*.

        vni: VXLAN network identifier, 0 .. max_vni.
        route: IPv6 network (string) to announce; required before
               _setup_dnsmasq() is called.
        use_sudo: prefix the command with "sudo " when True.

        Raises UncloudException on an out-of-range vni.
        """
        self.config = {}

        # BUGFIX: also reject negative VNIs (only the upper bound was checked).
        if not 0 <= vni <= self.max_vni:
            raise UncloudException("VNI must be in the range of 0 .. {}".format(self.max_vni))

        self.config['sudo'] = 'sudo ' if use_sudo else ''

        self.config['vni_hex'] = "{:x}".format(vni)

        # dnsmasq only wants the network without the prefix, therefore, cut it off.
        # BUGFIX: route defaults to None but was passed unconditionally to
        # IPv6Network(), which raises on None; only record it when given.
        if route is not None:
            self.config['route'] = ipaddress.IPv6Network(route).network_address
        self.config['bridgedev'] = "br{}".format(self.config['vni_hex'])

    def _setup_dnsmasq(self):
        """Start dnsmasq on the bridge device (blocks: --no-daemon)."""
        self._execute_cmd(self.cmd_start_dnsmasq)

    def _execute_cmd(self, cmd_string, **kwargs):
        """Format and run a command.

        BUGFIX: removed the print() that duplicated the log.info() line
        on stdout (debug leftover).
        """
        cmd = cmd_string.format(**self.config, **kwargs)
        log.info("Executing: {}".format(cmd))
        subprocess.run(cmd.split())
|
|
||||||
|
|
||||||
class Firewall(object):
    """Placeholder for host firewall management (not implemented yet)."""
    pass
|
|
||||||
|
|
@ -1,94 +0,0 @@
|
||||||
flush ruleset
|
|
||||||
|
|
||||||
table bridge filter {
|
|
||||||
chain prerouting {
|
|
||||||
type filter hook prerouting priority 0;
|
|
||||||
policy accept;
|
|
||||||
ibrname br100 jump netpublic
|
|
||||||
}
|
|
||||||
chain netpublic {
|
|
||||||
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
table ip6 filter {
|
|
||||||
chain forward {
|
|
||||||
type filter hook forward priority 0;
|
|
||||||
|
|
||||||
# this would be nice...
|
|
||||||
policy drop;
|
|
||||||
|
|
||||||
ct state established,related accept;
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
chain prerouting {
|
|
||||||
type filter hook prerouting priority 0;
|
|
||||||
policy accept;
|
|
||||||
|
|
||||||
# not supporting in here!
|
|
||||||
|
|
||||||
|
|
||||||
iifname vmXXXX jump vmXXXX
|
|
||||||
iifname vmYYYY jump vmYYYY
|
|
||||||
|
|
||||||
iifname brXX jump brXX
|
|
||||||
|
|
||||||
iifname vxlan100 jump vxlan100
|
|
||||||
iifname br100 jump br100
|
|
||||||
}
|
|
||||||
|
|
||||||
# 1. Rules per VM (names: vmXXXXX?
|
|
||||||
# 2. Rules per network (names: vxlanXXXX, what about non vxlan?)
|
|
||||||
# 3. Rules per bridge:
|
|
||||||
# vxlanXX is inside brXX
|
|
||||||
# This is effectively a network filter
|
|
||||||
# 4. Kill all malicious traffic:
|
|
||||||
# - router advertisements from VMs in which they should not announce RAs
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
chain vxlan100 {
|
|
||||||
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
|
|
||||||
}
|
|
||||||
chain br100 {
|
|
||||||
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } log
|
|
||||||
}
|
|
||||||
|
|
||||||
chain netpublic {
|
|
||||||
# drop router advertisements that don't come from us
|
|
||||||
iifname != vxlanpublic icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
|
|
||||||
# icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
# This vlan
|
|
||||||
chain brXX {
|
|
||||||
ip6 saddr != 2001:db8:1::/64 drop;
|
|
||||||
}
|
|
||||||
|
|
||||||
chain vmXXXX {
|
|
||||||
ether saddr != 00:0f:54:0c:11:04 drop;
|
|
||||||
ip6 saddr != 2001:db8:1:000f::540c:11ff:fe04 drop;
|
|
||||||
jump drop_from_vm_without_ipam
|
|
||||||
}
|
|
||||||
|
|
||||||
chain net_2a0ae5c05something {
|
|
||||||
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
chain drop_from_vm_without_ipam {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
chain vmYYYY {
|
|
||||||
ether saddr != 00:0f:54:0c:11:05 drop;
|
|
||||||
jump drop_from_vm_with_ipam
|
|
||||||
}
|
|
||||||
|
|
||||||
# Drop stuff from every VM
|
|
||||||
chain drop_from_vm_with_ipam {
|
|
||||||
icmpv6 type {nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect } drop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,206 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
|
||||||
#
|
|
||||||
# This file is part of uncloud.
|
|
||||||
#
|
|
||||||
# uncloud is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# uncloud is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import json
|
|
||||||
import uuid
|
|
||||||
import logging
|
|
||||||
import re
|
|
||||||
import importlib
|
|
||||||
|
|
||||||
from uncloud import UncloudException
|
|
||||||
from uncloud.hack.db import DB, db_logentry
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
class ProductOrder(object):
    """An order of a product, persisted under the /orders etcd prefix."""

    # Statuses an order may legally carry.
    VALID_STATUSES = ["NEW", "SCHEDULED", "CREATED_ACTIVE", "CANCELLED", "REJECTED"]

    def __init__(self, config, product_entry=None, db_entry=None):
        self.config = config
        self.db = DB(self.config, prefix="/orders")
        self.db_entry = {}
        self.db_entry["product"] = product_entry

        # Overwrite if we are loading an existing product order
        if db_entry:
            self.db_entry = db_entry

    # FIXME: this should return a list of our class!
    def list_orders(self, filter_key=None, filter_regexp=None):
        """Yield a ProductOrder for every matching order in the database."""
        for entry in self.db.list_and_filter("", filter_key, filter_regexp):
            yield self.__class__(self.config, db_entry=entry)

    def set_required_values(self):
        """Set values that are required to make the db entry valid."""
        self.db_entry.setdefault("uuid", str(uuid.uuid4()))
        self.db_entry.setdefault("status", "NEW")
        self.db_entry.setdefault("owner", "UNKNOWN")
        self.db_entry.setdefault("log", [])
        self.db_entry.setdefault("db_version", 1)

    def validate_status(self):
        """Return True when the order carries a known status, False otherwise.

        BUGFIX: the original returned False for every *valid* status and
        True for unknown/missing ones (inverted result).
        """
        return self.db_entry.get("status") in self.VALID_STATUSES

    def order(self):
        """Persist this order as NEW and return its uuid.

        Raises UncloudException when the order was already processed.
        """
        self.set_required_values()
        if not self.db_entry["status"] == "NEW":
            raise UncloudException("Cannot re-order same order. Status: {}".format(self.db_entry["status"]))
        self.db.set(self.db_entry["uuid"], self.db_entry, as_json=True)

        return self.db_entry["uuid"]

    def _reject_order(self, order, message):
        """Log, record, and persist a rejection for *order*."""
        log.info("Rejecting order {}: {}".format(order.db_entry["uuid"], message))
        order.db_entry['log'].append(db_logentry(message))
        order.db_entry['status'] = "REJECTED"
        self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)

    def process_orders(self):
        """processing orders can be done stand alone on server side"""
        for order in self.list_orders():
            if not order.db_entry["status"] == "NEW":
                continue
            log.info("Handling new order: {}".format(order))

            # FIXME: these all should be a transactions! -> fix concurrent access!
            if not "log" in order.db_entry:
                order.db_entry['log'] = []

            # Verify the order entry.
            is_valid = True
            for must_attribute in ["owner", "product"]:
                if not must_attribute in order.db_entry:
                    self._reject_order(order, "Missing {} entry in order, rejecting order".format(must_attribute))
                    is_valid = False
            if not is_valid:
                continue

            # Verify the product entry.
            for must_attribute in ["python_product_class", "python_product_module"]:
                if not must_attribute in order.db_entry['product']:
                    self._reject_order(order, "Missing {} entry in product of order, rejecting order".format(must_attribute))
                    is_valid = False
            if not is_valid:
                continue

            log.debug("Creating product %s", order.db_entry["product"]["python_product_class"])

            # Instantiate the product class named in the order.
            m = importlib.import_module(order.db_entry["product"]["python_product_module"])
            c = getattr(m, order.db_entry["product"]["python_product_class"])

            # BUGFIX: was `c(config, ...)` — NameError, `config` is not
            # defined in this scope; the instance config was intended.
            product = c(self.config, db_entry=order.db_entry["product"])

            # STOPPED
            product.create_product()

            order.db_entry['status'] = "SCHEDULED"
            self.db.set(order.db_entry['uuid'], order.db_entry, as_json=True)

    def __str__(self):
        return str(self.db_entry)
|
|
||||||
|
|
||||||
class Product(object):
    """A sellable product definition with priced features, stored as a db entry."""

    def __init__(self,
                 config,
                 product_name,
                 product_class,
                 db_entry=None):
        """Create a product description for *product_class*.

        The python class/module names are recorded so process_orders()
        can re-import and instantiate the product later.
        """
        self.config = config
        self.db = DB(self.config, prefix="/orders")

        self.db_entry = {}
        self.db_entry["product_name"] = product_name
        self.db_entry["python_product_class"] = product_class.__qualname__
        self.db_entry["python_product_module"] = product_class.__module__
        self.db_entry["db_version"] = 1
        self.db_entry["log"] = []
        self.db_entry["features"] = {}

        # Existing product? Read in db_entry
        if db_entry:
            self.db_entry = db_entry

        # Ordered from longest to shortest period; index order is used to
        # compare period lengths in define_feature().
        self.valid_periods = [ "per_year", "per_month", "per_week",
                               "per_day", "per_hour",
                               "per_minute", "per_second" ]

    def define_feature(self,
                       name,
                       one_time_price,
                       recurring_price,
                       minimum_period=None,
                       recurring_period=None, **_ignored):
        pass

    def define_feature(self,
                       name,
                       one_time_price,
                       recurring_price,
                       recurring_period,
                       minimum_period):
        """Register a priced feature on this product.

        Raises UncloudException when a period is unknown or when the
        minimum period is longer than the recurring period.
        BUGFIX: the feature is only stored after full validation, so a
        failed call no longer leaves a partial feature dict behind; the
        minimum-period error message now names the minimum period (it
        previously reported the recurring period).
        """
        feature = {
            'one_time_price': one_time_price,
            'recurring_price': recurring_price,
        }

        if not recurring_period in self.valid_periods:
            raise UncloudException("Invalid recurring period: {}".format(recurring_period))
        feature['recurring_period'] = recurring_period

        if not minimum_period in self.valid_periods:
            raise UncloudException("Invalid minimum period: {}".format(minimum_period))

        recurring_index = self.valid_periods.index(recurring_period)
        minimum_index = self.valid_periods.index(minimum_period)

        # Lower index == longer period; the minimum billing period must
        # not be longer than the recurring one.
        if minimum_index < recurring_index:
            raise UncloudException("Minimum period for product '{}' feature '{}' must be shorter or equal than/as recurring period: {} > {}".format(self.db_entry['product_name'], name, minimum_period, recurring_period))
        feature['minimum_period'] = minimum_period

        self.db_entry['features'][name] = feature

    def validate_product(self):
        """Placeholder: per-feature validation is not implemented yet."""
        for feature in self.db_entry['features']:
            pass

    def place_order(self, owner):
        """ Schedule creating the product in etcd """
        order = ProductOrder(self.config, product_entry=self.db_entry)
        order.db_entry["owner"] = owner
        return order.order()

    def __str__(self):
        return json.dumps(self.db_entry)
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
#!/sbin/openrc-run
# OpenRC service: runs the ucloud API component via pipenv,
# backgrounded, from /root/ucloud.

name="$RC_SVCNAME"
pidfile="/var/run/${name}.pid"
command="$(which pipenv)"
command_args="run python ucloud.py api"
command_background="true"
directory="/root/ucloud"
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
#!/sbin/openrc-run
# OpenRC service: runs the ucloud host component via pipenv,
# backgrounded, from /root/ucloud. ${HOSTNAME} must be set in the
# service environment.

name="$RC_SVCNAME"
pidfile="/var/run/${name}.pid"
command="$(which pipenv)"
command_args="run python ucloud.py host ${HOSTNAME}"
command_background="true"
directory="/root/ucloud"
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
#!/sbin/openrc-run
# OpenRC service: runs the ucloud metadata component via pipenv,
# backgrounded, from /root/ucloud.

name="$RC_SVCNAME"
pidfile="/var/run/${name}.pid"
command="$(which pipenv)"
command_args="run python ucloud.py metadata"
command_background="true"
directory="/root/ucloud"
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
#!/sbin/openrc-run
# OpenRC service: runs the ucloud scheduler component via pipenv,
# backgrounded, from /root/ucloud.

name="$RC_SVCNAME"
pidfile="/var/run/${name}.pid"
command="$(which pipenv)"
command_args="run python ucloud.py scheduler"
command_background="true"
directory="/root/ucloud"
|
|
||||||
|
|
@ -1,26 +0,0 @@
|
||||||
# Manually create a VXLAN (id 100) bridged onto eth0.
id=100
rawdev=eth0

# create vxlan
ip -6 link add vxlan${id} type vxlan \
    id ${id} \
    dstport 4789 \
    group ff05::${id} \
    dev ${rawdev} \
    ttl 5

ip link set vxlan${id} up

# create bridge
# BUGFIX: the bridge device was never created — this line was a duplicated
# "ip link set vxlan${id} up", so the following br${id} commands failed.
ip link add br${id} type bridge
ip link set br${id} up

# Add vxlan into bridge
ip link set vxlan${id} master br${id}


# useradd -m uncloud
# [18:05] tablett.place10:~# id uncloud
# uid=1000(uncloud) gid=1000(uncloud) groups=1000(uncloud),34(kvm),36(qemu)
# apk add qemu-system-x86_64
# also needs group netdev
|
|
@ -1,25 +0,0 @@
|
||||||
#!/bin/sh
# Launch a throw-away KVM test VM.
# Usage: $0 vmid
# NOTE(review): vmid is used verbatim as the final MAC octet and in the tap
# interface name — presumably it must be a valid two-digit hex value; confirm.

if [ $# -ne 1 ]; then
    echo $0 vmid
    exit 1
fi

id=$1; shift

memory=512
macaddress=02:00:b9:cb:70:${id}
netname=net${id}-1

qemu-system-x86_64 \
    -name uncloud-${id} \
    -accel kvm \
    -m ${memory} \
    -smp 2,sockets=2,cores=1,threads=1 \
    -device virtio-net-pci,netdev=net0,mac=$macaddress \
    -netdev tap,id=net0,ifname=${netname},script=no,downscript=no \
    -vnc [::]:0

# To be changed:
# -vnc to unix path
# or -spice
|
|
@ -1,193 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
|
||||||
#
|
|
||||||
# This file is part of uncloud.
|
|
||||||
#
|
|
||||||
# uncloud is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation, either version 3 of the License, or
|
|
||||||
# (at your option) any later version.
|
|
||||||
#
|
|
||||||
# uncloud is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# This module is directly called from the hack module, and can be used as follow:
|
|
||||||
#
|
|
||||||
# Create a new VM with default CPU/Memory. The path of the image file is relative to $hackprefix.
|
|
||||||
# `uncloud hack --hackprefix /tmp/hackcloud --create-vm --image mysuperimage.qcow2`
|
|
||||||
#
|
|
||||||
# List running VMs (returns a list of UUIDs).
|
|
||||||
# `uncloud hack --hackprefix /tmp/hackcloud --list-vms
|
|
||||||
#
|
|
||||||
# Get VM status:
|
|
||||||
# `uncloud hack --hackprefix /tmp/hackcloud --get-vm-status --uuid my-vm-uuid`
|
|
||||||
#
|
|
||||||
# Stop a VM:
|
|
||||||
# `uncloud hack --hackprefix /tmp/hackcloud --destroy-vm --uuid my-vm-uuid`
|
|
||||||
# ``
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import uuid
|
|
||||||
import os
|
|
||||||
import logging
|
|
||||||
|
|
||||||
from uncloud.hack.db import DB
|
|
||||||
from uncloud.hack.mac import MAC
|
|
||||||
from uncloud.vmm import VMM
|
|
||||||
from uncloud.hack.product import Product
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
log.setLevel(logging.DEBUG)
|
|
||||||
|
|
||||||
class VM(object):
    """Hack-mode VM: builds a QEMU command line from CLI arguments and
    drives it through the VMM wrapper.

    NOTE(review): self.vmm is currently commented out in __init__ (it
    crashed with a TypeError); create(), stop(), status(), vnc_addr() and
    list() all reference self.vmm and will raise AttributeError until it
    is re-enabled.
    """

    def __init__(self, config, db_entry=None):
        self.config = config

        #TODO: Enable etcd lookup
        self.no_db = self.config.arguments['no_db']
        if not self.no_db:
            self.db = DB(self.config, prefix="/vm")

        if db_entry:
            self.db_entry = db_entry

        # General CLI arguments.
        self.hackprefix = self.config.arguments['hackprefix']
        self.uuid = self.config.arguments['uuid']
        self.memory = self.config.arguments['memory'] or '1024M'
        self.cores = self.config.arguments['cores'] or 1

        # Image path is interpreted relative to the hackprefix.
        if self.config.arguments['image']:
            self.image = os.path.join(self.hackprefix, self.config.arguments['image'])
        else:
            self.image = None

        if self.config.arguments['image_format']:
            self.image_format=self.config.arguments['image_format']
        else:
            self.image_format='qcow2'

        # External components.

        # This one is broken:
        # TypeError: expected str, bytes or os.PathLike object, not NoneType
        # Fix before re-enabling
        # self.vmm = VMM(vmm_backend=self.hackprefix)
        self.mac = MAC(self.config)

        # Hardcoded & generated values.
        self.owner = 'uncloud'
        self.accel = 'kvm'
        self.threads = 1
        self.ifup = os.path.join(self.hackprefix, "ifup.sh")
        self.ifdown = os.path.join(self.hackprefix, "ifdown.sh")
        self.ifname = "uc{}".format(self.mac.to_str_format())

        self.vm = {}

        # Register the billable product this VM class represents.
        self.product = Product(config, product_name="dualstack-vm",
                               product_class=self.__class__)
        self.product.define_feature(name="base",
                                    one_time_price=0,
                                    recurring_price=9,
                                    recurring_period="per_month",
                                    minimum_period="per_hour")


        self.features = []


    def get_qemu_args(self):
        """Return the QEMU command line for this VM as a list of argv tokens."""
        command = (
            "-name {owner}-{name}"
            " -machine pc,accel={accel}"
            " -drive file={image},format={image_format},if=virtio"
            " -device virtio-rng-pci"
            " -m {memory} -smp cores={cores},threads={threads}"
            " -netdev tap,id=netmain,script={ifup},downscript={ifdown},ifname={ifname}"
            " -device virtio-net-pci,netdev=netmain,id=net0,mac={mac}"
        ).format(
            owner=self.owner, name=self.uuid,
            accel=self.accel,
            image=self.image, image_format=self.image_format,
            memory=self.memory, cores=self.cores, threads=self.threads,
            ifup=self.ifup, ifdown=self.ifdown, ifname=self.ifname,
            mac=self.mac
        )

        return command.split(" ")

    def create_product(self):
        """Find a VM host and schedule on it"""
        pass

    def create(self):
        """Create and start a brand new VM (fresh UUID and MAC)."""
        # New VM: new UUID, new MAC.
        self.uuid = str(uuid.uuid4())
        self.mac=MAC(self.config)
        self.mac.create()

        qemu_args = self.get_qemu_args()
        log.debug("QEMU args passed to VMM: {}".format(qemu_args))
        # NOTE(review): self.vmm is never assigned (see __init__) — this
        # raises AttributeError; confirm before use.
        self.vmm.start(
            uuid=self.uuid,
            migration=False,
            *qemu_args
        )


        # NOTE(review): second mac.create() call — presumably a leftover;
        # verify it is intentional.
        self.mac.create()
        self.vm['mac'] = self.mac
        self.vm['ifname'] = "uc{}".format(self.mac.__repr__())

        # FIXME: TODO: turn this into a string and THEN
        # .split() it later -- easier for using .format()
        #self.vm['commandline'] = [ "{}".format(self.sudo),
        self.vm['commandline'] = "{sudo}{qemu} -name uncloud-{uuid} -machine pc,accel={accel} -m {memory} -smp {cores} -uuid {uuid} -drive file={os_image},media=cdrom -netdev tap,id=netmain,script={ifup},downscript={ifdown},ifname={ifname} -device virtio-net-pci,netdev=netmain,id=net0,mac={mac}"
        # self.vm['commandline'] = [ "{}".format(self.sudo),
        #                            "{}".format(self.qemu),
        #                            "-name", "uncloud-{}".format(self.vm['uuid']),
        #                            "-machine", "pc,accel={}".format(self.accel),
        #                            "-m", "{}".format(self.vm['memory']),
        #                            "-smp", "{}".format(self.vm['cores']),
        #                            "-uuid", "{}".format(self.vm['uuid']),
        #                            "-drive", "file={},media=cdrom".format(self.vm['os_image']),
        #                            "-netdev", "tap,id=netmain,script={},downscript={},ifname={}".format(self.ifup, self.ifdown, self.vm['ifname']),
        #                            "-device", "virtio-net-pci,netdev=netmain,id=net0,mac={}".format(self.vm['mac'])
        # ]

    def _execute_cmd(self, cmd_string, **kwargs):
        """Format *cmd_string* with self.vm (plus kwargs) and run it.

        NOTE(review): cmd.split() breaks arguments containing whitespace
        and the subprocess return code is ignored — confirm intended.
        """
        cmd = cmd_string.format(**self.vm, **kwargs)
        log.info("Executing: {}".format(cmd))
        subprocess.run(cmd.split())

    def stop(self):
        """Stop the VM identified by --uuid; exits the process when missing."""
        if not self.uuid:
            print("Please specific an UUID with the --uuid flag.")
            exit(1)

        self.vmm.stop(self.uuid)

    def status(self):
        """Print the VMM status of the VM identified by --uuid."""
        if not self.uuid:
            print("Please specific an UUID with the --uuid flag.")
            exit(1)

        print(self.vmm.get_status(self.uuid))

    def vnc_addr(self):
        """Print the VNC address of the VM identified by --uuid."""
        if not self.uuid:
            print("Please specific an UUID with the --uuid flag.")
            exit(1)

        print(self.vmm.get_vnc(self.uuid))

    def list(self):
        """Print the UUIDs of all VMs the VMM can discover."""
        print(self.vmm.discover())
|
|
||||||
|
|
@ -1,3 +0,0 @@
|
||||||
import logging
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
@ -1,123 +0,0 @@
|
||||||
import argparse
|
|
||||||
import multiprocessing as mp
|
|
||||||
import time
|
|
||||||
|
|
||||||
from uuid import uuid4
|
|
||||||
|
|
||||||
from uncloud.common.request import RequestEntry, RequestType
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
from uncloud.common.vm import VMStatus
|
|
||||||
from uncloud.vmm import VMM
|
|
||||||
from os.path import join as join_path
|
|
||||||
|
|
||||||
from . import virtualmachine, logger
|
|
||||||
|
|
||||||
# CLI sub-parser for the "host" daemon; --hostname selects which host
# entry in etcd this process represents.
arg_parser = argparse.ArgumentParser('host', add_help=False)
arg_parser.add_argument('--hostname', required=True)
|
|
||||||
|
|
||||||
|
|
||||||
def update_heartbeat(hostname):
    """Update Last HeartBeat Time for :param hostname: in etcd.

    Runs forever (every 10 seconds); intended to be the target of a
    separate multiprocessing.Process.

    Raises RuntimeError when the hostname is not present in the host
    pool (BUGFIX: previously this crashed later with an opaque
    AttributeError on None inside the daemon process).
    """
    host_pool = shared.host_pool
    this_host = next(
        filter(lambda h: h.hostname == hostname, host_pool.hosts), None
    )
    if this_host is None:
        raise RuntimeError('Host {} not found in host pool'.format(hostname))

    while True:
        this_host.update_heartbeat()
        host_pool.put(this_host)
        time.sleep(10)
|
|
||||||
|
|
||||||
|
|
||||||
def maintenance(host):
    """Reconcile etcd with reality: mark every VM the local VMM reports
    as running as running-on-*host* in the VM pool.

    host: the etcd key of this host (used as the VM's hostname value).
    """
    vmm = VMM()
    running_vms = vmm.discover()
    for vm_uuid in running_vms:
        # Both checks together guard against VMs in transient states.
        if vmm.is_running(vm_uuid) and vmm.get_status(vm_uuid) == 'running':
            logger.debug('VM {} is running on {}'.format(vm_uuid, host))
            vm = shared.vm_pool.get(
                join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
            )
            vm.status = VMStatus.running
            vm.vnc_socket = vmm.get_vnc(vm_uuid)
            vm.hostname = host
            shared.vm_pool.put(vm)
|
|
||||||
|
|
||||||
|
|
||||||
def main(arguments):
    """uncloud-host daemon entry point.

    Registers this host in etcd when missing, starts the heartbeat
    process, then serves VM requests addressed to this host forever.
    """
    hostname = arguments['hostname']
    host_pool = shared.host_pool
    host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)

    # Does not yet exist, create it
    if not host:
        host_key = join_path(
            shared.settings['etcd']['host_prefix'], uuid4().hex
        )
        host_entry = {
            'specs': '',
            'hostname': hostname,
            'status': 'DEAD',
            'last_heartbeat': '',
        }
        shared.etcd_client.put(
            host_key, host_entry, value_in_json=True
        )

    # update, get ourselves now for sure
    host = next(filter(lambda h: h.hostname == hostname, host_pool.hosts), None)

    try:
        heartbeat_updating_process = mp.Process(target=update_heartbeat, args=(hostname,))
        heartbeat_updating_process.start()
    except Exception as e:
        raise Exception('uncloud-host heartbeat updating mechanism is not working') from e

    # The below while True is necessary for gracefully handling leadership transfer and temporary
    # unavailability in etcd. Why does it work? It works because get_prefix/watch_prefix return
    # iter([]), i.e. an iterator over the empty list, on exception (occurring due to the above
    # mentioned reasons), which ends the loop immediately. So, having it inside an infinite loop
    # we try again and again to get the prefix until either success or daemon death comes.
    while True:
        for events_iterator in [
            shared.etcd_client.get_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
                                          raise_exception=False),
            shared.etcd_client.watch_prefix(shared.settings['etcd']['request_prefix'], value_in_json=True,
                                            raise_exception=False)
        ]:
            for request_event in events_iterator:
                request_event = RequestEntry(request_event)

                # Opportunistic reconciliation on every event.
                maintenance(host.key)

                # Only handle requests addressed to this host.
                if request_event.hostname == host.key:
                    logger.debug('VM Request: %s on Host %s', request_event, host.hostname)

                    # Consume the request before acting on it.
                    shared.request_pool.client.client.delete(request_event.key)
                    vm_entry = shared.etcd_client.get(
                        join_path(shared.settings['etcd']['vm_prefix'], request_event.uuid)
                    )

                    logger.debug('VM hostname: {}'.format(vm_entry.value))

                    vm = virtualmachine.VM(vm_entry)
                    if request_event.type == RequestType.StartVM:
                        vm.start()

                    elif request_event.type == RequestType.StopVM:
                        vm.stop()

                    elif request_event.type == RequestType.DeleteVM:
                        vm.delete()

                    elif request_event.type == RequestType.InitVMMigration:
                        vm.start(destination_host_key=host.key)

                    elif request_event.type == RequestType.TransferVM:
                        destination_host = host_pool.get(request_event.destination_host_key)
                        if destination_host:
                            vm.migrate(
                                destination_host=destination_host.hostname,
                                destination_sock_path=request_event.destination_sock_path,
                            )
                        else:
                            logger.error('Host %s not found!', request_event.destination_host_key)
|
|
||||||
|
|
@ -1,303 +0,0 @@
|
||||||
# QEMU Manual
|
|
||||||
# https://qemu.weilnetz.de/doc/qemu-doc.html
|
|
||||||
|
|
||||||
# For QEMU Monitor Protocol Commands Information, See
|
|
||||||
# https://qemu.weilnetz.de/doc/qemu-doc.html#pcsys_005fmonitor
|
|
||||||
|
|
||||||
import os
|
|
||||||
import subprocess as sp
|
|
||||||
import ipaddress
|
|
||||||
|
|
||||||
from string import Template
|
|
||||||
from os.path import join as join_path
|
|
||||||
|
|
||||||
from uncloud.common.request import RequestEntry, RequestType
|
|
||||||
from uncloud.common.vm import VMStatus, declare_stopped
|
|
||||||
from uncloud.common.network import create_dev, delete_network_interface
|
|
||||||
from uncloud.common.schemas import VMSchema, NetworkSchema
|
|
||||||
from uncloud.host import logger
|
|
||||||
from uncloud.common.shared import shared
|
|
||||||
from uncloud.vmm import VMM
|
|
||||||
|
|
||||||
from marshmallow import ValidationError
|
|
||||||
|
|
||||||
|
|
||||||
class VM:
|
|
||||||
def __init__(self, vm_entry):
    """Load a VM from its etcd entry.

    vm_entry: etcd entry whose value holds the serialized VM schema.

    On a validation failure self.vm is set to None and self.uuid /
    self.host_key are NOT set — callers must check self.vm first.
    """
    self.schema = VMSchema()
    self.vmm = VMM()
    self.key = vm_entry.key
    try:
        self.vm = self.schema.loads(vm_entry.value)
    except ValidationError:
        # BUGFIX: the message had no %s placeholder, so passing
        # vm_entry.value as a positional arg raised a logging format
        # error instead of logging the bad entry.
        logger.exception(
            "Couldn't validate VM Entry %s", vm_entry.value
        )
        self.vm = None
    else:
        self.uuid = vm_entry.key.split("/")[-1]
        self.host_key = self.vm["hostname"]
        logger.debug('VM Hostname {}'.format(self.host_key))
|
|
||||||
|
|
||||||
def get_qemu_args(self):
    """Return the QEMU command line for this VM as a list of argv tokens."""
    specs = self.vm["specs"]
    template = " ".join([
        "-drive file={file},format=raw,if=virtio",
        "-device virtio-rng-pci",
        "-m {memory} -smp cores={cores},threads={threads}",
        "-name {owner}_{name}",
    ])
    rendered = template.format(
        file=shared.storage_handler.qemu_path_string(self.uuid),
        memory=int(specs["ram"].to_MB()),
        cores=specs["cpu"],
        threads=1,
        owner=self.vm["owner"],
        name=self.vm["name"],
    )
    return rendered.split(" ")
|
|
||||||
|
|
||||||
def start(self, destination_host_key=None):
|
|
||||||
migration = False
|
|
||||||
if destination_host_key:
|
|
||||||
migration = True
|
|
||||||
|
|
||||||
self.create()
|
|
||||||
try:
|
|
||||||
network_args = self.create_network_dev()
|
|
||||||
except Exception as err:
|
|
||||||
declare_stopped(self.vm)
|
|
||||||
self.vm["log"].append("Cannot Setup Network Properly")
|
|
||||||
logger.error("Cannot Setup Network Properly for vm %s", self.uuid, exc_info=err)
|
|
||||||
else:
|
|
||||||
self.vmm.start(
|
|
||||||
uuid=self.uuid,
|
|
||||||
migration=migration,
|
|
||||||
*self.get_qemu_args(),
|
|
||||||
*network_args
|
|
||||||
)
|
|
||||||
|
|
||||||
status = self.vmm.get_status(self.uuid)
|
|
||||||
logger.debug('VM {} status is {}'.format(self.uuid, status))
|
|
||||||
if status == "running":
|
|
||||||
self.vm["status"] = VMStatus.running
|
|
||||||
self.vm["vnc_socket"] = self.vmm.get_vnc(self.uuid)
|
|
||||||
elif status == "inmigrate":
|
|
||||||
r = RequestEntry.from_scratch(
|
|
||||||
type=RequestType.TransferVM, # Transfer VM
|
|
||||||
hostname=self.host_key, # Which VM should get this request. It is source host
|
|
||||||
uuid=self.uuid, # uuid of VM
|
|
||||||
destination_sock_path=join_path(
|
|
||||||
self.vmm.socket_dir, self.uuid
|
|
||||||
),
|
|
||||||
destination_host_key=destination_host_key, # Where source host transfer VM
|
|
||||||
request_prefix=shared.settings["etcd"]["request_prefix"],
|
|
||||||
)
|
|
||||||
shared.request_pool.put(r)
|
|
||||||
else:
|
|
||||||
self.stop()
|
|
||||||
declare_stopped(self.vm)
|
|
||||||
logger.debug('VM {} has hostname {}'.format(self.uuid, self.vm['hostname']))
|
|
||||||
self.sync()
|
|
||||||
|
|
||||||
def stop(self):
|
|
||||||
self.vmm.stop(self.uuid)
|
|
||||||
self.delete_network_dev()
|
|
||||||
declare_stopped(self.vm)
|
|
||||||
self.sync()
|
|
||||||
|
|
||||||
def migrate(self, destination_host, destination_sock_path):
|
|
||||||
self.vmm.transfer(
|
|
||||||
src_uuid=self.uuid,
|
|
||||||
destination_sock_path=destination_sock_path,
|
|
||||||
host=destination_host,
|
|
||||||
)
|
|
||||||
|
|
||||||
def create_network_dev(self):
|
|
||||||
command = ""
|
|
||||||
for network_mac_and_tap in self.vm["network"]:
|
|
||||||
network_name, mac, tap = network_mac_and_tap
|
|
||||||
|
|
||||||
_key = os.path.join(
|
|
||||||
shared.settings["etcd"]["network_prefix"],
|
|
||||||
self.vm["owner"],
|
|
||||||
network_name,
|
|
||||||
)
|
|
||||||
network = shared.etcd_client.get(_key, value_in_json=True)
|
|
||||||
network_schema = NetworkSchema()
|
|
||||||
try:
|
|
||||||
network = network_schema.load(network.value)
|
|
||||||
except ValidationError:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if network["type"] == "vxlan":
|
|
||||||
tap = create_vxlan_br_tap(
|
|
||||||
_id=network["id"],
|
|
||||||
_dev=shared.settings["network"]["vxlan_phy_dev"],
|
|
||||||
tap_id=tap,
|
|
||||||
ip=network["ipv6"],
|
|
||||||
)
|
|
||||||
|
|
||||||
all_networks = shared.etcd_client.get_prefix(
|
|
||||||
shared.settings["etcd"]["network_prefix"],
|
|
||||||
value_in_json=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
if ipaddress.ip_network(network["ipv6"]).is_global:
|
|
||||||
update_radvd_conf(all_networks)
|
|
||||||
|
|
||||||
command += (
|
|
||||||
"-netdev tap,id=vmnet{net_id},ifname={tap},script=no,downscript=no"
|
|
||||||
" -device virtio-net-pci,netdev=vmnet{net_id},mac={mac}".format(
|
|
||||||
tap=tap, net_id=network["id"], mac=mac
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
if command:
|
|
||||||
command = command.split(' ')
|
|
||||||
|
|
||||||
return command
|
|
||||||
|
|
||||||
def delete_network_dev(self):
|
|
||||||
try:
|
|
||||||
for network in self.vm["network"]:
|
|
||||||
network_name = network[0]
|
|
||||||
_ = network[1] # tap_mac
|
|
||||||
tap_id = network[2]
|
|
||||||
|
|
||||||
delete_network_interface("tap{}".format(tap_id))
|
|
||||||
|
|
||||||
owners_vms = shared.vm_pool.by_owner(self.vm["owner"])
|
|
||||||
owners_running_vms = shared.vm_pool.by_status(
|
|
||||||
VMStatus.running, _vms=owners_vms
|
|
||||||
)
|
|
||||||
|
|
||||||
networks = map(
|
|
||||||
lambda n: n[0],
|
|
||||||
map(lambda vm: vm.network, owners_running_vms),
|
|
||||||
)
|
|
||||||
networks_in_use_by_user_vms = [vm[0] for vm in networks]
|
|
||||||
if network_name not in networks_in_use_by_user_vms:
|
|
||||||
network_entry = resolve_network(
|
|
||||||
network[0], self.vm["owner"]
|
|
||||||
)
|
|
||||||
if network_entry:
|
|
||||||
network_type = network_entry.value["type"]
|
|
||||||
network_id = network_entry.value["id"]
|
|
||||||
if network_type == "vxlan":
|
|
||||||
delete_network_interface(
|
|
||||||
"br{}".format(network_id)
|
|
||||||
)
|
|
||||||
delete_network_interface(
|
|
||||||
"vxlan{}".format(network_id)
|
|
||||||
)
|
|
||||||
except Exception:
|
|
||||||
logger.exception("Exception in network interface deletion")
|
|
||||||
|
|
||||||
def create(self):
|
|
||||||
if shared.storage_handler.is_vm_image_exists(self.uuid):
|
|
||||||
# File Already exists. No Problem Continue
|
|
||||||
logger.debug("Image for vm %s exists", self.uuid)
|
|
||||||
else:
|
|
||||||
if shared.storage_handler.make_vm_image(
|
|
||||||
src=self.vm["image_uuid"], dest=self.uuid
|
|
||||||
):
|
|
||||||
if not shared.storage_handler.resize_vm_image(
|
|
||||||
path=self.uuid,
|
|
||||||
size=int(self.vm["specs"]["os-ssd"].to_MB()),
|
|
||||||
):
|
|
||||||
self.vm["status"] = VMStatus.error
|
|
||||||
else:
|
|
||||||
logger.info("New VM Created")
|
|
||||||
|
|
||||||
def sync(self):
|
|
||||||
shared.etcd_client.put(
|
|
||||||
self.key, self.schema.dump(self.vm), value_in_json=True
|
|
||||||
)
|
|
||||||
|
|
||||||
def delete(self):
|
|
||||||
self.stop()
|
|
||||||
|
|
||||||
if shared.storage_handler.is_vm_image_exists(self.uuid):
|
|
||||||
r_status = shared.storage_handler.delete_vm_image(self.uuid)
|
|
||||||
if r_status:
|
|
||||||
shared.etcd_client.client.delete(self.key)
|
|
||||||
else:
|
|
||||||
shared.etcd_client.client.delete(self.key)
|
|
||||||
|
|
||||||
|
|
||||||
def resolve_network(network_name, network_owner):
    """Look up a network entry in etcd by owner and name.

    Returns the raw etcd entry (value decoded from JSON), or whatever the
    etcd client returns when the key does not exist.
    """
    network_key = join_path(
        shared.settings["etcd"]["network_prefix"],
        network_owner,
        network_name,
    )
    return shared.etcd_client.get(network_key, value_in_json=True)
|
|
||||||
|
|
||||||
def create_vxlan_br_tap(_id, _dev, tap_id, ip=None):
    """Create the vxlan -> bridge -> tap device chain for a network.

    Each step runs the corresponding shell script from the packaged
    ``network`` directory. Returns the tap device name on full success,
    otherwise ``None`` (any failed intermediate step short-circuits).
    """
    script_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), "network"
    )

    vxlan = create_dev(
        script=os.path.join(script_dir, "create-vxlan.sh"),
        _id=_id,
        dev=_dev,
    )
    if not vxlan:
        return None

    bridge = create_dev(
        script=os.path.join(script_dir, "create-bridge.sh"),
        _id=_id,
        dev=vxlan,
        ip=ip,
    )
    if not bridge:
        return None

    # "or None" preserves the original contract of returning None (not a
    # falsy device value) when tap creation fails.
    return create_dev(
        script=os.path.join(script_dir, "create-tap.sh"),
        _id=str(tap_id),
        dev=bridge,
    ) or None
|
|
||||||
|
|
||||||
def update_radvd_conf(all_networks):
    """Regenerate /etc/radvd.conf for all globally-routed IPv6 networks
    and restart the radvd daemon.

    *all_networks* is an iterable of etcd entries whose values may carry
    ``ipv6`` and ``id`` fields. Only networks with a global IPv6 prefix are
    advertised. Raises a subprocess error subclass when radvd cannot be
    restarted via either systemctl or the service wrapper.
    """
    network_script_base = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), "network"
    )

    # Map global IPv6 prefix -> network id (used to name the bridge).
    networks = {
        net.value["ipv6"]: net.value["id"]
        for net in all_networks
        if net.value.get("ipv6")
        and ipaddress.ip_network(net.value.get("ipv6")).is_global
    }

    # Bug fix: the template file was previously opened without ever being
    # closed; use a context manager so the handle is released promptly.
    with open(
        os.path.join(network_script_base, "radvd-template.conf"), "r"
    ) as template_file:
        radvd_template = Template(template_file.read())

    content = [
        radvd_template.safe_substitute(
            bridge="br{}".format(networks[net]), prefix=net
        )
        for net in networks
        if networks.get(net)
    ]
    with open("/etc/radvd.conf", "w") as radvd_conf:
        radvd_conf.writelines(content)

    try:
        sp.check_output(["systemctl", "restart", "radvd"])
    except sp.CalledProcessError:
        # Fall back to the SysV-style service wrapper (e.g. Alpine/OpenRC).
        try:
            sp.check_output(["service", "radvd", "restart"])
        except sp.CalledProcessError as err:
            raise err.__class__(
                "Cannot start/restart radvd service", err.cmd
            ) from err
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue