Compare commits
583 Commits
Author | SHA1 | Date |
---|---|---|
amalelshihaby | 5f4b10cb82 | |
amalelshihaby | 02ad7a9441 | |
amalelshihaby | 8ca5b2d104 | |
sangheekim | eb86c7e189 | |
amalelshihaby | 2e05ebbf67 | |
amalelshihaby | 1aeb757e22 | |
amalelshihaby | 1c3d3efb3a | |
amalelshihaby | 7986b825a7 | |
amalelshihaby | 5bb0c4cdda | |
amalelshihaby | 030a0cd501 | |
amalelshihaby | 5564400ef8 | |
amalelshihaby | 94dac8110e | |
amalelshihaby | 4a0987766d | |
amalelshihaby | 20ce755303 | |
amalelshihaby | c4f7913cb1 | |
nico14571 | 8287e73f6b | |
amalelshihaby | b7aa1c6971 | |
Nico Schottelius | e205d8d07c | |
Nico Schottelius | a463bcf7bd | |
Nico Schottelius | d872357dd1 | |
Nico Schottelius | 485f08e25c | |
Nico Schottelius | 745abc48ef | |
Nico Schottelius | 49f52fd41d | |
Nico Schottelius | c8ce7dbb40 | |
Nico Schottelius | a920887100 | |
Nico Schottelius | 6b9b15e663 | |
Nico Schottelius | 48ce21f833 | |
Nico Schottelius | 6c15d2086e | |
Nico Schottelius | 1b06d8ee03 | |
Nico Schottelius | e225bf1cc0 | |
Nico Schottelius | e2c4a19049 | |
Nico Schottelius | 74749bf07c | |
Nico Schottelius | 93e5d39c7b | |
Nico Schottelius | 18d4c99571 | |
Nico Schottelius | e51edab2f5 | |
Nico Schottelius | f7c68b5ca5 | |
Nico Schottelius | 6efedcb381 | |
Nico Schottelius | df4c0c3060 | |
Nico Schottelius | 8dd4b712fb | |
Nico Schottelius | 50a395c8ec | |
Nico Schottelius | 663d72269a | |
Nico Schottelius | a0fbe2d6ed | |
Nico Schottelius | 858aabb5ba | |
Nico Schottelius | ece2bca831 | |
Nico Schottelius | cdab685269 | |
Nico Schottelius | 689375a2fe | |
Nico Schottelius | 8f83679c48 | |
Nico Schottelius | 5e870f04b1 | |
Nico Schottelius | 63191c0a88 | |
Nico Schottelius | 03c0b34446 | |
Nico Schottelius | 1922a0d92d | |
Nico Schottelius | 2e6c72c093 | |
Nico Schottelius | b3626369a2 | |
Nico Schottelius | 179baee96d | |
Nico Schottelius | 054886fd9c | |
Nico Schottelius | e2b36c8bca | |
Nico Schottelius | 372fe800cd | |
Nico Schottelius | 16f3adef93 | |
Nico Schottelius | 2d62388eb1 | |
Nico Schottelius | aec79cba74 | |
Nico Schottelius | cd19c47fdb | |
Nico Schottelius | cf948b03a8 | |
Nico Schottelius | 5716cae900 | |
Nico Schottelius | 10d5a72c5a | |
Nico Schottelius | 074cffcbd7 | |
Nico Schottelius | 7f32d05cd4 | |
Nico Schottelius | 0fd5ac18cd | |
Nico Schottelius | ad0c2f1e9d | |
Nico Schottelius | 0b1c2cc168 | |
ahmadbilalkhalid | 4845ab1e39 | |
Nico Schottelius | ecc9e6f734 | |
Nico Schottelius | 20c7c86703 | |
Nico Schottelius | 8959bc6ad5 | |
Nico Schottelius | 0cd8a3a787 | |
Nico Schottelius | bbc7625550 | |
Nico Schottelius | fe4e200dc0 | |
Nico Schottelius | e03cdf214a | |
Nico Schottelius | 50fd9e1f37 | |
Nico Schottelius | 2e74661702 | |
Nico Schottelius | c26ff253de | |
Nico Schottelius | 9623a77907 | |
Nico Schottelius | c435639241 | |
Nico Schottelius | 992c7c551e | |
Nico Schottelius | 58883765d7 | |
Nico Schottelius | 8d8c4d660c | |
Nico Schottelius | c32499199a | |
Nico Schottelius | c6bacab35a | |
Nico Schottelius | 1aead50170 | |
Nico Schottelius | d8a7964fed | |
Nico Schottelius | 077c665c53 | |
Nico Schottelius | f7274fe967 | |
Nico Schottelius | 1c7d81762d | |
Nico Schottelius | 18f9a3848a | |
Nico Schottelius | 9211894b23 | |
Nico Schottelius | b8b15704a3 | |
Nico Schottelius | ab412cb877 | |
Nico Schottelius | 7b83efe995 | |
Nico Schottelius | 4d5ca58b2a | |
Nico Schottelius | f693dd3d18 | |
Nico Schottelius | 5ceaaf7c90 | |
Nico Schottelius | 2b29e300dd | |
Nico Schottelius | 8df1d8dc7c | |
Nico Schottelius | ef02cb61fd | |
Nico Schottelius | 70c450afc8 | |
Nico Schottelius | 0dd1093812 | |
Nico Schottelius | 6a928a2b2a | |
Nico Schottelius | 89519e48a9 | |
Nico Schottelius | e169b8c1d1 | |
Nico Schottelius | d7c0c40926 | |
Nico Schottelius | fd39526350 | |
Nico Schottelius | 78d1de9031 | |
Nico Schottelius | db1a69561b | |
Nico Schottelius | 9bf0a99f6a | |
Nico Schottelius | c9be8cc50b | |
Nico Schottelius | 8da6a1e19c | |
Nico Schottelius | ff8fdb76b1 | |
Nico Schottelius | 9b00ef11fb | |
Nico Schottelius | d2bd6ba200 | |
Nico Schottelius | 165dacb7bf | |
Nico Schottelius | ee79877a27 | |
Nico Schottelius | 2ce667e8c7 | |
Nico Schottelius | e563780142 | |
Nico Schottelius | 66233a1ce5 | |
Nico Schottelius | 9d5d8657cb | |
Nico Schottelius | a091079677 | |
Nico Schottelius | c9a941e290 | |
Nico Schottelius | f7b14bf507 | |
Nico Schottelius | ed40b21d16 | |
Nico Schottelius | 880e4d046b | |
Nico Schottelius | cbd5a08ae7 | |
Nico Schottelius | 64780bfc6c | |
Nico Schottelius | 9c1b4ab275 | |
Nico Schottelius | 2771a7518a | |
Nico Schottelius | 9b3493a661 | |
Nico Schottelius | 932ac06cea | |
Nico Schottelius | 05a897db70 | |
Nico Schottelius | 011096f152 | |
Nico Schottelius | 55ba61e36b | |
Nico Schottelius | 1265e23750 | |
Nico Schottelius | bc0c77a393 | |
Nico Schottelius | bdba3bffe2 | |
Nico Schottelius | 3745a0e2b1 | |
Nico Schottelius | 7ce28b0b60 | |
Nico Schottelius | 662e706eab | |
Nico Schottelius | 11de455d23 | |
Nico Schottelius | 126d9da764 | |
Nico Schottelius | 8a17ee6de5 | |
Nico Schottelius | 721472b416 | |
Nico Schottelius | 424ceafbb8 | |
Nico Schottelius | 1e68539ed8 | |
Nico Schottelius | 3ef19610f3 | |
Nico Schottelius | 8decfe1b16 | |
Nico Schottelius | 662845128f | |
Nico Schottelius | 95011c2058 | |
Nico Schottelius | a3f3ca8cf9 | |
Nico Schottelius | 5d1eaaf0af | |
Nico Schottelius | bcd141730d | |
Nico Schottelius | 15535433e8 | |
Nico Schottelius | 18b862c2e1 | |
Nico Schottelius | b8652c921e | |
Nico Schottelius | 8bbcc5df5f | |
Nico Schottelius | 0202f80a37 | |
Nico Schottelius | caedf874e4 | |
Nico Schottelius | f17f9060b0 | |
Nico Schottelius | 04fac71a85 | |
Nico Schottelius | ef76304bae | |
Nico Schottelius | ec447e0dc4 | |
Nico Schottelius | ca2065a94d | |
Nico Schottelius | dc7a465a8c | |
fnux | 65440ab2ef | |
fnux | d794b24c86 | |
fnux | df059fb00d | |
fnux | 67af7b5465 | |
fnux | beb5bd7ee4 | |
fnux | 9574d69f4c | |
fnux | 74e2168529 | |
fnux | 444d6ded28 | |
fnux | cbba1f4169 | |
fnux | d47c94ba84 | |
fnux | 89e853b490 | |
Nico Schottelius | 1b97fc8fc7 | |
Nico Schottelius | 41a2f18453 | |
fnux | a086d46136 | |
fnux | ae2bad5754 | |
fnux | 3874165189 | |
fnux | 56d98cbb55 | |
fnux | 718abab9d2 | |
fnux | 268e08c4db | |
fnux | b8ac99acb6 | |
fnux | 221d98af4b | |
fnux | ebd4e6fa1b | |
fnux | b512d42058 | |
fnux | 1245c191c0 | |
fnux | 95d43f002f | |
fnux | 0560063326 | |
fnux | db3c29d17e | |
fnux | 892b2b6f13 | |
Nico Schottelius | aa8ade4730 | |
Nico Schottelius | 594f1a9b69 | |
Nico Schottelius | e3b28354fe | |
Nico Schottelius | 99a18232aa | |
Nico Schottelius | c835c874d5 | |
Nico Schottelius | 028f1ebe6e | |
Nico Schottelius | 9ef5309b91 | |
Nico Schottelius | 4097c2ce13 | |
Nico Schottelius | 736fe27493 | |
Nico Schottelius | 7d708cfbb6 | |
Nico Schottelius | 927fb20671 | |
Nico Schottelius | eea654a9f8 | |
Nico Schottelius | 2cda6441f4 | |
Nico Schottelius | 62d9ccbbef | |
Nico Schottelius | 1cf20a2cb6 | |
fnux | 94932edebe | |
fnux | a15952862a | |
fnux | cec4263621 | |
fnux | f61b91dab2 | |
fnux | b3afad5d5d | |
fnux | 3a03717b12 | |
fnux | db9ff5d18b | |
fnux | a49fe6ff51 | |
fnux | dd0c1cba94 | |
fnux | c0e12884e1 | |
fnux | 9bbe3b3b56 | |
fnux | e6eba7542b | |
fnux | 0522927c50 | |
fnux | 3fa1d5753e | |
fnux | c6ca94800e | |
fnux | ad187c02da | |
fnux | 7afb3f8793 | |
fnux | 86775af4c8 | |
fnux | 83d2cd465d | |
fnux | b6c976b722 | |
fnux | d1e993140c | |
fnux | c57780fb4d | |
fnux | 83a0ca0e4e | |
fnux | 5d5bf486b5 | |
fnux | 839bd5f8a9 | |
fnux | aa0702faba | |
fnux | 9a57153c4e | |
fnux | f5897ed4b1 | |
fnux | 14f59430bb | |
fnux | 05f8f1f6c0 | |
fnux | 1a58508f8b | |
fnux | e67bd03997 | |
fnux | a4cc4304f9 | |
fnux | d3b7470294 | |
fnux | a7e9f3c09d | |
Nico Schottelius | ff133e81b7 | |
Nico Schottelius | 85b4d70592 | |
Nico Schottelius | b55254b9b1 | |
Nico Schottelius | bc033a9087 | |
Nico Schottelius | 05f38d157e | |
Nico Schottelius | bab59b1879 | |
Nico Schottelius | 50b8b7a5f6 | |
nico14571 | f1bba63f6f | |
fnux | 276c7e9901 | |
fnux | 3a37343a73 | |
fnux | 3588ae88f9 | |
Nico Schottelius | 9431f11284 | |
Nico Schottelius | e64c2b8ddb | |
Nico Schottelius | d9473e8f33 | |
fnux | cb3346303b | |
Nico Schottelius | 7d892daff9 | |
Nico Schottelius | 08b9886ce3 | |
nico14571 | d6bdf5c991 | |
fnux | cc7056c87c | |
fnux | a8b81b074b | |
fnux | 89c705f7d2 | |
fnux | f2a797874a | |
Nico Schottelius | d3f2a3e071 | |
Nico Schottelius | 3d2f8574d3 | |
Nico Schottelius | 1838eaf7dd | |
Nico Schottelius | 8986835c7e | |
Nico Schottelius | 938f0a3390 | |
Nico Schottelius | 913e992a48 | |
Nico Schottelius | 096f7e05c0 | |
Nico Schottelius | 06c4a5643c | |
Nico Schottelius | 198aaea48a | |
Nico Schottelius | d537e9e2f0 | |
Nico Schottelius | 5d084a5716 | |
Nico Schottelius | 8fb3ad7fe8 | |
Nico Schottelius | c44faa7a73 | |
Nico Schottelius | fa0ca2d9c1 | |
Nico Schottelius | 7a6c8739f6 | |
Nico Schottelius | 833d570472 | |
Nico Schottelius | 3cf3439f1c | |
Nico Schottelius | 23203ff418 | |
Nico Schottelius | 9961ca0446 | |
Nico Schottelius | 105142f76a | |
Nico Schottelius | 08fe3e689e | |
Nico Schottelius | 10c5257f90 | |
Nico Schottelius | a32f7522b5 | |
fnux | 923102af24 | |
fnux | f4ebbb79ce | |
fnux | 122bc586b4 | |
fnux | ae6548e168 | |
fnux | 0e4068cea8 | |
fnux | 623d3ae5c4 | |
fnux | fe0e6d98bf | |
fnux | 41e35c1af0 | |
fnux | 948391ab2e | |
fnux | d089d06264 | |
fnux | c086dbd357 | |
fnux | 545727afe7 | |
fnux | 7bbc729b87 | |
fnux | b10cae472e | |
fnux | 952cf8fd13 | |
fnux | 7e278228bd | |
fnux | 31507c0f1a | |
fnux | 7e58a8ace2 | |
fnux | a4fa0def4b | |
fnux | 80fe28588e | |
fnux | 4e658d2d77 | |
fnux | 5161a74354 | |
fnux | 0e62ccff3b | |
fnux | bf83b750de | |
fnux | 6c7f0e98b3 | |
Nico Schottelius | 10f09c7115 | |
fnux | b07df26eb2 | |
fnux | b958cc77ea | |
fnux | e9b6a6f277 | |
fnux | 546667d117 | |
fnux | b88dfa4bfe | |
fnux | d6ee806467 | |
fnux | a41184d83d | |
fnux | 2f70418f4d | |
fnux | 21e1a3d220 | |
fnux | 4cc19e1e6e | |
fnux | 08bf7cd320 | |
fnux | 7e9f2ea561 | |
fnux | 929211162d | |
Nico Schottelius | b38c9b6060 | |
Nico Schottelius | a80a279ba5 | |
Nico Schottelius | 5ef009cc9b | |
Nico Schottelius | 5da6dbb32e | |
Nico Schottelius | 3b508fc87d | |
Nico Schottelius | 55a2de72c8 | |
Nico Schottelius | f99d0a0b64 | |
Nico Schottelius | d9a756b50e | |
Nico Schottelius | 592b745cea | |
Nico Schottelius | aaf0114df1 | |
fnux | 0e667b5262 | |
fnux | f2337a14eb | |
fnux | 8797e93baf | |
fnux | 9e2751c41e | |
fnux | 17d0c61407 | |
Nico Schottelius | 3171ab8ccb | |
Nico Schottelius | 56565ac7f7 | |
Nico Schottelius | 1b08a49aef | |
Dominique Roux | 7e36b0c067 | |
Dominique Roux | 1ca2f8670d | |
Dominique Roux | d8a465bca4 | |
Dominique Roux | dfa4e16806 | |
Dominique Roux | bdbf26cfd4 | |
Dominique Roux | 5969d3b13d | |
fnux | 3e69fb275f | |
fnux | 618fecb73f | |
fnux | e2cd44826b | |
fnux | 1758629ca1 | |
fnux | a759b8aa39 | |
fnux | 4c6a126d8b | |
fnux | 2b71c1807d | |
fnux | cbcaf63650 | |
Dominique Roux | 5d05e91335 | |
Dominique Roux | 8cc58726d0 | |
Dominique Roux | 5711bf4770 | |
Nico Schottelius | ae3482cc71 | |
Nico Schottelius | b1319d654a | |
Nico Schottelius | 93d7a409b1 | |
Nico Schottelius | 7e91f60c0a | |
Dominique Roux | 58daf8191e | |
Nico Schottelius | b5409552d8 | |
Dominique Roux | 550937630c | |
Dominique Roux | 46a04048b5 | |
Nico Schottelius | 3ddd27a08d | |
Nico Schottelius | c881c7ce4d | |
Dominique Roux | d5a7f8ef59 | |
Dominique Roux | 0982927c1b | |
Nico Schottelius | 8e839aeb44 | |
Nico Schottelius | 8888f5d9f7 | |
Nico Schottelius | bd9dbb12b7 | |
Nico Schottelius | 30be791312 | |
Nico Schottelius | 2b8831784a | |
Nico Schottelius | b847260768 | |
Nico Schottelius | 1b5a3f6d2e | |
Nico Schottelius | 8a451ff4ff | |
Nico Schottelius | bd03f95e99 | |
Nico Schottelius | 26d5c91625 | |
Nico Schottelius | b877ab13b3 | |
Nico Schottelius | 12e8ccd01c | |
Nico Schottelius | 8078ffae5a | |
Nico Schottelius | 1b36c2f96f | |
Nico Schottelius | c0e6d6a0d8 | |
Nico Schottelius | 083ba43918 | |
Nico Schottelius | 22531a7459 | |
Nico Schottelius | b96e56b453 | |
Nico Schottelius | 9f02b31b1b | |
Nico Schottelius | 10c8dc85ba | |
Nico Schottelius | 091131d350 | |
Ahmed Bilal | ab65349047 | |
Ahmed Bilal | c3b42aabc6 | |
Nico Schottelius | e6d22a73c5 | |
Nico Schottelius | 02526baaf9 | |
Nico Schottelius | 3188787c2a | |
Nico Schottelius | 94dad7c9b6 | |
Nico Schottelius | 53c6a14d60 | |
Nico Schottelius | 64ab011299 | |
Nico Schottelius | b017df4879 | |
Nico Schottelius | aaf29adcbb | |
Nico Schottelius | 6d51e2a8c4 | |
Nico Schottelius | c6b7152464 | |
Nico Schottelius | 8544df8bad | |
Nico Schottelius | 708e3ebb97 | |
Nico Schottelius | 3b68a589d4 | |
Nico Schottelius | 029ef36d62 | |
Nico Schottelius | 3cf4807f7c | |
Nico Schottelius | c1cabb7220 | |
Nico Schottelius | 5d95f11b3d | |
Nico Schottelius | 23d805f04f | |
Nico Schottelius | 3825c7c210 | |
Nico Schottelius | 7c9e3d747a | |
Nico Schottelius | b9c9a5e0ec | |
Nico Schottelius | ebcb1680d7 | |
ahmadbilalkhalid | cf4930ee84 | |
ahmadbilalkhalid | 00d876aea1 | |
Nico Schottelius | e91fd9e24a | |
Nico Schottelius | 469d03467d | |
Nico Schottelius | ec66a756a0 | |
ahmadbilalkhalid | b4f47adb4f | |
ahmadbilalkhalid | 31ec024be6 | |
Nico Schottelius | 82a69701ce | |
Nico Schottelius | d9dd6b48dc | |
Nico Schottelius | b7596e071a | |
Nico Schottelius | 71fd0ca7d9 | |
Nico Schottelius | 92f985c857 | |
Nico Schottelius | feb334cf04 | |
ahmadbilalkhalid | 48efcdf08c | |
ahmadbilalkhalid | f8f790e7fc | |
ahmadbilalkhalid | 5a646aeac9 | |
ahmadbilalkhalid | 6046015c3d | |
ahmadbilalkhalid | b4292615de | |
ahmadbilalkhalid | 48cc37c438 | |
ahmadbilalkhalid | 6086fec633 | |
Nico Schottelius | 388127bd11 | |
ahmadbilalkhalid | ef0f13534a | |
ahmadbilalkhalid | ec40d6b1e0 | |
ahmadbilalkhalid | b7f3ba1a34 | |
ahmadbilalkhalid | 6f51ddbb36 | |
ahmadbilalkhalid | 7fff280c79 | |
ahmadbilalkhalid | 6847a0d323 | |
ahmadbilalkhalid | 180f6f4989 | |
ahmadbilalkhalid | 344a957a3f | |
ahmadbilalkhalid | 3296e524cc | |
ahmadbilalkhalid | 50fb135726 | |
ahmadbilalkhalid | cd2f0aaa0d | |
ahmadbilalkhalid | 2afb37daca | |
Nico Schottelius | b95037f624 | |
Nico Schottelius | eb19b10333 | |
Nico Schottelius | 2566e86f1e | |
Nico Schottelius | e775570884 | |
Nico Schottelius | 9662e02eb7 | |
Nico Schottelius | 71c3f9d978 | |
Nico Schottelius | 29dfacfadb | |
Nico Schottelius | bff12ed930 | |
Nico Schottelius | 1fba79ca31 | |
Nico Schottelius | 4c7678618d | |
Nico Schottelius | 6682f127f1 | |
Nico Schottelius | 433a3b9817 | |
Nico Schottelius | 7b6c02b3ab | |
Nico Schottelius | 70c8da544e | |
ahmadbilalkhalid | 6a40a7f12f | |
ahmadbilalkhalid | 27e780b359 | |
ahmadbilalkhalid | 4b7d6d5099 | |
ahmadbilalkhalid | d13a4bcc37 | |
ahmadbilalkhalid | d2d6c6bf5c | |
ahmadbilalkhalid | 9963e9c62d | |
ahmadbilalkhalid | 52867614df | |
ahmadbilalkhalid | 9bdf4d2180 | |
ahmadbilalkhalid | 29e938dc74 | |
ahmadbilalkhalid | f980cdb464 | |
ahmadbilalkhalid | 808271f3e0 | |
ahmadbilalkhalid | ba515f0b48 | |
ahmadbilalkhalid | cd9d4cb78c | |
ahmadbilalkhalid | ec3cf49799 | |
ahmadbilalkhalid | f79097cae9 | |
ahmadbilalkhalid | 972bb5a920 | |
ahmadbilalkhalid | eea6c1568e | |
ahmadbilalkhalid | e4d2c98fb5 | |
ahmadbilalkhalid | 88b4d34e1a | |
ahmadbilalkhalid | 04993e4106 | |
ahmadbilalkhalid | bc58a6ed9c | |
ahmadbilalkhalid | 71279a968f | |
Ahmed Bilal | f919719b1e | |
Nico Schottelius | 8afd524c55 | |
Nico Schottelius | e79f1b4de9 | |
Nico Schottelius | 23c7604a3e | |
Nico Schottelius | 8e159c8be1 | |
Nico Schottelius | 0283894ba2 | |
Nico Schottelius | 9206d8ed1d | |
Nico Schottelius | 5b44034602 | |
llnu | dee0a29c91 | |
llnu | 8b90755015 | |
llnu | bfbf08c7cd | |
llnu | 2a1e80dbc5 | |
Nico Schottelius | 26aeb78f61 | |
Nico Schottelius | 9ec9083c57 | |
llnu | c6fe2cb1c4 | |
llnu | 1e70d0183d | |
llnu | 34a6e99525 | |
llnu | fee1cfd4ff | |
Nico Schottelius | b2de277244 | |
Nico Schottelius | 00563c7dc2 | |
llnu | b235f0833c | |
Nico Schottelius | 42bb7bc609 | |
llnu | 012f3cb3b5 | |
Nico Schottelius | 0d38a66a34 | |
Nico Schottelius | 72af426b3a | |
llnu | 871aa5347b | |
llnu | 608d1eb280 | |
Nico Schottelius | 537a5b01f1 | |
Nico Schottelius | c37bf19f92 | |
Nico Schottelius | 431a6f6d9b | |
llnu | 6c56a7a7c6 | |
llnu | dd33b89941 | |
Nico Schottelius | cff6a4021f | |
Nico Schottelius | 76f63633ca | |
Nico Schottelius | adddba518c | |
llnu | 787b236305 | |
llnu | c1b0c5301e | |
llnu | 7486fafbaa | |
Nico Schottelius | cdbfb96e71 | |
Nico Schottelius | a4bedb01f6 | |
Nico Schottelius | 6d0ce65f5c | |
Nico Schottelius | e459434b91 | |
Nico Schottelius | cfb09c29de | |
Nico Schottelius | 9517e73233 | |
Nico Schottelius | f9dbdc730a | |
Nico Schottelius | 2244b94fd8 | |
Nico Schottelius | 9ae75f20e8 | |
Nico Schottelius | 6d715e8348 | |
Nico Schottelius | 40176d2eaf | |
Nico Schottelius | 34d9dc1f73 | |
Nico Schottelius | 57eaddb03f | |
Nico Schottelius | 6596e482ca | |
Nico Schottelius | ba9ac4c754 | |
Nico Schottelius | 424c0d27b2 | |
Nico Schottelius | a31dd38343 | |
Nico Schottelius | 1f0dc30730 | |
Nico Schottelius | a8c20e5a30 | |
Nico Schottelius | 4a6f119a93 | |
Nico Schottelius | 95361c1759 | |
Nico Schottelius | 9f03f58d62 | |
ahmadbilalkhalid | ad87982cf0 | |
ahmadbilalkhalid | abc2c6fe51 | |
ahmadbilalkhalid | 1e7300b56e | |
ahmadbilalkhalid | bbe09667a6 | |
ahmadbilalkhalid | 66b7cf525f | |
ahmadbilalkhalid | 46c14306ec | |
ahmadbilalkhalid | db7fcdd66f | |
Nico Schottelius | 789e37df6b | |
Nico Schottelius | 5be0e26669 | |
ahmadbilalkhalid | fd042eb85d | |
ahmadbilalkhalid | b9faddd3a2 | |
ahmadbilalkhalid | 597dedb1ff | |
Nico Schottelius | e890c45dbf | |
ahmadbilalkhalid | f3f2f6127a | |
ahmadbilalkhalid | befb22b9cb | |
ahmadbilalkhalid | cc0ca68498 | |
ahmadbilalkhalid | 6fa77bce4d | |
Nico Schottelius | 1d2b980c74 | |
Nico Schottelius | 21df2367bb | |
ahmadbilalkhalid | fefbe2e1c7 | |
ahmadbilalkhalid | f6eb2ec01f | |
ahmadbilalkhalid | 5d613df33d | |
ahmadbilalkhalid | e37222c1c7 | |
ahmadbilalkhalid | da5a600ccb | |
ahmadbilalkhalid | 2a66be07a6 | |
ahmadbilalkhalid | 93dee1c9fc | |
Nico Schottelius | 583bbe34bc | |
Nico Schottelius | 1a76150d4d | |
Nico Schottelius | b27f1b62f3 | |
Nico Schottelius | a2547bcd83 | |
ahmadbilalkhalid | da77ac65eb |
|
@ -7,3 +7,23 @@ log.txt
|
|||
test.py
|
||||
STRIPE
|
||||
venv/
|
||||
|
||||
uncloud/docs/build
|
||||
logs.txt
|
||||
|
||||
uncloud.egg-info
|
||||
|
||||
# run artefacts
|
||||
default.etcd
|
||||
__pycache__
|
||||
|
||||
# build artefacts
|
||||
uncloud/version.py
|
||||
build/
|
||||
venv/
|
||||
dist/
|
||||
.history/
|
||||
*.iso
|
||||
*.sqlite3
|
||||
.DS_Store
|
||||
static/CACHE/
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
stages:
|
||||
- lint
|
||||
- test
|
||||
|
||||
run-tests:
|
||||
stage: test
|
||||
image: code.ungleich.ch:5050/uncloud/uncloud/uncloud-ci:latest
|
||||
services:
|
||||
- postgres:latest
|
||||
variables:
|
||||
DATABASE_HOST: postgres
|
||||
DATABASE_USER: postgres
|
||||
POSTGRES_HOST_AUTH_METHOD: trust
|
||||
coverage: /^TOTAL.+?(\d+\%)$/
|
||||
script:
|
||||
- pip install -r requirements.txt
|
||||
- coverage run --source='.' ./manage.py test
|
||||
- coverage report
|
|
@ -0,0 +1,674 @@
|
|||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
|
@ -0,0 +1,70 @@
|
|||
# Uncloud
|
||||
|
||||
Cloud management platform, the ungleich way.
|
||||
|
||||
|
||||
[![pipeline status](https://code.ungleich.ch/uncloud/uncloud/badges/master/pipeline.svg)](https://code.ungleich.ch/uncloud/uncloud/commits/master)
|
||||
[![coverage report](https://code.ungleich.ch/uncloud/uncloud/badges/master/coverage.svg)](https://code.ungleich.ch/uncloud/uncloud/commits/master)
|
||||
|
||||
## Useful commands
|
||||
|
||||
* `./manage.py import-vat-rates path/to/csv`
|
||||
* `./manage.py createsuperuser`
|
||||
|
||||
## Development setup
|
||||
|
||||
Install system dependencies:
|
||||
|
||||
* On Fedora, you will need the following packages: `python3-virtualenv python3-devel openldap-devel gcc chromium`
|
||||
* On Debian/Ubuntu, install: `sudo apt-get install libpq-dev python-dev libxml2-dev libxslt1-dev libldap2-dev libsasl2-dev libffi-dev`
|
||||
|
||||
|
||||
NOTE: you will need to configure an LDAP server and credentials for authentication. See `uncloud/settings.py`.
|
||||
|
||||
```
|
||||
# Initialize virtualenv.
|
||||
» virtualenv .venv
|
||||
Using base prefix '/usr'
|
||||
New python executable in /home/fnux/Workspace/ungleich/uncloud/uncloud/.venv/bin/python3
|
||||
Also creating executable in /home/fnux/Workspace/ungleich/uncloud/uncloud/.venv/bin/python
|
||||
Installing setuptools, pip, wheel...
|
||||
done.
|
||||
|
||||
# Enter virtualenv.
|
||||
» source .venv/bin/activate
|
||||
|
||||
# Install dependencies.
|
||||
» pip install -r requirements.txt
|
||||
[...]
|
||||
|
||||
# Run migrations.
|
||||
» ./manage.py migrate
|
||||
Operations to perform:
|
||||
Apply all migrations: admin, auth, contenttypes, opennebula, sessions, uncloud_auth, uncloud_net, uncloud_pay, uncloud_service, uncloud_vm
|
||||
Running migrations:
|
||||
[...]
|
||||
|
||||
# Run webserver.
|
||||
» ./manage.py runserver
|
||||
Watching for file changes with StatReloader
|
||||
Performing system checks...
|
||||
|
||||
System check identified no issues (0 silenced).
|
||||
May 07, 2020 - 10:17:08
|
||||
Django version 3.0.6, using settings 'uncloud.settings'
|
||||
Starting development server at http://127.0.0.1:8000/
|
||||
Quit the server with CONTROL-C.
|
||||
```
|
||||
### Run Background Job Queue
|
||||
We use Django Q to handle the asynchronous code and Background Cron jobs
|
||||
To start the workers, first make sure that Redis (or whichever Django Q broker you use) is running; you can edit its settings in the settings file.
|
||||
```
|
||||
./manage.py qcluster
|
||||
```
|
||||
|
||||
### Note on PGSQL
|
||||
|
||||
If you want to use Postgres:
|
||||
|
||||
* Install and configure PGSQL on your base system.
|
||||
* OR use a container! `podman run --rm -p 5432:5432 -e POSTGRES_HOST_AUTH_METHOD=trust -it postgres:latest`
|
|
@ -0,0 +1,6 @@
|
|||
* Intro
|
||||
This file lists issues that should be handled, are small and likely
|
||||
not yet high prio.
|
||||
* Issues
|
||||
** TODO Register preferred address in User model
|
||||
** TODO Allow to specify different recurring periods
|
|
@ -0,0 +1,18 @@
|
|||
#!/bin/sh
|
||||
|
||||
dbhost=$1; shift
|
||||
|
||||
ssh -L5432:localhost:5432 "$dbhost" &
|
||||
|
||||
python manage.py "$@"
|
||||
|
||||
|
||||
|
||||
# command only needs to be active while manage command is running
|
||||
|
||||
# -T no pseudo terminal
|
||||
|
||||
|
||||
# alternatively: commands output shell code
|
||||
|
||||
# ssh uncloud@dbhost "python manage.py --hostname xxx ..."
|
|
@ -0,0 +1,4 @@
|
|||
db.sqlite3
|
||||
uncloud/secrets.py
|
||||
debug.log
|
||||
uncloud/local_settings.py
|
|
@ -0,0 +1,29 @@
|
|||
#!/bin/sh
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2019-2020 Nico Schottelius (nico-uncloud at schottelius.org)
|
||||
#
|
||||
# This file is part of uncloud.
|
||||
#
|
||||
# uncloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# uncloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
#
|
||||
|
||||
|
||||
# Wrapper for real script to allow execution from checkout
|
||||
dir=${0%/*}
|
||||
|
||||
# Ensure version is present - the bundled/shipped version contains a static version,
|
||||
# the git version contains a dynamic version
|
||||
printf "VERSION = \"%s\"\n" "$(git describe --tags --abbrev=0)" > ${dir}/../uncloud/version.py
|
|
@ -0,0 +1,33 @@
|
|||
#!/bin/sh
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2012-2019 Nico Schottelius (nico-ucloud at schottelius.org)
|
||||
#
|
||||
# This file is part of ucloud.
|
||||
#
|
||||
# ucloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# ucloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with ucloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
#
|
||||
|
||||
# Wrapper for real script to allow execution from checkout
|
||||
dir=${0%/*}
|
||||
|
||||
# Ensure version is present - the bundled/shipped version contains a static version,
|
||||
# the git version contains a dynamic version
|
||||
${dir}/gen-version
|
||||
|
||||
libdir=$(cd "${dir}/../" && pwd -P)
|
||||
export PYTHONPATH="${libdir}"
|
||||
|
||||
"$dir/../scripts/uncloud" "$@"
|
|
@ -0,0 +1,29 @@
|
|||
#!/bin/sh
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2012-2019 Nico Schottelius (nico-ucloud at schottelius.org)
|
||||
#
|
||||
# This file is part of ucloud.
|
||||
#
|
||||
# ucloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# ucloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with ucloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
#
|
||||
|
||||
# Wrapper for real script to allow execution from checkout
|
||||
dir=${0%/*}
|
||||
|
||||
${dir}/gen-version;
|
||||
pip uninstall -y uncloud >/dev/null
|
||||
python setup.py install >/dev/null
|
||||
${dir}/uncloud "$@"
|
|
@ -0,0 +1,13 @@
|
|||
[etcd]
|
||||
url = localhost
|
||||
port = 2379
|
||||
base_prefix = /
|
||||
ca_cert
|
||||
cert_cert
|
||||
cert_key
|
||||
|
||||
[client]
|
||||
name = replace_me
|
||||
realm = replace_me
|
||||
seed = replace_me
|
||||
api_server = http://localhost:5000
|
|
@ -0,0 +1,25 @@
|
|||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line, and also
|
||||
# from the environment for the first two.
|
||||
SPHINXOPTS ?=
|
||||
SPHINXBUILD ?= sphinx-build
|
||||
SOURCEDIR = source/
|
||||
BUILDDIR = build/
|
||||
DESTINATION=root@staticweb.ungleich.ch:/home/services/www/ungleichstatic/staticcms.ungleich.ch/www/uncloud/
|
||||
|
||||
.PHONY: all build clean
|
||||
|
||||
publish: build permissions
|
||||
rsync -av $(BUILDDIR) $(DESTINATION)
|
||||
|
||||
permissions: build
|
||||
find $(BUILDDIR) -type f -exec chmod 0644 {} \;
|
||||
find $(BUILDDIR) -type d -exec chmod 0755 {} \;
|
||||
|
||||
build:
|
||||
$(SPHINXBUILD) "$(SOURCEDIR)" "$(BUILDDIR)"
|
||||
|
||||
clean:
|
||||
rm -rf $(BUILDDIR)
|
|
@ -0,0 +1,12 @@
|
|||
# uncloud docs
|
||||
|
||||
## Requirements
|
||||
1. Python3
|
||||
2. Sphinx
|
||||
|
||||
## Usage
|
||||
Run `make build` to build docs.
|
||||
|
||||
Run `make clean` to remove build directory.
|
||||
|
||||
Run `make publish` to push build dir to https://ungleich.ch/ucloud/
|
|
@ -0,0 +1,131 @@
|
|||
.. _admin-guide:
|
||||
|
||||
|
||||
Usage Guide For Administrators
|
||||
==============================
|
||||
|
||||
Start API
|
||||
----------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud api
|
||||
|
||||
Host Creation
|
||||
-------------
|
||||
|
||||
Currently, we don't have any host (that runs virtual machines).
|
||||
So, we need to create it by executing the following command
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli host create --hostname ungleich.ch --cpu 32 --ram '32GB' --os-ssd '32GB'
|
||||
|
||||
You should see something like the following
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"message": "Host Created"
|
||||
}
|
||||
|
||||
Start Scheduler
|
||||
---------------
|
||||
Scheduler is responsible for scheduling VMs on appropriate host.
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud scheduler
|
||||
|
||||
Start Host
|
||||
----------
|
||||
Host is responsible for handling the following actions
|
||||
|
||||
* Start VM.
|
||||
* Stop VM.
|
||||
* Create VM.
|
||||
* Delete VM.
|
||||
* Migrate VM.
|
||||
* Manage Network Resources needed by VMs.
|
||||
|
||||
It uses a hypervisor such as QEMU to perform these actions.
|
||||
|
||||
To start host we created earlier, execute the following command
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud host ungleich.ch
|
||||
|
||||
File & image scanners
|
||||
--------------------------
|
||||
|
||||
Let's assume we have uploaded an *alpine-untouched.qcow2* disk image to our
|
||||
uncloud server. Currently, our *alpine-untouched.qcow2* is not tracked by
|
||||
ucloud. We can only make images from tracked files. So, we need to track the
|
||||
file by running File Scanner
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud filescanner
|
||||
|
||||
File Scanner would run, scan your uploaded image and track it. You can check whether your image
|
||||
is successfully tracked by executing the :code:`ucloud-cli user files`, It will return something like the following
|
||||
|
||||
.. _list-user-files:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"message": [
|
||||
{
|
||||
"filename": "alpine-untouched.qcow2",
|
||||
"uuid": "3f75bd20-45d6-4013-89c4-7fceaedc8dda"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Our file is now being tracked by ucloud. Let's create an OS image using the uploaded file.
|
||||
|
||||
An image belongs to an image store. There are two types of store
|
||||
|
||||
* Public Image Store
|
||||
* Private Image Store (Not Implemented Yet)
|
||||
|
||||
.. note::
|
||||
**Quick Quiz** Have we created an image store yet?
|
||||
|
||||
The answer is **No, we haven't**. Creating a sample image store is very easy.
|
||||
Just execute the following command
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
(cd ~/ucloud && pipenv run python api/create_image_store.py)
|
||||
|
||||
An image store (with name = "images") would be created. Now, we are fully ready for creating our
|
||||
very own image. Executing the following command to create image using the file uploaded earlier
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli image create-from-file --name alpine --uuid 3f75bd20-45d6-4013-89c4-7fceaedc8dda --image-store-name images
|
||||
|
||||
Please note that your **uuid** would be different. See :ref:`List of user files <list-user-files>`.
|
||||
|
||||
Now, ucloud have received our request to create an image from file. We have to run Image Scanner to make the image.
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud imagescanner
|
||||
|
||||
To make sure that our image is created, run :code:`ucloud-cli image list --public`. You would get
|
||||
output something like the following
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"images": [
|
||||
{
|
||||
"name": "images:alpine",
|
||||
"status": "CREATED"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -0,0 +1,53 @@
|
|||
# Configuration file for the Sphinx documentation builder.
|
||||
#
|
||||
# This file only contains a selection of the most common options. For a full
|
||||
# list see the documentation:
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
||||
|
||||
# -- Path setup --------------------------------------------------------------
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
|
||||
# -- Project information -----------------------------------------------------
|
||||
|
||||
project = "uncloud"
|
||||
copyright = "2019, ungleich"
|
||||
author = "ungleich"
|
||||
|
||||
# -- General configuration ---------------------------------------------------
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
"sphinx.ext.autodoc",
|
||||
"sphinx_rtd_theme",
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ["_templates"]
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This pattern also affects html_static_path and html_extra_path.
|
||||
exclude_patterns = []
|
||||
|
||||
# -- Options for HTML output -------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
|
||||
html_theme = "sphinx_rtd_theme"
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ["_static"]
|
|
@ -0,0 +1,44 @@
|
|||
graph LR
|
||||
style ucloud fill:#FFD2FC
|
||||
style cron fill:#FFF696
|
||||
style infrastructure fill:#BDF0FF
|
||||
subgraph ucloud[ucloud]
|
||||
ucloud-cli[CLI]-->ucloud-api[API]
|
||||
ucloud-api-->ucloud-scheduler[Scheduler]
|
||||
ucloud-api-->ucloud-imagescanner[Image Scanner]
|
||||
ucloud-api-->ucloud-host[Host]
|
||||
ucloud-scheduler-->ucloud-host
|
||||
|
||||
ucloud-host-->need-networking{VM need Networking}
|
||||
need-networking-->|Yes| networking-scripts
|
||||
need-networking-->|No| VM[Virtual Machine]
|
||||
need-networking-->|SLAAC?| radvd
|
||||
networking-scripts-->VM
|
||||
networking-scripts--Create Networks Devices-->networking-scripts
|
||||
subgraph cron[Cron Jobs]
|
||||
ucloud-imagescanner
|
||||
ucloud-filescanner[File Scanner]
|
||||
ucloud-filescanner--Track User files-->ucloud-filescanner
|
||||
end
|
||||
subgraph infrastructure[Infrastructure]
|
||||
radvd
|
||||
etcd
|
||||
networking-scripts[Networking Scripts]
|
||||
ucloud-imagescanner-->image-store
|
||||
image-store{Image Store}
|
||||
image-store-->|CEPH| ceph
|
||||
image-store-->|FILE| file-system
|
||||
ceph[CEPH]
|
||||
file-system[File System]
|
||||
end
|
||||
subgraph virtual-machine[Virtual Machine]
|
||||
VM
|
||||
VM-->ucloud-init
|
||||
|
||||
end
|
||||
|
||||
subgraph metadata-group[Metadata Server]
|
||||
metadata-->ucloud-init
|
||||
ucloud-init<-->metadata
|
||||
end
|
||||
end
|
|
@ -0,0 +1,36 @@
|
|||
Hacking
|
||||
=======
|
||||
Using uncloud in hacking (aka development) mode.
|
||||
|
||||
|
||||
Get the code
|
||||
------------
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
git clone https://code.ungleich.ch/uncloud/uncloud.git
|
||||
|
||||
|
||||
|
||||
Install python requirements
|
||||
---------------------------
|
||||
You need to have python3 installed.
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
cd uncloud
|
||||
python -m venv venv
|
||||
. ./venv/bin/activate
|
||||
./bin/uncloud-run-reinstall
|
||||
|
||||
|
||||
|
||||
Install os requirements
|
||||
-----------------------
|
||||
Install the following software packages: **dnsmasq**.
|
||||
|
||||
If you already have a working IPv6 SLAAC and DNS setup,
|
||||
this step can be skipped.
|
||||
|
||||
Note that you need at least one /64 IPv6 network to run uncloud.
|
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 37 KiB |
|
@ -0,0 +1,26 @@
|
|||
.. ucloud documentation master file, created by
|
||||
sphinx-quickstart on Mon Nov 11 19:08:16 2019.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to ucloud's documentation!
|
||||
==================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Contents:
|
||||
|
||||
introduction
|
||||
setup-install
|
||||
vm-images
|
||||
user-guide
|
||||
admin-guide
|
||||
troubleshooting
|
||||
hacking
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
|
@ -0,0 +1,58 @@
|
|||
Introduction
|
||||
============
|
||||
|
||||
ucloud is a modern, IPv6 first virtual machine management system.
|
||||
It is an alternative to `OpenNebula <https://opennebula.org/>`_,
|
||||
`OpenStack <https://www.openstack.org/>`_ or
|
||||
`Cloudstack <https://cloudstack.apache.org/>`_.
|
||||
|
||||
ucloud is the first cloud management system that puts IPv6
|
||||
first. ucloud also has an integral ordering process that we missed in
|
||||
existing solutions.
|
||||
|
||||
The ucloud documentation is separated into various sections for the
|
||||
different use cases:
|
||||
|
||||
* :ref:`The user guide <user-guide>` describes how to use an existing
|
||||
ucloud installation
|
||||
* There are :ref:`setup instructions <setup-install>` which describe on how to setup a new
|
||||
ucloud instance
|
||||
* :ref:`The admin guide <admin-guide>` describe on how to
|
||||
administrate ucloud
|
||||
|
||||
|
||||
Architecture
|
||||
------------
|
||||
We try to reuse existing components for ucloud. Generally speaking,
|
||||
ucloud consist of a variety of daemons who handle specific tasks and
|
||||
connect to a shared database.
|
||||
|
||||
All interactions with the clients are done through an API.
|
||||
|
||||
ucloud consists of the following components:
|
||||
|
||||
* API
|
||||
* Scheduler
|
||||
* Host
|
||||
* File Scanner
|
||||
* Image Scanner
|
||||
* Metadata Server
|
||||
* VM Init Scripts (dubbed as ucloud-init)
|
||||
|
||||
|
||||
Tech Stack
|
||||
----------
|
||||
The following technologies are utilised:
|
||||
|
||||
* Python 3
|
||||
* Flask
|
||||
* QEMU as hypervisor
|
||||
* etcd (key/value store)
|
||||
* radvd for Router Advertisement
|
||||
|
||||
|
||||
Optional components:
|
||||
|
||||
* CEPH for distributed image storage
|
||||
* uotp for user authentication
|
||||
* netbox for IPAM
|
|
@ -0,0 +1,32 @@
|
|||
TODO
|
||||
====
|
||||
|
||||
Security
|
||||
--------
|
||||
|
||||
* **Check Authentication:** Nico reported that some endpoints
|
||||
even work without providing token. (e.g ListUserVM)
|
||||
|
||||
Refactoring/Feature
|
||||
-------------------
|
||||
|
||||
* Put overrides for **IMAGE_BASE**, **VM_BASE** in **ImageStorageHandler**.
|
||||
* Expose more details in ListUserFiles.
|
||||
* Throw KeyError instead of returning None when some key is not found in etcd.
|
||||
* Create Network Manager
|
||||
* That would handle tasks like up/down an interface
|
||||
* Create VXLANs, Bridges, TAPs.
|
||||
* Remove them when they are no longer used.
|
||||
|
||||
Reliability
|
||||
-----------
|
||||
|
||||
* What to do if some command hangs forever? e.g CEPH commands
|
||||
:code:`rbd ls ssd` etc. hangs forever if CEPH isn't running
|
||||
or not responding.
|
||||
* What to do if etcd goes down?
|
||||
|
||||
Misc.
|
||||
-----
|
||||
|
||||
* Put "Always use only one StorageHandler"
|
|
@ -0,0 +1,323 @@
|
|||
.. _setup-install:
|
||||
|
||||
Installation of ucloud
|
||||
======================
|
||||
To install ucloud, you will first need to install the requirements and
|
||||
then ucloud itself.
|
||||
|
||||
We describe the installation in the following sections:
|
||||
|
||||
* Installation overview
|
||||
* Requirements on Alpine
|
||||
* Installation on Arch Linux
|
||||
|
||||
|
||||
Installation overview
|
||||
---------------------
|
||||
|
||||
ucloud requires the following components to run:
|
||||
|
||||
* python3
|
||||
* an etcd cluster
|
||||
|
||||
|
||||
Installation on Arch Linux
|
||||
--------------------------
|
||||
|
||||
In Arch Linux, some packages can be installed from the regular
|
||||
repositories, some packages need to be installed from AUR.
|
||||
|
||||
|
||||
System packages
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
pacman -Syu qemu
|
||||
|
||||
|
||||
AUR packages
|
||||
~~~~~~~~~~~~
|
||||
Use your favorite AUR manager to install the following packages:
|
||||
|
||||
* etcd
|
||||
|
||||
|
||||
Alpine
|
||||
------
|
||||
|
||||
.. note::
|
||||
Python Wheel (Binary) Packages does not support Alpine Linux as it is
|
||||
using musl libc instead of glibc. Therefore, expect longer installation
|
||||
times than other linux distributions.
|
||||
|
||||
Enable Edge Repos, Update and Upgrade
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. warning::
|
||||
The below commands would overwrite your repositories sources and
|
||||
upgrade all packages and their dependencies to match those available
|
||||
in edge repos. **So, be warned**
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
cat > /etc/apk/repositories << EOF
|
||||
http://dl-cdn.alpinelinux.org/alpine/edge/main
|
||||
http://dl-cdn.alpinelinux.org/alpine/edge/community
|
||||
http://dl-cdn.alpinelinux.org/alpine/edge/testing
|
||||
EOF
|
||||
|
||||
apk update
|
||||
apk upgrade
|
||||
|
||||
reboot
|
||||
|
||||
|
||||
Install Dependencies
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
.. note::
|
||||
The installation and configuration of a production grade etcd cluster
|
||||
is out of scope of this manual. So, we will install etcd with default
|
||||
configuration.
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
apk add git python3 alpine-sdk python3-dev etcd etcd-ctl openntpd \
|
||||
libffi-dev openssl-dev make py3-protobuf py3-tempita chrony
|
||||
|
||||
pip3 install pipenv
|
||||
|
||||
|
||||
**Install QEMU (For Filesystem based Installation)**
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
apk add qemu qemu-system-x86_64 qemu-img
|
||||
|
||||
**Install QEMU/CEPH/radvd (For CEPH based Installation)**
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
$(git clone https://code.ungleich.ch/ahmedbilal/qemu-with-rbd-alpine.git && cd qemu-with-rbd-alpine && apk add apks/*.apk --allow-untrusted)
|
||||
apk add ceph radvd
|
||||
|
||||
Synchronize Date/Time
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
service chronyd start
|
||||
rc-update add chronyd
|
||||
|
||||
|
||||
Start etcd and enable it
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. note::
|
||||
The following :command:`curl` statement shouldn't be run once
|
||||
etcd is fixed in alpine repos.
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
curl https://raw.githubusercontent.com/etcd-io/etcd/release-3.4/etcd.conf.yml.sample -o /etc/etcd/conf.yml
|
||||
service etcd start
|
||||
rc-update add etcd
|
||||
|
||||
|
||||
Install uotp
|
||||
~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
git clone https://code.ungleich.ch/ungleich-public/uotp.git
|
||||
cd uotp
|
||||
mv .env.sample .env
|
||||
|
||||
pipenv --three --site-packages
|
||||
pipenv install
|
||||
pipenv run python app.py
|
||||
|
||||
Run :code:`$(cd scripts && pipenv run python get-admin.py)` to get
|
||||
admin seed. A sample output
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"seed": "FYTVQ72A2CJJ4TB4",
|
||||
"realm": ["ungleich-admin"]
|
||||
}
|
||||
|
||||
Now, run :code:`pipenv run python scripts/create-auth.py FYTVQ72A2CJJ4TB4`
|
||||
(Replace **FYTVQ72A2CJJ4TB4** with your admin seed obtained in previous step).
|
||||
A sample output is as below. It shows seed of auth.
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"message": "Account Created",
|
||||
"name": "auth",
|
||||
"realm": ["ungleich-auth"],
|
||||
"seed": "XZLTUMX26TRAZOXC"
|
||||
}
|
||||
|
||||
.. note::
|
||||
Please note both **admin** and **auth** seeds as we would need them in setting up ucloud.
|
||||
|
||||
|
||||
Install and configure ucloud
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
git clone https://code.ungleich.ch/ucloud/ucloud.git
|
||||
cd ucloud
|
||||
|
||||
pipenv --three --site-packages
|
||||
pipenv install
|
||||
|
||||
**Filesystem based Installation**
|
||||
|
||||
You just need to update **AUTH_SEED** in the below code to match your auth's seed.
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
mkdir /etc/ucloud
|
||||
|
||||
cat > /etc/ucloud/ucloud.conf << EOF
|
||||
AUTH_NAME=auth
|
||||
AUTH_SEED=XZLTUMX26TRAZOXC
|
||||
AUTH_REALM=ungleich-auth
|
||||
|
||||
REALM_ALLOWED = ["ungleich-admin", "ungleich-user"]
|
||||
|
||||
OTP_SERVER="http://127.0.0.1:8000/"
|
||||
|
||||
ETCD_URL=localhost
|
||||
|
||||
STORAGE_BACKEND=filesystem
|
||||
|
||||
BASE_DIR=/var/www
|
||||
IMAGE_DIR=/var/image
|
||||
VM_DIR=/var/vm
|
||||
|
||||
VM_PREFIX=/v1/vm/
|
||||
HOST_PREFIX=/v1/host/
|
||||
REQUEST_PREFIX=/v1/request/
|
||||
FILE_PREFIX=/v1/file/
|
||||
IMAGE_PREFIX=/v1/image/
|
||||
IMAGE_STORE_PREFIX=/v1/image_store/
|
||||
USER_PREFIX=/v1/user/
|
||||
NETWORK_PREFIX=/v1/network/
|
||||
|
||||
ssh_username=meow
|
||||
ssh_pkey="~/.ssh/id_rsa"
|
||||
|
||||
VXLAN_PHY_DEV="eth0"
|
||||
|
||||
EOF
|
||||
|
||||
|
||||
|
||||
**CEPH based Installation**
|
||||
You need to update the following
|
||||
|
||||
* **AUTH_SEED**
|
||||
* **NETBOX_URL**
|
||||
* **NETBOX_TOKEN**
|
||||
* **PREFIX**
|
||||
* **PREFIX_LENGTH**
|
||||
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
mkdir /etc/ucloud
|
||||
|
||||
cat > /etc/ucloud/ucloud.conf << EOF
|
||||
AUTH_NAME=auth
|
||||
AUTH_SEED=XZLTUMX26TRAZOXC
|
||||
AUTH_REALM=ungleich-auth
|
||||
|
||||
REALM_ALLOWED = ["ungleich-admin", "ungleich-user"]
|
||||
|
||||
OTP_SERVER="http://127.0.0.1:8000/"
|
||||
|
||||
ETCD_URL=localhost
|
||||
|
||||
STORAGE_BACKEND=ceph
|
||||
|
||||
BASE_DIR=/var/www
|
||||
IMAGE_DIR=/var/image
|
||||
VM_DIR=/var/vm
|
||||
|
||||
VM_PREFIX=/v1/vm/
|
||||
HOST_PREFIX=/v1/host/
|
||||
REQUEST_PREFIX=/v1/request/
|
||||
FILE_PREFIX=/v1/file/
|
||||
IMAGE_PREFIX=/v1/image/
|
||||
IMAGE_STORE_PREFIX=/v1/image_store/
|
||||
USER_PREFIX=/v1/user/
|
||||
NETWORK_PREFIX=/v1/network/
|
||||
|
||||
ssh_username=meow
|
||||
ssh_pkey="~/.ssh/id_rsa"
|
||||
|
||||
VXLAN_PHY_DEV="eth0"
|
||||
|
||||
NETBOX_URL="<url-for-your-netbox-installation>"
|
||||
NETBOX_TOKEN="netbox-token"
|
||||
PREFIX="your-prefix"
|
||||
PREFIX_LENGTH="64"
|
||||
EOF
|
||||
|
||||
|
||||
Install and configure ucloud-cli
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code-block:: sh
|
||||
:linenos:
|
||||
|
||||
git clone https://code.ungleich.ch/ucloud/ucloud-cli.git
|
||||
cd ucloud-cli
|
||||
pipenv --three --site-packages
|
||||
pipenv install
|
||||
|
||||
cat > ~/.ucloud.conf << EOF
|
||||
UCLOUD_API_SERVER=http://localhost:5000
|
||||
EOF
|
||||
|
||||
mkdir /var/www/
|
||||
|
||||
**Only for Filesystem Based Installation**
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
mkdir /var/image/
|
||||
mkdir /var/vm/
|
||||
|
||||
|
||||
Environment Variables and aliases
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To ease usage of ucloud and its various components put the following in
|
||||
your shell profile e.g *~/.profile*
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
export OTP_NAME=admin
|
||||
export OTP_REALM=ungleich-admin
|
||||
export OTP_SEED=FYTVQ72A2CJJ4TB4
|
||||
|
||||
alias ucloud='cd /root/ucloud/ && pipenv run python ucloud.py'
|
||||
alias ucloud-cli='cd /root/ucloud-cli/ && pipenv run python ucloud-cli.py'
|
||||
alias uotp='cd /root/uotp/ && pipenv run python app.py'
|
||||
|
||||
and run :code:`source ~/.profile`
|
|
@ -0,0 +1,98 @@
|
|||
Summary
|
||||
=======
|
||||
|
||||
.. image:: /images/ucloud.svg
|
||||
|
||||
.. code-block::
|
||||
|
||||
<cli>
|
||||
|
|
||||
|
|
||||
|
|
||||
+-------------------------<api>
|
||||
| |
|
||||
| |```````````````|```````````````|
|
||||
| | | |
|
||||
| <file_scanner> <scheduler> <image_scanner>
|
||||
| |
|
||||
| |
|
||||
+-------------------------<host>
|
||||
|
|
||||
|
|
||||
|
|
||||
Virtual Machine------<init>------<metadata>
|
||||
|
||||
|
||||
|
||||
**ucloud-cli** interact with **ucloud-api** to do the following operations:
|
||||
|
||||
- Create/Delete/Start/Stop/Migrate/Probe (Status of) Virtual Machines
|
||||
- Create/Delete Networks
|
||||
- Add/Get/Delete SSH Keys
|
||||
- Create OS Image out of a file (tracked by file_scanner)
|
||||
- List User's files/networks/vms
|
||||
- Add Host
|
||||
|
||||
ucloud can currently store OS images on
|
||||
|
||||
* File System
|
||||
* `CEPH <https://ceph.io/>`_
|
||||
|
||||
|
||||
**ucloud-api** in turns creates appropriate Requests which are taken
|
||||
by suitable components of ucloud. For Example, if user uses ucloud-cli
|
||||
to create a VM, **ucloud-api** would create a **ScheduleVMRequest** containing
|
||||
things like pointer to VM's entry which have specs, networking
|
||||
configuration of VMs.
|
||||
|
||||
**ucloud-scheduler** accepts requests for VM's scheduling and
|
||||
migration. It finds a host from a list of available host on which
|
||||
the incoming VM can run and schedules it on that host.
|
||||
|
||||
**ucloud-host** runs on host servers i.e servers that
|
||||
actually runs virtual machines, accepts requests
|
||||
intended only for them. It creates/delete/start/stop/migrate
|
||||
virtual machines. It also arrange network resources needed for the
|
||||
incoming VM.
|
||||
|
||||
**ucloud-filescanner** keep tracks of user's files which would be needed
|
||||
later for creating OS Images.
|
||||
|
||||
**ucloud-imagescanner** converts images files from qcow2 format to raw
|
||||
format which would then be imported into image store.
|
||||
|
||||
* In case of **File System**, the converted image would be copied to
|
||||
:file:`/var/image/` or the path referred by :envvar:`IMAGE_PATH`
|
||||
environment variable mentioned in :file:`/etc/ucloud/ucloud.conf`.
|
||||
|
||||
* In case of **CEPH**, the converted image would be imported into
|
||||
specific pool (it depends on the image store in which the image
|
||||
belongs) of CEPH Block Storage.
|
||||
|
||||
**ucloud-metadata** provides metadata which is used to contextualize
|
||||
VMs. When, the VM is created, it is just clone (duplicate) of OS
|
||||
image from which it is created. So, to differentiate between my
|
||||
VM and your VM, the VM need to be contextualized. This works
|
||||
like the following
|
||||
|
||||
.. note::
|
||||
Actually, ucloud-init makes the GET request. You can also try it
|
||||
yourself using curl, but ucloud-init does that for you.
|
||||
|
||||
* The VM makes a GET request to http://metadata, which resolves to the actual
|
||||
address of metadata server. The metadata server looks at the IPv6
|
||||
Address of the requester and extracts the MAC Address which is possible
|
||||
because the IPv6 address is
|
||||
`IPv6 EUI-64 <https://community.cisco.com/t5/networking-documents/understanding-ipv6-eui-64-bit-address/ta-p/3116953>`_.
|
||||
Metadata use this MAC address to find the actual VM to which it belongs
|
||||
and its owner, ssh-keys and much more. Then, metadata return these
|
||||
details back to the calling VM in JSON format. These details are
|
||||
then used be the **ucloud-init** which is explained next.
|
||||
|
||||
**ucloud-init** gets the metadata from **ucloud-metadata** to contextualize
|
||||
the VM. Specifically, it gets owner's ssh keys (or any other keys the
|
||||
owner of VM added to authorized keys for this VM) and put them to ssh
|
||||
server's (installed on VM) authorized keys so that owner can access
|
||||
the VM using ssh. It also install softwares that are needed for correct
|
||||
behavior of VM e.g rdnssd (needed for `SLAAC <https://en.wikipedia.org/wiki/IPv6#Stateless_address_autoconfiguration_(SLAAC)>`_).
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
Installation Troubleshooting
|
||||
============================
|
||||
|
||||
etcd doesn't start
|
||||
------------------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
[root@archlinux ~]# systemctl start etcd
|
||||
Job for etcd.service failed because the control process exited with error code.
|
||||
See "systemctl status etcd.service" and "journalctl -xe" for details
|
||||
|
||||
possible solution
|
||||
~~~~~~~~~~~~~~~~~
|
||||
Run :code:`cat /etc/hosts`; if its output contains the following
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
127.0.0.1 localhost.localdomain localhost
|
||||
::1 localhost localhost.localdomain
|
||||
|
||||
|
||||
then unfortunately, we can't help you. But, if it doesn't contain the
|
||||
above you can put the above in :file:`/etc/hosts` to fix the issue.
|
|
@ -0,0 +1,121 @@
|
|||
.. _user-guide:
|
||||
|
||||
User Guide
|
||||
==========
|
||||
|
||||
Create VM
|
||||
---------
|
||||
|
||||
The following command would create a Virtual Machine (name: meow)
|
||||
with following specs
|
||||
|
||||
* CPU: 1
|
||||
* RAM: 1GB
|
||||
* OS-SSD: 4GB
|
||||
* OS: Alpine Linux
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli vm create --vm-name meow --cpu 1 --ram '1gb' --os-ssd '4gb' --image images:alpine
|
||||
|
||||
|
||||
.. _how-to-check-vm-status:
|
||||
|
||||
Check VM Status
|
||||
---------------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli vm status --vm-name meow
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"hostname": "/v1/host/74c21c332f664972bf5078e8de080eea",
|
||||
"image_uuid": "3f75bd20-45d6-4013-89c4-7fceaedc8dda",
|
||||
"in_migration": null,
|
||||
"log": [
|
||||
"2019-11-12T09:11:09.800798 - Started successfully"
|
||||
],
|
||||
"metadata": {
|
||||
"ssh-keys": []
|
||||
},
|
||||
"name": "meow",
|
||||
"network": [],
|
||||
"owner": "admin",
|
||||
"owner_realm": "ungleich-admin",
|
||||
"specs": {
|
||||
"cpu": 1,
|
||||
"hdd": [],
|
||||
"os-ssd": "4.0 GB",
|
||||
"ram": "1.0 GB"
|
||||
},
|
||||
"status": "RUNNING",
|
||||
"vnc_socket": "/tmp/tmpj1k6sdo_"
|
||||
}
|
||||
|
||||
|
||||
Connect to VM using VNC
|
||||
-----------------------
|
||||
|
||||
We would need **socat** utility and a remote desktop client
|
||||
e.g Remmina, KRDC etc. We can get the vnc socket path by getting
|
||||
its status, see :ref:`how-to-check-vm-status`.
|
||||
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
socat TCP-LISTEN:1234,reuseaddr,fork UNIX-CLIENT:/tmp/tmpj1k6sdo_
|
||||
|
||||
|
||||
Then, launch your remote desktop client and connect to vnc://localhost:1234.
|
||||
|
||||
Create Network
|
||||
--------------
|
||||
|
||||
Layer 2 Network with sample IPv6 range fd00::/64 (without IPAM and routing)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli network create --network-name mynet --network-type vxlan
|
||||
|
||||
|
||||
Layer 2 Network with /64 network with automatic IPAM
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli network create --network-name mynet --network-type vxlan --user True
|
||||
|
||||
Attach Network to VM
|
||||
--------------------
|
||||
|
||||
Currently, user can only attach network to his/her VM at
|
||||
the time of creation. A sample command to create VM with
|
||||
a network is as follow
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli vm create --vm-name meow2 --cpu 1 --ram '1gb' --os-ssd '4gb' --image images:alpine --network mynet
|
||||
|
||||
.. _get-list-of-hosts:
|
||||
|
||||
Get List of Hosts
|
||||
-----------------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli host list
|
||||
|
||||
|
||||
Migrate VM
|
||||
----------
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
ucloud-cli vm migrate --vm-name meow --destination server1.place10
|
||||
|
||||
|
||||
.. option:: --destination
|
||||
|
||||
The name of destination host. You can find a list of host
|
||||
using :ref:`get-list-of-hosts`
|
|
@ -0,0 +1,53 @@
|
|||
How to create VM images for ucloud
|
||||
==================================
|
||||
|
||||
Overview
|
||||
---------
|
||||
|
||||
ucloud tries to be least invasive towards VMs and only requires
|
||||
strictly necessary changes for running in a virtualised
|
||||
environment. This includes configurations for:
|
||||
|
||||
* Configuring the network
|
||||
* Managing access via ssh keys
|
||||
* Resizing the attached disk(s)
|
||||
|
||||
|
||||
Network configuration
|
||||
---------------------
|
||||
All VMs in ucloud are required to support IPv6. The primary network
|
||||
configuration is always done using SLAAC. A VM thus needs only to be
|
||||
configured to
|
||||
|
||||
* accept router advertisements on all network interfaces
|
||||
* use the router advertisements to configure the network interfaces
|
||||
* accept the DNS entries from the router advertisements
|
||||
|
||||
|
||||
Configuring SSH keys
|
||||
--------------------
|
||||
|
||||
To be able to access the VM, ucloud support provisioning SSH keys.
|
||||
|
||||
To accept ssh keys in your VM, request the URL
|
||||
*http://metadata/ssh_keys*. Add the content to the appropriate user's
|
||||
**authorized_keys** file. Below you find sample code to accomplish
|
||||
this task:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
tmp=$(mktemp)
|
||||
curl -s http://metadata/ssh_keys > "$tmp"
|
||||
touch ~/.ssh/authorized_keys # ensure it exists
|
||||
cat ~/.ssh/authorized_keys >> "$tmp"
|
||||
sort "$tmp" | uniq > ~/.ssh/authorized_keys
|
||||
|
||||
|
||||
Disk resize
|
||||
-----------
|
||||
In virtualised environments, the disk sizes might grow. The operating
|
||||
system should detect disks that are bigger than the existing partition
|
||||
table and resize accordingly. This task is os specific.
|
||||
|
||||
ucloud does not support shrinking disks due to the complexity and
|
||||
intra OS dependencies.
|
|
@ -0,0 +1,66 @@
|
|||
VM images
|
||||
==================================
|
||||
|
||||
Overview
|
||||
---------
|
||||
|
||||
ucloud tries to be least invasive towards VMs and only requires
|
||||
strictly necessary changes for running in a virtualised
|
||||
environment. This includes configurations for:
|
||||
|
||||
* Configuring the network
|
||||
* Managing access via ssh keys
|
||||
* Resizing the attached disk(s)
|
||||
|
||||
Upstream images
|
||||
---------------
|
||||
|
||||
The 'official' uncloud images are defined in the `uncloud/images
|
||||
<https://code.ungleich.ch/uncloud/images>`_ repository.
|
||||
|
||||
How to make your own Uncloud images
-----------------------------------
|
||||
|
||||
.. note::
|
||||
It is fairly easy to create your own images for uncloud, as the common
|
||||
operations (which are detailed below) can be automatically handled by the
|
||||
`uncloud/uncloud-init <https://code.ungleich.ch/uncloud/uncloud-init>`_ tool.
|
||||
|
||||
Network configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
All VMs in ucloud are required to support IPv6. The primary network
|
||||
configuration is always done using SLAAC. A VM thus needs only to be
|
||||
configured to
|
||||
|
||||
* accept router advertisements on all network interfaces
|
||||
* use the router advertisements to configure the network interfaces
|
||||
* accept the DNS entries from the router advertisements
|
||||
|
||||
|
||||
Configuring SSH keys
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To be able to access the VM, ucloud supports provisioning SSH keys.
|
||||
|
||||
To accept ssh keys in your VM, request the URL
|
||||
*http://metadata/ssh_keys*. Add the content to the appropriate user's
|
||||
**authorized_keys** file. Below you find sample code to accomplish
|
||||
this task:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
tmp=$(mktemp)
|
||||
curl -s http://metadata/ssh_keys > "$tmp"
|
||||
touch ~/.ssh/authorized_keys # ensure it exists
|
||||
cat ~/.ssh/authorized_keys >> "$tmp"
|
||||
sort "$tmp" | uniq > ~/.ssh/authorized_keys
|
||||
|
||||
|
||||
Disk resize
|
||||
~~~~~~~~~~~
|
||||
In virtualised environments, the disk sizes might grow. The operating
|
||||
system should detect disks that are bigger than the existing partition
|
||||
table and resize accordingly. This task is os specific.
|
||||
|
||||
ucloud does not support shrinking disks due to the complexity and
|
||||
intra OS dependencies.
|
|
@ -0,0 +1,89 @@
|
|||
#!/usr/bin/env python3
"""uncloud command-line front-end.

Discovers every component's argument parser, stitches them together as
sub-commands of one top-level parser, and dispatches to the selected
component's ``main()``.
"""
import logging
import sys
import importlib
import argparse
import os

from etcd3.exceptions import ConnectionFailedError

from uncloud.common import settings
from uncloud import UncloudException
from uncloud.common.cli import resolve_otp_credentials

# Components that use etcd
ETCD_COMPONENTS = ['api', 'scheduler', 'host', 'filescanner',
                   'imagescanner', 'metadata', 'configure', 'hack']

ALL_COMPONENTS = ETCD_COMPONENTS.copy()
ALL_COMPONENTS.append('oneshot')
#ALL_COMPONENTS.append('cli')


if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    subparsers = arg_parser.add_subparsers(dest='command')

    # Options every component understands.
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument('--debug', '-d', action='store_true', default=False,
                               help='More verbose logging')
    parent_parser.add_argument('--conf-dir', '-c', help='Configuration directory',
                               default=os.path.expanduser('~/uncloud'))

    # Options only the etcd-backed components understand.
    etcd_parser = argparse.ArgumentParser(add_help=False)
    etcd_parser.add_argument('--etcd-host')
    etcd_parser.add_argument('--etcd-port')
    etcd_parser.add_argument('--etcd-ca-cert', help='CA that signed the etcd certificate')
    etcd_parser.add_argument('--etcd-cert-cert', help='Path to client certificate')
    etcd_parser.add_argument('--etcd-cert-key', help='Path to client certificate key')

    # Every uncloud.<component>.main module exposes its own `arg_parser`;
    # mount each one as a sub-command, adding the etcd options where needed.
    for component in ALL_COMPONENTS:
        component_module = importlib.import_module('uncloud.{}.main'.format(component))
        component_parser = getattr(component_module, 'arg_parser')

        parser_parents = [component_parser, parent_parser]
        if component in ETCD_COMPONENTS:
            parser_parents.append(etcd_parser)
        subparsers.add_parser(name=component_parser.prog, parents=parser_parents)

    arguments = vars(arg_parser.parse_args())

    # Collect the --etcd-* options that were actually supplied into the
    # nested shape expected by the settings seed.
    etcd_arguments = {
        'etcd': {
            key.replace('etcd_', ''): value
            for key, value in arguments.items()
            if key.startswith('etcd_') and value
        }
    }

    if not arguments['command']:
        arg_parser.print_help()
    else:
        # NOTE: resolving OTP credentials must happen after argument parsing,
        # since the config directory is now itself a CLI argument and can no
        # longer provide defaults for --name/--realm/--seed up front:
        # settings.settings = settings.Settings(arguments['conf_dir'], seed_value=etcd_arguments)
        # resolve_otp_credentials(arguments)

        name = arguments.pop('command')
        mod = importlib.import_module('uncloud.{}.main'.format(name))
        main = getattr(mod, 'main')

        log_level = logging.DEBUG if arguments['debug'] else logging.INFO
        logging.basicConfig(level=log_level)

        log = logging.getLogger()

        try:
            main(arguments)
        except UncloudException as err:
            log.error(err)
            sys.exit(1)
        # except ConnectionFailedError as err:
        #     log.error('Cannot connect to etcd: {}'.format(err))
        except Exception as err:
            log.exception(err)
|
|
@ -0,0 +1,51 @@
|
|||
import os

from setuptools import setup, find_packages

# Read the long description from the README so PyPI mirrors the repo page.
# Explicit encoding: the default is locale-dependent and can break builds.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

try:
    import uncloud.version

    version = uncloud.version.VERSION
except Exception:
    # The generated version module is absent (e.g. building straight from a
    # git checkout): fall back to the nearest annotated tag.
    # A narrowed `except Exception:` replaces the previous bare `except:`,
    # which also swallowed KeyboardInterrupt/SystemExit.
    import subprocess

    c = subprocess.check_output(["git", "describe"])
    version = c.decode("utf-8").strip()


setup(
    name="uncloud",
    version=version,
    description="uncloud cloud management",
    url="https://code.ungleich.ch/uncloud/uncloud",
    long_description=long_description,
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Programming Language :: Python :: 3",
    ],
    author="ungleich",
    author_email="technik@ungleich.ch",
    packages=find_packages(),
    install_requires=[
        "requests",
        "Flask>=1.1.1",
        "flask-restful",
        "bitmath",
        "pyotp",
        "pynetbox",
        "colorama",
        "etcd3 @ https://github.com/kragniz/python-etcd3/tarball/master#egg=etcd3",
        "marshmallow",
        "ldap3"
    ],
    scripts=["scripts/uncloud"],
    data_files=[
        (os.path.expanduser("~/uncloud/"), ["conf/uncloud.conf"])
    ],
    zip_safe=False,
)
|
|
@ -0,0 +1,37 @@
|
|||
import unittest
|
||||
from unittest.mock import Mock
|
||||
|
||||
from uncloud.hack.mac import MAC
|
||||
from uncloud import UncloudException
|
||||
|
||||
class TestMacLocal(unittest.TestCase):
    """Exercise the MAC allocator with the database backend disabled."""

    def setUp(self):
        # A stub config suffices: MAC only consults config.arguments["no_db"].
        self.config = Mock()
        self.config.arguments = {"no_db": True}
        self.mac = MAC(self.config)
        self.mac.create()

    def testMacInt(self):
        self.assertEqual(int(self.mac), 0x420000000001, "wrong first MAC index")

    def testMacRepr(self):
        self.assertEqual(repr(self.mac), '420000000001', "wrong first MAC index")

    def testMacStr(self):
        self.assertEqual(str(self.mac), '42:00:00:00:00:01', "wrong first MAC index")

    def testValidationRaise(self):
        # A malformed MAC must be rejected with the project exception.
        with self.assertRaises(UncloudException):
            self.mac.validate_mac("2")

    def testValidation(self):
        self.assertTrue(self.mac.validate_mac("42:00:00:00:00:01"), "Validation of a given MAC not working properly")

    def testNextMAC(self):
        # With no_db set, a second create() is expected to yield the same MAC.
        self.mac.create()
        self.assertEqual(repr(self.mac), '420000000001', "wrong second MAC index")
        self.assertEqual(int(self.mac), 0x420000000001, "wrong second MAC index")
        self.assertEqual(str(self.mac), '42:00:00:00:00:01', "wrong second MAC index")


if __name__ == '__main__':
    unittest.main()
|
|
@ -0,0 +1,2 @@
|
|||
class UncloudException(Exception):
    """Base exception for uncloud-specific errors.

    Raised by components (e.g. MAC validation, the API's Flask startup) and
    caught at the CLI entry point, which logs it and exits non-zero.
    """
    pass
|
|
@ -0,0 +1,12 @@
|
|||
# ucloud-api
|
||||
[![Project Status: WIP – Initial development is in progress, but there has not yet been a stable, usable release suitable for the public.](https://www.repostatus.org/badges/latest/wip.svg)](https://www.repostatus.org/#wip)
|
||||
|
||||
## Installation
|
||||
|
||||
**Make sure you have Python >= 3.5 and Pipenv installed.**
|
||||
|
||||
1. Clone the repository and `cd` into it.
|
||||
2. Run the following commands
|
||||
- `pipenv install`
|
||||
- `pipenv shell`
|
||||
- `python main.py`
|
|
@ -0,0 +1,3 @@
|
|||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
|
@ -0,0 +1,59 @@
|
|||
import os
|
||||
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
|
||||
class Optional:
    """Marker type: wrapping a value's absence as ``Optional()`` tells
    ``Field.is_valid`` to skip the type check for that field."""
    pass


class Field:
    """A single validated request field.

    Holds a name, an expected type and a value; ``is_valid()`` accumulates
    human-readable error strings retrievable via ``get_errors()``.
    Subclasses may override (or rebind) ``validation`` for extra checks.
    """

    def __init__(self, _name, _type, _value=None):
        self.name = _name
        self.value = _value
        self.type = _type
        # Name-mangled on purpose: subclasses report via add_error() only.
        self.__errors = []

    def validation(self):
        # Hook for subclasses; default accepts everything.
        return True

    def is_valid(self):
        """Run required/type/custom checks; return True iff no errors."""
        # The KeyError class object is the "missing value" sentinel set by
        # schemas (dict.get(key, KeyError)). Identity comparison is used:
        # `==` could be spoofed by a value with a pathological __eq__.
        if self.value is KeyError:
            self.add_error(
                "'{}' field is a required field".format(self.name)
            )
        else:
            if isinstance(self.value, Optional):
                # Explicitly optional: skip all further checks.
                pass
            elif not isinstance(self.value, self.type):
                self.add_error(
                    "Incorrect Type for '{}' field".format(self.name)
                )
            else:
                # Type is fine; run the field-specific validation hook.
                self.validation()

        if self.__errors:
            return False
        return True

    def get_errors(self):
        """Return the list of accumulated error messages."""
        return self.__errors

    def add_error(self, error):
        """Record one validation error message."""
        self.__errors.append(error)
|
||||
|
||||
|
||||
class VmUUIDField(Field):
    """Required string field holding a VM UUID that must exist in etcd."""

    def __init__(self, data):
        # KeyError is the "missing value" sentinel understood by Field.is_valid().
        self.uuid = data.get("uuid", KeyError)

        super().__init__("uuid", str, self.uuid)

        # Rebind the per-instance validation hook so the type check is
        # followed by an existence check against etcd.
        self.validation = self.vm_uuid_validation

    def vm_uuid_validation(self):
        # Look the VM up under the configured vm_prefix; a missing entry
        # means the caller referenced a non-existent VM.
        r = shared.etcd_client.get(
            os.path.join(shared.settings["etcd"]["vm_prefix"], self.uuid)
        )
        if not r:
            self.add_error("VM with uuid {} does not exists".format(self.uuid))
|
|
@ -0,0 +1,19 @@
|
|||
import json
|
||||
import os
|
||||
|
||||
from uuid import uuid4
|
||||
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
# Seed etcd with a default, public, ceph-backed image store ("images" pool).
# NOTE(review): each run writes a NEW entry under a fresh uuid4 — running
# this script twice creates two stores; there is no idempotence check.
data = {
    'is_public': True,
    'type': 'ceph',
    'name': 'images',
    'description': 'first ever public image-store',
    'attributes': {'list': [], 'key': [], 'pool': 'images'},
}

shared.etcd_client.put(
    os.path.join(shared.settings['etcd']['image_store_prefix'], uuid4().hex),
    json.dumps(data),
)
|
|
@ -0,0 +1,148 @@
|
|||
import binascii
|
||||
import ipaddress
|
||||
import random
|
||||
import logging
|
||||
import requests
|
||||
|
||||
from pyotp import TOTP
|
||||
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def check_otp(name, realm, token):
    """Verify a user's (name, realm, token) triple with the OTP controller.

    Returns the controller's HTTP status code (200 means verified), or 400
    if our own auth seed cannot be turned into a TOTP token.
    """
    try:
        data = {
            "auth_name": shared.settings["otp"]["auth_name"],
            # TOTP(...).now() raises binascii.Error on a malformed base32 seed.
            "auth_token": TOTP(shared.settings["otp"]["auth_seed"]).now(),
            "auth_realm": shared.settings["otp"]["auth_realm"],
            "name": name,
            "realm": realm,
            "token": token,
        }
    except binascii.Error as err:
        logger.error(
            "Cannot compute OTP for seed: {}".format(
                shared.settings["otp"]["auth_seed"]
            )
        )
        return 400

    response = requests.post(
        shared.settings["otp"]["verification_controller_url"], json=data
    )
    return response.status_code
|
||||
|
||||
|
||||
def resolve_vm_name(name, owner):
    """Return UUID of Virtual Machine of name == name and owner == owner

    Input: name of vm, owner of vm.
    Output: uuid of vm if found otherwise None
    """
    # Scan the VM pool for the first entry matching both owner and name.
    matched = None
    for vm in shared.vm_pool.vms:
        if vm.value["owner"] == owner and vm.value["name"] == name:
            matched = vm
            break

    if matched:
        # The UUID is the last component of the etcd key.
        return matched.key.split("/")[-1]

    return None
|
||||
|
||||
|
||||
def resolve_image_name(name, etcd_client):
    """Return the uuid of the image named ``{store_name}:{image_name}``.

    Raises:
        ValueError: if *name* is not in the ``{store_name}:{image_name}`` format.
        KeyError: if no image with that name exists in that store.
    """

    seperator = ":"

    # Ensure, user/program passed valid name that is of type string
    try:
        store_name_and_image_name = name.split(seperator)

        """
        Examples, where it would work and where it would raise exception
        "images:alpine" --> ["images", "alpine"]

        "images" --> ["images"] it would raise Exception as not enough values to unpack

        "images:alpine:meow" --> ["images", "alpine", "meow"] it would raise Exception
        as too many values to unpack
        """
        store_name, image_name = store_name_and_image_name
    except Exception:
        raise ValueError(
            "Image name not in correct format i.e {store_name}:{image_name}"
        )

    images = etcd_client.get_prefix(
        shared.settings["etcd"]["image_prefix"], value_in_json=True
    )

    # Try to find image with name == image_name and store_name == store_name
    try:
        image = next(
            filter(
                lambda im: im.value["name"] == image_name
                and im.value["store_name"] == store_name,
                images,
            )
        )
    except StopIteration:
        raise KeyError("No image with name {} found.".format(name))
    else:
        # uuid is the last component of the image's etcd key.
        image_uuid = image.key.split("/")[-1]

    return image_uuid
|
||||
|
||||
|
||||
def random_bytes(num=6):
    """Return *num* random byte values (ints in [0, 255])."""
    return [random.randrange(256) for _ in range(num)]


def generate_mac(uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"):
    """Generate a random MAC address string.

    Args:
        uaa: if True, mark as universally administered (bit 1 of first
            byte cleared); otherwise locally administered (bit 1 set).
        multicast: if True set the multicast bit (bit 0), else unicast.
        oui: optional fixed organisation prefix, either a list of ints or
            a separator-joined string. NOTE: string chunks are parsed as
            base-10 ints, so hex chunks like "aa" are not accepted.
        separator: string placed between bytes in the output.
        byte_fmt: printf-style format applied to each byte.

    Returns:
        The MAC as a separator-joined, byte_fmt-formatted string.

    When *oui* is given it replaces the leading bytes verbatim and the
    uaa/multicast flags are NOT applied (the OUI already fixes those bits).
    """
    mac = random_bytes()
    if oui:
        # isinstance() instead of type(...) == str: also accepts str subclasses.
        if isinstance(oui, str):
            oui = [int(chunk) for chunk in oui.split(separator)]
        mac = oui + random_bytes(num=6 - len(oui))
    else:
        if multicast:
            mac[0] |= 1  # set bit 0
        else:
            mac[0] &= ~1  # clear bit 0
        if uaa:
            mac[0] &= ~(1 << 1)  # clear bit 1
        else:
            mac[0] |= 1 << 1  # set bit 1
    return separator.join(byte_fmt % b for b in mac)
|
||||
|
||||
|
||||
def mac2ipv6(mac, prefix):
    """Derive the EUI-64-based IPv6 address of *mac* inside *prefix*.

    Only colon-separated MACs are accepted. The standard SLAAC transform
    is applied: insert ff:fe in the middle and flip the universal/local bit.
    """
    octets = mac.split(":")

    # EUI-64 expansion: ff:fe goes between the OUI and the NIC part,
    # and bit 1 of the first octet is inverted.
    octets[3:3] = ["ff", "fe"]
    octets[0] = "%x" % (int(octets[0], 16) ^ 2)

    # Assemble the interface identifier as four 16-bit groups, preceded by
    # four zero groups so it parses as a bare IPv6 host part.
    groups = ["0"] * 4
    groups.extend(hi + lo for hi, lo in zip(octets[::2], octets[1::2]))

    host_part = ipaddress.IPv6Address(":".join(groups))
    network = ipaddress.IPv6Address(prefix)
    return str(network + int(host_part))
|
||||
|
|
@ -0,0 +1,600 @@
|
|||
import json
|
||||
import pynetbox
|
||||
import logging
|
||||
import argparse
|
||||
|
||||
from uuid import uuid4
|
||||
from os.path import join as join_path
|
||||
|
||||
from flask import Flask, request
|
||||
from flask_restful import Resource, Api
|
||||
from werkzeug.exceptions import HTTPException
|
||||
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
from uncloud.common import counters
|
||||
from uncloud.common.vm import VMStatus
|
||||
from uncloud.common.request import RequestEntry, RequestType
|
||||
from uncloud.api import schemas
|
||||
from uncloud.api.helper import generate_mac, mac2ipv6
|
||||
from uncloud import UncloudException
|
||||
|
||||
logger = logging.getLogger(__name__)

app = Flask(__name__)
api = Api(app)
# Drop Flask's default handlers so logging is governed solely by the
# root configuration set up at the CLI entry point.
app.logger.handlers.clear()

# Sub-command parser mounted by scripts/uncloud; only --port is API-specific.
arg_parser = argparse.ArgumentParser('api', add_help=False)
arg_parser.add_argument('--port', '-p')
|
||||
|
||||
|
||||
@app.errorhandler(Exception)
def handle_exception(e):
    """Log every unhandled exception; re-emit HTTP errors unchanged and
    mask anything else as a generic 500 so internals never leak to clients."""
    app.logger.error(e)
    # pass through HTTP errors
    if isinstance(e, HTTPException):
        return e

    # now you're handling non-HTTP exceptions only
    return {'message': 'Server Error'}, 500
|
||||
|
||||
|
||||
class CreateVM(Resource):
    """API Request to Handle Creation of VM

    POST /vm/create: validates the request, writes the VM entry to etcd
    and queues a ScheduleVM request for the scheduler.
    """

    @staticmethod
    def post():
        data = request.json
        validator = schemas.CreateVMSchema(data)
        if validator.is_valid():
            vm_uuid = uuid4().hex
            vm_key = join_path(shared.settings['etcd']['vm_prefix'], vm_uuid)
            specs = {
                'cpu': validator.specs['cpu'],
                'ram': validator.specs['ram'],
                'os-ssd': validator.specs['os-ssd'],
                'hdd': validator.specs['hdd'],
            }
            # One MAC address and one etcd-allocated tap interface id per
            # requested network attachment.
            macs = [generate_mac() for _ in range(len(data['network']))]
            tap_ids = [
                counters.increment_etcd_counter(
                    shared.etcd_client, shared.settings['etcd']['tap_counter']
                )
                for _ in range(len(data['network']))
            ]
            vm_entry = {
                'name': data['vm_name'],
                'owner': data['name'],
                'owner_realm': data['realm'],
                'specs': specs,
                'hostname': '',  # filled in once the scheduler places the VM
                'status': VMStatus.stopped,
                'image_uuid': validator.image_uuid,
                'log': [],
                'vnc_socket': '',
                # (network_name, mac, tap_id) triples.
                'network': list(zip(data['network'], macs, tap_ids)),
                'metadata': {'ssh-keys': []},
                'in_migration': False,
            }
            shared.etcd_client.put(vm_key, vm_entry, value_in_json=True)

            # Create ScheduleVM Request
            r = RequestEntry.from_scratch(
                type=RequestType.ScheduleVM,
                uuid=vm_uuid,
                request_prefix=shared.settings['etcd']['request_prefix'],
            )
            shared.request_pool.put(r)

            return {'message': 'VM Creation Queued'}, 200
        return validator.get_errors(), 400
|
||||
|
||||
|
||||
class VmStatus(Resource):
    """POST /vm/status: return a VM's etcd entry augmented with the IPv6
    addresses derived from its MACs.

    NOTE(review): distinct from uncloud.common.vm.VMStatus (the status
    enum imported above) despite the nearly identical name.
    """

    @staticmethod
    def post():
        data = request.json
        validator = schemas.VMStatusSchema(data)
        if validator.is_valid():
            vm = shared.vm_pool.get(
                join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
            )
            vm_value = vm.value.copy()
            vm_value['ip'] = []
            # Each entry is a (network_name, mac, tap_id) triple.
            for network_mac_and_tap in vm.network:
                network_name, mac, tap = network_mac_and_tap
                network = shared.etcd_client.get(
                    join_path(
                        shared.settings['etcd']['network_prefix'],
                        data['name'],
                        network_name,
                    ),
                    value_in_json=True,
                )
                # Reduce the network's IPv6 prefix to its '<prefix>::' form.
                ipv6_addr = (
                    network.value.get('ipv6').split('::')[0] + '::'
                )
                # SLAAC/EUI-64 address the VM will autoconfigure from its MAC.
                vm_value['ip'].append(mac2ipv6(mac, ipv6_addr))
            vm.value = vm_value
            return vm.value
        else:
            return validator.get_errors(), 400
|
||||
|
||||
|
||||
class CreateImage(Resource):
    """POST /image/create: promote an uploaded file to an image.

    Copies ownership/filename from the file's etcd entry and writes an
    image entry in status TO_BE_CREATED for the imagescanner to pick up.
    """

    @staticmethod
    def post():
        data = request.json
        validator = schemas.CreateImageSchema(data)
        if validator.is_valid():
            file_entry = shared.etcd_client.get(
                join_path(shared.settings['etcd']['file_prefix'], data['uuid'])
            )
            file_entry_value = json.loads(file_entry.value)

            image_entry_json = {
                'status': 'TO_BE_CREATED',
                'owner': file_entry_value['owner'],
                'filename': file_entry_value['filename'],
                'name': data['name'],
                'store_name': data['image_store'],
                'visibility': 'public',
            }
            # The image reuses the source file's uuid as its own key.
            shared.etcd_client.put(
                join_path(
                    shared.settings['etcd']['image_prefix'], data['uuid']
                ),
                json.dumps(image_entry_json),
            )

            return {'message': 'Image queued for creation.'}
        return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListPublicImages(Resource):
    """GET /image/list-public: list every image as 'store:name' plus status."""

    @staticmethod
    def get():
        images = shared.etcd_client.get_prefix(
            shared.settings['etcd']['image_prefix'], value_in_json=True
        )
        listing = [
            {
                'name': '{}:{}'.format(
                    image.value['store_name'], image.value['name']
                ),
                'status': image.value['status'],
            }
            for image in images
        ]
        return {'images': listing}, 200
|
||||
|
||||
|
||||
class VMAction(Resource):
    """POST /vm/action: start/stop/delete a VM.

    'start' is translated to a 'schedule' request. Deletion of a VM that
    was never scheduled (empty hostname) is handled inline here; every
    other action is queued as a request for the owning host.
    """

    @staticmethod
    def post():
        data = request.json
        validator = schemas.VmActionSchema(data)

        if validator.is_valid():
            vm_entry = shared.vm_pool.get(
                join_path(shared.settings['etcd']['vm_prefix'], data['uuid'])
            )
            action = data['action']

            # Starting really means asking the scheduler to place the VM.
            if action == 'start':
                action = 'schedule'

            # Unscheduled VM: no host owns it, so delete synchronously.
            if action == 'delete' and vm_entry.hostname == '':
                if shared.storage_handler.is_vm_image_exists(
                    vm_entry.uuid
                ):
                    r_status = shared.storage_handler.delete_vm_image(
                        vm_entry.uuid
                    )
                    # Only drop the etcd entry once the backing image is gone,
                    # otherwise the image would be orphaned.
                    if r_status:
                        shared.etcd_client.client.delete(vm_entry.key)
                        return {'message': 'VM successfully deleted'}
                    else:
                        logger.error(
                            'Some Error Occurred while deleting VM'
                        )
                        return {'message': 'VM deletion unsuccessfull'}
                else:
                    # No backing image at all: just drop the entry.
                    shared.etcd_client.client.delete(vm_entry.key)
                    return {'message': 'VM successfully deleted'}

            # Queue e.g. ScheduleVM / StopVM / DeleteVM for the host.
            r = RequestEntry.from_scratch(
                type='{}VM'.format(action.title()),
                uuid=data['uuid'],
                hostname=vm_entry.hostname,
                request_prefix=shared.settings['etcd']['request_prefix'],
            )
            shared.request_pool.put(r)
            return (
                {'message': 'VM {} Queued'.format(action.title())},
                200,
            )
        else:
            return validator.get_errors(), 400
|
||||
|
||||
|
||||
class VMMigration(Resource):
    """POST /vm/migrate: queue migration of a VM to a destination host."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.VmMigrationSchema(data)

        if validator.is_valid():
            vm = shared.vm_pool.get(data['uuid'])
            r = RequestEntry.from_scratch(
                type=RequestType.InitVMMigration,
                uuid=vm.uuid,
                # Destination is passed as the full etcd host key.
                hostname=join_path(
                    shared.settings['etcd']['host_prefix'],
                    validator.destination.value,
                ),
                request_prefix=shared.settings['etcd']['request_prefix'],
            )

            shared.request_pool.put(r)
            return (
                {'message': 'VM Migration Initialization Queued'},
                200,
            )
        else:
            return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListUserVM(Resource):
    """POST /user/vms: list the VMs owned by the authenticated user."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.OTPSchema(data)

        if not validator.is_valid():
            return validator.get_errors(), 400

        vms = shared.etcd_client.get_prefix(
            shared.settings['etcd']['vm_prefix'], value_in_json=True
        )
        return_vms = [
            {
                'name': vm.value['name'],
                'vm_uuid': vm.key.split('/')[-1],
                'specs': vm.value['specs'],
                'status': vm.value['status'],
                'hostname': vm.value['hostname'],
                'vnc_socket': vm.value.get('vnc_socket', None),
            }
            for vm in vms
            if vm.value['owner'] == data['name']
        ]
        if return_vms:
            return {'message': return_vms}, 200
        return {'message': 'No VM found'}, 404
|
||||
|
||||
|
||||
class ListUserFiles(Resource):
    """POST /user/files: list the authenticated user's uploaded files."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.OTPSchema(data)

        if not validator.is_valid():
            return validator.get_errors(), 400

        files = shared.etcd_client.get_prefix(
            shared.settings['etcd']['file_prefix'], value_in_json=True
        )
        return_files = []
        for entry in files:
            if entry.value['owner'] != data['name']:
                continue

            file_info = entry.value
            file_info['uuid'] = entry.key.split('/')[-1]

            # Strip internal attributes before handing the entry to the user.
            file_info.pop('sha512sum', None)
            file_info.pop('owner', None)

            return_files.append(file_info)
        return {'message': return_files}, 200
|
||||
|
||||
|
||||
class CreateHost(Resource):
    """POST /host/create: register a new compute host in etcd.

    Hosts start out DEAD until their first heartbeat marks them alive.
    """

    @staticmethod
    def post():
        data = request.json
        validator = schemas.CreateHostSchema(data)
        if validator.is_valid():
            host_key = join_path(
                shared.settings['etcd']['host_prefix'], uuid4().hex
            )
            host_entry = {
                'specs': data['specs'],
                'hostname': data['hostname'],
                'status': 'DEAD',
                'last_heartbeat': '',
            }
            shared.etcd_client.put(
                host_key, host_entry, value_in_json=True
            )

            return {'message': 'Host Created'}, 200

        return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListHost(Resource):
    """GET /host/list: map each host's etcd key to its status, specs and name."""

    @staticmethod
    def get():
        listing = {}
        for host in shared.host_pool.hosts:
            listing[host.key] = {
                'status': host.status,
                'specs': host.specs,
                'hostname': host.hostname,
            }
        return listing, 200
|
||||
|
||||
|
||||
class GetSSHKeys(Resource):
    """POST /user/get-ssh: fetch one named SSH key, or all keys of a user."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.GetSSHSchema(data)
        if validator.is_valid():
            if not validator.key_name.value:
                # No key name given: return every key of the user.

                # {user_prefix}/{realm}/{name}/key/
                etcd_key = join_path(
                    shared.settings['etcd']['user_prefix'],
                    data['realm'],
                    data['name'],
                    'key',
                )
                etcd_entry = shared.etcd_client.get_prefix(
                    etcd_key, value_in_json=True
                )

                # Map key name (last path component) -> key content.
                keys = {
                    key.key.split('/')[-1]: key.value
                    for key in etcd_entry
                }
                return {'keys': keys}
            else:

                # {user_prefix}/{realm}/{name}/key/{key_name}
                etcd_key = join_path(
                    shared.settings['etcd']['user_prefix'],
                    data['realm'],
                    data['name'],
                    'key',
                    data['key_name'],
                )
                etcd_entry = shared.etcd_client.get(
                    etcd_key, value_in_json=True
                )

                if etcd_entry:
                    return {
                        'keys': {
                            etcd_entry.key.split('/')[
                                -1
                            ]: etcd_entry.value
                        }
                    }
                else:
                    # Unknown key name: empty result, not an error.
                    return {'keys': {}}
        else:
            return validator.get_errors(), 400
|
||||
|
||||
|
||||
class AddSSHKey(Resource):
    """POST /user/add-ssh: store a named SSH key; refuses to overwrite."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.AddSSHSchema(data)
        if validator.is_valid():

            # {user_prefix}/{realm}/{name}/key/{key_name}
            etcd_key = join_path(
                shared.settings['etcd']['user_prefix'],
                data['realm'],
                data['name'],
                'key',
                data['key_name'],
            )
            etcd_entry = shared.etcd_client.get(
                etcd_key, value_in_json=True
            )
            if etcd_entry:
                # Existing name: report rather than silently replace.
                return {
                    'message': 'Key with name "{}" already exists'.format(
                        data['key_name']
                    )
                }
            else:
                # Key Not Found. It implies user' haven't added any key yet.
                shared.etcd_client.put(
                    etcd_key, data['key'], value_in_json=True
                )
                return {'message': 'Key added successfully'}
        else:
            return validator.get_errors(), 400
|
||||
|
||||
|
||||
class RemoveSSHKey(Resource):
    """POST /user/remove-ssh: delete a named SSH key if it exists."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.RemoveSSHSchema(data)
        if validator.is_valid():

            # {user_prefix}/{realm}/{name}/key/{key_name}
            etcd_key = join_path(
                shared.settings['etcd']['user_prefix'],
                data['realm'],
                data['name'],
                'key',
                data['key_name'],
            )
            # Check existence first so a helpful message can be returned.
            etcd_entry = shared.etcd_client.get(
                etcd_key, value_in_json=True
            )
            if etcd_entry:
                shared.etcd_client.client.delete(etcd_key)
                return {'message': 'Key successfully removed.'}
            else:
                return {
                    'message': 'No Key with name "{}" Exists at all.'.format(
                        data['key_name']
                    )
                }
        else:
            return validator.get_errors(), 400
|
||||
|
||||
|
||||
class CreateNetwork(Resource):
    """POST /network/create: create a per-user network entry.

    Allocates a VXLAN id from the etcd counter; for real users an IPv6
    prefix is carved out of the configured pool via netbox, otherwise the
    fixed internal prefix fd00::/64 is used.
    """

    @staticmethod
    def post():
        data = request.json
        validator = schemas.CreateNetwork(data)

        if validator.is_valid():

            network_entry = {
                # Monotonic VXLAN id shared across all networks.
                'id': counters.increment_etcd_counter(
                    shared.etcd_client, shared.settings['etcd']['vxlan_counter']
                ),
                'type': data['type'],
            }
            if validator.user.value:
                try:
                    nb = pynetbox.api(
                        url=shared.settings['netbox']['url'],
                        token=shared.settings['netbox']['token'],
                    )
                    nb_prefix = nb.ipam.prefixes.get(
                        prefix=shared.settings['network']['prefix']
                    )
                    # Ask netbox for the next free sub-prefix of the
                    # configured length inside the pool.
                    prefix = nb_prefix.available_prefixes.create(
                        data={
                            'prefix_length': int(
                                shared.settings['network']['prefix_length']
                            ),
                            'description': '{}\'s network "{}"'.format(
                                data['name'], data['network_name']
                            ),
                            'is_pool': True,
                        }
                    )
                except Exception as err:
                    app.logger.error(err)
                    return {
                        'message': 'Error occured while creating network.'
                    }
                else:
                    network_entry['ipv6'] = prefix['prefix']
            else:
                # No netbox-backed user: fall back to the fixed ULA prefix.
                network_entry['ipv6'] = 'fd00::/64'

            network_key = join_path(
                shared.settings['etcd']['network_prefix'],
                data['name'],
                data['network_name'],
            )
            shared.etcd_client.put(
                network_key, network_entry, value_in_json=True
            )
            return {'message': 'Network successfully added.'}
        else:
            return validator.get_errors(), 400
|
||||
|
||||
|
||||
class ListUserNetwork(Resource):
    """POST /user/networks: list the authenticated user's networks."""

    @staticmethod
    def post():
        data = request.json
        validator = schemas.OTPSchema(data)

        if not validator.is_valid():
            return validator.get_errors(), 400

        prefix = join_path(
            shared.settings['etcd']['network_prefix'], data['name']
        )
        user_networks = []
        for net in shared.etcd_client.get_prefix(prefix, value_in_json=True):
            # Expose the network name (last key component) inside the value.
            net.value['name'] = net.key.split('/')[-1]
            user_networks.append(net.value)
        return {'networks': user_networks}, 200
|
||||
|
||||
|
||||
# Route table: VM lifecycle, images, per-user listings, SSH key
# management, host management and networks.
api.add_resource(CreateVM, '/vm/create')
api.add_resource(VmStatus, '/vm/status')

api.add_resource(VMAction, '/vm/action')
api.add_resource(VMMigration, '/vm/migrate')

api.add_resource(CreateImage, '/image/create')
api.add_resource(ListPublicImages, '/image/list-public')

api.add_resource(ListUserVM, '/user/vms')
api.add_resource(ListUserFiles, '/user/files')
api.add_resource(ListUserNetwork, '/user/networks')

api.add_resource(AddSSHKey, '/user/add-ssh')
api.add_resource(RemoveSSHKey, '/user/remove-ssh')
api.add_resource(GetSSHKeys, '/user/get-ssh')

api.add_resource(CreateHost, '/host/create')
api.add_resource(ListHost, '/host/list')

api.add_resource(CreateNetwork, '/network/create')
|
||||
|
||||
|
||||
def main(arguments):
    """Start the uncloud API server.

    *arguments* must carry 'debug' and 'port'.  Raises UncloudException
    when Flask cannot bind to the port.
    """
    debug = arguments['debug']
    port = arguments['port']

    # Probe the image-store prefix early; a missing settings key simply
    # means no store is configured yet.
    # NOTE(review): image_stores is currently unused afterwards — kept
    # for the early etcd/settings connectivity check.
    try:
        store_prefix = shared.settings['etcd']['image_store_prefix']
        image_stores = list(
            shared.etcd_client.get_prefix(store_prefix, value_in_json=True)
        )
    except KeyError:
        image_stores = False

    # Do not inject default values that might be very wrong —
    # fail when required, not before.

    try:
        app.run(host='::', port=port, debug=debug)
    except OSError as e:
        raise UncloudException('Failed to start Flask: {}'.format(e))
|
|
@ -0,0 +1,557 @@
|
|||
"""
|
||||
This module contain classes thats validates and intercept/modify
|
||||
data coming from uncloud-cli (user)
|
||||
|
||||
It was primarily developed as an alternative to argument parser
|
||||
of Flask_Restful which is going to be deprecated. I also tried
|
||||
marshmallow for that purpose but it was an overkill (because it
|
||||
do validation + serialization + deserialization) and little
|
||||
inflexible for our purpose.
|
||||
"""
|
||||
|
||||
# TODO: Fix the error message shown when the VM a user refers to
# (by name) does not exist.
#
# Currently, it says uuid is a required field.
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
import bitmath
|
||||
|
||||
from uncloud.common.host import HostStatus
|
||||
from uncloud.common.vm import VMStatus
|
||||
from uncloud.common.shared import shared
|
||||
from . import helper, logger
|
||||
from .common_fields import Field, VmUUIDField
|
||||
from .helper import check_otp, resolve_vm_name
|
||||
|
||||
|
||||
class BaseSchema:
    """Base class for request validators.

    Subclasses register Field instances and may override ``validation``
    to run cross-field checks once all fields are individually valid.
    """

    def __init__(self, data, fields=None):
        _ = data  # suppress linter warning
        self.__errors = []
        self.fields = [] if fields is None else fields

    def validation(self):
        # Custom cross-field validation is optional; subclasses override.
        return True

    def is_valid(self):
        # First pass: validate every registered field on its own.
        for field in self.fields:
            field.is_valid()
            self.add_field_errors(field)

        # Second pass: run each direct base class's cross-field check
        # (e.g. OTPSchema's credential verification), if it has one.
        for parent in self.__class__.__bases__:
            try:
                parent.validation(self)
            except AttributeError:
                pass

        # Finally run this schema's own check, but only on clean input.
        if not self.__errors:
            self.validation()

        return not self.__errors

    def get_errors(self):
        """Return the accumulated errors wrapped for the API response."""
        return {"message": self.__errors}

    def add_field_errors(self, field: Field):
        self.__errors += field.get_errors()

    def add_error(self, error):
        self.__errors.append(error)
|
||||
|
||||
|
||||
class OTPSchema(BaseSchema):
    """Schema carrying the OTP credential triple (name, realm, token)."""

    def __init__(self, data: dict, fields=None):
        self.name = Field("name", str, data.get("name", KeyError))
        self.realm = Field("realm", str, data.get("realm", KeyError))
        self.token = Field("token", str, data.get("token", KeyError))

        otp_fields = [self.name, self.realm, self.token]
        if fields:
            otp_fields += fields
        super().__init__(data=data, fields=otp_fields)

    def validation(self):
        # Reject the request unless the OTP endpoint accepts the triple.
        status = check_otp(
            self.name.value, self.realm.value, self.token.value
        )
        if status != 200:
            self.add_error("Wrong Credentials")
|
||||
|
||||
|
||||
########################## Image Operations ###############################################
|
||||
|
||||
|
||||
class CreateImageSchema(BaseSchema):
    """Validates requests that turn an uploaded file into an image.

    Checks that the referenced file entry exists in etcd and that the
    target image store is known.
    """

    def __init__(self, data):
        # Fields
        self.uuid = Field("uuid", str, data.get("uuid", KeyError))
        self.name = Field("name", str, data.get("name", KeyError))
        self.image_store = Field(
            "image_store", str, data.get("image_store", KeyError)
        )

        # Validations
        self.uuid.validation = self.file_uuid_validation
        self.image_store.validation = self.image_store_name_validation

        # All Fields
        fields = [self.uuid, self.name, self.image_store]
        super().__init__(data, fields)

    def file_uuid_validation(self):
        """Ensure the referenced file entry exists in etcd."""
        # BUG FIX: the settings lookup was a garbled
        # 'shared.shared....settings' attribute chain.
        file_entry = shared.etcd_client.get(
            os.path.join(
                shared.settings["etcd"]["file_prefix"], self.uuid.value
            )
        )
        if file_entry is None:
            self.add_error(
                "Image File with uuid '{}' Not Found".format(
                    self.uuid.value
                )
            )

    def image_store_name_validation(self):
        """Ensure an image store with the requested name exists."""
        image_stores = list(
            shared.etcd_client.get_prefix(
                shared.settings["etcd"]["image_store_prefix"]
            )
        )

        image_store = next(
            (
                store
                for store in image_stores
                if json.loads(store.value)["name"] == self.image_store.value
            ),
            None,
        )
        if not image_store:
            self.add_error(
                "Store '{}' does not exists".format(
                    self.image_store.value
                )
            )
|
||||
|
||||
|
||||
# Host Operations
|
||||
|
||||
|
||||
class CreateHostSchema(OTPSchema):
    """Validates host-creation requests (admin realm only)."""

    def __init__(self, data):
        # Fields
        self.specs = Field("specs", dict, data.get("specs", KeyError))
        self.hostname = Field(
            "hostname", str, data.get("hostname", KeyError)
        )

        # Validation
        self.specs.validation = self.specs_validation

        fields = [self.hostname, self.specs]

        super().__init__(data=data, fields=fields)

    def specs_validation(self):
        """Check cpu/ram/os-ssd/hdd specs and normalize them in place."""
        ALLOWED_BASE = 10  # sizes must be given in SI (base-10) units

        _cpu = self.specs.value.get("cpu", KeyError)
        _ram = self.specs.value.get("ram", KeyError)
        _os_ssd = self.specs.value.get("os-ssd", KeyError)
        _hdd = self.specs.value.get("hdd", KeyError)

        if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
            self.add_error(
                "You must specify CPU, RAM and OS-SSD in your specs"
            )
            return None
        try:
            parsed_ram = bitmath.parse_string_unsafe(_ram)
            parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)

            if parsed_ram.base != ALLOWED_BASE:
                self.add_error(
                    "Your specified RAM is not in correct units"
                )
            if parsed_os_ssd.base != ALLOWED_BASE:
                self.add_error(
                    "Your specified OS-SSD is not in correct units"
                )

            # BUG FIX: cpu may arrive as a string; coerce before the
            # comparison (matches CreateVMSchema).  A non-numeric value
            # raises ValueError, handled below.
            if int(_cpu) < 1:
                self.add_error("CPU must be atleast 1")

            if parsed_ram < bitmath.GB(1):
                self.add_error("RAM must be atleast 1 GB")

            if parsed_os_ssd < bitmath.GB(10):
                self.add_error("OS-SSD must be atleast 10 GB")

            parsed_hdd = []
            for hdd in _hdd:
                _parsed_hdd = bitmath.parse_string_unsafe(hdd)
                if _parsed_hdd.base != ALLOWED_BASE:
                    self.add_error(
                        "Your specified HDD is not in correct units"
                    )
                    break
                else:
                    parsed_hdd.append(str(_parsed_hdd))

        except ValueError:
            # TODO: Find some good error message
            self.add_error("Specs are not correct.")
        else:
            # BUG FIX: get_errors() returns a dict that is always truthy;
            # only replace the specs field with the normalized dict when
            # no validation error was actually recorded.
            if not self.get_errors()["message"]:
                self.specs = {
                    "cpu": _cpu,
                    "ram": str(parsed_ram),
                    "os-ssd": str(parsed_os_ssd),
                    "hdd": parsed_hdd,
                }

    def validation(self):
        """Host creation is restricted to the admin realm."""
        if self.realm.value != "ungleich-admin":
            self.add_error(
                "Invalid Credentials/Insufficient Permission"
            )
|
||||
|
||||
|
||||
# VM Operations
|
||||
|
||||
|
||||
class CreateVMSchema(OTPSchema):
    """Validates VM-creation requests (name, image, specs, networks)."""

    def __init__(self, data):
        # Fields
        self.specs = Field("specs", dict, data.get("specs", KeyError))
        self.vm_name = Field(
            "vm_name", str, data.get("vm_name", KeyError)
        )
        self.image = Field("image", str, data.get("image", KeyError))
        self.network = Field(
            "network", list, data.get("network", KeyError)
        )

        # Validation
        self.image.validation = self.image_validation
        self.vm_name.validation = self.vm_name_validation
        self.specs.validation = self.specs_validation
        self.network.validation = self.network_validation

        fields = [self.vm_name, self.image, self.specs, self.network]

        super().__init__(data=data, fields=fields)

    def image_validation(self):
        """Resolve the image name to a UUID, recording failures as errors."""
        try:
            image_uuid = helper.resolve_image_name(
                self.image.value, shared.etcd_client
            )
        except Exception as e:
            logger.exception(
                "Cannot resolve image name = %s", self.image.value
            )
            self.add_error(str(e))
        else:
            self.image_uuid = image_uuid

    def vm_name_validation(self):
        """Reject a VM name the owner already uses."""
        if resolve_vm_name(
            name=self.vm_name.value, owner=self.name.value
        ):
            self.add_error(
                'VM with same name "{}" already exists'.format(
                    self.vm_name.value
                )
            )

    def network_validation(self):
        """Every requested network must already exist for this user."""
        _network = self.network.value

        if _network:
            for net in _network:
                # BUG FIX: the settings lookup was a garbled
                # 'shared.shared....settings' attribute chain.
                network = shared.etcd_client.get(
                    os.path.join(
                        shared.settings["etcd"]["network_prefix"],
                        self.name.value,
                        net,
                    ),
                    value_in_json=True,
                )
                if not network:
                    self.add_error(
                        "Network with name {} does not exists".format(
                            net
                        )
                    )

    def specs_validation(self):
        """Check cpu/ram/os-ssd/hdd specs and normalize them in place."""
        ALLOWED_BASE = 10  # sizes must be given in SI (base-10) units

        _cpu = self.specs.value.get("cpu", KeyError)
        _ram = self.specs.value.get("ram", KeyError)
        _os_ssd = self.specs.value.get("os-ssd", KeyError)
        _hdd = self.specs.value.get("hdd", KeyError)

        if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
            self.add_error(
                "You must specify CPU, RAM and OS-SSD in your specs"
            )
            return None
        try:
            parsed_ram = bitmath.parse_string_unsafe(_ram)
            parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)

            if parsed_ram.base != ALLOWED_BASE:
                self.add_error(
                    "Your specified RAM is not in correct units"
                )
            if parsed_os_ssd.base != ALLOWED_BASE:
                self.add_error(
                    "Your specified OS-SSD is not in correct units"
                )

            if int(_cpu) < 1:
                self.add_error("CPU must be atleast 1")

            if parsed_ram < bitmath.GB(1):
                self.add_error("RAM must be atleast 1 GB")

            if parsed_os_ssd < bitmath.GB(1):
                self.add_error("OS-SSD must be atleast 1 GB")

            parsed_hdd = []
            for hdd in _hdd:
                _parsed_hdd = bitmath.parse_string_unsafe(hdd)
                if _parsed_hdd.base != ALLOWED_BASE:
                    self.add_error(
                        "Your specified HDD is not in correct units"
                    )
                    break
                else:
                    parsed_hdd.append(str(_parsed_hdd))

        except ValueError:
            # TODO: Find some good error message
            self.add_error("Specs are not correct.")
        else:
            # BUG FIX: get_errors() returns a dict that is always truthy;
            # normalize specs only when no error was recorded.
            if not self.get_errors()["message"]:
                self.specs = {
                    "cpu": _cpu,
                    "ram": str(parsed_ram),
                    "os-ssd": str(parsed_os_ssd),
                    "hdd": parsed_hdd,
                }
|
||||
|
||||
|
||||
class VMStatusSchema(OTPSchema):
    """Validates VM-status queries and resolves the VM name to a UUID."""

    def __init__(self, data):
        # Resolve the human-friendly VM name to its UUID; fall back to
        # the KeyError sentinel so VmUUIDField reports a missing VM.
        data["uuid"] = (
            resolve_vm_name(
                name=data.get("vm_name", None),
                owner=(
                    data.get("in_support_of", None)
                    or data.get("name", None)
                ),
            )
            or KeyError
        )
        self.uuid = VmUUIDField(data)

        fields = [self.uuid]

        super().__init__(data, fields)

    def validation(self):
        """Only the owner or an admin may query a VM's status."""
        vm = shared.vm_pool.get(self.uuid.value)
        # BUG FIX: guard against a VM that vanished between name
        # resolution and this lookup (previously an AttributeError/500).
        if vm is None:
            self.add_error("VM does not exist")
            return
        if not (
            vm.value["owner"] == self.name.value
            or self.realm.value == "ungleich-admin"
        ):
            self.add_error("Invalid User")
|
||||
|
||||
|
||||
class VmActionSchema(OTPSchema):
    """Validates start/stop/delete requests against a VM."""

    def __init__(self, data):
        # Resolve the human-friendly VM name to its UUID; fall back to
        # the KeyError sentinel so VmUUIDField reports a missing VM.
        data["uuid"] = (
            resolve_vm_name(
                name=data.get("vm_name", None),
                owner=(
                    data.get("in_support_of", None)
                    or data.get("name", None)
                ),
            )
            or KeyError
        )
        self.uuid = VmUUIDField(data)
        self.action = Field("action", str, data.get("action", KeyError))

        self.action.validation = self.action_validation

        _fields = [self.uuid, self.action]

        super().__init__(data=data, fields=_fields)

    def action_validation(self):
        """The action must be one of start/stop/delete."""
        allowed_actions = ["start", "stop", "delete"]
        if self.action.value not in allowed_actions:
            self.add_error(
                "Invalid Action. Allowed Actions are {}".format(
                    allowed_actions
                )
            )

    def validation(self):
        """Check ownership and that the action fits the VM's state."""
        vm = shared.vm_pool.get(self.uuid.value)
        # BUG FIX: guard against a VM that vanished between name
        # resolution and this lookup (previously an AttributeError/500).
        if vm is None:
            self.add_error("VM does not exist")
            return
        if not (
            vm.value["owner"] == self.name.value
            or self.realm.value == "ungleich-admin"
        ):
            self.add_error("Invalid User")

        if (
            self.action.value == "start"
            and vm.status == VMStatus.running
            and vm.hostname != ""
        ):
            self.add_error("VM Already Running")

        if self.action.value == "stop":
            if vm.status == VMStatus.stopped:
                self.add_error("VM Already Stopped")
            elif vm.status != VMStatus.running:
                self.add_error("Cannot stop non-running VM")
|
||||
|
||||
|
||||
class VmMigrationSchema(OTPSchema):
    """Validates requests to migrate a running VM to another host."""

    def __init__(self, data):
        # Resolve the human-friendly VM name to its UUID; fall back to
        # the KeyError sentinel so VmUUIDField reports a missing VM.
        data["uuid"] = (
            resolve_vm_name(
                name=data.get("vm_name", None),
                owner=(
                    data.get("in_support_of", None)
                    or data.get("name", None)
                ),
            )
            or KeyError
        )

        self.uuid = VmUUIDField(data)
        self.destination = Field(
            "destination", str, data.get("destination", KeyError)
        )

        self.destination.validation = self.destination_validation

        # BUG FIX: the uuid field was missing from the validated fields,
        # so an unresolvable VM name was never reported here (sibling
        # schemas such as VmActionSchema do validate it).
        fields = [self.uuid, self.destination]
        super().__init__(data=data, fields=fields)

    def destination_validation(self):
        """Resolve the destination hostname to a live host's etcd key."""
        hostname = self.destination.value
        host = next(
            filter(
                lambda h: h.hostname == hostname, shared.host_pool.hosts
            ),
            None,
        )
        if not host:
            self.add_error(
                "No Such Host ({}) exists".format(
                    self.destination.value
                )
            )
        elif host.status != HostStatus.alive:
            self.add_error("Destination Host is dead")
        else:
            self.destination.value = host.key

    def validation(self):
        """Check ownership and that the VM can actually be migrated."""
        vm = shared.vm_pool.get(self.uuid.value)
        # BUG FIX: guard against a missing VM (previously AttributeError/500).
        if vm is None:
            self.add_error("VM does not exist")
            return
        if not (
            vm.value["owner"] == self.name.value
            or self.realm.value == "ungleich-admin"
        ):
            self.add_error("Invalid User")

        if vm.status != VMStatus.running:
            self.add_error("Can't migrate non-running VM")

        # BUG FIX: the settings lookup was a garbled
        # 'shared.shared....settings' attribute chain.
        if vm.hostname == os.path.join(
            shared.settings["etcd"]["host_prefix"], self.destination.value
        ):
            self.add_error(
                "Destination host couldn't be same as Source Host"
            )
|
||||
|
||||
|
||||
class AddSSHSchema(OTPSchema):
    """Validates requests that attach a named SSH public key to a user."""

    def __init__(self, data):
        self.key_name = Field(
            "key_name", str, data.get("key_name", KeyError)
        )
        # BUG FIX: the key content must come from data["key"]; it was
        # mistakenly read from data["key_name"].
        self.key = Field("key", str, data.get("key", KeyError))

        fields = [self.key_name, self.key]
        super().__init__(data=data, fields=fields)
|
||||
|
||||
|
||||
class RemoveSSHSchema(OTPSchema):
    """Validates requests that delete a named SSH public key."""

    def __init__(self, data):
        self.key_name = Field(
            "key_name", str, data.get("key_name", KeyError)
        )
        super().__init__(data=data, fields=[self.key_name])
|
||||
|
||||
|
||||
class GetSSHSchema(OTPSchema):
    """Validates SSH key lookups; key_name is optional (None = all keys)."""

    def __init__(self, data):
        self.key_name = Field(
            "key_name", str, data.get("key_name", None)
        )
        super().__init__(data=data, fields=[self.key_name])
|
||||
|
||||
|
||||
class CreateNetwork(OTPSchema):
    """Validates network-creation requests (name, type, user flag)."""

    def __init__(self, data):
        self.network_name = Field("network_name", str, data.get("network_name", KeyError))
        self.type = Field("type", str, data.get("type", KeyError))
        self.user = Field("user", bool, bool(data.get("user", False)))

        self.network_name.validation = self.network_name_validation
        self.type.validation = self.network_type_validation

        fields = [self.network_name, self.type, self.user]
        super().__init__(data, fields=fields)

    def network_name_validation(self):
        """Reject duplicate network names within the user's namespace."""
        # BUG FIX: the settings lookup was a garbled
        # 'shared.shared....settings' attribute chain.
        key = os.path.join(
            shared.settings["etcd"]["network_prefix"],
            self.name.value,
            self.network_name.value,
        )
        network = shared.etcd_client.get(key, value_in_json=True)
        if network:
            self.add_error(
                "Network with name {} already exists".format(
                    self.network_name.value
                )
            )

    def network_type_validation(self):
        """Only vxlan networks are currently supported."""
        supported_network_types = ["vxlan"]
        if self.type.value not in supported_network_types:
            self.add_error(
                "Unsupported Network Type. Supported network types are {}".format(
                    supported_network_types
                )
            )
|
|
@ -0,0 +1,46 @@
|
|||
import requests
|
||||
import json
|
||||
import argparse
|
||||
import binascii
|
||||
|
||||
from pyotp import TOTP
|
||||
from os.path import join as join_path
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
|
||||
def get_otp_parser():
    """Build the shared argparse parser for OTP credential options.

    ``--seed`` is converted into a one-time token (dest='token') via
    get_token.
    """
    otp_parser = argparse.ArgumentParser('otp')
    for flag in ('--name', '--realm'):
        otp_parser.add_argument(flag)
    otp_parser.add_argument(
        '--seed', type=get_token, dest='token', metavar='SEED'
    )
    return otp_parser
|
||||
|
||||
|
||||
def load_dump_pretty(content):
    """Re-serialize a JSON document (str or bytes) with sorted keys and
    4-space indentation."""
    text = content.decode('utf-8') if isinstance(content, bytes) else content
    return json.dumps(json.loads(text), indent=4, sort_keys=True)
|
||||
|
||||
|
||||
def make_request(*args, data=None, request_method=requests.post):
    """POST (or GET) *data* as JSON to the API endpoint built from *args*
    and pretty-print the response; connection problems are reported on
    stdout instead of raising."""
    url = join_path(shared.settings['client']['api_server'], *args)
    try:
        response = request_method(url, json=data)
    except requests.exceptions.RequestException:
        print('Error occurred while connecting to API server.')
        return
    try:
        print(load_dump_pretty(response.content))
    except Exception:
        print('Error occurred while getting output from api server.')
|
||||
|
||||
|
||||
def get_token(seed):
    """Derive the current TOTP token from *seed*.

    Returns None when no seed is given; raises ArgumentTypeError on a
    malformed seed so argparse reports it nicely.
    """
    if seed is None:
        return None
    try:
        token = TOTP(seed).now()
    except binascii.Error:
        raise argparse.ArgumentTypeError('Invalid seed')
    return token
|
|
@ -0,0 +1,45 @@
|
|||
import requests
|
||||
|
||||
from uncloud.cli.helper import make_request, get_otp_parser
|
||||
from uncloud.common.parser import BaseParser
|
||||
|
||||
|
||||
class HostParser(BaseParser):
    """CLI argument parser for host subcommands (create/list)."""

    def __init__(self):
        super().__init__('host')

    def create(self, **kwargs):
        sub = self.subparser.add_parser(
            'create', parents=[get_otp_parser()], **kwargs
        )
        sub.add_argument('--hostname', required=True)
        sub.add_argument('--cpu', required=True, type=int)
        sub.add_argument('--ram', required=True)
        sub.add_argument('--os-ssd', required=True)
        sub.add_argument('--hdd', default=list())

    def list(self, **kwargs):
        self.subparser.add_parser('list', **kwargs)
|
||||
|
||||
|
||||
parser = HostParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Entry point for the ``host`` CLI subcommand."""
    subcommand = kwargs.pop('host_subcommand')
    if not subcommand:
        arg_parser.print_help()
        return

    request_method = requests.post
    data = None
    if subcommand == 'create':
        # Fold the flat CLI flags into the 'specs' dict the API expects.
        kwargs['specs'] = {
            'cpu': kwargs.pop('cpu'),
            'ram': kwargs.pop('ram'),
            'os-ssd': kwargs.pop('os_ssd'),
            'hdd': kwargs.pop('hdd'),
        }
        data = kwargs
    elif subcommand == 'list':
        request_method = requests.get

    make_request('host', subcommand, data=data, request_method=request_method)
|
|
@ -0,0 +1,38 @@
|
|||
import requests
|
||||
|
||||
from uncloud.cli.helper import make_request
|
||||
from uncloud.common.parser import BaseParser
|
||||
|
||||
|
||||
class ImageParser(BaseParser):
    """CLI argument parser for image subcommands (create/list)."""

    def __init__(self):
        super().__init__('image')

    def create(self, **kwargs):
        sub = self.subparser.add_parser('create', **kwargs)
        sub.add_argument('--name', required=True)
        sub.add_argument('--uuid', required=True)
        sub.add_argument('--image-store', required=True, dest='image_store')

    def list(self, **kwargs):
        self.subparser.add_parser('list', **kwargs)
|
||||
|
||||
|
||||
parser = ImageParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Entry point for the ``image`` CLI subcommand."""
    subcommand = kwargs.pop('image_subcommand')
    if not subcommand:
        arg_parser.print_help()
        return

    data = None
    request_method = requests.post
    if subcommand == 'list':
        # The public listing lives on a GET endpoint with another name.
        subcommand = 'list-public'
        request_method = requests.get
    elif subcommand == 'create':
        data = kwargs

    make_request('image', subcommand, data=data, request_method=request_method)
|
|
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import importlib
|
||||
|
||||
arg_parser = argparse.ArgumentParser('cli', add_help=False)
subparser = arg_parser.add_subparsers(dest='subcommand')

# Mount every component's parser as a subcommand of the top-level CLI.
for component in ['user', 'host', 'image', 'network', 'vm']:
    component_module = importlib.import_module('uncloud.cli.{}'.format(component))
    component_parser = getattr(component_module, 'arg_parser')
    subparser.add_parser(name=component_parser.prog, parents=[component_parser])


def main(arguments):
    """Dispatch to the chosen component's main(), or print help."""
    if not arguments['subcommand']:
        arg_parser.print_help()
        return

    name = arguments.pop('subcommand')
    arguments.pop('debug')
    component_module = importlib.import_module('uncloud.cli.{}'.format(name))
    component_main = getattr(component_module, 'main')
    component_main(**arguments)
|
|
@ -0,0 +1,32 @@
|
|||
import requests
|
||||
|
||||
from uncloud.cli.helper import make_request, get_otp_parser
|
||||
from uncloud.common.parser import BaseParser
|
||||
|
||||
|
||||
class NetworkParser(BaseParser):
    """CLI argument parser for network subcommands."""

    def __init__(self):
        super().__init__('network')

    def create(self, **kwargs):
        sub = self.subparser.add_parser(
            'create', parents=[get_otp_parser()], **kwargs
        )
        sub.add_argument('--network-name', required=True)
        sub.add_argument('--network-type', required=True, dest='type')
        sub.add_argument('--user', action='store_true')
|
||||
|
||||
|
||||
parser = NetworkParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Entry point for the ``network`` CLI subcommand."""
    subcommand = kwargs.pop('network_subcommand')
    if not subcommand:
        arg_parser.print_help()
        return

    data = None
    request_method = requests.post
    if subcommand == 'create':
        data = kwargs

    make_request('network', subcommand, data=data, request_method=request_method)
|
|
@ -0,0 +1,41 @@
|
|||
from uncloud.cli.helper import make_request, get_otp_parser
|
||||
from uncloud.common.parser import BaseParser
|
||||
|
||||
|
||||
class UserParser(BaseParser):
    """CLI argument parser for user subcommands (listings and SSH keys)."""

    def __init__(self):
        super().__init__('user')

    def files(self, **kwargs):
        self.subparser.add_parser('files', parents=[get_otp_parser()], **kwargs)

    def vms(self, **kwargs):
        self.subparser.add_parser('vms', parents=[get_otp_parser()], **kwargs)

    def networks(self, **kwargs):
        self.subparser.add_parser('networks', parents=[get_otp_parser()], **kwargs)

    def add_ssh(self, **kwargs):
        sub = self.subparser.add_parser('add-ssh', parents=[get_otp_parser()], **kwargs)
        sub.add_argument('--key-name', required=True)
        sub.add_argument('--key', required=True)

    def get_ssh(self, **kwargs):
        sub = self.subparser.add_parser('get-ssh', parents=[get_otp_parser()], **kwargs)
        sub.add_argument('--key-name', default='')

    def remove_ssh(self, **kwargs):
        sub = self.subparser.add_parser('remove-ssh', parents=[get_otp_parser()], **kwargs)
        sub.add_argument('--key-name', required=True)
|
||||
|
||||
|
||||
parser = UserParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Entry point for the ``user`` CLI subcommand."""
    subcommand = kwargs.pop('user_subcommand')
    if subcommand:
        make_request('user', subcommand, data=kwargs)
    else:
        arg_parser.print_help()
|
|
@ -0,0 +1,62 @@
|
|||
from uncloud.common.parser import BaseParser
|
||||
from uncloud.cli.helper import make_request, get_otp_parser
|
||||
|
||||
|
||||
class VMParser(BaseParser):
    """CLI argument parser for vm subcommands."""

    def __init__(self):
        super().__init__('vm')

    def start(self, **args):
        sub = self.subparser.add_parser('start', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)

    def stop(self, **args):
        sub = self.subparser.add_parser('stop', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)

    def status(self, **args):
        sub = self.subparser.add_parser('status', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)

    def delete(self, **args):
        sub = self.subparser.add_parser('delete', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)

    def migrate(self, **args):
        sub = self.subparser.add_parser('migrate', parents=[get_otp_parser()], **args)
        sub.add_argument('--vm-name', required=True)
        sub.add_argument('--destination', required=True)

    def create(self, **args):
        sub = self.subparser.add_parser('create', parents=[get_otp_parser()], **args)
        sub.add_argument('--cpu', required=True)
        sub.add_argument('--ram', required=True)
        sub.add_argument('--os-ssd', required=True)
        sub.add_argument('--hdd', action='append', default=list())
        sub.add_argument('--image', required=True)
        sub.add_argument('--network', action='append', default=[])
        sub.add_argument('--vm-name', required=True)
|
||||
|
||||
|
||||
parser = VMParser()
arg_parser = parser.arg_parser


def main(**kwargs):
    """Entry point for the ``vm`` CLI subcommand."""
    subcommand = kwargs.pop('vm_subcommand')
    if not subcommand:
        arg_parser.print_help()
        return

    data = kwargs
    endpoint = subcommand
    if subcommand in ['start', 'stop', 'delete']:
        # start/stop/delete all share the /vm/action endpoint.
        endpoint = 'action'
        data['action'] = subcommand
    elif subcommand == 'create':
        # Fold the flat CLI flags into the 'specs' dict the API expects.
        kwargs['specs'] = {
            'cpu': kwargs.pop('cpu'),
            'ram': kwargs.pop('ram'),
            'os-ssd': kwargs.pop('os_ssd'),
            'hdd': kwargs.pop('hdd'),
        }
    make_request('vm', endpoint, data=data)
|
|
@ -0,0 +1,23 @@
|
|||
import argparse
|
||||
import etcd3
|
||||
from uncloud.common.etcd_wrapper import Etcd3Wrapper
|
||||
|
||||
arg_parser = argparse.ArgumentParser('client', add_help=False)
arg_parser.add_argument('--dump-etcd-contents-prefix',
                        help="Dump contents below the given prefix")


def dump_etcd_contents(prefix):
    """Print every raw key/value pair stored below *prefix*."""
    etcd = Etcd3Wrapper()
    for raw_key, raw_value in etcd.get_prefix_raw(prefix):
        print("{} = {}".format(raw_key.decode('utf-8'),
                               raw_value.decode('utf-8')))
    print("done")
|
||||
|
||||
|
||||
def main(arguments):
    """Entry point for the ``client`` component.

    BUG FIX: argparse always includes the destination key in the parsed
    arguments (with value None when the flag is omitted), so the old
    membership test was always true and dump_etcd_contents could be
    called with prefix=None.  Test the value instead.
    """
    prefix = arguments.get('dump_etcd_contents_prefix')
    if prefix is not None:
        dump_etcd_contents(prefix=prefix)
|
|
@ -0,0 +1,3 @@
|
|||
import logging

# Package-wide logger; handlers and level are configured by the application.
logger = logging.getLogger(__name__)
|
|
@ -0,0 +1,26 @@
|
|||
from .etcd_wrapper import EtcdEntry
|
||||
|
||||
|
||||
class SpecificEtcdEntryBase:
    """Adapter exposing an EtcdEntry's JSON value as object attributes.

    The entry's etcd key is kept on ``self.key``; every key of the JSON
    value becomes an instance attribute.
    """

    def __init__(self, e: EtcdEntry):
        self.key = e.key
        for attr_name in e.value.keys():
            setattr(self, attr_name, e.value[attr_name])

    def original_keys(self):
        """Return the attribute dict without the etcd key itself."""
        result = dict(self.__dict__)
        result.pop("key", None)
        return result

    @property
    def value(self):
        return self.original_keys()

    @value.setter
    def value(self, v):
        self.__dict__ = v

    def __repr__(self):
        return str(dict(self.__dict__))
|
|
@ -0,0 +1,26 @@
|
|||
from uncloud.common.shared import shared
|
||||
from pyotp import TOTP
|
||||
|
||||
|
||||
def get_token(seed):
    """Derive the current TOTP token from *seed* (None passes through)."""
    if seed is None:
        return None
    try:
        token = TOTP(seed).now()
    except Exception:
        raise Exception('Invalid seed')
    return token
|
||||
|
||||
|
||||
def resolve_otp_credentials(kwargs):
    """Fill missing OTP credentials in *kwargs* from the client settings.

    Only keys that are present in *kwargs* but still None are
    overwritten.  The defaults dict itself is returned as well.
    """
    defaults = {
        'name': shared.settings['client']['name'],
        'realm': shared.settings['client']['realm'],
        'token': get_token(shared.settings['client']['seed']),
    }

    for key, value in defaults.items():
        if key in kwargs and kwargs[key] is None:
            kwargs[key] = value

    return defaults
|
|
@ -0,0 +1,21 @@
|
|||
from .etcd_wrapper import Etcd3Wrapper
|
||||
|
||||
|
||||
def increment_etcd_counter(etcd_client: Etcd3Wrapper, key):
    """Bump the integer counter stored at *key* and return the new value.

    NOTE(review): read-modify-write without a transaction; concurrent
    callers could race — confirm callers serialize access.
    """
    kv = etcd_client.get(key)
    counter = int(kv.value) + 1 if kv else 1
    etcd_client.put(key, str(counter))
    return counter
|
||||
|
||||
|
||||
def get_etcd_counter(etcd_client: Etcd3Wrapper, key):
    """Return the integer stored at *key*, or None when absent."""
    kv = etcd_client.get(key)
    return int(kv.value) if kv else None
|
|
@ -0,0 +1,75 @@
|
|||
import etcd3
|
||||
import json
|
||||
|
||||
from functools import wraps
|
||||
|
||||
from uncloud import UncloudException
|
||||
from uncloud.common import logger
|
||||
|
||||
|
||||
class EtcdEntry:
    """A decoded etcd key/value pair.

    *meta_or_key* may be either an etcd metadata object (whose ``key``
    attribute is raw bytes) or an already-decoded key string.
    """

    def __init__(self, meta_or_key, value, value_in_json=False):
        if hasattr(meta_or_key, 'key'):
            # Metadata object: pull and decode its raw key.
            self.key = meta_or_key.key.decode('utf-8')
        else:
            # Plain key: use it as-is.
            self.key = meta_or_key

        self.value = value.decode('utf-8')
        if value_in_json:
            self.value = json.loads(self.value)
|
||||
|
||||
|
||||
def readable_errors(func):
    """Decorator translating low-level etcd errors into friendlier ones.

    NOTE(review): unexpected exceptions are logged and swallowed, which
    makes the wrapped call return None — confirm callers tolerate that.
    """

    @wraps(func)
    def inner(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except etcd3.exceptions.ConnectionFailedError:
            raise UncloudException('Cannot connect to etcd: is etcd running as configured in uncloud.conf?')
        except etcd3.exceptions.ConnectionTimeoutError as err:
            raise etcd3.exceptions.ConnectionTimeoutError('etcd connection timeout.') from err
        except Exception:
            logger.exception('Some etcd error occured. See syslog for details.')

    return inner
|
||||
|
||||
|
||||
class Etcd3Wrapper:
    """Thin convenience wrapper around ``etcd3.client``.

    Normalizes results into EtcdEntry objects and maps low-level errors
    through readable_errors.
    """

    @readable_errors
    def __init__(self, *args, **kwargs):
        self.client = etcd3.client(*args, **kwargs)

    @readable_errors
    def get(self, *args, value_in_json=False, **kwargs):
        """Return the entry for a key, or None when it does not exist."""
        _value, _key = self.client.get(*args, **kwargs)
        if _key is None or _value is None:
            return None
        return EtcdEntry(_key, _value, value_in_json=value_in_json)

    @readable_errors
    def put(self, *args, value_in_json=False, **kwargs):
        """Store a value (optionally JSON-encoded) under a key."""
        _key, _value = args
        if value_in_json:
            _value = json.dumps(_value)

        if not isinstance(_key, str):
            _key = _key.decode('utf-8')

        return self.client.put(_key, _value, **kwargs)

    @readable_errors
    def get_prefix(self, *args, value_in_json=False, raise_exception=True, **kwargs):
        """Yield an EtcdEntry for every key below the given prefix.

        NOTE(review): raise_exception is currently unused — confirm intent.
        """
        for result in self.client.get_prefix(*args, **kwargs):
            # etcd3 yields (value, metadata); EtcdEntry wants (key, value).
            yield EtcdEntry(*result[::-1], value_in_json=value_in_json)

    @readable_errors
    def watch_prefix(self, key, raise_exception=True, value_in_json=False):
        """Yield an EtcdEntry for every PUT event observed below *key*."""
        event_iterator, cancel = self.client.watch_prefix(key)
        for event in event_iterator:
            if hasattr(event, '_event'):
                event = event._event
            if event.type == event.PUT:
                yield EtcdEntry(event.kv.key, event.kv.value, value_in_json=value_in_json)
|
|
@ -0,0 +1,69 @@
|
|||
import time
|
||||
from datetime import datetime
|
||||
from os.path import join
|
||||
from typing import List
|
||||
|
||||
from .classes import SpecificEtcdEntryBase
|
||||
|
||||
|
||||
class HostStatus:
    """Possible Statuses of uncloud host."""

    alive = "ALIVE"  # heartbeat recently refreshed
    dead = "DEAD"    # declared dead after missing heartbeats
|
||||
|
||||
|
||||
class HostEntry(SpecificEtcdEntryBase):
    """Represents Host Entry Structure and its supporting methods.

    Heartbeat timestamps are stored as UTC strings in
    "%Y-%m-%d %H:%M:%S" format; a host counts as alive while its last
    heartbeat is at most 60 seconds old.
    """

    def __init__(self, e):
        self.specs = None  # type: dict
        self.hostname = None  # type: str
        self.status = None  # type: str
        self.last_heartbeat = None  # type: str

        super().__init__(e)

    def update_heartbeat(self):
        """Mark this host alive and stamp the heartbeat with current UTC time."""
        self.status = HostStatus.alive
        self.last_heartbeat = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")

    def is_alive(self):
        """Return True when the last heartbeat is at most 60 seconds old (UTC)."""
        last_heartbeat = datetime.strptime(
            self.last_heartbeat, "%Y-%m-%d %H:%M:%S"
        )
        delta = datetime.utcnow() - last_heartbeat
        return delta.total_seconds() <= 60

    def declare_dead(self):
        """Mark this host dead, stamping the time of death in UTC.

        BUGFIX: this previously used time.strftime(), which formats
        *local* time, while update_heartbeat()/is_alive() use UTC; on a
        machine with a non-UTC timezone the stored timestamp disagreed
        with the liveness comparison. Use UTC consistently.
        """
        self.status = HostStatus.dead
        self.last_heartbeat = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
|
||||
class HostPool:
    """Access layer for HostEntry objects stored below one etcd prefix."""

    def __init__(self, etcd_client, host_prefix):
        self.client = etcd_client
        self.prefix = host_prefix

    @property
    def hosts(self) -> List[HostEntry]:
        """All hosts currently stored under the prefix."""
        return [
            HostEntry(entry)
            for entry in self.client.get_prefix(self.prefix, value_in_json=True)
        ]

    def get(self, key):
        """Fetch one host by key (relative keys get the pool prefix); None if absent."""
        full_key = key if key.startswith(self.prefix) else join(self.prefix, key)
        entry = self.client.get(full_key, value_in_json=True)
        return HostEntry(entry) if entry else None

    def put(self, obj: HostEntry):
        """Write the host entry back to etcd."""
        self.client.put(obj.key, obj.value, value_in_json=True)

    def by_status(self, status, _hosts=None):
        """Hosts whose status equals *status*; filters *_hosts* when given."""
        pool = self.hosts if _hosts is None else _hosts
        return [host for host in pool if host.status == status]
|
|
@ -0,0 +1,70 @@
|
|||
import subprocess as sp
|
||||
import random
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def random_bytes(num=6):
    """Return a list of *num* random integers in the byte range [0, 256)."""
    return [random.randrange(256) for _ in range(num)]


def generate_mac(
    uaa=False, multicast=False, oui=None, separator=":", byte_fmt="%02x"
):
    """Generate a random MAC address string.

    :param uaa: if True clear the U/L bit (universally administered
        address); otherwise set it (locally administered).
    :param multicast: if True set the multicast bit, else clear it.
    :param oui: optional organisation prefix — a list of ints, or a
        string of base-10 integers joined by *separator*; remaining
        bytes are randomized. When given, uaa/multicast are not applied.
    :param separator: string placed between the formatted bytes.
    :param byte_fmt: printf-style format applied to each byte.
    """
    mac = random_bytes()
    if oui:
        # Idiom fix: isinstance() instead of type(...) == str, which
        # would miss str subclasses.
        if isinstance(oui, str):
            oui = [int(chunk) for chunk in oui.split(separator)]
        mac = oui + random_bytes(num=6 - len(oui))
    else:
        if multicast:
            mac[0] |= 1  # set bit 0
        else:
            mac[0] &= ~1  # clear bit 0
        if uaa:
            mac[0] &= ~(1 << 1)  # clear bit 1
        else:
            mac[0] |= 1 << 1  # set bit 1
    return separator.join(byte_fmt % b for b in mac)
|
||||
|
||||
|
||||
def create_dev(script, _id, dev, ip=None):
    """Run *script* via sudo to create a network device for a VM.

    :param script: path of the helper script executed with sudo
    :param _id: VM/network identifier, stringified as first script arg
    :param dev: device name passed to the script
    :param ip: optional IP appended as a final argument
    :return: the script's stripped stdout on success, None on failure
    """
    command = [
        "sudo",
        "-p",
        "Enter password to create network devices for vm: ",
        script,
        str(_id),
        dev,
    ]
    if ip:
        command.append(ip)
    try:
        output = sp.check_output(command, stderr=sp.PIPE)
    except Exception:
        # Any failure (non-zero exit, missing binary, ...) is logged and
        # reported to the caller as None.
        logger.exception("Creation of interface %s failed.", dev)
        return None
    else:
        return output.decode("utf-8").strip()
|
||||
|
||||
|
||||
def delete_network_interface(iface):
    """Delete network interface *iface* with ``ip link del`` via sudo.

    Failures are logged and swallowed — deletion is best-effort.
    """
    try:
        sp.check_output(
            [
                "sudo",
                "-p",
                "Enter password to remove {} network device: ".format(
                    iface
                ),
                "ip",
                "link",
                "del",
                iface,
            ],
            stderr=sp.PIPE,
        )
    except Exception:
        logger.exception("Interface %s Deletion failed", iface)
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
import argparse
|
||||
|
||||
|
||||
class BaseParser:
    """Argparse scaffold for a *command* with declarative subcommands.

    On construction, every non-dunder method defined on the (sub)class
    is called with the shared keyword arguments, letting subclasses
    register their subparsers simply by defining methods.
    """

    def __init__(self, command):
        self.arg_parser = argparse.ArgumentParser(command, add_help=False)
        self.subparser = self.arg_parser.add_subparsers(dest='{}_subcommand'.format(command))
        self.common_args = {'add_help': False}

        for attr in dir(self):
            if attr.startswith('__'):
                continue
            candidate = getattr(self, attr)
            # Bound methods only — skips plain attributes and functions.
            if type(candidate).__name__ == 'method':
                candidate(**self.common_args)
|
|
@ -0,0 +1,46 @@
|
|||
import json
|
||||
from os.path import join
|
||||
from uuid import uuid4
|
||||
|
||||
from uncloud.common.etcd_wrapper import EtcdEntry
|
||||
from uncloud.common.classes import SpecificEtcdEntryBase
|
||||
|
||||
|
||||
class RequestType:
    """String constants naming the request types processed by uncloud."""

    CreateVM = "CreateVM"
    ScheduleVM = "ScheduleVM"
    StartVM = "StartVM"
    StopVM = "StopVM"
    InitVMMigration = "InitVMMigration"
    TransferVM = "TransferVM"
    DeleteVM = "DeleteVM"
|
||||
|
||||
|
||||
class RequestEntry(SpecificEtcdEntryBase):
    """A request record stored in etcd (see RequestType for kinds)."""

    def __init__(self, e):
        self.destination_sock_path = None
        self.destination_host_key = None
        self.type = None  # type: str
        self.migration = None  # type: bool
        self.destination = None  # type: str
        self.uuid = None  # type: str
        self.hostname = None  # type: str
        super().__init__(e)

    @classmethod
    def from_scratch(cls, request_prefix, **kwargs):
        """Build a fresh request under *request_prefix* with a random key.

        kwargs become the JSON-encoded request payload.
        """
        e = EtcdEntry(meta_or_key=join(request_prefix, uuid4().hex),
                      value=json.dumps(kwargs).encode('utf-8'), value_in_json=True)
        return cls(e)
|
||||
|
||||
|
||||
class RequestPool:
    """Stores RequestEntry objects below one etcd prefix."""

    def __init__(self, etcd_client, request_prefix):
        self.client = etcd_client
        self.prefix = request_prefix

    def put(self, obj: RequestEntry):
        """Write the request to etcd, normalizing relative keys first."""
        key = obj.key
        if not key.startswith(self.prefix):
            key = join(self.prefix, key)
            obj.key = key

        self.client.put(key, obj.value, value_in_json=True)
|
|
@ -0,0 +1,41 @@
|
|||
import bitmath
|
||||
|
||||
from marshmallow import fields, Schema
|
||||
|
||||
|
||||
class StorageUnit(fields.Field):
    """Marshmallow field for storage sizes, backed by bitmath objects."""

    def _serialize(self, value, attr, obj, **kwargs):
        # Serialized form is the bitmath object's str() representation.
        return str(value)

    def _deserialize(self, value, attr, data, **kwargs):
        # parse_string_unsafe accepts lenient spellings (and bare numbers).
        return bitmath.parse_string_unsafe(value)
|
||||
|
||||
|
||||
class SpecsSchema(Schema):
    """Hardware specification of a VM.

    The JSON key "os-ssd" is hyphenated, so it is mapped explicitly via
    data_key/attribute.
    """

    cpu = fields.Int()
    ram = StorageUnit()
    os_ssd = StorageUnit(data_key="os-ssd", attribute="os-ssd")
    hdd = fields.List(StorageUnit())
|
||||
|
||||
|
||||
class VMSchema(Schema):
    """Serialization schema for a VM record.

    ``network`` entries are (name, mac, tap-id) triples — presumably;
    TODO confirm against the code that populates them.
    """

    name = fields.Str()
    owner = fields.Str()
    owner_realm = fields.Str()
    specs = fields.Nested(SpecsSchema)
    status = fields.Str()
    log = fields.List(fields.Str())
    vnc_socket = fields.Str()
    image_uuid = fields.Str()
    hostname = fields.Str()
    metadata = fields.Dict()
    network = fields.List(
        fields.Tuple((fields.Str(), fields.Str(), fields.Int()))
    )
    in_migration = fields.Bool()
|
||||
|
||||
|
||||
class NetworkSchema(Schema):
    """Serialization schema for a network record.

    "id" and "type" shadow Python builtins, hence the underscored
    attribute names mapped back via data_key/attribute.
    """

    _id = fields.Int(data_key="id", attribute="id")
    _type = fields.Str(data_key="type", attribute="type")
    ipv6 = fields.Str()
|
|
@ -0,0 +1,136 @@
|
|||
import configparser
|
||||
import logging
|
||||
import sys
|
||||
import os
|
||||
|
||||
from datetime import datetime
|
||||
from uncloud.common.etcd_wrapper import Etcd3Wrapper
|
||||
from os.path import join as join_path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
settings = None
|
||||
|
||||
|
||||
class CustomConfigParser(configparser.RawConfigParser):
    """RawConfigParser whose missing-section KeyError tells the user to
    configure uncloud instead of raising a bare key name."""

    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except KeyError as err:
            raise KeyError(
                'Key \'{}\' not found in configuration. Make sure you configure uncloud.'.format(
                    key
                )
            ) from err
|
||||
|
||||
|
||||
class Settings(object):
    """Layered uncloud configuration.

    Values come from, in increasing precedence: internal defaults, the
    on-disk ``uncloud.conf``, an optional seed dict, and (lazily,
    cached for 60 seconds) the cluster-wide config stored in etcd.
    """

    def __init__(self, conf_dir, seed_value=None):
        """
        :param conf_dir: directory containing ``uncloud.conf``
        :param seed_value: optional dict merged over the file values
        :raises FileNotFoundError: when the config file is unreadable
        """
        conf_name = 'uncloud.conf'
        self.config_file = join_path(conf_dir, conf_name)

        # this is used to cache config from etcd for 1 minutes. Without this we
        # would make a lot of requests to etcd which slows down everything.
        self.last_config_update = datetime.fromtimestamp(0)

        self.config_parser = CustomConfigParser(allow_no_value=True)
        self.config_parser.add_section('etcd')
        self.config_parser.set('etcd', 'base_prefix', '/')

        if os.access(self.config_file, os.R_OK):
            self.config_parser.read(self.config_file)
        else:
            # BUGFIX: the path was previously passed as a second,
            # logging-style argument ('%s ...', path), so it never
            # appeared in the exception message. Format it explicitly.
            raise FileNotFoundError('Config file {} not found!'.format(self.config_file))
        # NOTE(review): join_path() is called with a single, already
        # concatenated argument here — a no-op wrapper. Confirm intended.
        self.config_key = join_path(self['etcd']['base_prefix'] + 'uncloud/config/')

        self.read_internal_values()

        if seed_value is None:
            seed_value = dict()

        self.config_parser.read_dict(seed_value)

    def get_etcd_client(self):
        """Build an Etcd3Wrapper from the [etcd] section.

        Exits the process when the connection cannot be established.
        :raises configparser.Error: when a required option is missing.
        """
        args = tuple()
        try:
            kwargs = {
                'host': self.config_parser.get('etcd', 'url'),
                'port': self.config_parser.get('etcd', 'port'),
                'ca_cert': self.config_parser.get('etcd', 'ca_cert'),
                'cert_cert': self.config_parser.get('etcd', 'cert_cert'),
                'cert_key': self.config_parser.get('etcd', 'cert_key'),
            }
        except configparser.Error as err:
            raise configparser.Error(
                '{} in config file {}'.format(
                    err.message, self.config_file
                )
            ) from err
        else:
            try:
                wrapper = Etcd3Wrapper(*args, **kwargs)
            except Exception as err:
                logger.error(
                    'etcd connection not successfull. Please check your config file.'
                    '\nDetails: %s\netcd connection parameters: %s',
                    err,
                    kwargs,
                )
                sys.exit(1)
            else:
                return wrapper

    def read_internal_values(self):
        """Derive the per-resource etcd key prefixes from base_prefix."""
        base_prefix = self['etcd']['base_prefix']
        self.config_parser.read_dict(
            {
                'etcd': {
                    'file_prefix': join_path(base_prefix, 'files/'),
                    'host_prefix': join_path(base_prefix, 'hosts/'),
                    'image_prefix': join_path(base_prefix, 'images/'),
                    'image_store_prefix': join_path(base_prefix, 'imagestore/'),
                    'network_prefix': join_path(base_prefix, 'networks/'),
                    'request_prefix': join_path(base_prefix, 'requests/'),
                    'user_prefix': join_path(base_prefix, 'users/'),
                    'vm_prefix': join_path(base_prefix, 'vms/'),
                    'vxlan_counter': join_path(base_prefix, 'counters/vxlan'),
                    'tap_counter': join_path(base_prefix, 'counters/tap')
                }
            }
        )

    def read_config_file_values(self, config_file):
        """Merge values from an explicit config file; exit on failure."""
        try:
            # Trying to read configuration file
            with open(config_file) as config_file_handle:
                self.config_parser.read_file(config_file_handle)
        except FileNotFoundError:
            sys.exit('Configuration file {} not found!'.format(config_file))
        except Exception as err:
            logger.exception(err)
            sys.exit('Error occurred while reading configuration file')

    def read_values_from_etcd(self):
        """Refresh config from etcd, at most once per 60 seconds.

        :raises KeyError: when the config key is absent in etcd.
        """
        etcd_client = self.get_etcd_client()
        if (datetime.utcnow() - self.last_config_update).total_seconds() > 60:
            config_from_etcd = etcd_client.get(self.config_key, value_in_json=True)
            if config_from_etcd:
                self.config_parser.read_dict(config_from_etcd.value)
                self.last_config_update = datetime.utcnow()
            else:
                raise KeyError('Key \'{}\' not found in etcd. Please configure uncloud.'.format(self.config_key))

    def __getitem__(self, key):
        # Allow failing to read from etcd if we have
        # it locally
        if key not in self.config_parser.sections():
            try:
                self.read_values_from_etcd()
            except KeyError:
                pass
        return self.config_parser[key]
|
||||
|
||||
|
||||
def get_settings():
    """Return the module-level Settings singleton (assigned at startup)."""
    return settings
|
|
@ -0,0 +1,34 @@
|
|||
from uncloud.common.settings import get_settings
|
||||
from uncloud.common.vm import VmPool
|
||||
from uncloud.common.host import HostPool
|
||||
from uncloud.common.request import RequestPool
|
||||
import uncloud.common.storage_handlers as storage_handlers
|
||||
|
||||
|
||||
class Shared:
    """Lazily constructed accessors for the app-wide singletons.

    Every property re-reads the current settings, so each access
    builds a fresh pool/client bound to the latest configuration.
    """

    @property
    def settings(self):
        return get_settings()

    @property
    def etcd_client(self):
        return self.settings.get_etcd_client()

    @property
    def host_pool(self):
        return HostPool(self.etcd_client, self.settings["etcd"]["host_prefix"])

    @property
    def vm_pool(self):
        return VmPool(self.etcd_client, self.settings["etcd"]["vm_prefix"])

    @property
    def request_pool(self):
        return RequestPool(self.etcd_client, self.settings["etcd"]["request_prefix"])

    @property
    def storage_handler(self):
        return storage_handlers.get_storage_handler()


# Module-level singleton used throughout the code base.
shared = Shared()
|
|
@ -0,0 +1,207 @@
|
|||
import shutil
|
||||
import subprocess as sp
|
||||
import os
|
||||
import stat
|
||||
|
||||
from abc import ABC
|
||||
from . import logger
|
||||
from os.path import join as join_path
|
||||
import uncloud.common.shared as shared
|
||||
|
||||
|
||||
class ImageStorageHandler(ABC):
    """Base interface for image / VM-disk storage backends.

    Concrete backends (filesystem, Ceph) implement the
    NotImplementedError stubs; execute_command() is the shared plumbing
    for shelling out to external tools.
    """

    handler_name = "base"

    def __init__(self, image_base, vm_base):
        self.image_base = image_base  # root location for imported images
        self.vm_base = vm_base  # root location for per-VM disks

    def import_image(self, image_src, image_dest, protect=False):
        """Put an image at the destination
        :param image_src: An Image file
        :param image_dest: A path where :param src: is to be put.
        :param protect: If protect is true then the dest is protect (readonly etc)
        The obj must exist on filesystem.
        """
        raise NotImplementedError()

    def make_vm_image(self, image_path, path):
        """Copy image from src to dest

        :param image_path: A path
        :param path: A path

        src and destination must be on same storage system i.e both on file system or both on CEPH etc.
        """
        raise NotImplementedError()

    def resize_vm_image(self, path, size):
        """Resize image located at :param path:
        :param path: The file which is to be resized
        :param size: Size must be in Megabytes
        """
        raise NotImplementedError()

    def delete_vm_image(self, path):
        raise NotImplementedError()

    def execute_command(self, command, report=True, error_origin=None):
        """Run *command*, coercing each element to str first.

        :return: True on success; False after logging stderr (unless
            *report* is falsy) under *error_origin* / the handler name.
        """
        origin = error_origin if error_origin else self.handler_name

        try:
            sp.check_output([str(part) for part in command], stderr=sp.PIPE)
        except sp.CalledProcessError as e:
            _stderr = e.stderr.decode("utf-8").strip()
            if report:
                logger.exception("%s:- %s", origin, _stderr)
            return False
        return True

    def vm_path_string(self, path):
        raise NotImplementedError()

    def qemu_path_string(self, path):
        raise NotImplementedError()

    def is_vm_image_exists(self, path):
        raise NotImplementedError()
|
||||
|
||||
|
||||
class FileSystemBasedImageStorageHandler(ImageStorageHandler):
    """Keeps images and VM disks as plain files on the local filesystem."""

    handler_name = "Filesystem"

    def import_image(self, src, dest, protect=False):
        """Copy *src* below the image base; optionally make it read-only.

        :return: True on success, False (with the error logged) otherwise.
        """
        dest = join_path(self.image_base, dest)
        try:
            shutil.copy(src, dest)
            if protect:
                # read-only for owner, group and others
                os.chmod(
                    dest, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
                )
        except Exception as e:
            logger.exception(e)
            return False
        return True

    def make_vm_image(self, src, dest):
        """Copy an imported image into a fresh VM disk file."""
        src = join_path(self.image_base, src)
        dest = join_path(self.vm_base, dest)
        try:
            shutil.copyfile(src, dest)
        except Exception as e:
            logger.exception(e)
            return False
        return True

    def resize_vm_image(self, path, size):
        """Grow the raw image at *path* to *size* MB via qemu-img.

        On failure the (possibly corrupted) image is removed.
        """
        path = join_path(self.vm_base, path)
        command = [
            "qemu-img",
            "resize",
            "-f",
            "raw",
            path,
            "{}M".format(size),
        ]
        if self.execute_command(command):
            return True
        else:
            # NOTE(review): *path* is already joined here and
            # delete_vm_image() joins again; harmless only while
            # vm_base is an absolute directory — confirm.
            self.delete_vm_image(path)
            return False

    def delete_vm_image(self, path):
        path = join_path(self.vm_base, path)
        try:
            os.remove(path)
        except Exception as e:
            logger.exception(e)
            return False
        return True

    def vm_path_string(self, path):
        return join_path(self.vm_base, path)

    def qemu_path_string(self, path):
        return self.vm_path_string(path)

    def is_vm_image_exists(self, path):
        """Check whether the VM disk file exists.

        IMPROVEMENT: this previously spawned ``ls <path>`` through
        execute_command; os.path.exists() yields the same boolean
        without forking a process.
        """
        return os.path.exists(join_path(self.vm_base, path))
|
||||
|
||||
|
||||
class CEPHBasedImageStorageHandler(ImageStorageHandler):
    """Stores images and VM disks as RBD volumes in Ceph pools."""

    handler_name = "Ceph"

    def import_image(self, src, dest, protect=False):
        """rbd-import *src*; optionally snapshot and protect the result.

        Commands run in order and stop at the first failure.
        """
        dest = join_path(self.image_base, dest)
        commands = [["rbd", "import", src, dest]]
        if protect:
            protected = "{}@protected".format(dest)
            commands.append(["rbd", "snap", "create", protected])
            commands.append(["rbd", "snap", "protect", protected])

        result = True
        for command in commands:
            # Short-circuits: later commands are skipped after a failure.
            result = result and self.execute_command(command)

        return result

    def make_vm_image(self, src, dest):
        """Clone the protected snapshot of *src* into a new VM volume."""
        source_snap = "{}@protected".format(join_path(self.image_base, src))
        return self.execute_command(
            ["rbd", "clone", source_snap, join_path(self.vm_base, dest)]
        )

    def resize_vm_image(self, path, size):
        return self.execute_command(
            ["rbd", "resize", join_path(self.vm_base, path), "--size", size]
        )

    def delete_vm_image(self, path):
        return self.execute_command(["rbd", "rm", join_path(self.vm_base, path)])

    def vm_path_string(self, path):
        return join_path(self.vm_base, path)

    def qemu_path_string(self, path):
        return "rbd:{}".format(self.vm_path_string(path))

    def is_vm_image_exists(self, path):
        return self.execute_command(
            ["rbd", "info", join_path(self.vm_base, path)], report=False
        )
|
||||
|
||||
|
||||
def get_storage_handler():
    """Instantiate the storage handler selected by settings.

    :raises Exception: for an unrecognized storage_backend value.
    """
    storage_config = shared.shared.settings["storage"]
    backend = storage_config["storage_backend"]
    if backend == "filesystem":
        return FileSystemBasedImageStorageHandler(
            vm_base=storage_config["vm_dir"],
            image_base=storage_config["image_dir"],
        )
    if backend == "ceph":
        return CEPHBasedImageStorageHandler(
            vm_base=storage_config["ceph_vm_pool"],
            image_base=storage_config["ceph_image_pool"],
        )
    raise Exception("Unknown Image Storage Handler")
|
|
@ -0,0 +1,102 @@
|
|||
from contextlib import contextmanager
|
||||
from datetime import datetime
|
||||
from os.path import join
|
||||
|
||||
from .classes import SpecificEtcdEntryBase
|
||||
|
||||
|
||||
class VMStatus:
    """Possible statuses of an uncloud VM."""

    stopped = "STOPPED"  # After requested_shutdown
    killed = "KILLED"  # either host died or vm died itself
    running = "RUNNING"
    error = "ERROR"  # An error occurred that cannot be resolved automatically
|
||||
|
||||
|
||||
def declare_stopped(vm):
    """Reset a raw vm dict to the cleanly-stopped state."""
    vm.update(hostname="", in_migration=False, status=VMStatus.stopped)
|
||||
|
||||
|
||||
class VMEntry(SpecificEtcdEntryBase):
    """A single VM record stored in etcd."""

    def __init__(self, e):
        self.owner = None  # type: str
        self.specs = None  # type: dict
        self.hostname = None  # type: str
        self.status = None  # type: str
        self.image_uuid = None  # type: str
        self.log = None  # type: list
        self.in_migration = None  # type: bool

        super().__init__(e)

    @property
    def uuid(self):
        # The UUID is the last path component of the etcd key.
        return self.key.split("/")[-1]

    def declare_killed(self):
        """Detach from the host; RUNNING VMs transition to KILLED."""
        self.hostname = ""
        self.in_migration = False
        if self.status == VMStatus.running:
            self.status = VMStatus.killed

    def declare_stopped(self):
        """Detach from the host and mark cleanly STOPPED."""
        self.hostname = ""
        self.in_migration = False
        self.status = VMStatus.stopped

    def add_log(self, msg):
        """Append a timestamped message, capping the log's length.

        NOTE(review): ``self.log[:5]`` keeps the five *oldest* entries
        before appending; if the intent was to keep the most recent
        ones this should be ``[-5:]`` — confirm.
        """
        self.log = self.log[:5]
        self.log.append(
            "{} - {}".format(datetime.now().isoformat(), msg)
        )
|
||||
|
||||
|
||||
class VmPool:
    """Access layer for VMEntry objects stored below one etcd prefix."""

    def __init__(self, etcd_client, vm_prefix):
        self.client = etcd_client
        self.prefix = vm_prefix

    @property
    def vms(self):
        """Every VM currently stored under the prefix."""
        entries = self.client.get_prefix(self.prefix, value_in_json=True)
        return [VMEntry(entry) for entry in entries]

    def by_host(self, host, _vms=None):
        """VMs running on *host*; filters *_vms* when provided."""
        pool = self.vms if _vms is None else _vms
        return [vm for vm in pool if vm.hostname == host]

    def by_status(self, status, _vms=None):
        """VMs whose status equals *status*."""
        pool = self.vms if _vms is None else _vms
        return [vm for vm in pool if vm.status == status]

    def by_owner(self, owner, _vms=None):
        """VMs belonging to *owner*."""
        pool = self.vms if _vms is None else _vms
        return [vm for vm in pool if vm.owner == owner]

    def except_status(self, status, _vms=None):
        """VMs whose status differs from *status*."""
        pool = self.vms if _vms is None else _vms
        return [vm for vm in pool if vm.status != status]

    def get(self, key):
        """Fetch one VM by (possibly relative) key; None when absent."""
        if not key.startswith(self.prefix):
            key = join(self.prefix, key)
        entry = self.client.get(key, value_in_json=True)
        return VMEntry(entry) if entry else None

    def put(self, obj: VMEntry):
        """Write the VM entry back to etcd."""
        self.client.put(obj.key, obj.value, value_in_json=True)

    @contextmanager
    def get_put(self, key) -> VMEntry:
        """Yield the VM at *key*; the (possibly mutated) entry is
        written back to etcd when the context exits."""
        obj = self.get(key)
        yield obj
        if obj:
            self.put(obj)
|
|
@ -0,0 +1,57 @@
|
|||
import os
|
||||
import argparse
|
||||
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
# CLI for writing cluster-wide configuration into etcd: one subparser
# per configurable section; the chosen subcommand's arguments are
# stored verbatim by update_config().
arg_parser = argparse.ArgumentParser('configure', add_help=False)
configure_subparsers = arg_parser.add_subparsers(dest='subcommand')

# One-time-password authentication settings
otp_parser = configure_subparsers.add_parser('otp')
otp_parser.add_argument('--verification-controller-url', required=True, metavar='URL')
otp_parser.add_argument('--auth-name', required=True, metavar='OTP-NAME')
otp_parser.add_argument('--auth-realm', required=True, metavar='OTP-REALM')
otp_parser.add_argument('--auth-seed', required=True, metavar='OTP-SEED')

# VM networking (VXLAN) settings
network_parser = configure_subparsers.add_parser('network')
network_parser.add_argument('--prefix-length', required=True, type=int)
network_parser.add_argument('--prefix', required=True)
network_parser.add_argument('--vxlan-phy-dev', required=True)

# Netbox IPAM integration
netbox_parser = configure_subparsers.add_parser('netbox')
netbox_parser.add_argument('--url', required=True)
netbox_parser.add_argument('--token', required=True)

# SSH access used for migrations/administration
ssh_parser = configure_subparsers.add_parser('ssh')
ssh_parser.add_argument('--username', default='root')
ssh_parser.add_argument('--private-key-path', default=os.path.expanduser('~/.ssh/id_rsa'),)

# Storage backend selection plus backend-specific options
storage_parser = configure_subparsers.add_parser('storage')
storage_parser.add_argument('--file-dir', required=True)
storage_parser_subparsers = storage_parser.add_subparsers(dest='storage_backend')

filesystem_storage_parser = storage_parser_subparsers.add_parser('filesystem')
filesystem_storage_parser.add_argument('--vm-dir', required=True)
filesystem_storage_parser.add_argument('--image-dir', required=True)

ceph_storage_parser = storage_parser_subparsers.add_parser('ceph')
ceph_storage_parser.add_argument('--ceph-vm-pool', required=True)
ceph_storage_parser.add_argument('--ceph-image-pool', required=True)
|
||||
|
||||
|
||||
def update_config(section, kwargs):
    """Merge *kwargs* into *section* of the etcd-held uncloud config."""
    current = shared.etcd_client.get(shared.settings.config_key, value_in_json=True)
    # A missing config key yields None; start from an empty mapping then.
    uncloud_config = current.value if current else {}

    uncloud_config[section] = kwargs
    shared.etcd_client.put(shared.settings.config_key, uncloud_config, value_in_json=True)
|
||||
|
||||
|
||||
def main(arguments):
    """Store the chosen subcommand's arguments in etcd, or print help."""
    subcommand = arguments['subcommand']
    if subcommand:
        update_config(subcommand, arguments)
    else:
        arg_parser.print_help()
|
|
@ -0,0 +1,3 @@
|
|||
import logging

# Package-level logger shared by the filescanner modules.
logger = logging.getLogger(__name__)
|
|
@ -0,0 +1,85 @@
|
|||
import glob
|
||||
import os
|
||||
import pathlib
|
||||
import subprocess as sp
|
||||
import time
|
||||
import argparse
|
||||
import bitmath
|
||||
|
||||
from uuid import uuid4
|
||||
|
||||
from . import logger
|
||||
from uncloud.common.shared import shared
|
||||
|
||||
arg_parser = argparse.ArgumentParser('filescanner', add_help=False)
|
||||
arg_parser.add_argument('--hostname', required=True)
|
||||
|
||||
|
||||
def sha512sum(file: str):
    """Compute the sha512 hex digest of *file*.

    IMPROVEMENT: previously shelled out to the ``sha512sum`` binary;
    hashlib produces the identical digest without an external process
    dependency.

    IF arg:file does not exists:
        raise FileNotFoundError exception
    ELSE IF sum successfully computed:
        return computed sha512 sum
    ELSE:
        return None

    :raises TypeError: when *file* is not a str
    """
    import hashlib  # local import: not part of this module's header

    if not isinstance(file, str):
        raise TypeError

    digest = hashlib.sha512()
    try:
        with open(file, 'rb') as handle:
            # Stream in 1 MiB chunks so large files stay out of memory.
            for chunk in iter(lambda: handle.read(1024 * 1024), b''):
                digest.update(chunk)
    except FileNotFoundError:
        # Same contract as before: a missing file raises.
        raise FileNotFoundError from None
    except OSError:
        # Mirror the old behavior of returning None on other failures
        # (e.g. permission denied).
        return None
    return digest.hexdigest()
|
||||
|
||||
|
||||
def track_file(file, base_dir, host):
    """Register one file below *base_dir* in etcd under a fresh UUID key.

    The first path component below base_dir is treated as the owning
    username; a file lying directly in base_dir has no owner part and
    is silently skipped (the IndexError branch).
    """
    file_path = file.relative_to(base_dir)
    file_str = str(file)
    # Get Username
    try:
        owner = file_path.parts[0]
    except IndexError:
        pass
    else:
        file_path = file_path.relative_to(owner)
        creation_date = time.ctime(os.stat(file_str).st_ctime)

        entry_key = os.path.join(shared.settings['etcd']['file_prefix'], str(uuid4()))
        entry_value = {
            'filename': str(file_path),
            'owner': owner,
            'sha512sum': sha512sum(file_str),
            'creation_date': creation_date,
            'size': str(bitmath.Byte(os.path.getsize(file_str)).to_MB()),
            'host': host
        }

        logger.info('Tracking %s', file_str)

        shared.etcd_client.put(entry_key, entry_value, value_in_json=True)
|
||||
|
||||
|
||||
def main(arguments):
    """Scan the configured file directory and register untracked files."""
    hostname = arguments['hostname']
    base_dir = shared.settings['storage']['file_dir']

    # Recursively Get All Files and Folder below BASE_DIR
    candidates = (pathlib.Path(p) for p in glob.glob('{}/**'.format(base_dir), recursive=True))
    files = [path for path in candidates if path.is_file()]

    # Files that are already tracked
    tracked_files = [
        pathlib.Path(os.path.join(base_dir, f.value['owner'], f.value['filename']))
        for f in shared.etcd_client.get_prefix(shared.settings['etcd']['file_prefix'], value_in_json=True)
        if f.value['host'] == hostname
    ]
    for file in set(files) - set(tracked_files):
        track_file(file, base_dir, hostname)
|
|
@ -0,0 +1,13 @@
|
|||
This directory contains unfinished hacks / inspirations
|
||||
* firewalling / networking in ucloud
|
||||
** automatically route a network per VM - /64?
|
||||
** nft: one chain per VM on each vm host (?)
|
||||
*** might have scaling issues?
|
||||
** firewall rules on each VM host
|
||||
- mac filtering:
|
||||
* To add / block
|
||||
** TODO arp poisoning
|
||||
** TODO ndp "poisoning"
|
||||
** TODO ipv4 dhcp server
|
||||
*** drop dhcpv4 requests
|
||||
*** drop dhcpv4 answers
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -0,0 +1 @@
|
|||
HOSTNAME=server1.place10
|
|
@ -0,0 +1,39 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
||||
#
|
||||
# This file is part of uncloud.
|
||||
#
|
||||
# uncloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# uncloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
#
|
||||
|
||||
class Config(object):
    def __init__(self, arguments):
        """ read arguments dicts as a base """

        self.arguments = arguments

        # Split them so *etcd_args can be used and we can
        # iterate over etcd_hosts
        self.etcd_hosts = [arguments['etcd_host']]
        self.etcd_args = {
            option: arguments['etcd_{}'.format(option)]
            for option in ('ca_cert', 'cert_cert', 'cert_key')
        }
        # 'user' / 'password' deliberately left out for now.
        self.etcd_prefix = '/nicohack/'
|
|
@ -0,0 +1,149 @@
|
|||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# 2020 Nico Schottelius (nico.schottelius at ungleich.ch)
|
||||
#
|
||||
# This file is part of uncloud.
|
||||
#
|
||||
# uncloud is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# uncloud is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with uncloud. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
#
|
||||
|
||||
import etcd3
|
||||
import json
|
||||
import logging
|
||||
import datetime
|
||||
import re
|
||||
|
||||
from functools import wraps
|
||||
from uncloud import UncloudException
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def db_logentry(message):
    """Build a log-entry dict carrying *message* and the current time."""
    now = datetime.datetime.now()
    return {"timestamp": str(now), "message": message}
|
||||
|
||||
|
||||
def readable_errors(func):
    """Decorator translating low-level etcd3 connection failures into
    UncloudException with a human-readable message.

    The wrapped callable is returned unchanged apart from error handling;
    functools.wraps preserves its name and docstring.
    """
    @wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except etcd3.exceptions.ConnectionFailedError as e:
            raise UncloudException('Cannot connect to etcd: is etcd running and reachable? {}'.format(e))
        except etcd3.exceptions.ConnectionTimeoutError as e:
            raise UncloudException('etcd connection timeout. {}'.format(e))

    return wrapped
|
||||
|
||||
|
||||
class DB(object):
    """Thin wrapper around one or more etcd3 clients.

    All keys are namespaced under ``base_prefix + prefix``; values can be
    transparently (de)serialized as JSON via the ``as_json`` flags.
    """

    def __init__(self, config, prefix="/"):
        """Connect to etcd using endpoints/credentials from *config*.

        config: object exposing ``etcd_hosts`` (list of endpoints) and
            ``etcd_args`` (kwargs for etcd3.client).
        prefix: per-instance key namespace below ``base_prefix``.

        Raises UncloudException when the certificate paths are wrong or
        etcd is unreachable (the latter via @readable_errors on connect).
        """
        self.config = config

        # Root for everything
        self.base_prefix = '/nicohack'

        # Can be set from outside
        self.prefix = prefix

        try:
            self.connect()
        except FileNotFoundError as e:
            raise UncloudException("Is the path to the etcd certs correct? {}".format(e))

    @readable_errors
    def connect(self):
        """Create one etcd3 client per configured endpoint."""
        self._db_clients = []
        for endpoint in self.config.etcd_hosts:
            client = etcd3.client(host=endpoint, **self.config.etcd_args)
            self._db_clients.append(client)

    def realkey(self, key):
        """Return the fully namespaced etcd key for *key*."""
        return "{}{}/{}".format(self.base_prefix,
                                self.prefix,
                                key)

    @readable_errors
    def get(self, key, as_json=False, **kwargs):
        """Read one key; optionally JSON-decode the value.

        Returns None when the key does not exist.  Bug fix: previously a
        missing key with as_json=True raised TypeError from
        json.loads(None); the decode is now skipped for absent values.
        """
        value, _ = self._db_clients[0].get(self.realkey(key), **kwargs)

        if as_json and value is not None:
            value = json.loads(value)

        return value

    @readable_errors
    def get_prefix(self, key, as_json=False, **kwargs):
        """Yield (key, value) pairs for every key below *key*.

        Keys and values are UTF-8 decoded; values are additionally
        JSON-decoded when as_json is set.
        """
        for value, meta in self._db_clients[0].get_prefix(self.realkey(key), **kwargs):
            k = meta.key.decode("utf-8")
            value = value.decode("utf-8")
            if as_json:
                value = json.loads(value)

            yield (k, value)

    @readable_errors
    def set(self, key, value, as_json=False, **kwargs):
        """Write *value* at *key*; JSON-encode it first when as_json is set."""
        if as_json:
            value = json.dumps(value)

        log.debug("Setting {} = {}".format(self.realkey(key), value))
        # FIXME: iterate over clients in case of failure ?
        return self._db_clients[0].put(self.realkey(key), value, **kwargs)

    @readable_errors
    def list_and_filter(self, key, filter_key=None, filter_regexp=None):
        """Yield JSON-decoded values below *key*.

        When BOTH filter_key and filter_regexp are given, only values
        whose filter_key field matches the regexp are yielded; values
        lacking the field are dropped.  With neither (or only one) set,
        every value is yielded unfiltered.
        """
        for k, v in self.get_prefix(key, as_json=True):

            if filter_key and filter_regexp:
                if filter_key in v:
                    if re.match(filter_regexp, v[filter_key]):
                        yield v
            else:
                yield v

    @readable_errors
    def increment(self, key, **kwargs):
        """FIXME: debug scaffolding — atomic increment is not implemented
        yet; this currently only exercises etcd locking and prints
        progress markers."""
        print(self.realkey(key))

        print("prelock")
        lock = self._db_clients[0].lock('/nicohack/foo')
        print("prelockacq")
        lock.acquire()
        print("prelockrelease")
        lock.release()

        with self._db_clients[0].lock("/nicohack/mac/last_used_index") as lock:
            print("in lock")
            pass

        # Intended implementation (to be done under the lock):
        # with self._db_clients[0].lock(self.realkey(key)) as lock:
        #     value = int(self.get(self.realkey(key), **kwargs))
        #     self.set(self.realkey(key), str(value + 1), **kwargs)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: requires reachable etcd endpoints.
    endpoints = ["https://etcd1.ungleich.ch:2379",
                 "https://etcd2.ungleich.ch:2379",
                 "https://etcd3.ungleich.ch:2379"]

    # Bug fix: DB() has no 'url' keyword — its signature is
    # DB(config, prefix="/") and it reads config.etcd_hosts and
    # config.etcd_args.  Provide a minimal config object instead.
    class _DemoConfig(object):
        etcd_hosts = endpoints
        etcd_args = {}

    db = DB(_DemoConfig())
|
|
@ -0,0 +1,3 @@
|
|||
*.iso
|
||||
radvdpid
|
||||
foo
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -0,0 +1,6 @@
|
|||
#!/bin/sh
# Wrapper around etcdctl preloaded with the ungleich client certificates
# and etcd endpoints; all CLI arguments are passed through.
#
# Consistency fix: the --key path hard-coded /home/nico while the other
# two paths used $HOME; all three now use $HOME, and expansions are
# quoted so paths with spaces survive.

etcdctl --cert="$HOME/vcs/ungleich-dot-cdist/files/etcd/nico.pem" \
        --key="$HOME/vcs/ungleich-dot-cdist/files/etcd/nico-key.pem" \
        --cacert="$HOME/vcs/ungleich-dot-cdist/files/etcd/ca.pem" \
        --endpoints https://etcd1.ungleich.ch:2379,https://etcd2.ungleich.ch:2379,https://etcd3.ungleich.ch:2379 "$@"
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/sh
# Print all arguments.
# Bug fix: unquoted $@ re-splits arguments on whitespace; "$@" preserves
# each argument as a single word.

echo "$@"
|
|
@ -0,0 +1,7 @@
|
|||
#!/bin/sh
# Attach a network device (first argument) to an externally created
# bridge (named via the $bridge environment variable) and bring it up.

dev=$1; shift

# bridge is setup from outside.
# Bug fix: if $bridge is unset, the unquoted expansion previously made
# ip(8) fail with a confusing usage error; fail early and quote it.
[ -n "$bridge" ] || { echo "error: \$bridge is not set" >&2; exit 1; }

ip link set dev "$dev" master "$bridge"
ip link set dev "$dev" up
|
|
@ -0,0 +1 @@
|
|||
000000000252
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue