ucloud-{api,scheduler,host,filescanner,imagescanner,metadata} combined
commit da77ac65eb
29 changed files with 3941 additions and 0 deletions
5 .gitignore vendored Normal file
@@ -0,0 +1,5 @@
.idea
.vscode
.env

*/log.txt
22 Pipfile Normal file
@@ -0,0 +1,22 @@
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true

[dev-packages]
prospector = {extras = ["with_everything"],version = "*"}

[packages]
python-decouple = "*"
requests = "*"
flask = "*"
flask-restful = "*"
bitmath = "*"
ucloud-common = {editable = true,git = "git+https://code.ungleich.ch/ucloud/ucloud_common.git",ref = "wip"}
etcd3-wrapper = {editable = true,git = "git+https://code.ungleich.ch/ungleich-public/etcd3_wrapper.git",ref = "wip"}
python-etcd3 = {editable = true,git = "git+https://github.com/kragniz/python-etcd3.git"}
pyotp = "*"
sshtunnel = "*"

[requires]
python_version = "3.5"
773 Pipfile.lock generated Normal file
@@ -0,0 +1,773 @@
|
||||||
|
{
|
||||||
|
"_meta": {
|
||||||
|
"hash": {
|
||||||
|
"sha256": "b7a8409bec451e017440f063d8436fe66b18affcde7ad5497b433191ae465a52"
|
||||||
|
},
|
||||||
|
"pipfile-spec": 6,
|
||||||
|
"requires": {
|
||||||
|
"python_version": "3.5"
|
||||||
|
},
|
||||||
|
"sources": [
|
||||||
|
{
|
||||||
|
"name": "pypi",
|
||||||
|
"url": "https://pypi.org/simple",
|
||||||
|
"verify_ssl": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"default": {
|
||||||
|
"aniso8601": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:529dcb1f5f26ee0df6c0a1ee84b7b27197c3c50fc3a6321d66c544689237d072",
|
||||||
|
"sha256:c033f63d028b9a58e3ab0c2c7d0532ab4bfa7452bfc788fbfe3ddabd327b181a"
|
||||||
|
],
|
||||||
|
"version": "==8.0.0"
|
||||||
|
},
|
||||||
|
"bcrypt": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:0258f143f3de96b7c14f762c770f5fc56ccd72f8a1857a451c1cd9a655d9ac89",
|
||||||
|
"sha256:0b0069c752ec14172c5f78208f1863d7ad6755a6fae6fe76ec2c80d13be41e42",
|
||||||
|
"sha256:19a4b72a6ae5bb467fea018b825f0a7d917789bcfe893e53f15c92805d187294",
|
||||||
|
"sha256:5432dd7b34107ae8ed6c10a71b4397f1c853bd39a4d6ffa7e35f40584cffd161",
|
||||||
|
"sha256:6305557019906466fc42dbc53b46da004e72fd7a551c044a827e572c82191752",
|
||||||
|
"sha256:69361315039878c0680be456640f8705d76cb4a3a3fe1e057e0f261b74be4b31",
|
||||||
|
"sha256:6fe49a60b25b584e2f4ef175b29d3a83ba63b3a4df1b4c0605b826668d1b6be5",
|
||||||
|
"sha256:74a015102e877d0ccd02cdeaa18b32aa7273746914a6c5d0456dd442cb65b99c",
|
||||||
|
"sha256:763669a367869786bb4c8fcf731f4175775a5b43f070f50f46f0b59da45375d0",
|
||||||
|
"sha256:8b10acde4e1919d6015e1df86d4c217d3b5b01bb7744c36113ea43d529e1c3de",
|
||||||
|
"sha256:9fe92406c857409b70a38729dbdf6578caf9228de0aef5bc44f859ffe971a39e",
|
||||||
|
"sha256:a190f2a5dbbdbff4b74e3103cef44344bc30e61255beb27310e2aec407766052",
|
||||||
|
"sha256:a595c12c618119255c90deb4b046e1ca3bcfad64667c43d1166f2b04bc72db09",
|
||||||
|
"sha256:c9457fa5c121e94a58d6505cadca8bed1c64444b83b3204928a866ca2e599105",
|
||||||
|
"sha256:cb93f6b2ab0f6853550b74e051d297c27a638719753eb9ff66d1e4072be67133",
|
||||||
|
"sha256:ce4e4f0deb51d38b1611a27f330426154f2980e66582dc5f438aad38b5f24fc1",
|
||||||
|
"sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7",
|
||||||
|
"sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc"
|
||||||
|
],
|
||||||
|
"version": "==3.1.7"
|
||||||
|
},
|
||||||
|
"bitmath": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:293325f01e65defe966853111df11d39215eb705a967cb115851da8c4cfa3eb8"
|
||||||
|
],
|
||||||
|
"index": "pypi",
|
||||||
|
"version": "==1.3.3.1"
|
||||||
|
},
|
||||||
|
"certifi": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50",
|
||||||
|
"sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef"
|
||||||
|
],
|
||||||
|
"version": "==2019.9.11"
|
||||||
|
},
|
||||||
|
"cffi": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:00d890313797d9fe4420506613384b43099ad7d2b905c0752dbcc3a6f14d80fa",
|
||||||
|
"sha256:0cf9e550ac6c5e57b713437e2f4ac2d7fd0cd10336525a27224f5fc1ec2ee59a",
|
||||||
|
"sha256:0ea23c9c0cdd6778146a50d867d6405693ac3b80a68829966c98dd5e1bbae400",
|
||||||
|
"sha256:193697c2918ecdb3865acf6557cddf5076bb39f1f654975e087b67efdff83365",
|
||||||
|
"sha256:1ae14b542bf3b35e5229439c35653d2ef7d8316c1fffb980f9b7647e544baa98",
|
||||||
|
"sha256:1e389e069450609c6ffa37f21f40cce36f9be7643bbe5051ab1de99d5a779526",
|
||||||
|
"sha256:263242b6ace7f9cd4ea401428d2d45066b49a700852334fd55311bde36dcda14",
|
||||||
|
"sha256:33142ae9807665fa6511cfa9857132b2c3ee6ddffb012b3f0933fc11e1e830d5",
|
||||||
|
"sha256:364f8404034ae1b232335d8c7f7b57deac566f148f7222cef78cf8ae28ef764e",
|
||||||
|
"sha256:47368f69fe6529f8f49a5d146ddee713fc9057e31d61e8b6dc86a6a5e38cecc1",
|
||||||
|
"sha256:4895640844f17bec32943995dc8c96989226974dfeb9dd121cc45d36e0d0c434",
|
||||||
|
"sha256:558b3afef987cf4b17abd849e7bedf64ee12b28175d564d05b628a0f9355599b",
|
||||||
|
"sha256:5ba86e1d80d458b338bda676fd9f9d68cb4e7a03819632969cf6d46b01a26730",
|
||||||
|
"sha256:63424daa6955e6b4c70dc2755897f5be1d719eabe71b2625948b222775ed5c43",
|
||||||
|
"sha256:6381a7d8b1ebd0bc27c3bc85bc1bfadbb6e6f756b4d4db0aa1425c3719ba26b4",
|
||||||
|
"sha256:6381ab708158c4e1639da1f2a7679a9bbe3e5a776fc6d1fd808076f0e3145331",
|
||||||
|
"sha256:6fd58366747debfa5e6163ada468a90788411f10c92597d3b0a912d07e580c36",
|
||||||
|
"sha256:728ec653964655d65408949b07f9b2219df78badd601d6c49e28d604efe40599",
|
||||||
|
"sha256:7cfcfda59ef1f95b9f729c56fe8a4041899f96b72685d36ef16a3440a0f85da8",
|
||||||
|
"sha256:819f8d5197c2684524637f940445c06e003c4a541f9983fd30d6deaa2a5487d8",
|
||||||
|
"sha256:825ecffd9574557590e3225560a8a9d751f6ffe4a49e3c40918c9969b93395fa",
|
||||||
|
"sha256:9009e917d8f5ef780c2626e29b6bc126f4cb2a4d43ca67aa2b40f2a5d6385e78",
|
||||||
|
"sha256:9c77564a51d4d914ed5af096cd9843d90c45b784b511723bd46a8a9d09cf16fc",
|
||||||
|
"sha256:a19089fa74ed19c4fe96502a291cfdb89223a9705b1d73b3005df4256976142e",
|
||||||
|
"sha256:a40ed527bffa2b7ebe07acc5a3f782da072e262ca994b4f2085100b5a444bbb2",
|
||||||
|
"sha256:bb75ba21d5716abc41af16eac1145ab2e471deedde1f22c6f99bd9f995504df0",
|
||||||
|
"sha256:e22a00c0c81ffcecaf07c2bfb3672fa372c50e2bd1024ffee0da191c1b27fc71",
|
||||||
|
"sha256:e55b5a746fb77f10c83e8af081979351722f6ea48facea79d470b3731c7b2891",
|
||||||
|
"sha256:ec2fa3ee81707a5232bf2dfbd6623fdb278e070d596effc7e2d788f2ada71a05",
|
||||||
|
"sha256:fd82eb4694be712fcae03c717ca2e0fc720657ac226b80bbb597e971fc6928c2"
|
||||||
|
],
|
||||||
|
"version": "==1.13.1"
|
||||||
|
},
|
||||||
|
"chardet": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
|
||||||
|
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
|
||||||
|
],
|
||||||
|
"version": "==3.0.4"
|
||||||
|
},
|
||||||
|
"click": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13",
|
||||||
|
"sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7"
|
||||||
|
],
|
||||||
|
"version": "==7.0"
|
||||||
|
},
|
||||||
|
"cryptography": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:02079a6addc7b5140ba0825f542c0869ff4df9a69c360e339ecead5baefa843c",
|
||||||
|
"sha256:1df22371fbf2004c6f64e927668734070a8953362cd8370ddd336774d6743595",
|
||||||
|
"sha256:369d2346db5934345787451504853ad9d342d7f721ae82d098083e1f49a582ad",
|
||||||
|
"sha256:3cda1f0ed8747339bbdf71b9f38ca74c7b592f24f65cdb3ab3765e4b02871651",
|
||||||
|
"sha256:44ff04138935882fef7c686878e1c8fd80a723161ad6a98da31e14b7553170c2",
|
||||||
|
"sha256:4b1030728872c59687badcca1e225a9103440e467c17d6d1730ab3d2d64bfeff",
|
||||||
|
"sha256:58363dbd966afb4f89b3b11dfb8ff200058fbc3b947507675c19ceb46104b48d",
|
||||||
|
"sha256:6ec280fb24d27e3d97aa731e16207d58bd8ae94ef6eab97249a2afe4ba643d42",
|
||||||
|
"sha256:7270a6c29199adc1297776937a05b59720e8a782531f1f122f2eb8467f9aab4d",
|
||||||
|
"sha256:73fd30c57fa2d0a1d7a49c561c40c2f79c7d6c374cc7750e9ac7c99176f6428e",
|
||||||
|
"sha256:7f09806ed4fbea8f51585231ba742b58cbcfbfe823ea197d8c89a5e433c7e912",
|
||||||
|
"sha256:90df0cc93e1f8d2fba8365fb59a858f51a11a394d64dbf3ef844f783844cc793",
|
||||||
|
"sha256:971221ed40f058f5662a604bd1ae6e4521d84e6cad0b7b170564cc34169c8f13",
|
||||||
|
"sha256:a518c153a2b5ed6b8cc03f7ae79d5ffad7315ad4569b2d5333a13c38d64bd8d7",
|
||||||
|
"sha256:b0de590a8b0979649ebeef8bb9f54394d3a41f66c5584fff4220901739b6b2f0",
|
||||||
|
"sha256:b43f53f29816ba1db8525f006fa6f49292e9b029554b3eb56a189a70f2a40879",
|
||||||
|
"sha256:d31402aad60ed889c7e57934a03477b572a03af7794fa8fb1780f21ea8f6551f",
|
||||||
|
"sha256:de96157ec73458a7f14e3d26f17f8128c959084931e8997b9e655a39c8fde9f9",
|
||||||
|
"sha256:df6b4dca2e11865e6cfbfb708e800efb18370f5a46fd601d3755bc7f85b3a8a2",
|
||||||
|
"sha256:ecadccc7ba52193963c0475ac9f6fa28ac01e01349a2ca48509667ef41ffd2cf",
|
||||||
|
"sha256:fb81c17e0ebe3358486cd8cc3ad78adbae58af12fc2bf2bc0bb84e8090fa5ce8"
|
||||||
|
],
|
||||||
|
"version": "==2.8"
|
||||||
|
},
|
||||||
|
"etcd3-wrapper": {
|
||||||
|
"editable": true,
|
||||||
|
"git": "https://code.ungleich.ch/ungleich-public/etcd3_wrapper.git",
|
||||||
|
"ref": "76fb0bdf797199e9ea161dad1d004eea9b4520f8"
|
||||||
|
},
|
||||||
|
"flask": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:13f9f196f330c7c2c5d7a5cf91af894110ca0215ac051b5844701f2bfd934d52",
|
||||||
|
"sha256:45eb5a6fd193d6cf7e0cf5d8a5b31f83d5faae0293695626f539a823e93b13f6"
|
||||||
|
],
|
||||||
|
"index": "pypi",
|
||||||
|
"version": "==1.1.1"
|
||||||
|
},
|
||||||
|
"flask-restful": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:ecd620c5cc29f663627f99e04f17d1f16d095c83dc1d618426e2ad68b03092f8",
|
||||||
|
"sha256:f8240ec12349afe8df1db168ea7c336c4e5b0271a36982bff7394f93275f2ca9"
|
||||||
|
],
|
||||||
|
"index": "pypi",
|
||||||
|
"version": "==0.3.7"
|
||||||
|
},
|
||||||
|
"grpcio": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:01cb705eafba1108e2a947ba0457da4f6a1e8142c729fc61702b5fdd11009eb1",
|
||||||
|
"sha256:0b5a79e29f167d3cd06faad6b15babbc2661066daaacf79373c3a8e67ca1fca1",
|
||||||
|
"sha256:1097a61a0e97b3580642e6e1460a3a1f1ba1815e2a70d6057173bcc495417076",
|
||||||
|
"sha256:13970e665a4ec4cec7d067d7d3504a0398c657d91d26c581144ad9044e429c9a",
|
||||||
|
"sha256:1557817cea6e0b87fad2a3e20da385170efb03a313db164e8078955add2dfa1b",
|
||||||
|
"sha256:1b0fb036a2f9dd93d9a35c57c26420eeb4b571fcb14b51cddf5b1e73ea5d882b",
|
||||||
|
"sha256:24d9e58d08e8cd545d8a3247a18654aff0e5e60414701696a8098fbb0d792b75",
|
||||||
|
"sha256:2c38b586163d2b91567fe5e6d9e7798f792012365adc838a64b66b22dce3f4d4",
|
||||||
|
"sha256:2df3ab4348507de60e1cbf75196403df1b9b4c4d4dc5bd11ac4eb63c46f691c7",
|
||||||
|
"sha256:32f70f7c90454ea568b868af2e96616743718d9233d23f62407e98caed81dfbf",
|
||||||
|
"sha256:3af2a49d576820045c9c880ff29a5a96d020fe31b35d248519bfc6ccb8be4eac",
|
||||||
|
"sha256:4ff7d63800a63db031ebac6a6f581ae84877c959401c24c28f2cc51fd36c47ad",
|
||||||
|
"sha256:502aaa8be56f0ae69cda66bc27e1fb5531ceaa27ca515ec3c34f6178b1297180",
|
||||||
|
"sha256:55358ce3ec283222e435f7dbc6603521438458f3c65f7c1cb33b8dabf56d70d8",
|
||||||
|
"sha256:5583b01c67f85fa64a2c3fb085e5517c88b9c1500a2cce12d473cd99d0ed2e49",
|
||||||
|
"sha256:58d9a5557d3eb7b734a3cea8b16c891099a522b3953a45a30bd4c034f75fc913",
|
||||||
|
"sha256:5911f042c4ab177757eec5bcb4e2e9a2e823d888835d24577321bf55f02938fa",
|
||||||
|
"sha256:5e16ea922f4e5017c04fd94e2639b1006e03097e9dd0cbb7a1c852af3ea8bf2e",
|
||||||
|
"sha256:656e19d3f1b9050ee01b457f92838a9679d7cf84c995f708780f44484048705e",
|
||||||
|
"sha256:6a1435449a82008c451c7e1a82a834387b9108f9a8d27910f86e7c482f5568e9",
|
||||||
|
"sha256:6ff02ca6cbed0ddb76e93ba0f8beb6a8c77d83a84eb7cafe2ae3399a8b9d69ea",
|
||||||
|
"sha256:76de68f60102f333bf4817f38e81ecbee68b850f5a5da9f355235e948ac40981",
|
||||||
|
"sha256:7c6d7ddd50fc6548ea1dfe09c62509c4f95b8b40082287747be05aa8feb15ee2",
|
||||||
|
"sha256:836b9d29507de729129e363276fe7c7d6a34c7961e0f155787025552b15d22c0",
|
||||||
|
"sha256:869242b2baf8a888a4fe0548f86abc47cb4b48bdfd76ae62d6456e939c202e65",
|
||||||
|
"sha256:8954b24bd08641d906ee50b2d638efc76df893fbd0913149b80484fd0eac40c9",
|
||||||
|
"sha256:8cdea65d1abb2e698420db8daf20c8d272fbd9d96a51b26a713c1c76f237d181",
|
||||||
|
"sha256:90161840b4fe9636f91ed0d3ea1e7e615e488cbea4e77594c889e5f3d7a776db",
|
||||||
|
"sha256:90fb6316b4d7d36700c40db4335902b78dcae13b5466673c21fd3b08a3c1b0c6",
|
||||||
|
"sha256:91b34f58db2611c9a93ecf751028f97fba1f06e65f49b38f272f6aa5d2977331",
|
||||||
|
"sha256:9474944a96a33eb8734fa8dc5805403d57973a3526204a5e1c1780d02e0572b6",
|
||||||
|
"sha256:9a36275db2a4774ac16c6822e7af816ee048071d5030b4c035fd53942b361935",
|
||||||
|
"sha256:9cbe26e2976b994c5f7c2d35a63354674d6ca0ce62f5b513f078bf63c1745229",
|
||||||
|
"sha256:9eaeabb3c0eecd6ddd0c16767fd12d130e2cebb8c2618f959a278b1ff336ddc3",
|
||||||
|
"sha256:a2bc7e10ebcf4be503ae427f9887e75c0cc24e88ce467a8e6eaca6bd2862406e",
|
||||||
|
"sha256:a5b42e6292ba51b8e67e09fc256963ba4ca9c04026de004d2fe59cc17e3c3776",
|
||||||
|
"sha256:bd6ec1233c86c0b9bb5d03ec30dbe3ffbfa53335790320d99a7ae9018c5450f2",
|
||||||
|
"sha256:bef57530816af54d66b1f4c70a8f851f320cb6f84d4b5a0b422b0e9811ea4e59",
|
||||||
|
"sha256:c146a63eaadc6589b732780061f3c94cd0574388d372baccbb3c1597a9ebdb7a",
|
||||||
|
"sha256:c2efd3b130dc639d615b6f58980e1bfd1b177ad821f30827afa5001aa30ddd48",
|
||||||
|
"sha256:c888b18f7392e6cc79a33a803e7ebd7890ac3318f571fca6b356526f35b53b12",
|
||||||
|
"sha256:ca30721fda297ae22f16bc37aa7ed244970ddfdcb98247570cdd26daaad4665e",
|
||||||
|
"sha256:cf5f5340dd682ab034baa52f423a0f91326489c262ac9617fa06309ec05880e9",
|
||||||
|
"sha256:d0726aa0d9b57c56985db5952e90fb1033a317074f2877db5307cdd6eede1564",
|
||||||
|
"sha256:df442945b2dd6f8ae0e20b403e0fd4548cd5c2aad69200047cc3251257b78f65",
|
||||||
|
"sha256:e08e758c31919d167c0867539bd3b2441629ef00aa595e3ea2b635273659f40a",
|
||||||
|
"sha256:e4864339deeeaefaad34dd3a432ee618a039fca28efb292949c855e00878203c",
|
||||||
|
"sha256:f4cd049cb94d9f517b1cab5668a3b345968beba093bc79a637e671000b3540ec"
|
||||||
|
],
|
||||||
|
"version": "==1.24.3"
|
||||||
|
},
|
||||||
|
"idna": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407",
|
||||||
|
"sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c"
|
||||||
|
],
|
||||||
|
"version": "==2.8"
|
||||||
|
},
|
||||||
|
"itsdangerous": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19",
|
||||||
|
"sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749"
|
||||||
|
],
|
||||||
|
"version": "==1.1.0"
|
||||||
|
},
|
||||||
|
"jinja2": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:74320bb91f31270f9551d46522e33af46a80c3d619f4a4bf42b3164d30b5911f",
|
||||||
|
"sha256:9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de"
|
||||||
|
],
|
||||||
|
"version": "==2.10.3"
|
||||||
|
},
|
||||||
|
"markupsafe": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
|
||||||
|
"sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161",
|
||||||
|
"sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235",
|
||||||
|
"sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5",
|
||||||
|
"sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff",
|
||||||
|
"sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
|
||||||
|
"sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1",
|
||||||
|
"sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e",
|
||||||
|
"sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183",
|
||||||
|
"sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66",
|
||||||
|
"sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1",
|
||||||
|
"sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1",
|
||||||
|
"sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e",
|
||||||
|
"sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b",
|
||||||
|
"sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905",
|
||||||
|
"sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735",
|
||||||
|
"sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d",
|
||||||
|
"sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e",
|
||||||
|
"sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d",
|
||||||
|
"sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c",
|
||||||
|
"sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21",
|
||||||
|
"sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2",
|
||||||
|
"sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5",
|
||||||
|
"sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b",
|
||||||
|
"sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6",
|
||||||
|
"sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f",
|
||||||
|
"sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f",
|
||||||
|
"sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7"
|
||||||
|
],
|
||||||
|
"version": "==1.1.1"
|
||||||
|
},
|
||||||
|
"paramiko": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:99f0179bdc176281d21961a003ffdb2ec369daac1a1007241f53374e376576cf",
|
||||||
|
"sha256:f4b2edfa0d226b70bd4ca31ea7e389325990283da23465d572ed1f70a7583041"
|
||||||
|
],
|
||||||
|
"version": "==2.6.0"
|
||||||
|
},
|
||||||
|
"protobuf": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:125713564d8cfed7610e52444c9769b8dcb0b55e25cc7841f2290ee7bc86636f",
|
||||||
|
"sha256:1accdb7a47e51503be64d9a57543964ba674edac103215576399d2d0e34eac77",
|
||||||
|
"sha256:27003d12d4f68e3cbea9eb67427cab3bfddd47ff90670cb367fcd7a3a89b9657",
|
||||||
|
"sha256:3264f3c431a631b0b31e9db2ae8c927b79fc1a7b1b06b31e8e5bcf2af91fe896",
|
||||||
|
"sha256:3c5ab0f5c71ca5af27143e60613729e3488bb45f6d3f143dc918a20af8bab0bf",
|
||||||
|
"sha256:45dcf8758873e3f69feab075e5f3177270739f146255225474ee0b90429adef6",
|
||||||
|
"sha256:56a77d61a91186cc5676d8e11b36a5feb513873e4ae88d2ee5cf530d52bbcd3b",
|
||||||
|
"sha256:5984e4947bbcef5bd849d6244aec507d31786f2dd3344139adc1489fb403b300",
|
||||||
|
"sha256:6b0441da73796dd00821763bb4119674eaf252776beb50ae3883bed179a60b2a",
|
||||||
|
"sha256:6f6677c5ade94d4fe75a912926d6796d5c71a2a90c2aeefe0d6f211d75c74789",
|
||||||
|
"sha256:84a825a9418d7196e2acc48f8746cf1ee75877ed2f30433ab92a133f3eaf8fbe",
|
||||||
|
"sha256:b842c34fe043ccf78b4a6cf1019d7b80113707d68c88842d061fa2b8fb6ddedc",
|
||||||
|
"sha256:ca33d2f09dae149a1dcf942d2d825ebb06343b77b437198c9e2ef115cf5d5bc1",
|
||||||
|
"sha256:db83b5c12c0cd30150bb568e6feb2435c49ce4e68fe2d7b903113f0e221e58fe",
|
||||||
|
"sha256:f50f3b1c5c1c1334ca7ce9cad5992f098f460ffd6388a3cabad10b66c2006b09",
|
||||||
|
"sha256:f99f127909731cafb841c52f9216e447d3e4afb99b17bebfad327a75aee206de"
|
||||||
|
],
|
||||||
|
"version": "==3.10.0"
|
||||||
|
},
|
||||||
|
"pycparser": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:9d97450dc26e1d2581c18881d8d1c0a92e84c9ac074961e3dc66e70d745a0643",
|
||||||
|
"sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3"
|
||||||
|
],
|
||||||
|
"version": "==2.19"
|
||||||
|
},
|
||||||
|
"pynacl": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255",
|
||||||
|
"sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c",
|
||||||
|
"sha256:0d0a8171a68edf51add1e73d2159c4bc19fc0718e79dec51166e940856c2f28e",
|
||||||
|
"sha256:1c780712b206317a746ace34c209b8c29dbfd841dfbc02aa27f2084dd3db77ae",
|
||||||
|
"sha256:2424c8b9f41aa65bbdbd7a64e73a7450ebb4aa9ddedc6a081e7afcc4c97f7621",
|
||||||
|
"sha256:2d23c04e8d709444220557ae48ed01f3f1086439f12dbf11976e849a4926db56",
|
||||||
|
"sha256:30f36a9c70450c7878053fa1344aca0145fd47d845270b43a7ee9192a051bf39",
|
||||||
|
"sha256:37aa336a317209f1bb099ad177fef0da45be36a2aa664507c5d72015f956c310",
|
||||||
|
"sha256:4943decfc5b905748f0756fdd99d4f9498d7064815c4cf3643820c9028b711d1",
|
||||||
|
"sha256:53126cd91356342dcae7e209f840212a58dcf1177ad52c1d938d428eebc9fee5",
|
||||||
|
"sha256:57ef38a65056e7800859e5ba9e6091053cd06e1038983016effaffe0efcd594a",
|
||||||
|
"sha256:5bd61e9b44c543016ce1f6aef48606280e45f892a928ca7068fba30021e9b786",
|
||||||
|
"sha256:6482d3017a0c0327a49dddc8bd1074cc730d45db2ccb09c3bac1f8f32d1eb61b",
|
||||||
|
"sha256:7d3ce02c0784b7cbcc771a2da6ea51f87e8716004512493a2b69016326301c3b",
|
||||||
|
"sha256:a14e499c0f5955dcc3991f785f3f8e2130ed504fa3a7f44009ff458ad6bdd17f",
|
||||||
|
"sha256:a39f54ccbcd2757d1d63b0ec00a00980c0b382c62865b61a505163943624ab20",
|
||||||
|
"sha256:aabb0c5232910a20eec8563503c153a8e78bbf5459490c49ab31f6adf3f3a415",
|
||||||
|
"sha256:bd4ecb473a96ad0f90c20acba4f0bf0df91a4e03a1f4dd6a4bdc9ca75aa3a715",
|
||||||
|
"sha256:bf459128feb543cfca16a95f8da31e2e65e4c5257d2f3dfa8c0c1031139c9c92",
|
||||||
|
"sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1",
|
||||||
|
"sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0"
|
||||||
|
],
|
||||||
|
"version": "==1.3.0"
|
||||||
|
},
|
||||||
|
"pyotp": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:c88f37fd47541a580b744b42136f387cdad481b560ef410c0d85c957eb2a2bc0",
|
||||||
|
"sha256:fc537e8acd985c5cbf51e11b7d53c42276fee017a73aec7c07380695671ca1a1"
|
||||||
|
],
|
||||||
|
"index": "pypi",
|
||||||
|
"version": "==2.3.0"
|
||||||
|
},
|
||||||
|
"python-decouple": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:1317df14b43efee4337a4aa02914bf004f010cd56d6c4bd894e6474ec8c4fe2d"
|
||||||
|
],
|
||||||
|
"index": "pypi",
|
||||||
|
"version": "==3.1"
|
||||||
|
},
|
||||||
|
"python-etcd3": {
|
||||||
|
"editable": true,
|
||||||
|
"git": "https://github.com/kragniz/python-etcd3.git",
|
||||||
|
"ref": "247e3952d0b47324091a36ace3ad9717469fb6b9"
|
||||||
|
},
|
||||||
|
"pytz": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:1c557d7d0e871de1f5ccd5833f60fb2550652da6be2693c1e02300743d21500d",
|
||||||
|
"sha256:b02c06db6cf09c12dd25137e563b31700d3b80fcc4ad23abb7a315f2789819be"
|
||||||
|
],
|
||||||
|
"version": "==2019.3"
|
||||||
|
},
|
||||||
|
"requests": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4",
|
||||||
|
"sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31"
|
||||||
|
],
|
||||||
|
"index": "pypi",
|
||||||
|
"version": "==2.22.0"
|
||||||
|
},
|
||||||
|
"six": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c",
|
||||||
|
"sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73"
|
||||||
|
],
|
||||||
|
"version": "==1.12.0"
|
||||||
|
},
|
||||||
|
"sshtunnel": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:c813fdcda8e81c3936ffeac47cb69cfb2d1f5e77ad0de656c6dab56aeebd9249"
|
||||||
|
],
|
||||||
|
"index": "pypi",
|
||||||
|
"version": "==0.1.5"
|
||||||
|
},
|
||||||
|
"tenacity": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:6a7511a59145c2e319b7d04ddd93c12d48cc3d3c8fa42c2846d33a620ee91f57",
|
||||||
|
"sha256:a4eb168dbf55ed2cae27e7c6b2bd48ab54dabaf294177d998330cf59f294c112"
|
||||||
|
],
|
||||||
|
"version": "==5.1.1"
|
||||||
|
},
|
||||||
|
"ucloud-common": {
|
||||||
|
"editable": true,
|
||||||
|
"git": "https://code.ungleich.ch/ucloud/ucloud_common.git",
|
||||||
|
"ref": "9f229eae27f9007e9c6c1021d3d5b12452863763"
|
||||||
|
},
|
||||||
|
"urllib3": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398",
|
||||||
|
"sha256:9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86"
|
||||||
|
],
|
||||||
|
"version": "==1.25.6"
|
||||||
|
},
|
||||||
|
"werkzeug": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:7280924747b5733b246fe23972186c6b348f9ae29724135a6dfc1e53cea433e7",
|
||||||
|
"sha256:e5f4a1f98b52b18a93da705a7458e55afb26f32bff83ff5d19189f92462d65c4"
|
||||||
|
],
|
||||||
|
"version": "==0.16.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"develop": {
|
||||||
|
"astroid": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:6560e1e1749f68c64a4b5dee4e091fce798d2f0d84ebe638cf0e0585a343acf4",
|
||||||
|
"sha256:b65db1bbaac9f9f4d190199bb8680af6f6f84fd3769a5ea883df8a91fe68b4c4"
|
||||||
|
],
|
||||||
|
"version": "==2.2.5"
|
||||||
|
},
|
||||||
|
"certifi": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50",
|
||||||
|
"sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef"
|
||||||
|
],
|
||||||
|
"version": "==2019.9.11"
|
||||||
|
},
|
||||||
|
"chardet": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
|
||||||
|
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
|
||||||
|
],
|
||||||
|
"version": "==3.0.4"
|
||||||
|
},
|
||||||
|
"coverage": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:08907593569fe59baca0bf152c43f3863201efb6113ecb38ce7e97ce339805a6",
|
||||||
|
"sha256:0be0f1ed45fc0c185cfd4ecc19a1d6532d72f86a2bac9de7e24541febad72650",
|
||||||
|
"sha256:141f08ed3c4b1847015e2cd62ec06d35e67a3ac185c26f7635f4406b90afa9c5",
|
||||||
|
"sha256:19e4df788a0581238e9390c85a7a09af39c7b539b29f25c89209e6c3e371270d",
|
||||||
|
"sha256:23cc09ed395b03424d1ae30dcc292615c1372bfba7141eb85e11e50efaa6b351",
|
||||||
|
"sha256:245388cda02af78276b479f299bbf3783ef0a6a6273037d7c60dc73b8d8d7755",
|
||||||
|
"sha256:331cb5115673a20fb131dadd22f5bcaf7677ef758741312bee4937d71a14b2ef",
|
||||||
|
"sha256:386e2e4090f0bc5df274e720105c342263423e77ee8826002dcffe0c9533dbca",
|
||||||
|
"sha256:3a794ce50daee01c74a494919d5ebdc23d58873747fa0e288318728533a3e1ca",
|
||||||
|
"sha256:60851187677b24c6085248f0a0b9b98d49cba7ecc7ec60ba6b9d2e5574ac1ee9",
|
||||||
|
"sha256:63a9a5fc43b58735f65ed63d2cf43508f462dc49857da70b8980ad78d41d52fc",
|
||||||
|
"sha256:6b62544bb68106e3f00b21c8930e83e584fdca005d4fffd29bb39fb3ffa03cb5",
|
||||||
|
"sha256:6ba744056423ef8d450cf627289166da65903885272055fb4b5e113137cfa14f",
|
||||||
|
"sha256:7494b0b0274c5072bddbfd5b4a6c6f18fbbe1ab1d22a41e99cd2d00c8f96ecfe",
|
||||||
|
"sha256:826f32b9547c8091679ff292a82aca9c7b9650f9fda3e2ca6bf2ac905b7ce888",
|
||||||
|
"sha256:93715dffbcd0678057f947f496484e906bf9509f5c1c38fc9ba3922893cda5f5",
|
||||||
|
"sha256:9a334d6c83dfeadae576b4d633a71620d40d1c379129d587faa42ee3e2a85cce",
|
||||||
|
"sha256:af7ed8a8aa6957aac47b4268631fa1df984643f07ef00acd374e456364b373f5",
|
||||||
|
"sha256:bf0a7aed7f5521c7ca67febd57db473af4762b9622254291fbcbb8cd0ba5e33e",
|
||||||
|
"sha256:bf1ef9eb901113a9805287e090452c05547578eaab1b62e4ad456fcc049a9b7e",
|
||||||
|
"sha256:c0afd27bc0e307a1ffc04ca5ec010a290e49e3afbe841c5cafc5c5a80ecd81c9",
|
||||||
|
"sha256:dd579709a87092c6dbee09d1b7cfa81831040705ffa12a1b248935274aee0437",
|
||||||
|
"sha256:df6712284b2e44a065097846488f66840445eb987eb81b3cc6e4149e7b6982e1",
|
||||||
|
"sha256:e07d9f1a23e9e93ab5c62902833bf3e4b1f65502927379148b6622686223125c",
|
||||||
|
"sha256:e2ede7c1d45e65e209d6093b762e98e8318ddeff95317d07a27a2140b80cfd24",
|
||||||
|
"sha256:e4ef9c164eb55123c62411f5936b5c2e521b12356037b6e1c2617cef45523d47",
|
||||||
|
"sha256:eca2b7343524e7ba246cab8ff00cab47a2d6d54ada3b02772e908a45675722e2",
|
||||||
|
"sha256:eee64c616adeff7db37cc37da4180a3a5b6177f5c46b187894e633f088fb5b28",
|
||||||
|
"sha256:ef824cad1f980d27f26166f86856efe11eff9912c4fed97d3804820d43fa550c",
|
||||||
|
"sha256:efc89291bd5a08855829a3c522df16d856455297cf35ae827a37edac45f466a7",
|
||||||
|
"sha256:fa964bae817babece5aa2e8c1af841bebb6d0b9add8e637548809d040443fee0",
|
||||||
|
"sha256:ff37757e068ae606659c28c3bd0d923f9d29a85de79bf25b2b34b148473b5025"
|
||||||
|
],
|
||||||
|
"version": "==4.5.4"
|
||||||
|
},
|
||||||
|
"coveralls": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:9bc5a1f92682eef59f688a8f280207190d9a6afb84cef8f567fa47631a784060",
|
||||||
|
"sha256:fb51cddef4bc458de347274116df15d641a735d3f0a580a9472174e2e62f408c"
|
||||||
|
],
|
||||||
|
"version": "==1.8.2"
|
||||||
|
},
|
||||||
|
"docopt": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"
|
||||||
|
],
|
||||||
|
"version": "==0.6.2"
|
||||||
|
},
|
||||||
|
"docutils": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0",
|
||||||
|
"sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827",
|
||||||
|
"sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99"
|
||||||
|
],
|
||||||
|
"version": "==0.15.2"
|
||||||
|
},
|
||||||
|
"dodgy": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:65e13cf878d7aff129f1461c13cb5fd1bb6dfe66bb5327e09379c3877763280c"
|
||||||
|
],
|
||||||
|
"version": "==0.1.9"
|
||||||
|
},
|
||||||
|
"frosted": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:c6a30ad502ea373f6fe4cafbcd896ece66948406b04365d14a3eb764cc529b07",
|
||||||
|
"sha256:d1e5d2b43a064b33c289b9a986a7425fd9a36bed8f519ca430ac7a0915e32b51"
|
||||||
|
],
|
||||||
|
"version": "==1.4.1"
|
||||||
|
},
|
||||||
|
"idna": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407",
|
||||||
|
"sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c"
|
||||||
|
],
|
||||||
|
"version": "==2.8"
|
||||||
|
},
|
||||||
|
"isort": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1",
|
||||||
|
"sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd"
|
||||||
|
],
|
||||||
|
"version": "==4.3.21"
|
||||||
|
},
|
||||||
|
"lazy-object-proxy": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:02b260c8deb80db09325b99edf62ae344ce9bc64d68b7a634410b8e9a568edbf",
|
||||||
|
"sha256:18f9c401083a4ba6e162355873f906315332ea7035803d0fd8166051e3d402e3",
|
||||||
|
"sha256:1f2c6209a8917c525c1e2b55a716135ca4658a3042b5122d4e3413a4030c26ce",
|
||||||
|
"sha256:2f06d97f0ca0f414f6b707c974aaf8829c2292c1c497642f63824119d770226f",
|
||||||
|
"sha256:616c94f8176808f4018b39f9638080ed86f96b55370b5a9463b2ee5c926f6c5f",
|
||||||
|
"sha256:63b91e30ef47ef68a30f0c3c278fbfe9822319c15f34b7538a829515b84ca2a0",
|
||||||
|
"sha256:77b454f03860b844f758c5d5c6e5f18d27de899a3db367f4af06bec2e6013a8e",
|
||||||
|
"sha256:83fe27ba321e4cfac466178606147d3c0aa18e8087507caec78ed5a966a64905",
|
||||||
|
"sha256:84742532d39f72df959d237912344d8a1764c2d03fe58beba96a87bfa11a76d8",
|
||||||
|
"sha256:874ebf3caaf55a020aeb08acead813baf5a305927a71ce88c9377970fe7ad3c2",
|
||||||
|
"sha256:9f5caf2c7436d44f3cec97c2fa7791f8a675170badbfa86e1992ca1b84c37009",
|
||||||
|
"sha256:a0c8758d01fcdfe7ae8e4b4017b13552efa7f1197dd7358dc9da0576f9d0328a",
|
||||||
|
"sha256:a4def978d9d28cda2d960c279318d46b327632686d82b4917516c36d4c274512",
|
||||||
|
"sha256:ad4f4be843dace866af5fc142509e9b9817ca0c59342fdb176ab6ad552c927f5",
|
||||||
|
"sha256:ae33dd198f772f714420c5ab698ff05ff900150486c648d29951e9c70694338e",
|
||||||
|
"sha256:b4a2b782b8a8c5522ad35c93e04d60e2ba7f7dcb9271ec8e8c3e08239be6c7b4",
|
||||||
|
"sha256:c462eb33f6abca3b34cdedbe84d761f31a60b814e173b98ede3c81bb48967c4f",
|
||||||
|
"sha256:fd135b8d35dfdcdb984828c84d695937e58cc5f49e1c854eb311c4d6aa03f4f1"
|
||||||
|
],
|
||||||
|
"version": "==1.4.2"
|
||||||
|
},
|
||||||
|
"mccabe": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
|
||||||
|
"sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
|
||||||
|
],
|
||||||
|
"version": "==0.6.1"
|
||||||
|
},
|
||||||
|
"mock": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:83657d894c90d5681d62155c82bda9c1187827525880eda8ff5df4ec813437c3",
|
||||||
|
"sha256:d157e52d4e5b938c550f39eb2fd15610db062441a9c2747d3dbfa9298211d0f8"
|
||||||
|
],
|
||||||
|
"version": "==3.0.5"
|
||||||
|
},
|
||||||
|
"mypy": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:1521c186a3d200c399bd5573c828ea2db1362af7209b2adb1bb8532cea2fb36f",
|
||||||
|
"sha256:31a046ab040a84a0fc38bc93694876398e62bc9f35eca8ccbf6418b7297f4c00",
|
||||||
|
"sha256:3b1a411909c84b2ae9b8283b58b48541654b918e8513c20a400bb946aa9111ae",
|
||||||
|
"sha256:48c8bc99380575deb39f5d3400ebb6a8a1cb5cc669bbba4d3bb30f904e0a0e7d",
|
||||||
|
"sha256:540c9caa57a22d0d5d3c69047cc9dd0094d49782603eb03069821b41f9e970e9",
|
||||||
|
"sha256:672e418425d957e276c291930a3921b4a6413204f53fe7c37cad7bc57b9a3391",
|
||||||
|
"sha256:6ed3b9b3fdc7193ea7aca6f3c20549b377a56f28769783a8f27191903a54170f",
|
||||||
|
"sha256:9371290aa2cad5ad133e4cdc43892778efd13293406f7340b9ffe99d5ec7c1d9",
|
||||||
|
"sha256:ace6ac1d0f87d4072f05b5468a084a45b4eda970e4d26704f201e06d47ab2990",
|
||||||
|
"sha256:b428f883d2b3fe1d052c630642cc6afddd07d5cd7873da948644508be3b9d4a7",
|
||||||
|
"sha256:d5bf0e6ec8ba346a2cf35cb55bf4adfddbc6b6576fcc9e10863daa523e418dbb",
|
||||||
|
"sha256:d7574e283f83c08501607586b3167728c58e8442947e027d2d4c7dcd6d82f453",
|
||||||
|
"sha256:dc889c84241a857c263a2b1cd1121507db7d5b5f5e87e77147097230f374d10b",
|
||||||
|
"sha256:f4748697b349f373002656bf32fede706a0e713d67bfdcf04edf39b1f61d46eb"
|
||||||
|
],
|
||||||
|
"version": "==0.740"
|
||||||
|
},
|
||||||
|
"mypy-extensions": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d",
|
||||||
|
"sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"
|
||||||
|
],
|
||||||
|
"version": "==0.4.3"
|
||||||
|
},
|
||||||
|
"nose": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:9ff7c6cc443f8c51994b34a667bbcf45afd6d945be7477b52e97516fd17c53ac",
|
||||||
|
"sha256:dadcddc0aefbf99eea214e0f1232b94f2fa9bd98fa8353711dacb112bfcbbb2a",
|
||||||
|
"sha256:f1bffef9cbc82628f6e7d7b40d7e255aefaa1adb6a1b1d26c69a8b79e6208a98"
|
||||||
|
],
|
||||||
|
"version": "==1.3.7"
|
||||||
|
},
|
||||||
|
"pep8-naming": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:1b419fa45b68b61cd8c5daf4e0c96d28915ad14d3d5f35fcc1e7e95324a33a2e",
|
||||||
|
"sha256:4eedfd4c4b05e48796f74f5d8628c068ff788b9c2b08471ad408007fc6450e5a"
|
||||||
|
],
|
||||||
|
"version": "==0.4.1"
|
||||||
|
},
|
||||||
|
"pies": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:79a652dddc64c6fa42c7dfe9686ae7b1d856391094b873e2f52fcd0bd662c102",
|
||||||
|
"sha256:e8a76923ce0e0f605240901983fe492814a65d3d803efe3013a0e1815b75e4e9"
|
||||||
|
],
|
||||||
|
"version": "==2.6.7"
|
||||||
|
},
|
||||||
|
"prospector": {
|
||||||
|
"extras": [
|
||||||
|
"with_everything"
|
||||||
|
],
|
||||||
|
"hashes": [
|
||||||
|
"sha256:aba551e53dc1a5a432afa67385eaa81d7b4cf4c162dc1a4d0ee00b3a0712ad90"
|
||||||
|
],
|
||||||
|
"index": "pypi",
|
||||||
|
"version": "==1.1.7"
|
||||||
|
},
|
||||||
|
"pycodestyle": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:cbc619d09254895b0d12c2c691e237b2e91e9b2ecf5e84c26b35400f93dcfb83",
|
||||||
|
"sha256:cbfca99bd594a10f674d0cd97a3d802a1fdef635d4361e1a2658de47ed261e3a"
|
||||||
|
],
|
||||||
|
"version": "==2.4.0"
|
||||||
|
},
|
||||||
|
"pydocstyle": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:04c84e034ebb56eb6396c820442b8c4499ac5eb94a3bda88951ac3dc519b6058",
|
||||||
|
"sha256:66aff87ffe34b1e49bff2dd03a88ce6843be2f3346b0c9814410d34987fbab59"
|
||||||
|
],
|
||||||
|
"version": "==4.0.1"
|
||||||
|
},
|
||||||
|
"pyflakes": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:08bd6a50edf8cffa9fa09a463063c425ecaaf10d1eb0335a7e8b1401aef89e6f",
|
||||||
|
"sha256:8d616a382f243dbf19b54743f280b80198be0bca3a5396f1d2e1fca6223e8805"
|
||||||
|
],
|
||||||
|
"version": "==1.6.0"
|
||||||
|
},
|
||||||
|
"pylint": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:5d77031694a5fb97ea95e828c8d10fc770a1df6eb3906067aaed42201a8a6a09",
|
||||||
|
"sha256:723e3db49555abaf9bf79dc474c6b9e2935ad82230b10c1138a71ea41ac0fff1"
|
||||||
|
],
|
||||||
|
"version": "==2.3.1"
|
||||||
|
},
|
||||||
|
"pylint-celery": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:41e32094e7408d15c044178ea828dd524beedbdbe6f83f712c5e35bde1de4beb"
|
||||||
|
],
|
||||||
|
"version": "==0.3"
|
||||||
|
},
|
||||||
|
"pylint-django": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:75c69d1ec2275918c37f175976da20e2f1e1e62e067098a685cd263ffa833dfd",
|
||||||
|
"sha256:c7cb6384ea7b33ea77052a5ae07358c10d377807390ef27b2e6ff997303fadb7"
|
||||||
|
],
|
||||||
|
"version": "==2.0.10"
|
||||||
|
},
|
||||||
|
"pylint-flask": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:f4d97de2216bf7bfce07c9c08b166e978fe9f2725de2a50a9845a97de7e31517"
|
||||||
|
],
|
||||||
|
"version": "==0.6"
|
||||||
|
},
|
||||||
|
"pylint-plugin-utils": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:2f30510e1c46edf268d3a195b2849bd98a1b9433229bb2ba63b8d776e1fc4d0a",
|
||||||
|
"sha256:57625dcca20140f43731311cd8fd879318bf45a8b0fd17020717a8781714a25a"
|
||||||
|
],
|
||||||
|
"version": "==0.6"
|
||||||
|
},
|
||||||
|
"pyroma": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:54d332f540d4828bc5672b75ccf9e12d4b2f72a42a4f304bcec1c73565aecc26",
|
||||||
|
"sha256:6b94feb609e1896579302f0836ef2fad3f17e0557e3ddcd0d76206cd3e366d27"
|
||||||
|
],
|
||||||
|
"version": "==2.5"
|
||||||
|
},
|
||||||
|
"pyyaml": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9",
|
||||||
|
"sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4",
|
||||||
|
"sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8",
|
||||||
|
"sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696",
|
||||||
|
"sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34",
|
||||||
|
"sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9",
|
||||||
|
"sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73",
|
||||||
|
"sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299",
|
||||||
|
"sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b",
|
||||||
|
"sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae",
|
||||||
|
"sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681",
|
||||||
|
"sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41",
|
||||||
|
"sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8"
|
||||||
|
],
|
||||||
|
"version": "==5.1.2"
|
||||||
|
},
|
||||||
|
"requests": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4",
|
||||||
|
"sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31"
|
||||||
|
],
|
||||||
|
"index": "pypi",
|
||||||
|
"version": "==2.22.0"
|
||||||
|
},
|
||||||
|
"requirements-detector": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:9fbc4b24e8b7c3663aff32e3eba34596848c6b91bd425079b386973bd8d08931"
|
||||||
|
],
|
||||||
|
"version": "==0.6"
|
||||||
|
},
|
||||||
|
"setoptconf": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:5b0b5d8e0077713f5d5152d4f63be6f048d9a1bb66be15d089a11c898c3cf49c"
|
||||||
|
],
|
||||||
|
"version": "==0.2.0"
|
||||||
|
},
|
||||||
|
"six": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c",
|
||||||
|
"sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73"
|
||||||
|
],
|
||||||
|
"version": "==1.12.0"
|
||||||
|
},
|
||||||
|
"snowballstemmer": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:209f257d7533fdb3cb73bdbd24f436239ca3b2fa67d56f6ff88e86be08cc5ef0",
|
||||||
|
"sha256:df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52"
|
||||||
|
],
|
||||||
|
"version": "==2.0.0"
|
||||||
|
},
|
||||||
|
"typed-ast": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:1170afa46a3799e18b4c977777ce137bb53c7485379d9706af8a59f2ea1aa161",
|
||||||
|
"sha256:18511a0b3e7922276346bcb47e2ef9f38fb90fd31cb9223eed42c85d1312344e",
|
||||||
|
"sha256:262c247a82d005e43b5b7f69aff746370538e176131c32dda9cb0f324d27141e",
|
||||||
|
"sha256:2b907eb046d049bcd9892e3076c7a6456c93a25bebfe554e931620c90e6a25b0",
|
||||||
|
"sha256:354c16e5babd09f5cb0ee000d54cfa38401d8b8891eefa878ac772f827181a3c",
|
||||||
|
"sha256:48e5b1e71f25cfdef98b013263a88d7145879fbb2d5185f2a0c79fa7ebbeae47",
|
||||||
|
"sha256:4e0b70c6fc4d010f8107726af5fd37921b666f5b31d9331f0bd24ad9a088e631",
|
||||||
|
"sha256:630968c5cdee51a11c05a30453f8cd65e0cc1d2ad0d9192819df9978984529f4",
|
||||||
|
"sha256:66480f95b8167c9c5c5c87f32cf437d585937970f3fc24386f313a4c97b44e34",
|
||||||
|
"sha256:71211d26ffd12d63a83e079ff258ac9d56a1376a25bc80b1cdcdf601b855b90b",
|
||||||
|
"sha256:7954560051331d003b4e2b3eb822d9dd2e376fa4f6d98fee32f452f52dd6ebb2",
|
||||||
|
"sha256:838997f4310012cf2e1ad3803bce2f3402e9ffb71ded61b5ee22617b3a7f6b6e",
|
||||||
|
"sha256:95bd11af7eafc16e829af2d3df510cecfd4387f6453355188342c3e79a2ec87a",
|
||||||
|
"sha256:bc6c7d3fa1325a0c6613512a093bc2a2a15aeec350451cbdf9e1d4bffe3e3233",
|
||||||
|
"sha256:cc34a6f5b426748a507dd5d1de4c1978f2eb5626d51326e43280941206c209e1",
|
||||||
|
"sha256:d755f03c1e4a51e9b24d899561fec4ccaf51f210d52abdf8c07ee2849b212a36",
|
||||||
|
"sha256:d7c45933b1bdfaf9f36c579671fec15d25b06c8398f113dab64c18ed1adda01d",
|
||||||
|
"sha256:d896919306dd0aa22d0132f62a1b78d11aaf4c9fc5b3410d3c666b818191630a",
|
||||||
|
"sha256:fdc1c9bbf79510b76408840e009ed65958feba92a88833cdceecff93ae8fff66",
|
||||||
|
"sha256:ffde2fbfad571af120fcbfbbc61c72469e72f550d676c3342492a9dfdefb8f12"
|
||||||
|
],
|
||||||
|
"markers": "implementation_name == 'cpython'",
|
||||||
|
"version": "==1.4.0"
|
||||||
|
},
|
||||||
|
"typing-extensions": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:2ed632b30bb54fc3941c382decfd0ee4148f5c591651c9272473fea2c6397d95",
|
||||||
|
"sha256:b1edbbf0652660e32ae780ac9433f4231e7339c7f9a8057d0f042fcbcea49b87",
|
||||||
|
"sha256:d8179012ec2c620d3791ca6fe2bf7979d979acdbef1fca0bc56b37411db682ed"
|
||||||
|
],
|
||||||
|
"version": "==3.7.4"
|
||||||
|
},
|
||||||
|
"urllib3": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398",
|
||||||
|
"sha256:9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86"
|
||||||
|
],
|
||||||
|
"version": "==1.25.6"
|
||||||
|
},
|
||||||
|
"vulture": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:17be5f6a7c88ea43f2619f80338af7407275ee46a24000abe2570e59ca44b3d0",
|
||||||
|
"sha256:23d837cf619c3bb75f87bc498c79cd4f27f0c54031ca88a9e05606c9dd627fef"
|
||||||
|
],
|
||||||
|
"version": "==0.24"
|
||||||
|
},
|
||||||
|
"wrapt": {
|
||||||
|
"hashes": [
|
||||||
|
"sha256:565a021fd19419476b9362b05eeaa094178de64f8361e44468f9e9d7843901e1"
|
||||||
|
],
|
||||||
|
"version": "==1.11.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
6 TODO.md Normal file
@@ -0,0 +1,6 @@
# TODO

- Check for `etcd3.exceptions.ConnectionFailedError` when calling etcd operations to
  avoid crashing the whole application (see the sketch below)
- Raise KeyError instead of returning None when a key is not found in etcd
- Specify the image format when using qemu-img to create a virtual machine
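The first item above could be handled with a small wrapper around etcd calls. A minimal sketch, assuming the etcd client used by the api/ package surfaces python-etcd3's exceptions; the retry interval and logging call are illustrative and not part of this commit:

import logging
import time

import etcd3


def etcd_call_with_retry(operation, *args, retry_interval=5, **kwargs):
    # Retry a single etcd operation instead of letting a lost connection
    # crash the whole application.
    while True:
        try:
            return operation(*args, **kwargs)
        except etcd3.exceptions.ConnectionFailedError:
            logging.exception("etcd connection failed, retrying in %ss", retry_interval)
            time.sleep(retry_interval)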
12 api/README.md Executable file
@@ -0,0 +1,12 @@
# ucloud-api
[![Project Status: WIP – Initial development is in progress, but there has not yet been a stable, usable release suitable for the public.](https://www.repostatus.org/badges/latest/wip.svg)](https://www.repostatus.org/#wip)

## Installation

**Make sure you have Python >= 3.5 and Pipenv installed.**

1. Clone the repository and `cd` into it.
2. Run the following commands
   - `pipenv install`
   - `pipenv shell`
   - `python main.py`
48 api/common_fields.py Executable file
@@ -0,0 +1,48 @@
import os

from config import etcd_client as client
from config import VM_PREFIX


class Field:
    def __init__(self, _name, _type, _value=None):
        self.name = _name
        self.value = _value
        self.type = _type
        self.__errors = []

    def validation(self):
        return True

    def is_valid(self):
        if self.value == KeyError:
            self.add_error("'{}' field is a required field".format(self.name))
        else:
            if not isinstance(self.value, self.type):
                self.add_error("Incorrect Type for '{}' field".format(self.name))
            else:
                self.validation()

        if self.__errors:
            return False
        return True

    def get_errors(self):
        return self.__errors

    def add_error(self, error):
        self.__errors.append(error)


class VmUUIDField(Field):
    def __init__(self, data):
        self.uuid = data.get("uuid", KeyError)

        super().__init__("uuid", str, self.uuid)

        self.validation = self.vm_uuid_validation

    def vm_uuid_validation(self):
        r = client.get(os.path.join(VM_PREFIX, self.uuid))
        if not r:
            self.add_error("VM with uuid {} does not exist".format(self.uuid))
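Field subclasses such as VmUUIDField are meant to be composed into the request validators that main.py imports from schemas.py (not shown in this excerpt). A hypothetical validator following that pattern; ExampleSchema is an illustrative name, not a class from this commit:

from common_fields import VmUUIDField


class ExampleSchema:
    def __init__(self, data):
        # One Field instance per key expected in the request body.
        self.fields = [VmUUIDField(data)]

    def is_valid(self):
        # Validate every field so all errors are collected, then report overall validity.
        results = [field.is_valid() for field in self.fields]
        return all(results)

    def get_errors(self):
        return [error for field in self.fields for error in field.get_errors()]

main.py uses its validators in exactly this way, e.g. validator = VMStatusSchema(data), then validator.is_valid() and, on failure, validator.get_errors() with a 400 response.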
32 api/config.py Normal file
@@ -0,0 +1,32 @@
import logging

from etcd3_wrapper import Etcd3Wrapper
from decouple import config


from ucloud_common.vm import VmPool
from ucloud_common.host import HostPool
from ucloud_common.request import RequestPool

logging.basicConfig(
    level=logging.DEBUG,
    filename="log.txt",
    filemode="a",
    format="%(asctime)s: %(levelname)s - %(message)s",
    datefmt="%d-%b-%y %H:%M:%S",
)


WITHOUT_CEPH = config("WITHOUT_CEPH", False, cast=bool)
VM_PREFIX = config("VM_PREFIX")
HOST_PREFIX = config("HOST_PREFIX")
REQUEST_PREFIX = config("REQUEST_PREFIX")
FILE_PREFIX = config("FILE_PREFIX")
IMAGE_PREFIX = config("IMAGE_PREFIX")
IMAGE_STORE_PREFIX = config("IMAGE_STORE_PREFIX")

etcd_client = Etcd3Wrapper(host=config("ETCD_URL"))

VM_POOL = VmPool(etcd_client, VM_PREFIX)
HOST_POOL = HostPool(etcd_client, HOST_PREFIX)
REQUEST_POOL = RequestPool(etcd_client, REQUEST_PREFIX)
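python-decouple resolves each config(...) call above from the environment or from a .env file in the working directory, which is why .env appears in .gitignore. A hypothetical .env for a local setup; every value here is a placeholder, not something shipped with this commit:

ETCD_URL=localhost
WITHOUT_CEPH=True
VM_PREFIX=/v1/vm/
HOST_PREFIX=/v1/host/
REQUEST_PREFIX=/v1/request/
FILE_PREFIX=/v1/file/
IMAGE_PREFIX=/v1/image/
IMAGE_STORE_PREFIX=/v1/image_store/

helper.py reads AUTH_NAME, AUTH_SEED, AUTH_REALM, OTP_SERVER and OTP_VERIFY_ENDPOINT through the same mechanism, all with defaults, so they can be added to the same file when OTP verification is used.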
17 api/create_image_store.py Executable file
@@ -0,0 +1,17 @@
import json
import os

from uuid import uuid4

from config import etcd_client as client
from config import IMAGE_STORE_PREFIX

data = {
    "is_public": True,
    "type": "ceph",
    "name": "images",
    "description": "first ever public image-store",
    "attributes": {"list": [], "key": [], "pool": "images"},
}

client.put(os.path.join(IMAGE_STORE_PREFIX, uuid4().hex), json.dumps(data))
70 api/helper.py Executable file
@@ -0,0 +1,70 @@
import binascii
import random
import requests

from decouple import config
from pyotp import TOTP
from config import VM_POOL


def check_otp(name, realm, token):
    try:
        data = {
            "auth_name": config("AUTH_NAME", ""),
            "auth_token": TOTP(config("AUTH_SEED", "")).now(),
            "auth_realm": config("AUTH_REALM", ""),
            "name": name,
            "realm": realm,
            "token": token,
        }
    except binascii.Error:
        return 400

    response = requests.get(
        "{OTP_SERVER}{OTP_VERIFY_ENDPOINT}".format(
            OTP_SERVER=config("OTP_SERVER", ""),
            OTP_VERIFY_ENDPOINT=config("OTP_VERIFY_ENDPOINT", "verify"),
        ),
        json=data,
    )
    return response.status_code


def resolve_vm_name(name, owner):
    """Return UUID of the virtual machine whose name == name and owner == owner

    Input: name of vm, owner of vm.
    Output: uuid of vm if found, otherwise None
    """
    result = next(
        filter(
            lambda vm: vm.value["owner"] == owner and vm.value["name"] == name,
            VM_POOL.vms,
        ),
        None,
    )
    if result:
        return result.key.split("/")[-1]

    return None


def random_bytes(num=6):
    return [random.randrange(256) for _ in range(num)]

def generate_mac(uaa=False, multicast=False, oui=None, separator=':', byte_fmt='%02x'):
    mac = random_bytes()
    if oui:
        if type(oui) == str:
            oui = [int(chunk) for chunk in oui.split(separator)]
        mac = oui + random_bytes(num=6-len(oui))
    else:
        if multicast:
            mac[0] |= 1  # set bit 0
        else:
            mac[0] &= ~1  # clear bit 0
        if uaa:
            mac[0] &= ~(1 << 1)  # clear bit 1
        else:
            mac[0] |= 1 << 1  # set bit 1
    return separator.join(byte_fmt % b for b in mac)
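For context on how these helpers are used further down: generate_mac() gives every new VM created through the API a random, locally administered unicast MAC address, and resolve_vm_name() maps a (name, owner) pair back to the VM's UUID in etcd. An illustrative session; the owner and VM names are placeholders:

from helper import generate_mac, resolve_vm_name

mac = generate_mac()                                  # e.g. '06:1b:c2:9f:00:5a', random on each call
mac_with_oui = generate_mac(oui=[0x52, 0x54, 0x00])   # keep a fixed 3-byte OUI, randomize the rest

vm_uuid = resolve_vm_name("my-vm", "alice")           # UUID suffix of the matching etcd key, or None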
380 api/main.py Normal file
@@ -0,0 +1,380 @@
import json
import subprocess
import os
from uuid import uuid4

from flask import Flask, request
from flask_restful import Resource, Api

from ucloud_common.vm import VMStatus
from ucloud_common.request import RequestEntry, RequestType

from helper import generate_mac

from config import (
    etcd_client,
    WITHOUT_CEPH,
    VM_PREFIX,
    HOST_PREFIX,
    FILE_PREFIX,
    IMAGE_PREFIX,
    logging,
    REQUEST_POOL,
    VM_POOL,
    HOST_POOL,
)
from schemas import (
    CreateVMSchema,
    VMStatusSchema,
    CreateImageSchema,
    VmActionSchema,
    OTPSchema,
    CreateHostSchema,
    VmMigrationSchema,
    AddSSHSchema,
    RemoveSSHSchema,
    GetSSHSchema
)

app = Flask(__name__)
api = Api(app)


class CreateVM(Resource):
    @staticmethod
    def post():
        data = request.json
        print(data)
        validator = CreateVMSchema(data)
        if validator.is_valid():
            vm_uuid = uuid4().hex
            vm_key = os.path.join(VM_PREFIX, vm_uuid)
            specs = {
                'cpu': validator.specs['cpu'],
                'ram': validator.specs['ram'],
                'os-ssd': validator.specs['os-ssd'],
                'hdd': validator.specs['hdd']
            }

            vm_entry = {
                "name": data["vm_name"],
                "owner": data["name"],
                "owner_realm": data["realm"],
                "specs": specs,
                "hostname": "",
                "status": "",
                "image_uuid": data["image_uuid"],
                "log": [],
                "vnc_socket": "",
                "mac": str(generate_mac()),
                "metadata": {
                    "ssh-keys": []
                }
            }
            etcd_client.put(vm_key, vm_entry, value_in_json=True)

            # Create ScheduleVM Request
            r = RequestEntry.from_scratch(type=RequestType.ScheduleVM, uuid=vm_uuid)
            REQUEST_POOL.put(r)

            return {"message": "VM Creation Queued"}, 200
        return validator.get_errors(), 400
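
# Illustration only, not part of this file: the exact request validation for
# CreateVM lives in schemas.CreateVMSchema, which is not shown in this commit
# excerpt. Judging from the keys read above, the JSON body carries at least:
#
#   {
#       "name": "<owner>",
#       "realm": "<owner realm>",
#       "vm_name": "<name for the new VM>",
#       "image_uuid": "<uuid of an image>",
#       "specs": {"cpu": ..., "ram": ..., "os-ssd": ..., "hdd": ...}
#   }
#
# On success the VM entry is written under VM_PREFIX/<uuid> and a ScheduleVM
# request is queued for the scheduler; the schema may require more fields
# (e.g. an OTP token) than are visible here.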


class VmStatus(Resource):
    @staticmethod
    def get():
        data = request.json
        validator = VMStatusSchema(data)
        if validator.is_valid():
            vm = VM_POOL.get(os.path.join(VM_PREFIX, data["uuid"]))
            return json.dumps(str(vm))
        else:
            return validator.get_errors(), 400


class CreateImage(Resource):
    @staticmethod
    def post():
        data = request.json
        validator = CreateImageSchema(data)
        if validator.is_valid():
            file_entry = etcd_client.get(os.path.join(FILE_PREFIX, data["uuid"]))
            file_entry_value = json.loads(file_entry.value)

            image_entry_json = {
                "status": "TO_BE_CREATED",
                "owner": file_entry_value["owner"],
                "filename": file_entry_value["filename"],
                "name": data["name"],
                "store_name": data["image_store"],
                "visibility": "public",
            }
            etcd_client.put(
                os.path.join(IMAGE_PREFIX, data["uuid"]), json.dumps(image_entry_json)
            )

            return {"message": "Image successfully created"}
        return validator.get_errors(), 400


class ListPublicImages(Resource):
    @staticmethod
    def get():
        images = etcd_client.get_prefix(IMAGE_PREFIX)
        r = {}
        for image in images:
            r[image.key.split("/")[-1]] = json.loads(image.value)
        return r, 200


class VMAction(Resource):
    @staticmethod
    def post():
        data = request.json
        validator = VmActionSchema(data)

        if validator.is_valid():
            vm_entry = VM_POOL.get(os.path.join(VM_PREFIX, data["uuid"]))
            action = data["action"]

            if action == "start":
                vm_entry.status = VMStatus.requested_start
                VM_POOL.put(vm_entry)
                action = "schedule"

            if action == "delete" and vm_entry.hostname == "":
                try:
                    path_without_protocol = vm_entry.path[vm_entry.path.find(":") + 1 :]

                    if WITHOUT_CEPH:
                        command_to_delete = [
                            "rm", "-rf",
                            os.path.join("/var/vm", vm_entry.uuid),
                        ]
                    else:
                        command_to_delete = ["rbd", "rm", path_without_protocol]

                    subprocess.check_output(command_to_delete, stderr=subprocess.PIPE)
                except subprocess.CalledProcessError as e:
                    if "No such file" in e.stderr.decode("utf-8"):
                        etcd_client.client.delete(vm_entry.key)
                        return {"message": "VM successfully deleted"}
                    else:
                        logging.exception(e)
                        return {"message": "Some error occurred while deleting VM"}
                else:
                    etcd_client.client.delete(vm_entry.key)
                    return {"message": "VM successfully deleted"}

            r = RequestEntry.from_scratch(
                type="{}VM".format(action.title()),
                uuid=data["uuid"],
                hostname=vm_entry.hostname,
            )
            REQUEST_POOL.put(r)
            return {"message": "VM {} Queued".format(action.title())}, 200
        else:
            return validator.get_errors(), 400
class VMMigration(Resource):
|
||||||
|
@staticmethod
|
||||||
|
def post():
|
||||||
|
data = request.json
|
||||||
|
validator = VmMigrationSchema(data)
|
||||||
|
|
||||||
|
if validator.is_valid():
|
||||||
|
vm = VM_POOL.get(data["uuid"])
|
||||||
|
|
||||||
|
r = RequestEntry.from_scratch(
|
||||||
|
type=RequestType.ScheduleVM,
|
||||||
|
uuid=vm.uuid,
|
||||||
|
destination=os.path.join(HOST_PREFIX, data["destination"]),
|
||||||
|
migration=True,
|
||||||
|
)
|
||||||
|
REQUEST_POOL.put(r)
|
||||||
|
return {"message": "VM Migration Initialization Queued"}, 200
|
||||||
|
else:
|
||||||
|
return validator.get_errors(), 400
|
||||||
|
|
||||||
|
|
||||||
|
class ListUserVM(Resource):
|
||||||
|
@staticmethod
|
||||||
|
def get():
|
||||||
|
data = request.json
|
||||||
|
validator = OTPSchema(data)
|
||||||
|
|
||||||
|
if validator.is_valid():
|
||||||
|
vms = etcd_client.get_prefix(VM_PREFIX, value_in_json=True)
|
||||||
|
return_vms = []
|
||||||
|
user_vms = filter(lambda v: v.value["owner"] == data["name"], vms)
|
||||||
|
for vm in user_vms:
|
||||||
|
return_vms.append(
|
||||||
|
{
|
||||||
|
"name": vm.value["name"],
|
||||||
|
"vm_uuid": vm.key.split("/")[-1],
|
||||||
|
"specs": vm.value["specs"],
|
||||||
|
"status": vm.value["status"],
|
||||||
|
"hostname": vm.value["hostname"],
|
||||||
|
"mac": vm.value["mac"],
|
||||||
|
"vnc_socket": None
|
||||||
|
if vm.value.get("vnc_socket", None) is None
|
||||||
|
else vm.value["vnc_socket"],
|
||||||
|
}
|
||||||
|
)
|
||||||
|
if return_vms:
|
||||||
|
return {"message": return_vms}, 200
|
||||||
|
return {"message": "No VM found"}, 404
|
||||||
|
|
||||||
|
else:
|
||||||
|
return validator.get_errors(), 400
|
||||||
|
|
||||||
|
|
||||||
|
class ListUserFiles(Resource):
|
||||||
|
@staticmethod
|
||||||
|
def get():
|
||||||
|
data = request.json
|
||||||
|
validator = OTPSchema(data)
|
||||||
|
|
||||||
|
if validator.is_valid():
|
||||||
|
files = etcd_client.get_prefix(FILE_PREFIX, value_in_json=True)
|
||||||
|
return_files = []
|
||||||
|
user_files = list(filter(lambda f: f.value["owner"] == data["name"], files))
|
||||||
|
for file in user_files:
|
||||||
|
return_files.append(
|
||||||
|
{
|
||||||
|
"filename": file.value["filename"],
|
||||||
|
"uuid": file.key.split("/")[-1],
|
||||||
|
}
|
||||||
|
)
|
||||||
|
return {"message": return_files}, 200
|
||||||
|
else:
|
||||||
|
return validator.get_errors(), 400
|
||||||
|
|
||||||
|
|
||||||
|
class CreateHost(Resource):
|
||||||
|
@staticmethod
|
||||||
|
def post():
|
||||||
|
data = request.json
|
||||||
|
validator = CreateHostSchema(data)
|
||||||
|
if validator.is_valid():
|
||||||
|
host_key = os.path.join(HOST_PREFIX, uuid4().hex)
|
||||||
|
host_entry = {
|
||||||
|
"specs": data["specs"],
|
||||||
|
"hostname": data["hostname"],
|
||||||
|
"status": "DEAD",
|
||||||
|
"last_heartbeat": "",
|
||||||
|
}
|
||||||
|
etcd_client.put(host_key, host_entry, value_in_json=True)
|
||||||
|
|
||||||
|
return {"message": "Host Created"}, 200
|
||||||
|
|
||||||
|
return validator.get_errors(), 400
|
||||||
|
|
||||||
|
|
||||||
|
class ListHost(Resource):
|
||||||
|
@staticmethod
|
||||||
|
def get():
|
||||||
|
hosts = HOST_POOL.hosts
|
||||||
|
r = {
|
||||||
|
host.key: {
|
||||||
|
"status": host.status,
|
||||||
|
"specs": host.specs,
|
||||||
|
"hostname": host.hostname,
|
||||||
|
}
|
||||||
|
for host in hosts
|
||||||
|
}
|
||||||
|
return r, 200
|
||||||
|
|
||||||
|
|
||||||
|
class GetSSHKeys(Resource):
|
||||||
|
@staticmethod
|
||||||
|
def get():
|
||||||
|
data = request.json
|
||||||
|
validator = GetSSHSchema(data)
|
||||||
|
if validator.is_valid():
|
||||||
|
if not validator.key_name.value:
|
||||||
|
|
||||||
|
# {user_prefix}/{realm}/{name}/key/
|
||||||
|
etcd_key = os.path.join(USER_PREFIX, data["realm"], data["name"], "key")
|
||||||
|
etcd_entry = etcd_client.get_prefix(etcd_key, value_in_json=True)
|
||||||
|
|
||||||
|
keys = {key.key.split("/")[-1]: key.value for key in etcd_entry}
|
||||||
|
return {"keys": keys}
|
||||||
|
else:
|
||||||
|
|
||||||
|
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||||
|
etcd_key = os.path.join(USER_PREFIX, data["realm"], data["name"],
|
||||||
|
"key", data["key_name"])
|
||||||
|
etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
|
||||||
|
|
||||||
|
if etcd_entry:
|
||||||
|
return {"keys": {etcd_entry.key.split("/")[-1]: etcd_entry.value}}
|
||||||
|
else:
|
||||||
|
return {"keys": {}}
|
||||||
|
else:
|
||||||
|
return validator.get_errors(), 400
|
||||||
|
|
||||||
|
|
||||||
|
class AddSSHKey(Resource):
|
||||||
|
@staticmethod
|
||||||
|
def post():
|
||||||
|
data = request.json
|
||||||
|
validator = AddSSHSchema(data)
|
||||||
|
if validator.is_valid():
|
||||||
|
|
||||||
|
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||||
|
etcd_key = os.path.join(USER_PREFIX, data["realm"], data["name"],
|
||||||
|
"key", data["key_name"])
|
||||||
|
etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
|
||||||
|
if etcd_entry:
|
||||||
|
return {"message": "Key with name '{}' already exists".format(data["key_name"])}
|
||||||
|
else:
|
||||||
|
# Key not found. This implies the user hasn't added any key yet.
|
||||||
|
etcd_client.put(etcd_key, data["key"], value_in_json=True)
|
||||||
|
return {"message": "Key added successfully"}
|
||||||
|
else:
|
||||||
|
return validator.get_errors(), 400
|
||||||
|
|
||||||
|
|
||||||
|
class RemoveSSHKey(Resource):
|
||||||
|
@staticmethod
|
||||||
|
def get():
|
||||||
|
data = request.json
|
||||||
|
validator = RemoveSSHSchema(data)
|
||||||
|
if validator.is_valid():
|
||||||
|
|
||||||
|
# {user_prefix}/{realm}/{name}/key/{key_name}
|
||||||
|
etcd_key = os.path.join(USER_PREFIX, data["realm"], data["name"],
|
||||||
|
"key", data["key_name"])
|
||||||
|
etcd_entry = etcd_client.get(etcd_key, value_in_json=True)
|
||||||
|
if etcd_entry:
|
||||||
|
etcd_client.client.delete(etcd_key)
|
||||||
|
return {"message": "Key successfully removed."}
|
||||||
|
else:
|
||||||
|
return {"message": "No Key with name '{}' Exists at all.".format(data["key_name"])}
|
||||||
|
else:
|
||||||
|
return validator.get_errors(), 400
|
||||||
|
|
||||||
|
api.add_resource(CreateVM, "/vm/create")
|
||||||
|
api.add_resource(VmStatus, "/vm/status")
|
||||||
|
|
||||||
|
api.add_resource(VMAction, "/vm/action")
|
||||||
|
api.add_resource(VMMigration, "/vm/migrate")
|
||||||
|
|
||||||
|
api.add_resource(CreateImage, "/image/create")
|
||||||
|
api.add_resource(ListPublicImages, "/image/list-public")
|
||||||
|
|
||||||
|
api.add_resource(ListUserVM, "/user/vms")
|
||||||
|
api.add_resource(ListUserFiles, "/user/files")
|
||||||
|
|
||||||
|
api.add_resource(AddSSHKey, "/user/add-ssh")
|
||||||
|
api.add_resource(RemoveSSHKey, "/user/remove-ssh")
|
||||||
|
api.add_resource(GetSSHKeys, "/user/get-ssh")
|
||||||
|
|
||||||
|
api.add_resource(CreateHost, "/host/create")
|
||||||
|
api.add_resource(ListHost, "/host/list")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
app.run(host="::", debug=True)
|
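# A minimal sketch (not part of the original file) of how a client could call
# the API registered above, assuming Flask's default port 5000 on the IPv6
# loopback; the credential and VM values below are placeholders:
#
#   import requests
#   payload = {"name": "user1", "realm": "example-realm", "token": "123456",
#              "vm_name": "my-vm", "action": "start"}
#   r = requests.post("http://[::1]:5000/vm/action", json=payload)
#   print(r.status_code, r.json())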
415
api/schemas.py
Executable file
415
api/schemas.py
Executable file
|
@ -0,0 +1,415 @@
|
||||||
|
"""
|
||||||
|
This module contains classes that validate and intercept/modify
|
||||||
|
data coming from ucloud-cli (the user).
|
||||||
|
|
||||||
|
It was primarily developed as an alternative to the argument parser
|
||||||
|
of Flask_Restful, which is going to be deprecated. I also tried
|
||||||
|
marshmallow for that purpose, but it was overkill (because it
|
||||||
|
does validation + serialization + deserialization) and is a little
|
||||||
|
inflexible for our purpose.
|
||||||
|
"""
|
||||||
|
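# Illustrative sketch (not part of the original module): a schema is built from
# Field objects and consumed by a resource through is_valid()/get_errors().
# The class and field names below are hypothetical.
#
#   class RenameVMSchema(OTPSchema):
#       def __init__(self, data):
#           self.new_name = Field("new_name", str, data.get("new_name", KeyError))
#           super().__init__(data=data, fields=[self.new_name])
#
#   validator = RenameVMSchema(request.json)
#   if validator.is_valid():
#       ...                          # act on the validated data
#   else:
#       return validator.get_errors(), 400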
|
||||||
|
# TODO: Fix error message when user's mentioned VM (referred by name)
|
||||||
|
# does not exist.
|
||||||
|
#
|
||||||
|
# Currently, it says uuid is a required field.
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import bitmath
|
||||||
|
|
||||||
|
from ucloud_common.host import HostPool, HostStatus
|
||||||
|
from ucloud_common.vm import VmPool, VMStatus
|
||||||
|
|
||||||
|
from common_fields import Field, VmUUIDField
|
||||||
|
from helper import check_otp, resolve_vm_name
|
||||||
|
from config import etcd_client as client
|
||||||
|
from config import (HOST_PREFIX, VM_PREFIX, IMAGE_PREFIX,
|
||||||
|
FILE_PREFIX, IMAGE_STORE_PREFIX)
|
||||||
|
|
||||||
|
HOST_POOL = HostPool(client, HOST_PREFIX)
|
||||||
|
VM_POOL = VmPool(client, VM_PREFIX)
|
||||||
|
|
||||||
|
|
||||||
|
class BaseSchema:
|
||||||
|
def __init__(self, data, fields=None):
|
||||||
|
_ = data # suppress linter warning
|
||||||
|
self.__errors = []
|
||||||
|
if fields is None:
|
||||||
|
self.fields = []
|
||||||
|
else:
|
||||||
|
self.fields = fields
|
||||||
|
|
||||||
|
def validation(self):
|
||||||
|
# custom validation is optional
|
||||||
|
return True
|
||||||
|
|
||||||
|
def is_valid(self):
|
||||||
|
for field in self.fields:
|
||||||
|
field.is_valid()
|
||||||
|
self.add_field_errors(field)
|
||||||
|
|
||||||
|
for parent in self.__class__.__bases__:
|
||||||
|
try:
|
||||||
|
parent.validation(self)
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
if not self.__errors:
|
||||||
|
self.validation()
|
||||||
|
|
||||||
|
if self.__errors:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
def get_errors(self):
|
||||||
|
return {"message": self.__errors}
|
||||||
|
|
||||||
|
def add_field_errors(self, field: Field):
|
||||||
|
self.__errors += field.get_errors()
|
||||||
|
|
||||||
|
def add_error(self, error):
|
||||||
|
self.__errors.append(error)
|
||||||
|
|
||||||
|
|
||||||
|
class OTPSchema(BaseSchema):
|
||||||
|
def __init__(self, data: dict, fields=None):
|
||||||
|
self.name = Field("name", str, data.get("name", KeyError))
|
||||||
|
self.realm = Field("realm", str, data.get("realm", KeyError))
|
||||||
|
self.token = Field("token", str, data.get("token", KeyError))
|
||||||
|
|
||||||
|
_fields = [self.name, self.realm, self.token]
|
||||||
|
if fields:
|
||||||
|
_fields += fields
|
||||||
|
super().__init__(data=data, fields=_fields)
|
||||||
|
|
||||||
|
def validation(self):
|
||||||
|
print(self.name.value, self.realm.value, self.token.value)
|
||||||
|
if check_otp(self.name.value, self.realm.value, self.token.value) != 200:
|
||||||
|
self.add_error("Wrong Credentials")
|
||||||
|
|
||||||
|
|
||||||
|
# Image Operations
|
||||||
|
|
||||||
|
|
||||||
|
class CreateImageSchema(BaseSchema):
|
||||||
|
def __init__(self, data):
|
||||||
|
# Fields
|
||||||
|
self.uuid = Field("uuid", str, data.get("uuid", KeyError))
|
||||||
|
self.name = Field("name", str, data.get("name", KeyError))
|
||||||
|
self.image_store = Field("image_store", str, data.get("image_store", KeyError))
|
||||||
|
|
||||||
|
# Validations
|
||||||
|
self.uuid.validation = self.file_uuid_validation
|
||||||
|
self.image_store.validation = self.image_store_name_validation
|
||||||
|
|
||||||
|
# All Fields
|
||||||
|
fields = [self.uuid, self.name, self.image_store]
|
||||||
|
super().__init__(data, fields)
|
||||||
|
|
||||||
|
def file_uuid_validation(self):
|
||||||
|
file_entry = client.get(os.path.join(FILE_PREFIX, self.uuid.value))
|
||||||
|
if file_entry is None:
|
||||||
|
self.add_error(
|
||||||
|
"Image File with uuid '{}' Not Found".format(self.uuid.value)
|
||||||
|
)
|
||||||
|
|
||||||
|
def image_store_name_validation(self):
|
||||||
|
image_stores = list(client.get_prefix(IMAGE_STORE_PREFIX))
|
||||||
|
|
||||||
|
image_store = next(
|
||||||
|
filter(
|
||||||
|
lambda s: json.loads(s.value)["name"] == self.image_store.value,
|
||||||
|
image_stores,
|
||||||
|
),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
if not image_store:
|
||||||
|
self.add_error("Store '{}' does not exists".format(self.image_store.value))
|
||||||
|
|
||||||
|
|
||||||
|
# Host Operations
|
||||||
|
|
||||||
|
class CreateHostSchema(OTPSchema):
|
||||||
|
def __init__(self, data):
|
||||||
|
self.parsed_specs = {}
|
||||||
|
# Fields
|
||||||
|
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
||||||
|
self.hostname = Field("hostname", str, data.get("hostname", KeyError))
|
||||||
|
|
||||||
|
# Validation
|
||||||
|
self.specs.validation = self.specs_validation
|
||||||
|
|
||||||
|
fields = [self.hostname, self.specs]
|
||||||
|
|
||||||
|
super().__init__(data=data, fields=fields)
|
||||||
|
|
||||||
|
def specs_validation(self):
|
||||||
|
ALLOWED_BASE = 10
|
||||||
|
|
||||||
|
_cpu = self.specs.value.get('cpu', KeyError)
|
||||||
|
_ram = self.specs.value.get('ram', KeyError)
|
||||||
|
_os_ssd = self.specs.value.get('os-ssd', KeyError)
|
||||||
|
_hdd = self.specs.value.get('hdd', KeyError)
|
||||||
|
|
||||||
|
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
||||||
|
self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
||||||
|
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
||||||
|
|
||||||
|
if parsed_ram.base != ALLOWED_BASE:
|
||||||
|
self.add_error("Your specified RAM is not in correct units")
|
||||||
|
if parsed_os_ssd.base != ALLOWED_BASE:
|
||||||
|
self.add_error("Your specified OS-SSD is not in correct units")
|
||||||
|
|
||||||
|
if _cpu < 1:
|
||||||
|
self.add_error("CPU must be atleast 1")
|
||||||
|
|
||||||
|
if parsed_ram < bitmath.GB(1):
|
||||||
|
self.add_error("RAM must be atleast 1 GB")
|
||||||
|
|
||||||
|
if parsed_os_ssd < bitmath.GB(10):
|
||||||
|
self.add_error("OS-SSD must be atleast 10 GB")
|
||||||
|
|
||||||
|
parsed_hdd = []
|
||||||
|
for hdd in _hdd:
|
||||||
|
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
||||||
|
if _parsed_hdd.base != ALLOWED_BASE:
|
||||||
|
self.add_error("Your specified HDD is not in correct units")
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
parsed_hdd.append(str(_parsed_hdd))
|
||||||
|
|
||||||
|
except ValueError:
|
||||||
|
# TODO: Find some good error message
|
||||||
|
self.add_error("Specs are not correct.")
|
||||||
|
else:
|
||||||
|
if not self.get_errors()["message"]:  # normalize specs only when no errors so far
|
||||||
|
self.specs = {
|
||||||
|
'cpu': _cpu,
|
||||||
|
'ram': str(parsed_ram),
|
||||||
|
'os-ssd': str(parsed_os_ssd),
|
||||||
|
'hdd': parsed_hdd
|
||||||
|
}
|
||||||
|
|
||||||
|
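# For illustration (values are placeholders, not from the original code): a
# specs dict that passes specs_validation above uses base-10 units for RAM,
# OS-SSD and each HDD entry, e.g.
#
#   {"cpu": 16, "ram": "64 GB", "os-ssd": "100 GB", "hdd": ["1 TB", "2 TB"]}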
def validation(self):
|
||||||
|
if self.realm.value != "ungleich-admin":
|
||||||
|
self.add_error("Invalid Credentials/Insufficient Permission")
|
||||||
|
|
||||||
|
|
||||||
|
# VM Operations
|
||||||
|
|
||||||
|
|
||||||
|
class CreateVMSchema(OTPSchema):
|
||||||
|
def __init__(self, data):
|
||||||
|
self.parsed_specs = {}
|
||||||
|
# Fields
|
||||||
|
self.specs = Field("specs", dict, data.get("specs", KeyError))
|
||||||
|
self.vm_name = Field("vm_name", str, data.get("vm_name", KeyError))
|
||||||
|
self.image_uuid = Field("image_uuid", str, data.get("image_uuid", KeyError))
|
||||||
|
|
||||||
|
# Validation
|
||||||
|
self.image_uuid.validation = self.image_uuid_validation
|
||||||
|
self.vm_name.validation = self.vm_name_validation
|
||||||
|
self.specs.validation = self.specs_validation
|
||||||
|
|
||||||
|
fields = [self.vm_name, self.image_uuid, self.specs]
|
||||||
|
|
||||||
|
super().__init__(data=data, fields=fields)
|
||||||
|
|
||||||
|
def image_uuid_validation(self):
|
||||||
|
images = client.get_prefix(IMAGE_PREFIX)
|
||||||
|
|
||||||
|
if self.image_uuid.value not in [i.key.split("/")[-1] for i in images]:
|
||||||
|
self.add_error("Image UUID not valid")
|
||||||
|
|
||||||
|
def vm_name_validation(self):
|
||||||
|
if resolve_vm_name(name=self.vm_name.value, owner=self.name.value):
|
||||||
|
self.add_error(
|
||||||
|
'VM with the same name "{}" already exists'.format(self.vm_name.value)
|
||||||
|
)
|
||||||
|
|
||||||
|
def specs_validation(self):
|
||||||
|
ALLOWED_BASE = 10
|
||||||
|
|
||||||
|
_cpu = self.specs.value.get('cpu', KeyError)
|
||||||
|
_ram = self.specs.value.get('ram', KeyError)
|
||||||
|
_os_ssd = self.specs.value.get('os-ssd', KeyError)
|
||||||
|
_hdd = self.specs.value.get('hdd', KeyError)
|
||||||
|
|
||||||
|
if KeyError in [_cpu, _ram, _os_ssd, _hdd]:
|
||||||
|
self.add_error("You must specify CPU, RAM and OS-SSD in your specs")
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
parsed_ram = bitmath.parse_string_unsafe(_ram)
|
||||||
|
parsed_os_ssd = bitmath.parse_string_unsafe(_os_ssd)
|
||||||
|
|
||||||
|
if parsed_ram.base != ALLOWED_BASE:
|
||||||
|
self.add_error("Your specified RAM is not in correct units")
|
||||||
|
if parsed_os_ssd.base != ALLOWED_BASE:
|
||||||
|
self.add_error("Your specified OS-SSD is not in correct units")
|
||||||
|
|
||||||
|
if _cpu < 1:
|
||||||
|
self.add_error("CPU must be atleast 1")
|
||||||
|
|
||||||
|
if parsed_ram < bitmath.GB(1):
|
||||||
|
self.add_error("RAM must be atleast 1 GB")
|
||||||
|
|
||||||
|
if parsed_os_ssd < bitmath.GB(1):
|
||||||
|
self.add_error("OS-SSD must be atleast 1 GB")
|
||||||
|
|
||||||
|
parsed_hdd = []
|
||||||
|
for hdd in _hdd:
|
||||||
|
_parsed_hdd = bitmath.parse_string_unsafe(hdd)
|
||||||
|
if _parsed_hdd.base != ALLOWED_BASE:
|
||||||
|
self.add_error("Your specified HDD is not in correct units")
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
parsed_hdd.append(str(_parsed_hdd))
|
||||||
|
|
||||||
|
except ValueError:
|
||||||
|
# TODO: Find some good error message
|
||||||
|
self.add_error("Specs are not correct.")
|
||||||
|
else:
|
||||||
|
if not self.get_errors()["message"]:  # normalize specs only when no errors so far
|
||||||
|
self.specs = {
|
||||||
|
'cpu': _cpu,
|
||||||
|
'ram': str(parsed_ram),
|
||||||
|
'os-ssd': str(parsed_os_ssd),
|
||||||
|
'hdd': parsed_hdd
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class VMStatusSchema(OTPSchema):
|
||||||
|
def __init__(self, data):
|
||||||
|
data["uuid"] = (
|
||||||
|
resolve_vm_name(
|
||||||
|
name=data.get("vm_name", None),
|
||||||
|
owner=(data.get("in_support_of", None) or data.get("name", None)),
|
||||||
|
)
|
||||||
|
or KeyError
|
||||||
|
)
|
||||||
|
self.uuid = VmUUIDField(data)
|
||||||
|
|
||||||
|
fields = [self.uuid]
|
||||||
|
|
||||||
|
super().__init__(data, fields)
|
||||||
|
|
||||||
|
def validation(self):
|
||||||
|
vm = VM_POOL.get(self.uuid.value)
|
||||||
|
if not (
|
||||||
|
vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
|
||||||
|
):
|
||||||
|
self.add_error("Invalid User")
|
||||||
|
|
||||||
|
|
||||||
|
class VmActionSchema(OTPSchema):
|
||||||
|
def __init__(self, data):
|
||||||
|
data["uuid"] = (
|
||||||
|
resolve_vm_name(
|
||||||
|
name=data.get("vm_name", None),
|
||||||
|
owner=(data.get("in_support_of", None) or data.get("name", None)),
|
||||||
|
)
|
||||||
|
or KeyError
|
||||||
|
)
|
||||||
|
self.uuid = VmUUIDField(data)
|
||||||
|
self.action = Field("action", str, data.get("action", KeyError))
|
||||||
|
|
||||||
|
self.action.validation = self.action_validation
|
||||||
|
|
||||||
|
_fields = [self.uuid, self.action]
|
||||||
|
|
||||||
|
super().__init__(data=data, fields=_fields)
|
||||||
|
|
||||||
|
def action_validation(self):
|
||||||
|
allowed_actions = ["start", "stop", "delete"]
|
||||||
|
if self.action.value not in allowed_actions:
|
||||||
|
self.add_error(
|
||||||
|
"Invalid Action. Allowed Actions are {}".format(allowed_actions)
|
||||||
|
)
|
||||||
|
|
||||||
|
def validation(self):
|
||||||
|
vm = VM_POOL.get(self.uuid.value)
|
||||||
|
if not (
|
||||||
|
vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
|
||||||
|
):
|
||||||
|
self.add_error("Invalid User")
|
||||||
|
|
||||||
|
if (
|
||||||
|
self.action.value == "start"
|
||||||
|
and vm.status == VMStatus.running
|
||||||
|
and vm.hostname != ""
|
||||||
|
):
|
||||||
|
self.add_error("VM Already Running")
|
||||||
|
|
||||||
|
if self.action.value == "stop":
|
||||||
|
if vm.status == VMStatus.stopped:
|
||||||
|
self.add_error("VM Already Stopped")
|
||||||
|
elif vm.status != VMStatus.running:
|
||||||
|
self.add_error("Cannot stop non-running VM")
|
||||||
|
|
||||||
|
|
||||||
|
class VmMigrationSchema(OTPSchema):
|
||||||
|
def __init__(self, data):
|
||||||
|
data["uuid"] = (
|
||||||
|
resolve_vm_name(
|
||||||
|
name=data.get("vm_name", None),
|
||||||
|
owner=(data.get("in_support_of", None) or data.get("name", None)),
|
||||||
|
)
|
||||||
|
or KeyError
|
||||||
|
)
|
||||||
|
|
||||||
|
self.uuid = VmUUIDField(data)
|
||||||
|
self.destination = Field("destination", str, data.get("destination", KeyError))
|
||||||
|
|
||||||
|
self.destination.validation = self.destination_validation
|
||||||
|
|
||||||
|
fields = [self.uuid, self.destination]
|
||||||
|
super().__init__(data=data, fields=fields)
|
||||||
|
|
||||||
|
def destination_validation(self):
|
||||||
|
host_key = self.destination.value
|
||||||
|
host = HOST_POOL.get(host_key)
|
||||||
|
if not host:
|
||||||
|
self.add_error("No Such Host ({}) exists".format(self.destination.value))
|
||||||
|
elif host.status != HostStatus.alive:
|
||||||
|
self.add_error("Destination Host is dead")
|
||||||
|
|
||||||
|
def validation(self):
|
||||||
|
vm = VM_POOL.get(self.uuid.value)
|
||||||
|
if not (
|
||||||
|
vm.value["owner"] == self.name.value or self.realm.value == "ungleich-admin"
|
||||||
|
):
|
||||||
|
self.add_error("Invalid User")
|
||||||
|
|
||||||
|
if vm.status != VMStatus.running:
|
||||||
|
self.add_error("Can't migrate non-running VM")
|
||||||
|
|
||||||
|
if vm.hostname == os.path.join(HOST_PREFIX, self.destination.value):
|
||||||
|
self.add_error("Destination host couldn't be same as Source Host")
|
||||||
|
|
||||||
|
|
||||||
|
class AddSSHSchema(OTPSchema):
|
||||||
|
def __init__(self, data):
|
||||||
|
self.key_name = Field("key_name", str, data.get("key_name", KeyError))
|
||||||
|
self.key = Field("key", str, data.get("key_name", KeyError))
|
||||||
|
|
||||||
|
fields = [self.key_name, self.key]
|
||||||
|
super().__init__(data=data, fields=fields)
|
||||||
|
|
||||||
|
|
||||||
|
class RemoveSSHSchema(OTPSchema):
|
||||||
|
def __init__(self, data):
|
||||||
|
self.key_name = Field("key_name", str, data.get("key_name", KeyError))
|
||||||
|
|
||||||
|
fields = [self.key_name]
|
||||||
|
super().__init__(data=data, fields=fields)
|
||||||
|
|
||||||
|
|
||||||
|
class GetSSHSchema(OTPSchema):
|
||||||
|
def __init__(self, data):
|
||||||
|
self.key_name = Field("key_name", str, data.get("key_name", None))
|
||||||
|
|
||||||
|
fields = [self.key_name]
|
||||||
|
super().__init__(data=data, fields=fields)
|
109
filescanner/main.py
Executable file
109
filescanner/main.py
Executable file
|
@ -0,0 +1,109 @@
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
import pathlib
|
||||||
|
import time
|
||||||
|
import hashlib
|
||||||
|
import subprocess as sp
|
||||||
|
|
||||||
|
from decouple import config
|
||||||
|
from etcd3_wrapper import Etcd3Wrapper
|
||||||
|
from uuid import uuid4
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def getxattr(file, attr):
|
||||||
|
try:
|
||||||
|
attr = "user." + attr
|
||||||
|
value = sp.check_output(['getfattr', file,
|
||||||
|
'--name', attr,
|
||||||
|
'--only-values',
|
||||||
|
'--absolute-names'])
|
||||||
|
value = value.decode("utf-8")
|
||||||
|
except sp.CalledProcessError:
|
||||||
|
value = None
|
||||||
|
|
||||||
|
return value
|
||||||
|
|
||||||
|
def setxattr(file, attr, value):
|
||||||
|
attr = "user." + attr
|
||||||
|
sp.check_output(['setfattr', file,
|
||||||
|
'--name', attr,
|
||||||
|
'--value', str(value)])
|
||||||
|
|
||||||
|
|
||||||
|
def sha512sum(filename):
|
||||||
|
_sum = hashlib.sha512()
|
||||||
|
buffer_size = 2**16
|
||||||
|
|
||||||
|
with open(filename, "rb") as f:
|
||||||
|
while True:
|
||||||
|
data = f.read(buffer_size)
|
||||||
|
if not data:
|
||||||
|
break
|
||||||
|
_sum.update(data)
|
||||||
|
|
||||||
|
return _sum.hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
sp.check_output(['which', 'getfattr'])
|
||||||
|
sp.check_output(['which', 'setfattr'])
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
|
print('Make sure you have getfattr and setfattr available')
|
||||||
|
exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
BASE_DIR = config("BASE_DIR")
|
||||||
|
|
||||||
|
FILE_PREFIX = config("FILE_PREFIX")
|
||||||
|
|
||||||
|
etcd_client = Etcd3Wrapper(host=config("ETCD_URL"))
|
||||||
|
|
||||||
|
# Recursively get all files and folders below BASE_DIR
|
||||||
|
files = glob.glob("{}/**".format(BASE_DIR), recursive=True)
|
||||||
|
|
||||||
|
# Retain only Files
|
||||||
|
files = list(filter(os.path.isfile, files))
|
||||||
|
|
||||||
|
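# Note (added for clarity): getxattr()/setxattr() above already prepend
# "user." to the attribute name, so passing "user.utracked" here actually
# reads/writes the extended attribute "user.user.utracked". The reader here
# and the writer at the bottom of this file use the same spelling, so the
# tracking check stays consistent.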
untracked_files = list(
|
||||||
|
filter(lambda f: not bool(getxattr(f, "user.utracked")), files)
|
||||||
|
)
|
||||||
|
|
||||||
|
tracked_files = list(
|
||||||
|
filter(lambda f: f not in untracked_files, files)
|
||||||
|
)
|
||||||
|
for file in untracked_files:
|
||||||
|
file_id = uuid4()
|
||||||
|
|
||||||
|
# Get Username
|
||||||
|
owner = pathlib.Path(file).parts[3]
|
||||||
|
# Get Creation Date of File
|
||||||
|
# Here, we are assuming that ctime is creation time
|
||||||
|
# which is mostly not true.
|
||||||
|
creation_date = time.ctime(os.stat(file).st_ctime)
|
||||||
|
|
||||||
|
# Get File Size
|
||||||
|
size = os.path.getsize(file)
|
||||||
|
|
||||||
|
# Compute sha512 sum
|
||||||
|
sha_sum = sha512sum(file)
|
||||||
|
|
||||||
|
# File Path excluding base and username
|
||||||
|
file_path = pathlib.Path(file).parts[4:]
|
||||||
|
file_path = os.path.join(*file_path)
|
||||||
|
|
||||||
|
# Create Entry
|
||||||
|
entry_key = os.path.join(FILE_PREFIX, str(file_id))
|
||||||
|
entry_value = {
|
||||||
|
"filename": file_path,
|
||||||
|
"owner": owner,
|
||||||
|
"sha512sum": sha_sum,
|
||||||
|
"creation_date": creation_date,
|
||||||
|
"size": size
|
||||||
|
}
|
||||||
|
|
||||||
|
print("Tracking {}".format(file))
|
||||||
|
# Insert Entry
|
||||||
|
etcd_client.put(entry_key, entry_value, value_in_json=True)
|
||||||
|
setxattr(file, "user.utracked", True)
|
32
host/config.py
Executable file
32
host/config.py
Executable file
|
@ -0,0 +1,32 @@
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from etcd3_wrapper import Etcd3Wrapper
|
||||||
|
from ucloud_common.vm import VmPool
|
||||||
|
from ucloud_common.host import HostPool
|
||||||
|
from ucloud_common.request import RequestPool
|
||||||
|
from decouple import config
|
||||||
|
|
||||||
|
WITHOUT_CEPH = config("WITHOUT_CEPH", False, cast=bool)
|
||||||
|
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.DEBUG,
|
||||||
|
filename="log.txt",
|
||||||
|
filemode="a",
|
||||||
|
format="%(asctime)s: %(levelname)s - %(message)s",
|
||||||
|
datefmt="%d-%b-%y %H:%M:%S",
|
||||||
|
)
|
||||||
|
|
||||||
|
etcd_client = Etcd3Wrapper(host=config("ETCD_URL"))
|
||||||
|
|
||||||
|
HOST_PREFIX = config("HOST_PREFIX")
|
||||||
|
VM_PREFIX = config("VM_PREFIX")
|
||||||
|
REQUEST_PREFIX = config("REQUEST_PREFIX")
|
||||||
|
VM_DIR = config("VM_DIR")
|
||||||
|
IMAGE_DIR = config("IMAGE_DIR")
|
||||||
|
|
||||||
|
|
||||||
|
host_pool = HostPool(etcd_client, HOST_PREFIX)
|
||||||
|
vm_pool = VmPool(etcd_client, VM_PREFIX)
|
||||||
|
request_pool = RequestPool(etcd_client, REQUEST_PREFIX)
|
||||||
|
|
||||||
|
running_vms = []
|
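# Settings read above via python-decouple come from the environment or a .env
# file. A hypothetical example (keys are the ones used in this module, values
# are placeholders):
#
#   ETCD_URL=localhost
#   WITHOUT_CEPH=True
#   HOST_PREFIX=/v1/host
#   VM_PREFIX=/v1/vm
#   REQUEST_PREFIX=/v1/request
#   VM_DIR=/var/vm
#   IMAGE_DIR=/var/image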
137
host/main.py
Executable file
137
host/main.py
Executable file
|
@ -0,0 +1,137 @@
|
||||||
|
import argparse
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import virtualmachine
|
||||||
|
|
||||||
|
from ucloud_common.host import HostEntry
|
||||||
|
from ucloud_common.request import RequestEntry, RequestType
|
||||||
|
|
||||||
|
from config import (vm_pool, host_pool, request_pool,
|
||||||
|
etcd_client, logging, running_vms,
|
||||||
|
REQUEST_PREFIX, WITHOUT_CEPH)
|
||||||
|
|
||||||
|
|
||||||
|
def update_heartbeat(host: HostEntry):
|
||||||
|
while True:
|
||||||
|
host.update_heartbeat()
|
||||||
|
host_pool.put(host)
|
||||||
|
time.sleep(10)
|
||||||
|
|
||||||
|
logging.info("Updated last heartbeat time %s", host.last_heartbeat)
|
||||||
|
|
||||||
|
|
||||||
|
def maintenance(host):
|
||||||
|
# Check the VMs that are running according to the running_vms list
|
||||||
|
|
||||||
|
# This is to capture successful migration of a VM.
|
||||||
|
# Suppose this host is running "vm1" and the user initiated a
|
||||||
|
# request to migrate "vm1" to some other host. On
|
||||||
|
# successful migration, the destination host would set
|
||||||
|
# the VM's hostname to itself. Thus, we are checking
|
||||||
|
# whether this host's VM has been successfully migrated. If yes,
|
||||||
|
# then we shut down "vm1" on this host.
|
||||||
|
|
||||||
|
for running_vm in running_vms:
|
||||||
|
with vm_pool.get_put(running_vm.key) as vm_entry:
|
||||||
|
if vm_entry.hostname != host.key and not vm_entry.in_migration:
|
||||||
|
running_vm.handle.shutdown()
|
||||||
|
vm_entry.add_log("VM on source host shutdown.")
|
||||||
|
# Check the VMs running according to etcd entries
|
||||||
|
alleged_running_vms = vm_pool.by_status("RUNNING", vm_pool.by_host(host.key))
|
||||||
|
|
||||||
|
for vm_entry in alleged_running_vms:
|
||||||
|
_vm = virtualmachine.get_vm(running_vms, vm_entry.key)
|
||||||
|
|
||||||
|
# Check whether the allegedly running VM is in our
|
||||||
|
# running_vms list or not. If it is said to be
|
||||||
|
# running on this host but it is not, then we
|
||||||
|
# need to shut it down
|
||||||
|
|
||||||
|
# This is to capture poweroff/shutdown of a VM
|
||||||
|
# initiated by the user inside the VM, or a crash of the VM by some
|
||||||
|
# user-run process
|
||||||
|
if (_vm and not _vm.handle.is_running()) or not _vm:
|
||||||
|
vm_entry.add_log("""{} is not running but is said to be running.
|
||||||
|
So, shutting it down and declaring it killed""".format(vm_entry.key))
|
||||||
|
vm_entry.declare_killed()
|
||||||
|
vm_pool.put(vm_entry)
|
||||||
|
if _vm:
|
||||||
|
running_vms.remove(_vm)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
argparser = argparse.ArgumentParser()
|
||||||
|
argparser.add_argument("hostname", help="Name of this host. e.g /v1/host/1")
|
||||||
|
args = argparser.parse_args()
|
||||||
|
|
||||||
|
host = host_pool.get(args.hostname)
|
||||||
|
if not host:
|
||||||
|
print("No Such Host")
|
||||||
|
exit(1)
|
||||||
|
|
||||||
|
if WITHOUT_CEPH and not os.path.isdir("/var/vm"):
|
||||||
|
print("You have set WITHOUT_CEPH to True. So, the /var/vm must exists. But, it don't")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
logging.info("%s Session Started %s", '*' * 5, '*' * 5)
|
||||||
|
|
||||||
|
# It is seen that under heavy load, the timeout event does not arrive
|
||||||
|
# in a predictable manner (which is intentional, because we give
|
||||||
|
# higher priority to customers' requests), which delays the heartbeat
|
||||||
|
# update, which in turn makes the scheduler believe that the
|
||||||
|
# host is dead when it is actually alive. So, to ensure that we
|
||||||
|
# update the heartbeat in a predictable manner, we run the heartbeat
|
||||||
|
# updating mechanism in a separate thread.
|
||||||
|
|
||||||
|
heartbeat_updating_thread = threading.Thread(target=update_heartbeat, args=(host,))
|
||||||
|
try:
|
||||||
|
heartbeat_updating_thread.start()
|
||||||
|
except Exception as e:
|
||||||
|
logging.info("No Need To Go Further. Our heartbeat updating mechanism is not working")
|
||||||
|
logging.exception(e)
|
||||||
|
exit(-1)
|
||||||
|
|
||||||
|
for events_iterator in [
|
||||||
|
etcd_client.get_prefix(REQUEST_PREFIX, value_in_json=True),
|
||||||
|
etcd_client.watch_prefix(REQUEST_PREFIX, timeout=10, value_in_json=True),
|
||||||
|
]:
|
||||||
|
for request_event in events_iterator:
|
||||||
|
request_event = RequestEntry(request_event)
|
||||||
|
|
||||||
|
if request_event.type == "TIMEOUT":
|
||||||
|
logging.info("Timeout Event")
|
||||||
|
maintenance(host)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# If the event is directed toward me OR I am the destination of an InitVMMigration
|
||||||
|
if (request_event.hostname == host.key or request_event.destination == host.key):
|
||||||
|
logging.debug("EVENT: %s", request_event)
|
||||||
|
|
||||||
|
request_pool.client.client.delete(request_event.key)
|
||||||
|
vm_entry = vm_pool.get(request_event.uuid)
|
||||||
|
|
||||||
|
if vm_entry:
|
||||||
|
if request_event.type == RequestType.StartVM:
|
||||||
|
virtualmachine.start(vm_entry)
|
||||||
|
|
||||||
|
elif request_event.type == RequestType.StopVM:
|
||||||
|
virtualmachine.stop(vm_entry)
|
||||||
|
|
||||||
|
elif request_event.type == RequestType.DeleteVM:
|
||||||
|
virtualmachine.delete(vm_entry)
|
||||||
|
|
||||||
|
elif request_event.type == RequestType.InitVMMigration:
|
||||||
|
virtualmachine.init_migration(vm_entry, host.key)
|
||||||
|
|
||||||
|
elif request_event.type == RequestType.TransferVM:
|
||||||
|
virtualmachine.transfer(request_event)
|
||||||
|
else:
|
||||||
|
logging.info("VM Entry missing")
|
||||||
|
|
||||||
|
logging.info("Running VMs %s", running_vms)
|
||||||
|
|
||||||
|
|
||||||
|
main()
|
534
host/qmp/__init__.py
Executable file
534
host/qmp/__init__.py
Executable file
|
@ -0,0 +1,534 @@
|
||||||
|
# QEMU library
|
||||||
|
#
|
||||||
|
# Copyright (C) 2015-2016 Red Hat Inc.
|
||||||
|
# Copyright (C) 2012 IBM Corp.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Fam Zheng <famz@redhat.com>
|
||||||
|
#
|
||||||
|
# This work is licensed under the terms of the GNU GPL, version 2. See
|
||||||
|
# the COPYING file in the top-level directory.
|
||||||
|
#
|
||||||
|
# Based on qmp.py.
|
||||||
|
#
|
||||||
|
|
||||||
|
import errno
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
import socket
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
from . import qmp
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Mapping host architecture to any additional architectures it can
|
||||||
|
# support which often includes its 32 bit cousin.
|
||||||
|
ADDITIONAL_ARCHES = {
|
||||||
|
"x86_64" : "i386",
|
||||||
|
"aarch64" : "armhf"
|
||||||
|
}
|
||||||
|
|
||||||
|
def kvm_available(target_arch=None):
|
||||||
|
host_arch = os.uname()[4]
|
||||||
|
if target_arch and target_arch != host_arch:
|
||||||
|
if target_arch != ADDITIONAL_ARCHES.get(host_arch):
|
||||||
|
return False
|
||||||
|
return os.access("/dev/kvm", os.R_OK | os.W_OK)
|
||||||
|
|
||||||
|
|
||||||
|
class QEMUMachineError(Exception):
|
||||||
|
"""
|
||||||
|
Exception called when an error in QEMUMachine happens.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
class QEMUMachineAddDeviceError(QEMUMachineError):
|
||||||
|
"""
|
||||||
|
Exception raised when a request to add a device can not be fulfilled
|
||||||
|
|
||||||
|
The failures are caused by limitations, lack of information or conflicting
|
||||||
|
requests on the QEMUMachine methods. This exception does not represent
|
||||||
|
failures reported by the QEMU binary itself.
|
||||||
|
"""
|
||||||
|
|
||||||
|
class MonitorResponseError(qmp.QMPError):
|
||||||
|
"""
|
||||||
|
Represents erroneous QMP monitor reply
|
||||||
|
"""
|
||||||
|
def __init__(self, reply):
|
||||||
|
try:
|
||||||
|
desc = reply["error"]["desc"]
|
||||||
|
except KeyError:
|
||||||
|
desc = reply
|
||||||
|
super(MonitorResponseError, self).__init__(desc)
|
||||||
|
self.reply = reply
|
||||||
|
|
||||||
|
|
||||||
|
class QEMUMachine(object):
|
||||||
|
"""
|
||||||
|
A QEMU VM
|
||||||
|
|
||||||
|
Use this object as a context manager to ensure the QEMU process terminates::
|
||||||
|
|
||||||
|
with VM(binary) as vm:
|
||||||
|
...
|
||||||
|
# vm is guaranteed to be shut down here
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, binary, args=None, wrapper=None, name=None,
|
||||||
|
test_dir="/var/tmp", monitor_address=None,
|
||||||
|
socket_scm_helper=None):
|
||||||
|
'''
|
||||||
|
Initialize a QEMUMachine
|
||||||
|
|
||||||
|
@param binary: path to the qemu binary
|
||||||
|
@param args: list of extra arguments
|
||||||
|
@param wrapper: list of arguments used as prefix to qemu binary
|
||||||
|
@param name: prefix for socket and log file names (default: qemu-PID)
|
||||||
|
@param test_dir: where to create socket and log file
|
||||||
|
@param monitor_address: address for QMP monitor
|
||||||
|
@param socket_scm_helper: helper program, required for send_fd_scm()
|
||||||
|
@note: Qemu process is not started until launch() is used.
|
||||||
|
'''
|
||||||
|
if args is None:
|
||||||
|
args = []
|
||||||
|
if wrapper is None:
|
||||||
|
wrapper = []
|
||||||
|
if name is None:
|
||||||
|
name = "qemu-%d" % os.getpid()
|
||||||
|
self._name = name
|
||||||
|
self._monitor_address = monitor_address
|
||||||
|
self._vm_monitor = None
|
||||||
|
self._qemu_log_path = None
|
||||||
|
self._qemu_log_file = None
|
||||||
|
self._popen = None
|
||||||
|
self._binary = binary
|
||||||
|
self._args = list(args) # Force copy args in case we modify them
|
||||||
|
self._wrapper = wrapper
|
||||||
|
self._events = []
|
||||||
|
self._iolog = None
|
||||||
|
self._socket_scm_helper = socket_scm_helper
|
||||||
|
self._qmp = None
|
||||||
|
self._qemu_full_args = None
|
||||||
|
self._test_dir = test_dir
|
||||||
|
self._temp_dir = None
|
||||||
|
self._launched = False
|
||||||
|
self._machine = None
|
||||||
|
self._console_set = False
|
||||||
|
self._console_device_type = None
|
||||||
|
self._console_address = None
|
||||||
|
self._console_socket = None
|
||||||
|
|
||||||
|
# just in case logging wasn't configured by the main script:
|
||||||
|
logging.basicConfig(level=logging.DEBUG)
|
||||||
|
|
||||||
|
def __enter__(self):
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||||
|
self.shutdown()
|
||||||
|
return False
|
||||||
|
|
||||||
|
# This can be used to add an unused monitor instance.
|
||||||
|
def add_monitor_null(self):
|
||||||
|
self._args.append('-monitor')
|
||||||
|
self._args.append('null')
|
||||||
|
|
||||||
|
def add_fd(self, fd, fdset, opaque, opts=''):
|
||||||
|
"""
|
||||||
|
Pass a file descriptor to the VM
|
||||||
|
"""
|
||||||
|
options = ['fd=%d' % fd,
|
||||||
|
'set=%d' % fdset,
|
||||||
|
'opaque=%s' % opaque]
|
||||||
|
if opts:
|
||||||
|
options.append(opts)
|
||||||
|
|
||||||
|
# This did not exist before 3.4, but since then it is
|
||||||
|
# mandatory for our purpose
|
||||||
|
if hasattr(os, 'set_inheritable'):
|
||||||
|
os.set_inheritable(fd, True)
|
||||||
|
|
||||||
|
self._args.append('-add-fd')
|
||||||
|
self._args.append(','.join(options))
|
||||||
|
return self
|
||||||
|
|
||||||
|
# Exactly one of fd and file_path must be given.
|
||||||
|
# (If it is file_path, the helper will open that file and pass its
|
||||||
|
# own fd)
|
||||||
|
def send_fd_scm(self, fd=None, file_path=None):
|
||||||
|
# In iotest.py, the qmp should always use unix socket.
|
||||||
|
assert self._qmp.is_scm_available()
|
||||||
|
if self._socket_scm_helper is None:
|
||||||
|
raise QEMUMachineError("No path to socket_scm_helper set")
|
||||||
|
if not os.path.exists(self._socket_scm_helper):
|
||||||
|
raise QEMUMachineError("%s does not exist" %
|
||||||
|
self._socket_scm_helper)
|
||||||
|
|
||||||
|
# This did not exist before 3.4, but since then it is
|
||||||
|
# mandatory for our purpose
|
||||||
|
if hasattr(os, 'set_inheritable'):
|
||||||
|
os.set_inheritable(self._qmp.get_sock_fd(), True)
|
||||||
|
if fd is not None:
|
||||||
|
os.set_inheritable(fd, True)
|
||||||
|
|
||||||
|
fd_param = ["%s" % self._socket_scm_helper,
|
||||||
|
"%d" % self._qmp.get_sock_fd()]
|
||||||
|
|
||||||
|
if file_path is not None:
|
||||||
|
assert fd is None
|
||||||
|
fd_param.append(file_path)
|
||||||
|
else:
|
||||||
|
assert fd is not None
|
||||||
|
fd_param.append(str(fd))
|
||||||
|
|
||||||
|
devnull = open(os.path.devnull, 'rb')
|
||||||
|
proc = subprocess.Popen(fd_param, stdin=devnull, stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.STDOUT, close_fds=False)
|
||||||
|
output = proc.communicate()[0]
|
||||||
|
if output:
|
||||||
|
LOG.debug(output)
|
||||||
|
|
||||||
|
return proc.returncode
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _remove_if_exists(path):
|
||||||
|
"""
|
||||||
|
Remove file object at path if it exists
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
os.remove(path)
|
||||||
|
except OSError as exception:
|
||||||
|
if exception.errno == errno.ENOENT:
|
||||||
|
return
|
||||||
|
raise
|
||||||
|
|
||||||
|
def is_running(self):
|
||||||
|
return self._popen is not None and self._popen.poll() is None
|
||||||
|
|
||||||
|
def exitcode(self):
|
||||||
|
if self._popen is None:
|
||||||
|
return None
|
||||||
|
return self._popen.poll()
|
||||||
|
|
||||||
|
def get_pid(self):
|
||||||
|
if not self.is_running():
|
||||||
|
return None
|
||||||
|
return self._popen.pid
|
||||||
|
|
||||||
|
def _load_io_log(self):
|
||||||
|
if self._qemu_log_path is not None:
|
||||||
|
with open(self._qemu_log_path, "r") as iolog:
|
||||||
|
self._iolog = iolog.read()
|
||||||
|
|
||||||
|
def _base_args(self):
|
||||||
|
if isinstance(self._monitor_address, tuple):
|
||||||
|
moncdev = "socket,id=mon,host=%s,port=%s" % (
|
||||||
|
self._monitor_address[0],
|
||||||
|
self._monitor_address[1])
|
||||||
|
else:
|
||||||
|
moncdev = 'socket,id=mon,path=%s' % self._vm_monitor
|
||||||
|
args = ['-chardev', moncdev,
|
||||||
|
'-mon', 'chardev=mon,mode=control']
|
||||||
|
if self._machine is not None:
|
||||||
|
args.extend(['-machine', self._machine])
|
||||||
|
if self._console_set:
|
||||||
|
self._console_address = os.path.join(self._temp_dir,
|
||||||
|
self._name + "-console.sock")
|
||||||
|
chardev = ('socket,id=console,path=%s,server,nowait' %
|
||||||
|
self._console_address)
|
||||||
|
args.extend(['-chardev', chardev])
|
||||||
|
if self._console_device_type is None:
|
||||||
|
args.extend(['-serial', 'chardev:console'])
|
||||||
|
else:
|
||||||
|
device = '%s,chardev=console' % self._console_device_type
|
||||||
|
args.extend(['-device', device])
|
||||||
|
return args
|
||||||
|
|
||||||
|
def _pre_launch(self):
|
||||||
|
self._temp_dir = tempfile.mkdtemp(dir=self._test_dir)
|
||||||
|
if self._monitor_address is not None:
|
||||||
|
self._vm_monitor = self._monitor_address
|
||||||
|
else:
|
||||||
|
self._vm_monitor = os.path.join(self._temp_dir,
|
||||||
|
self._name + "-monitor.sock")
|
||||||
|
self._qemu_log_path = os.path.join(self._temp_dir, self._name + ".log")
|
||||||
|
self._qemu_log_file = open(self._qemu_log_path, 'wb')
|
||||||
|
|
||||||
|
self._qmp = qmp.QEMUMonitorProtocol(self._vm_monitor,
|
||||||
|
server=True)
|
||||||
|
|
||||||
|
def _post_launch(self):
|
||||||
|
self._qmp.accept()
|
||||||
|
|
||||||
|
def _post_shutdown(self):
|
||||||
|
if self._qemu_log_file is not None:
|
||||||
|
self._qemu_log_file.close()
|
||||||
|
self._qemu_log_file = None
|
||||||
|
|
||||||
|
self._qemu_log_path = None
|
||||||
|
|
||||||
|
if self._console_socket is not None:
|
||||||
|
self._console_socket.close()
|
||||||
|
self._console_socket = None
|
||||||
|
|
||||||
|
if self._temp_dir is not None:
|
||||||
|
shutil.rmtree(self._temp_dir)
|
||||||
|
self._temp_dir = None
|
||||||
|
|
||||||
|
def launch(self):
|
||||||
|
"""
|
||||||
|
Launch the VM and make sure we cleanup and expose the
|
||||||
|
command line/output in case of exception
|
||||||
|
"""
|
||||||
|
|
||||||
|
if self._launched:
|
||||||
|
raise QEMUMachineError('VM already launched')
|
||||||
|
|
||||||
|
self._iolog = None
|
||||||
|
self._qemu_full_args = None
|
||||||
|
try:
|
||||||
|
self._launch()
|
||||||
|
self._launched = True
|
||||||
|
except:
|
||||||
|
self.shutdown()
|
||||||
|
|
||||||
|
LOG.debug('Error launching VM')
|
||||||
|
if self._qemu_full_args:
|
||||||
|
LOG.debug('Command: %r', ' '.join(self._qemu_full_args))
|
||||||
|
if self._iolog:
|
||||||
|
LOG.debug('Output: %r', self._iolog)
|
||||||
|
raise
|
||||||
|
|
||||||
|
def _launch(self):
|
||||||
|
"""
|
||||||
|
Launch the VM and establish a QMP connection
|
||||||
|
"""
|
||||||
|
devnull = open(os.path.devnull, 'rb')
|
||||||
|
self._pre_launch()
|
||||||
|
self._qemu_full_args = (self._wrapper + [self._binary] +
|
||||||
|
self._base_args() + self._args)
|
||||||
|
LOG.debug('VM launch command: %r', ' '.join(self._qemu_full_args))
|
||||||
|
self._popen = subprocess.Popen(self._qemu_full_args,
|
||||||
|
stdin=devnull,
|
||||||
|
stdout=self._qemu_log_file,
|
||||||
|
stderr=subprocess.STDOUT,
|
||||||
|
shell=False,
|
||||||
|
close_fds=False)
|
||||||
|
self._post_launch()
|
||||||
|
|
||||||
|
def wait(self):
|
||||||
|
"""
|
||||||
|
Wait for the VM to power off
|
||||||
|
"""
|
||||||
|
self._popen.wait()
|
||||||
|
self._qmp.close()
|
||||||
|
self._load_io_log()
|
||||||
|
self._post_shutdown()
|
||||||
|
|
||||||
|
def shutdown(self):
|
||||||
|
"""
|
||||||
|
Terminate the VM and clean up
|
||||||
|
"""
|
||||||
|
if self.is_running():
|
||||||
|
try:
|
||||||
|
self._qmp.cmd('quit')
|
||||||
|
self._qmp.close()
|
||||||
|
except:
|
||||||
|
self._popen.kill()
|
||||||
|
self._popen.wait()
|
||||||
|
|
||||||
|
self._load_io_log()
|
||||||
|
self._post_shutdown()
|
||||||
|
|
||||||
|
exitcode = self.exitcode()
|
||||||
|
if exitcode is not None and exitcode < 0:
|
||||||
|
msg = 'qemu received signal %i: %s'
|
||||||
|
if self._qemu_full_args:
|
||||||
|
command = ' '.join(self._qemu_full_args)
|
||||||
|
else:
|
||||||
|
command = ''
|
||||||
|
LOG.warn(msg, -exitcode, command)
|
||||||
|
|
||||||
|
self._launched = False
|
||||||
|
|
||||||
|
def qmp(self, cmd, conv_keys=True, **args):
|
||||||
|
"""
|
||||||
|
Invoke a QMP command and return the response dict
|
||||||
|
"""
|
||||||
|
qmp_args = dict()
|
||||||
|
for key, value in args.items():
|
||||||
|
if conv_keys:
|
||||||
|
qmp_args[key.replace('_', '-')] = value
|
||||||
|
else:
|
||||||
|
qmp_args[key] = value
|
||||||
|
|
||||||
|
return self._qmp.cmd(cmd, args=qmp_args)
|
||||||
|
|
||||||
|
def command(self, cmd, conv_keys=True, **args):
|
||||||
|
"""
|
||||||
|
Invoke a QMP command.
|
||||||
|
On success return the response dict.
|
||||||
|
On failure raise an exception.
|
||||||
|
"""
|
||||||
|
reply = self.qmp(cmd, conv_keys, **args)
|
||||||
|
if reply is None:
|
||||||
|
raise qmp.QMPError("Monitor is closed")
|
||||||
|
if "error" in reply:
|
||||||
|
raise MonitorResponseError(reply)
|
||||||
|
return reply["return"]
|
||||||
|
|
||||||
|
def get_qmp_event(self, wait=False):
|
||||||
|
"""
|
||||||
|
Poll for one queued QMP event and return it
|
||||||
|
"""
|
||||||
|
if len(self._events) > 0:
|
||||||
|
return self._events.pop(0)
|
||||||
|
return self._qmp.pull_event(wait=wait)
|
||||||
|
|
||||||
|
def get_qmp_events(self, wait=False):
|
||||||
|
"""
|
||||||
|
Poll for queued QMP events and return a list of dicts
|
||||||
|
"""
|
||||||
|
events = self._qmp.get_events(wait=wait)
|
||||||
|
events.extend(self._events)
|
||||||
|
del self._events[:]
|
||||||
|
self._qmp.clear_events()
|
||||||
|
return events
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def event_match(event, match=None):
|
||||||
|
"""
|
||||||
|
Check if an event matches optional match criteria.
|
||||||
|
|
||||||
|
The match criteria takes the form of a matching subdict. The event is
|
||||||
|
checked to be a superset of the subdict, recursively, with matching
|
||||||
|
values whenever the subdict values are not None.
|
||||||
|
|
||||||
|
This has a limitation that you cannot explicitly check for None values.
|
||||||
|
|
||||||
|
Examples, with the subdict queries on the left:
|
||||||
|
- None matches any object.
|
||||||
|
- {"foo": None} matches {"foo": {"bar": 1}}
|
||||||
|
- {"foo": None} matches {"foo": 5}
|
||||||
|
- {"foo": {"abc": None}} does not match {"foo": {"bar": 1}}
|
||||||
|
- {"foo": {"rab": 2}} matches {"foo": {"bar": 1, "rab": 2}}
|
||||||
|
"""
|
||||||
|
if match is None:
|
||||||
|
return True
|
||||||
|
|
||||||
|
try:
|
||||||
|
for key in match:
|
||||||
|
if key in event:
|
||||||
|
if not QEMUMachine.event_match(event[key], match[key]):
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
except TypeError:
|
||||||
|
# either match or event wasn't iterable (not a dict)
|
||||||
|
return match == event
|
||||||
|
|
||||||
|
def event_wait(self, name, timeout=60.0, match=None):
|
||||||
|
"""
|
||||||
|
event_wait waits for and returns a named event from QMP with a timeout.
|
||||||
|
|
||||||
|
name: The event to wait for.
|
||||||
|
timeout: QEMUMonitorProtocol.pull_event timeout parameter.
|
||||||
|
match: Optional match criteria. See event_match for details.
|
||||||
|
"""
|
||||||
|
return self.events_wait([(name, match)], timeout)
|
||||||
|
|
||||||
|
def events_wait(self, events, timeout=60.0):
|
||||||
|
"""
|
||||||
|
events_wait waits for and returns a named event from QMP with a timeout.
|
||||||
|
|
||||||
|
events: a sequence of (name, match_criteria) tuples.
|
||||||
|
The match criteria are optional and may be None.
|
||||||
|
See event_match for details.
|
||||||
|
timeout: QEMUMonitorProtocol.pull_event timeout parameter.
|
||||||
|
"""
|
||||||
|
def _match(event):
|
||||||
|
for name, match in events:
|
||||||
|
if (event['event'] == name and
|
||||||
|
self.event_match(event, match)):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Search cached events
|
||||||
|
for event in self._events:
|
||||||
|
if _match(event):
|
||||||
|
self._events.remove(event)
|
||||||
|
return event
|
||||||
|
|
||||||
|
# Poll for new events
|
||||||
|
while True:
|
||||||
|
event = self._qmp.pull_event(wait=timeout)
|
||||||
|
if _match(event):
|
||||||
|
return event
|
||||||
|
self._events.append(event)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
def get_log(self):
|
||||||
|
"""
|
||||||
|
After self.shutdown or failed qemu execution, this returns the output
|
||||||
|
of the qemu process.
|
||||||
|
"""
|
||||||
|
return self._iolog
|
||||||
|
|
||||||
|
def add_args(self, *args):
|
||||||
|
"""
|
||||||
|
Adds to the list of extra arguments to be given to the QEMU binary
|
||||||
|
"""
|
||||||
|
self._args.extend(args)
|
||||||
|
|
||||||
|
def set_machine(self, machine_type):
|
||||||
|
"""
|
||||||
|
Sets the machine type
|
||||||
|
|
||||||
|
If set, the machine type will be added to the base arguments
|
||||||
|
of the resulting QEMU command line.
|
||||||
|
"""
|
||||||
|
self._machine = machine_type
|
||||||
|
|
||||||
|
def set_console(self, device_type=None):
|
||||||
|
"""
|
||||||
|
Sets the device type for a console device
|
||||||
|
|
||||||
|
If set, the console device and a backing character device will
|
||||||
|
be added to the base arguments of the resulting QEMU command
|
||||||
|
line.
|
||||||
|
|
||||||
|
This is a convenience method that will either use the provided
|
||||||
|
device type, or default to a "-serial chardev:console" command
|
||||||
|
line argument.
|
||||||
|
|
||||||
|
The actual setting of command line arguments will be done at
|
||||||
|
machine launch time, as it depends on the temporary directory
|
||||||
|
to be created.
|
||||||
|
|
||||||
|
@param device_type: the device type, such as "isa-serial". If
|
||||||
|
None is given (the default value) a "-serial
|
||||||
|
chardev:console" command line argument will
|
||||||
|
be used instead, resorting to the machine's
|
||||||
|
default device type.
|
||||||
|
"""
|
||||||
|
self._console_set = True
|
||||||
|
self._console_device_type = device_type
|
||||||
|
|
||||||
|
@property
|
||||||
|
def console_socket(self):
|
||||||
|
"""
|
||||||
|
Returns a socket connected to the console
|
||||||
|
"""
|
||||||
|
if self._console_socket is None:
|
||||||
|
self._console_socket = socket.socket(socket.AF_UNIX,
|
||||||
|
socket.SOCK_STREAM)
|
||||||
|
self._console_socket.connect(self._console_address)
|
||||||
|
return self._console_socket
|
BIN
host/qmp/__pycache__/__init__.cpython-37.pyc
Executable file
BIN
host/qmp/__pycache__/__init__.cpython-37.pyc
Executable file
Binary file not shown.
BIN
host/qmp/__pycache__/qmp.cpython-37.pyc
Executable file
BIN
host/qmp/__pycache__/qmp.cpython-37.pyc
Executable file
Binary file not shown.
256
host/qmp/qmp.py
Executable file
256
host/qmp/qmp.py
Executable file
|
@ -0,0 +1,256 @@
|
||||||
|
# QEMU Monitor Protocol Python class
|
||||||
|
#
|
||||||
|
# Copyright (C) 2009, 2010 Red Hat Inc.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Luiz Capitulino <lcapitulino@redhat.com>
|
||||||
|
#
|
||||||
|
# This work is licensed under the terms of the GNU GPL, version 2. See
|
||||||
|
# the COPYING file in the top-level directory.
|
||||||
|
|
||||||
|
import json
|
||||||
|
import errno
|
||||||
|
import socket
|
||||||
|
import logging
|
||||||
|
|
||||||
|
|
||||||
|
class QMPError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class QMPConnectError(QMPError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class QMPCapabilitiesError(QMPError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class QMPTimeoutError(QMPError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class QEMUMonitorProtocol(object):
|
||||||
|
|
||||||
|
#: Logger object for debugging messages
|
||||||
|
logger = logging.getLogger('QMP')
|
||||||
|
#: Socket's error class
|
||||||
|
error = socket.error
|
||||||
|
#: Socket's timeout
|
||||||
|
timeout = socket.timeout
|
||||||
|
|
||||||
|
def __init__(self, address, server=False):
|
||||||
|
"""
|
||||||
|
Create a QEMUMonitorProtocol class.
|
||||||
|
|
||||||
|
@param address: QEMU address, can be either a unix socket path (string)
|
||||||
|
or a tuple in the form ( address, port ) for a TCP
|
||||||
|
connection
|
||||||
|
@param server: server mode listens on the socket (bool)
|
||||||
|
@raise socket.error on socket connection errors
|
||||||
|
@note No connection is established, this is done by the connect() or
|
||||||
|
accept() methods
|
||||||
|
"""
|
||||||
|
self.__events = []
|
||||||
|
self.__address = address
|
||||||
|
self.__sock = self.__get_sock()
|
||||||
|
self.__sockfile = None
|
||||||
|
if server:
|
||||||
|
            self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.__sock.bind(self.__address)
            self.__sock.listen(1)

    def __get_sock(self):
        if isinstance(self.__address, tuple):
            family = socket.AF_INET
        else:
            family = socket.AF_UNIX
        return socket.socket(family, socket.SOCK_STREAM)

    def __negotiate_capabilities(self):
        greeting = self.__json_read()
        if greeting is None or "QMP" not in greeting:
            raise QMPConnectError
        # Greeting seems ok, negotiate capabilities
        resp = self.cmd('qmp_capabilities')
        if "return" in resp:
            return greeting
        raise QMPCapabilitiesError

    def __json_read(self, only_event=False):
        while True:
            data = self.__sockfile.readline()
            if not data:
                return
            resp = json.loads(data)
            if 'event' in resp:
                self.logger.debug("<<< %s", resp)
                self.__events.append(resp)
                if not only_event:
                    continue
            return resp

    def __get_events(self, wait=False):
        """
        Check for new events in the stream and cache them in __events.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
            period elapses.
        @raise QMPConnectError: If wait is True but no events could be
            retrieved or if some other error occurred.
        """

        # Check for new events regardless and pull them into the cache:
        self.__sock.setblocking(0)
        try:
            self.__json_read()
        except socket.error as err:
            if err.errno == errno.EAGAIN:
                # No data available
                pass
        self.__sock.setblocking(1)

        # Wait for new events, if needed.
        # if wait is 0.0, this means "no wait" and is also implicitly false.
        if not self.__events and wait:
            if isinstance(wait, float):
                self.__sock.settimeout(wait)
            try:
                ret = self.__json_read(only_event=True)
            except socket.timeout:
                raise QMPTimeoutError("Timeout waiting for event")
            except Exception:
                raise QMPConnectError("Error while reading from socket")
            if ret is None:
                raise QMPConnectError("Error while reading from socket")
            self.__sock.settimeout(None)

    def connect(self, negotiate=True):
        """
        Connect to the QMP Monitor and perform capabilities negotiation.

        @return QMP greeting dict
        @raise socket.error on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities
        """
        self.__sock.connect(self.__address)
        self.__sockfile = self.__sock.makefile()
        if negotiate:
            return self.__negotiate_capabilities()

    def accept(self):
        """
        Await connection from QMP Monitor and perform capabilities negotiation.

        @return QMP greeting dict
        @raise socket.error on socket connection errors
        @raise QMPConnectError if the greeting is not received
        @raise QMPCapabilitiesError if fails to negotiate capabilities
        """
        self.__sock.settimeout(15)
        self.__sock, _ = self.__sock.accept()
        self.__sockfile = self.__sock.makefile()
        return self.__negotiate_capabilities()

    def cmd_obj(self, qmp_cmd):
        """
        Send a QMP command to the QMP Monitor.

        @param qmp_cmd: QMP command to be sent as a Python dict
        @return QMP response as a Python dict or None if the connection has
            been closed
        """
        self.logger.debug(">>> %s", qmp_cmd)
        try:
            self.__sock.sendall(json.dumps(qmp_cmd).encode('utf-8'))
        except socket.error as err:
            if err.errno == errno.EPIPE:
                return
            raise socket.error(err)
        resp = self.__json_read()
        self.logger.debug("<<< %s", resp)
        return resp

    def cmd(self, name, args=None, cmd_id=None):
        """
        Build a QMP command and send it to the QMP Monitor.

        @param name: command name (string)
        @param args: command arguments (dict)
        @param cmd_id: command id (dict, list, string or int)
        """
        qmp_cmd = {'execute': name}
        if args:
            qmp_cmd['arguments'] = args
        if cmd_id:
            qmp_cmd['id'] = cmd_id
        return self.cmd_obj(qmp_cmd)

    def command(self, cmd, **kwds):
        """
        Build and send a QMP command to the monitor, report errors if any
        """
        ret = self.cmd(cmd, kwds)
        if "error" in ret:
            raise Exception(ret['error']['desc'])
        return ret['return']

    def pull_event(self, wait=False):
        """
        Pulls a single event.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
            period elapses.
        @raise QMPConnectError: If wait is True but no events could be
            retrieved or if some other error occurred.

        @return The first available QMP event, or None.
        """
        self.__get_events(wait)

        if self.__events:
            return self.__events.pop(0)
        return None

    def get_events(self, wait=False):
        """
        Get a list of available QMP events.

        @param wait (bool): block until an event is available.
        @param wait (float): If wait is a float, treat it as a timeout value.

        @raise QMPTimeoutError: If a timeout float is provided and the timeout
            period elapses.
        @raise QMPConnectError: If wait is True but no events could be
            retrieved or if some other error occurred.

        @return The list of available QMP events.
        """
        self.__get_events(wait)
        return self.__events

    def clear_events(self):
        """
        Clear current list of pending events.
        """
        self.__events = []

    def close(self):
        self.__sock.close()
        self.__sockfile.close()

    def settimeout(self, timeout):
        self.__sock.settimeout(timeout)

    def get_sock_fd(self):
        return self.__sock.fileno()

    def is_scm_available(self):
        return self.__sock.family == socket.AF_UNIX
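A minimal usage sketch of the QMP client above. The class and module names follow upstream QEMU's qmp.py (class QEMUMonitorProtocol in module qmp); the socket path and command are illustrative only, not taken from this repository:

    # Hypothetical usage; /tmp/qmp.sock is a made-up monitor socket path.
    from qmp import QEMUMonitorProtocol

    monitor = QEMUMonitorProtocol("/tmp/qmp.sock")
    monitor.connect()                        # performs the qmp_capabilities handshake
    print(monitor.command("query-status"))   # e.g. {'status': 'running', 'running': True, ...}
    monitor.close()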
310
host/virtualmachine.py
Executable file
@@ -0,0 +1,310 @@
# QEMU Manual
# https://qemu.weilnetz.de/doc/qemu-doc.html

# For QEMU Monitor Protocol Commands Information, See
# https://qemu.weilnetz.de/doc/qemu-doc.html#pcsys_005fmonitor

import errno
import os
import subprocess
import tempfile
import time

from functools import wraps
from os.path import join
from typing import Union

import bitmath
import sshtunnel
from decouple import config

import qmp
from config import (WITHOUT_CEPH, VM_PREFIX, VM_DIR, IMAGE_DIR,
                    etcd_client, logging, request_pool,
                    running_vms, vm_pool)
from ucloud_common.helpers import get_ipv4_address
from ucloud_common.request import RequestEntry, RequestType
from ucloud_common.vm import VMEntry, VMStatus


class VM:
    def __init__(self, key, handle, vnc_socket_file):
        self.key = key  # type: str
        self.handle = handle  # type: qmp.QEMUMachine
        self.vnc_socket_file = vnc_socket_file  # type: tempfile.NamedTemporaryFile

    def __repr__(self):
        return "VM({})".format(self.key)


def get_start_command_args(
        vm_entry, vnc_sock_filename: str, migration=False, migration_port=4444
):
    threads_per_core = 1
    vm_memory = int(bitmath.parse_string(vm_entry.specs["ram"]).to_MB())
    vm_cpus = int(vm_entry.specs["cpu"])
    vm_uuid = vm_entry.uuid

    if WITHOUT_CEPH:
        command = "-drive file={},format=raw,if=virtio,cache=none".format(
            os.path.join(VM_DIR, vm_uuid)
        )
    else:
        command = "-drive file=rbd:uservms/{},format=raw,if=virtio,cache=none".format(
            vm_uuid
        )

    command += " -device virtio-rng-pci -vnc unix:{}".format(vnc_sock_filename)
    command += " -m {} -smp cores={},threads={}".format(
        vm_memory, vm_cpus, threads_per_core
    )
    command += " -name {}".format(vm_uuid)

    if migration:
        command += " -incoming tcp:0:{}".format(migration_port)

    command += " -nic tap,model=virtio,mac={}".format(vm_entry.mac)
    return command.split(" ")


def create_vm_object(vm_entry, migration=False, migration_port=4444):
    # NOTE: If migration suddenly stops working, having different
    #       VNC unix socket filenames on the source and destination host
    #       is a possible cause of it.

    # REQUIREMENT: Use a Unix socket instead of a TCP port for VNC
    vnc_sock_file = tempfile.NamedTemporaryFile()

    qemu_args = get_start_command_args(
        vm_entry=vm_entry,
        vnc_sock_filename=vnc_sock_file.name,
        migration=migration,
        migration_port=migration_port,
    )
    qemu_machine = qmp.QEMUMachine("/usr/bin/qemu-system-x86_64", args=qemu_args)
    return VM(vm_entry.key, qemu_machine, vnc_sock_file)


def get_vm(vm_list: list, vm_key) -> Union[VM, None]:
    return next((vm for vm in vm_list if vm.key == vm_key), None)


def need_running_vm(func):
    @wraps(func)
    def wrapper(e):
        vm = get_vm(running_vms, e.key)
        if vm:
            try:
                status = vm.handle.command("query-status")
                logging.debug("VM Status Check - %s", status)
            except Exception as exception:
                logging.info("%s failed - VM %s %s", func.__name__, e, exception)
            else:
                return func(e)

            return None
        else:
            logging.info("%s failed because VM %s is not running", func.__name__, e.key)
            return None

    return wrapper


def create(vm_entry: VMEntry):
    vm_hdd = int(bitmath.parse_string(vm_entry.specs["os-ssd"]).to_MB())

    if WITHOUT_CEPH:
        _command_to_create = [
            "cp",
            os.path.join(IMAGE_DIR, vm_entry.image_uuid),
            os.path.join(VM_DIR, vm_entry.uuid),
        ]

        _command_to_extend = [
            "qemu-img",
            "resize",
            "-f", "raw",
            os.path.join(VM_DIR, vm_entry.uuid),
            "{}M".format(vm_hdd),
        ]
    else:
        _command_to_create = [
            "rbd",
            "clone",
            "images/{}@protected".format(vm_entry.image_uuid),
            "uservms/{}".format(vm_entry.uuid),
        ]

        _command_to_extend = [
            "rbd",
            "resize",
            "uservms/{}".format(vm_entry.uuid),
            "--size",
            str(vm_hdd),
        ]

    try:
        subprocess.check_output(_command_to_create)
    except subprocess.CalledProcessError as e:
        if e.returncode == errno.EEXIST:
            logging.debug("Image for vm %s exists", vm_entry.uuid)
            # File already exists. No problem, continue.
            return

        # This exception catches all other errors
        # i.e. FileNotFound (BaseImage), pool does not exist, etc.
        logging.exception(e)

        vm_entry.status = "ERROR"
    else:
        try:
            subprocess.check_output(_command_to_extend)
        except Exception as e:
            logging.exception(e)
        else:
            logging.info("New VM Created")


def start(vm_entry: VMEntry):
    _vm = get_vm(running_vms, vm_entry.key)

    # VM already running. No need to proceed further.
    if _vm:
        logging.info("VM %s already running", vm_entry.uuid)
        return
    else:
        create(vm_entry)
        launch_vm(vm_entry)


@need_running_vm
def stop(vm_entry):
    vm = get_vm(running_vms, vm_entry.key)
    vm.handle.shutdown()
    if not vm.handle.is_running():
        vm_entry.add_log("Shutdown successfully")
        vm_entry.declare_stopped()
        vm_pool.put(vm_entry)
        running_vms.remove(vm)


def delete(vm_entry):
    logging.info("Deleting VM | %s", vm_entry)
    stop(vm_entry)
    path_without_protocol = vm_entry.path[vm_entry.path.find(":") + 1:]

    if WITHOUT_CEPH:
        vm_deletion_command = ["rm", os.path.join(VM_DIR, vm_entry.uuid)]
    else:
        vm_deletion_command = ["rbd", "rm", path_without_protocol]

    try:
        subprocess.check_output(vm_deletion_command)
    except Exception as e:
        logging.exception(e)
    else:
        etcd_client.client.delete(vm_entry.key)


def transfer(request_event):
    # This function runs on the source host, i.e. the host on which the VM
    # is initially running. This host is responsible for transferring the
    # VM state to the destination host.

    _host, _port = request_event.parameters["host"], request_event.parameters["port"]
    _uuid = request_event.uuid
    _destination = request_event.destination_host_key
    vm = get_vm(running_vms, join(VM_PREFIX, _uuid))

    if vm:
        tunnel = sshtunnel.SSHTunnelForwarder(
            (_host, 22),
            ssh_username=config("ssh_username"),
            ssh_pkey=config("ssh_pkey"),
            ssh_private_key_password=config("ssh_private_key_password"),
            remote_bind_address=("127.0.0.1", _port),
        )
        try:
            tunnel.start()
        except sshtunnel.BaseSSHTunnelForwarderError:
            logging.exception("Couldn't establish connection to (%s, 22)", _host)
        else:
            vm.handle.command(
                "migrate", uri="tcp:{}:{}".format(_host, tunnel.local_bind_port)
            )

            status = vm.handle.command("query-migrate")["status"]
            while status not in ["failed", "completed"]:
                time.sleep(2)
                status = vm.handle.command("query-migrate")["status"]

            with vm_pool.get_put(request_event.uuid) as source_vm:
                if status == "failed":
                    source_vm.add_log("Migration Failed")
                elif status == "completed":
                    # If the VM is successfully migrated then shut down the VM
                    # on this host and update its hostname to the destination host key
                    source_vm.add_log("Successfully migrated")
                    source_vm.hostname = _destination
                    running_vms.remove(vm)
                    vm.handle.shutdown()
                source_vm.in_migration = False  # VM transfer finished
        finally:
            tunnel.close()


def init_migration(vm_entry, destination_host_key):
    # This function runs on the destination host, i.e. the host to which the
    # VM is transferred by the migration.
    # This host is responsible for starting the VM that receives the
    # state of the VM running on the source host.

    _vm = get_vm(running_vms, vm_entry.key)

    if _vm:
        # VM already running. No need to proceed further.
        logging.info("%s Already running", _vm.key)
        return

    launch_vm(vm_entry, migration=True, migration_port=4444,
              destination_host_key=destination_host_key)


def launch_vm(vm_entry, migration=False, migration_port=None, destination_host_key=None):
    logging.info("Starting %s", vm_entry.key)

    vm = create_vm_object(vm_entry, migration=migration, migration_port=migration_port)
    try:
        vm.handle.launch()
    except Exception as e:
        logging.exception(e)

        if migration:
            # We don't care whether MachineError or any other error occurred
            vm.handle.shutdown()
        else:
            # Error during a typical launch of a VM
            vm_entry.add_log("Error occurred while starting VM")
            vm_entry.declare_killed()
            vm_pool.put(vm_entry)
    else:
        vm_entry.vnc_socket = vm.vnc_socket_file.name
        running_vms.append(vm)

        if migration:
            vm_entry.in_migration = True
            r = RequestEntry.from_scratch(
                type=RequestType.TransferVM,
                hostname=vm_entry.hostname,
                parameters={"host": get_ipv4_address(), "port": 4444},
                uuid=vm_entry.uuid,
                destination_host_key=destination_host_key,
            )
            request_pool.put(r)
        else:
            # Typical launching of a VM
            vm_entry.status = VMStatus.running
            vm_entry.add_log("Started successfully")

        vm_pool.put(vm_entry)
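A rough illustration of the argument list get_start_command_args() produces. All concrete values below (UUID, MAC, specs, directories, temp file name) are made up; only the flag layout comes from the function above:

    # For a hypothetical VM entry with specs {"ram": "1GB", "cpu": "1"},
    # uuid "testvm-0001", mac "02:00:00:00:00:01", WITHOUT_CEPH=True and
    # VM_DIR="/var/vm", the returned list would look like:
    #
    # ['-drive', 'file=/var/vm/testvm-0001,format=raw,if=virtio,cache=none',
    #  '-device', 'virtio-rng-pci', '-vnc', 'unix:/tmp/tmpXXXXXXXX',
    #  '-m', '1000', '-smp', 'cores=1,threads=1',
    #  '-name', 'testvm-0001',
    #  '-nic', 'tap,model=virtio,mac=02:00:00:00:00:01']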
22
imagescanner/config.py
Executable file
@@ -0,0 +1,22 @@
import logging

from etcd3_wrapper import Etcd3Wrapper
from decouple import config

BASE_PATH = config("BASE_DIR", "/var/www")
WITHOUT_CEPH = config("WITHOUT_CEPH", False, cast=bool)
ETCD_URL = config("ETCD_URL")
IMAGE_PREFIX = config("IMAGE_PREFIX")
IMAGE_STORE_PREFIX = config("IMAGE_STORE_PREFIX")
IMAGE_DIR = config("IMAGE_DIR")

logging.basicConfig(
    level=logging.DEBUG,
    filename="log.txt",
    filemode="a",
    format="%(asctime)s: %(levelname)s - %(message)s",
    datefmt="%d-%b-%y %H:%M:%S",
)


client = Etcd3Wrapper(host=ETCD_URL)
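Since these values are read with python-decouple, a .env file next to the service could look like this. The variable names come from the config above; every value is a placeholder, not taken from this repository:

    BASE_DIR=/var/www
    WITHOUT_CEPH=True
    ETCD_URL=localhost
    IMAGE_PREFIX=/v1/image
    IMAGE_STORE_PREFIX=/v1/image_store
    IMAGE_DIR=/var/image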
108
imagescanner/main.py
Executable file
@@ -0,0 +1,108 @@
import os
import json
import subprocess
import sys

from config import (logging, client, IMAGE_DIR,
                    BASE_PATH, WITHOUT_CEPH,
                    IMAGE_PREFIX, IMAGE_STORE_PREFIX)


def qemu_img_type(path):
    qemu_img_info_command = ["qemu-img", "info", "--output", "json", path]
    try:
        qemu_img_info = subprocess.check_output(qemu_img_info_command)
    except Exception as e:
        logging.exception(e)
        return None
    else:
        qemu_img_info = json.loads(qemu_img_info.decode("utf-8"))
        return qemu_img_info["format"]


# If you are using the WITHOUT_CEPH flag in .env
# then please make sure that the IMAGE_DIR directory
# exists, otherwise this script will fail.
if WITHOUT_CEPH and not os.path.isdir(IMAGE_DIR):
    print("You have set WITHOUT_CEPH to True, so "
          "the {} directory must exist, but it doesn't.".format(IMAGE_DIR))
    sys.exit(1)

try:
    subprocess.check_output(['which', 'qemu-img'])
except Exception:
    print("qemu-img missing")
    sys.exit(1)

# We want to get image entries that request images to be created
images = client.get_prefix(IMAGE_PREFIX, value_in_json=True)
images_to_be_created = list(filter(lambda im: im.value['status'] == 'TO_BE_CREATED', images))

for image in images_to_be_created:
    try:
        image_uuid = image.key.split('/')[-1]
        image_owner = image.value['owner']
        image_filename = image.value['filename']
        image_store_name = image.value['store_name']
        image_full_path = os.path.join(BASE_PATH, image_owner, image_filename)

        image_stores = client.get_prefix(IMAGE_STORE_PREFIX, value_in_json=True)
        user_image_store = next(filter(
            lambda s, store_name=image_store_name: s.value["name"] == store_name,
            image_stores
        ))

        image_store_pool = user_image_store.value['attributes']['pool']

    except Exception as e:
        logging.exception(e)
    else:
        # At least our basic data is available

        qemu_img_convert_command = ["qemu-img", "convert", "-f", "qcow2",
                                    "-O", "raw", image_full_path, "image.raw"]

        if WITHOUT_CEPH:
            image_import_command = ["mv", "image.raw", os.path.join(IMAGE_DIR, image_uuid)]
            snapshot_creation_command = ["true"]
            snapshot_protect_command = ["true"]
        else:
            image_import_command = ["rbd", "import", "image.raw",
                                    "{}/{}".format(image_store_pool, image_uuid)]
            snapshot_creation_command = ["rbd", "snap", "create",
                                         "{}/{}@protected".format(image_store_pool, image_uuid)]
            snapshot_protect_command = ["rbd", "snap", "protect",
                                        "{}/{}@protected".format(image_store_pool, image_uuid)]

        # First check whether the image is qcow2

        if qemu_img_type(image_full_path) == "qcow2":
            try:
                # Convert .qcow2 to .raw
                subprocess.check_output(qemu_img_convert_command)

                # Import image either to ceph or to the filesystem
                subprocess.check_output(image_import_command)

                # Create and protect snapshot
                subprocess.check_output(snapshot_creation_command)
                subprocess.check_output(snapshot_protect_command)

            except Exception as e:
                logging.exception(e)

            else:
                # Everything is successfully done
                image.value["status"] = "CREATED"
                client.put(image.key, json.dumps(image.value))
        else:
            # The user-provided image is either not found or of an invalid format
            image.value["status"] = "INVALID_IMAGE"
            client.put(image.key, json.dumps(image.value))

        try:
            os.remove("image.raw")
        except Exception:
            pass
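Concretely, for a hypothetical qcow2 image owned by user "alice", stored as "distro.qcow2", with UUID "img-0001" and an image store whose pool is "images" (all of these names are made up), the loop above ends up running, in order:

    # Illustrative command lists only; the real values come from etcd.
    ["qemu-img", "convert", "-f", "qcow2", "-O", "raw", "/var/www/alice/distro.qcow2", "image.raw"]
    ["rbd", "import", "image.raw", "images/img-0001"]
    ["rbd", "snap", "create", "images/img-0001@protected"]
    ["rbd", "snap", "protect", "images/img-0001@protected"]

and then flips the image's status from TO_BE_CREATED to CREATED, or to INVALID_IMAGE if the uploaded file is not qcow2.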
21
metadata/config.py
Normal file
@@ -0,0 +1,21 @@
import logging

from etcd3_wrapper import Etcd3Wrapper
from decouple import config

from ucloud_common.vm import VmPool

logging.basicConfig(
    level=logging.DEBUG,
    filename="log.txt",
    filemode="a",
    format="%(asctime)s: %(levelname)s - %(message)s",
    datefmt="%d-%b-%y %H:%M:%S",
)


VM_PREFIX = config("VM_PREFIX")

etcd_client = Etcd3Wrapper(host=config("ETCD_URL"))

VM_POOL = VmPool(etcd_client, VM_PREFIX)
84
metadata/main.py
Normal file
@@ -0,0 +1,84 @@
import os

from flask import Flask, request
from flask_restful import Resource, Api
from config import etcd_client, VM_POOL

app = Flask(__name__)
api = Api(app)


def get_vm_entry(mac_addr):
    return next(filter(lambda vm: vm.mac == mac_addr, VM_POOL.vms), None)


# https://stackoverflow.com/questions/37140846/how-to-convert-ipv6-link-local-address-to-mac-address-in-python
def ipv62mac(ipv6):
    # remove subnet info if given
    subnet_index = ipv6.find('/')
    if subnet_index != -1:
        ipv6 = ipv6[:subnet_index]

    ipv6_parts = ipv6.split(':')
    mac_parts = list()
    for ipv6_part in ipv6_parts[-4:]:
        while len(ipv6_part) < 4:
            ipv6_part = '0' + ipv6_part
        mac_parts.append(ipv6_part[:2])
        mac_parts.append(ipv6_part[-2:])

    # modify parts to match MAC value
    mac_parts[0] = '%02x' % (int(mac_parts[0], 16) ^ 2)
    del mac_parts[4]
    del mac_parts[3]

    return ':'.join(mac_parts)


class Root(Resource):
    @staticmethod
    def get():
        data = get_vm_entry(ipv62mac(request.remote_addr))

        if not data:
            return {'message': 'Metadata for such VM does not exist.'}, 404
        else:
            # {user_prefix}/{realm}/{name}/key
            etcd_key = os.path.join(USER_PREFIX, data.value['owner_realm'],
                                    data.value['owner'], 'key')
            etcd_entry = etcd_client.get_prefix(etcd_key, value_in_json=True)
            user_personal_ssh_keys = [key.value for key in etcd_entry]
            data.value['metadata']['ssh-keys'] += user_personal_ssh_keys
            return data.value['metadata'], 200

    @staticmethod
    def post():
        return {'message': 'Previous implementation is deprecated.'}
        # data = etcd_client.get("/v1/metadata/{}".format(request.remote_addr), value_in_json=True)
        # print(data)
        # if data:
        #     for k in request.json:
        #         if k not in data.value:
        #             data.value[k] = request.json[k]
        #             if k.endswith("-list"):
        #                 data.value[k] = [request.json[k]]
        #         else:
        #             if k.endswith("-list"):
        #                 data.value[k].append(request.json[k])
        #             else:
        #                 data.value[k] = request.json[k]
        #     etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
        #                     data.value, value_in_json=True)
        # else:
        #     data = {}
        #     for k in request.json:
        #         data[k] = request.json[k]
        #         if k.endswith("-list"):
        #             data[k] = [request.json[k]]
        #     etcd_client.put("/v1/metadata/{}".format(request.remote_addr),
        #                     data, value_in_json=True)


api.add_resource(Root, '/')

if __name__ == '__main__':
    app.run(debug=True, host="::", port=80)
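A quick worked example of the EUI-64 reversal done by ipv62mac() (addresses are made up): the link-local address fe80::211:22ff:fe33:4455 is derived from MAC 00:11:22:33:44:55 by flipping the universal/local bit of the first octet and inserting ff:fe in the middle, so the function strips ff:fe and flips the bit back:

    # Illustrative only.
    assert ipv62mac('fe80::211:22ff:fe33:4455') == '00:11:22:33:44:55'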
25
scheduler/config.py
Executable file
@@ -0,0 +1,25 @@
import logging

from decouple import config
from etcd3_wrapper import Etcd3Wrapper
from ucloud_common.vm import VmPool
from ucloud_common.host import HostPool
from ucloud_common.request import RequestPool

logging.basicConfig(
    level=logging.DEBUG,
    filename="log.txt",
    filemode="a",
    format="%(asctime)s: %(levelname)s - %(message)s",
    datefmt="%d-%b-%y %H:%M:%S",
)

vm_prefix = config("VM_PREFIX")
host_prefix = config("HOST_PREFIX")
request_prefix = config("REQUEST_PREFIX")

etcd_client = Etcd3Wrapper(host=config("ETCD_URL"))

vm_pool = VmPool(etcd_client, vm_prefix)
host_pool = HostPool(etcd_client, host_prefix)
request_pool = RequestPool(etcd_client, request_prefix)
123
scheduler/helper.py
Executable file
@@ -0,0 +1,123 @@
import bitmath

from collections import Counter
from functools import reduce

from ucloud_common.vm import VmPool, VMStatus
from ucloud_common.host import HostPool, HostStatus
from ucloud_common.request import RequestEntry, RequestPool, RequestType

from decouple import config
from config import etcd_client as client

vm_pool = VmPool(client, config("VM_PREFIX"))
host_pool = HostPool(client, config("HOST_PREFIX"))
request_pool = RequestPool(client, config("REQUEST_PREFIX"))


def accumulated_specs(vms_specs):
    if not vms_specs:
        return {}
    return reduce((lambda x, y: Counter(x) + Counter(y)), vms_specs)


def remaining_resources(host_specs, vms_specs):
    # Return remaining resources, i.e. host_specs - vms_specs

    _vms_specs = Counter(vms_specs)
    _remaining = Counter(host_specs)

    for component in _vms_specs:
        if isinstance(_vms_specs[component], str):
            _vms_specs[component] = int(bitmath.parse_string(_vms_specs[component]).to_MB())
        elif isinstance(_vms_specs[component], list):
            _vms_specs[component] = map(lambda x: int(bitmath.parse_string(x).to_MB()), _vms_specs[component])
            _vms_specs[component] = reduce(lambda x, y: x + y, _vms_specs[component], 0)

    for component in _remaining:
        if isinstance(_remaining[component], str):
            _remaining[component] = int(bitmath.parse_string(_remaining[component]).to_MB())
        elif isinstance(_remaining[component], list):
            _remaining[component] = map(lambda x: int(bitmath.parse_string(x).to_MB()), _remaining[component])
            _remaining[component] = reduce(lambda x, y: x + y, _remaining[component], 0)

    print(_vms_specs, _remaining)
    _remaining.subtract(_vms_specs)

    return _remaining


class NoSuitableHostFound(Exception):
    """Exception when no host found that can host a VM."""


def get_suitable_host(vm_specs, hosts=None):
    if hosts is None:
        hosts = host_pool.by_status(HostStatus.alive)

    for host in hosts:
        # Filter them by hostname
        vms = vm_pool.by_host(host.key)

        # Filter them by status
        vms = vm_pool.by_status(VMStatus.running, vms)

        running_vms_specs = [vm.specs for vm in vms]

        # Accumulate all of their combined specs
        running_vms_accumulated_specs = accumulated_specs(
            running_vms_specs
        )

        # Find out remaining resources after
        # host_specs - already running vm_specs
        remaining = remaining_resources(
            host.specs, running_vms_accumulated_specs
        )

        # Find out remaining - new_vm_specs
        remaining = remaining_resources(remaining, vm_specs)

        if all(map(lambda x: x >= 0, remaining.values())):
            return host.key

    raise NoSuitableHostFound


def dead_host_detection():
    # Bring out your dead! - Monty Python and the Holy Grail
    hosts = host_pool.by_status(HostStatus.alive)
    dead_hosts_keys = []

    for host in hosts:
        # Only check those who claim to be alive
        if host.status == HostStatus.alive:
            if not host.is_alive():
                dead_hosts_keys.append(host.key)

    return dead_hosts_keys


def dead_host_mitigation(dead_hosts_keys):
    for host_key in dead_hosts_keys:
        host = host_pool.get(host_key)
        host.declare_dead()

        vms_hosted_on_dead_host = vm_pool.by_host(host_key)
        for vm in vms_hosted_on_dead_host:
            vm.declare_killed()
            vm_pool.put(vm)
        host_pool.put(host)


def assign_host(vm):
    vm.hostname = get_suitable_host(vm.specs)
    vm_pool.put(vm)

    r = RequestEntry.from_scratch(type=RequestType.StartVM,
                                  uuid=vm.uuid,
                                  hostname=vm.hostname)
    request_pool.put(r)

    vm.log.append("VM scheduled for starting")
    return vm.hostname
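A small illustration of how accumulated_specs() and remaining_resources() combine (the numbers are made up and already in plain integer units, so the bitmath branches are not exercised):

    # Illustrative values only.
    host_specs = {"cpu": 32, "ram": 128, "hdd": 1024}
    vm_specs = [{"cpu": 4, "ram": 8, "hdd": 100},
                {"cpu": 2, "ram": 4, "hdd": 50}]

    used = accumulated_specs(vm_specs)            # Counter({'hdd': 150, 'ram': 12, 'cpu': 6})
    left = remaining_resources(host_specs, used)  # cpu: 26, ram: 116, hdd: 874

get_suitable_host() accepts a host only if every value in the final remainder, after also subtracting the new VM's specs, is >= 0.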
89
scheduler/main.py
Executable file
@@ -0,0 +1,89 @@
# TODO
#  1. send an email to an email address defined by env['admin-email']
#     if resources are finished
#  2. Introduce a status endpoint of the scheduler -
#     maybe expose a prometheus compatible output

import logging

from ucloud_common.request import RequestEntry, RequestType

from config import etcd_client as client
from config import (host_pool, request_pool, vm_pool, request_prefix)
from helper import (get_suitable_host, dead_host_mitigation, dead_host_detection,
                    assign_host, NoSuitableHostFound)


def main():
    pending_vms = []

    for request_iterator in [
        client.get_prefix(request_prefix, value_in_json=True),
        client.watch_prefix(request_prefix, timeout=5, value_in_json=True),
    ]:
        for request_event in request_iterator:
            request_entry = RequestEntry(request_event)
            logging.debug("%s, %s", request_entry.key, request_entry.value)

            # Never run a time-critical mechanism inside the timeout
            # mechanism, because the timeout mechanism only fires
            # when no other event is happening. It means that under
            # heavy load there would be no timeout event.
            if request_entry.type == "TIMEOUT":

                # Detect hosts that are dead and set their status
                # to "DEAD", and their VMs' status to "KILLED"
                logging.debug("TIMEOUT event occurred")
                dead_hosts = dead_host_detection()
                logging.debug("Dead hosts: %s", dead_hosts)
                dead_host_mitigation(dead_hosts)

                # If there are VMs that weren't assigned a host
                # because there wasn't a host available which
                # meets the requirements of that VM, then we
                # create a new ScheduleVM request for that VM
                # on our behalf.
                while pending_vms:
                    pending_vm_entry = pending_vms.pop()
                    r = RequestEntry.from_scratch(type="ScheduleVM",
                                                  uuid=pending_vm_entry.uuid,
                                                  hostname=pending_vm_entry.hostname)
                    request_pool.put(r)

            elif request_entry.type == RequestType.ScheduleVM:
                vm_entry = vm_pool.get(request_entry.uuid)
                client.client.delete(request_entry.key)  # consume Request

                # If the Request is about a VM which is labelled as "migration"
                # and has a destination
                if hasattr(request_entry, "migration") and request_entry.migration \
                        and hasattr(request_entry, "destination") and request_entry.destination:
                    try:
                        get_suitable_host(vm_specs=vm_entry.specs,
                                          hosts=[host_pool.get(request_entry.destination)])
                    except NoSuitableHostFound:
                        logging.info("Requested destination host doesn't have enough capacity "
                                     "to hold %s", vm_entry.uuid)
                    else:
                        r = RequestEntry.from_scratch(type=RequestType.InitVMMigration,
                                                      uuid=request_entry.uuid,
                                                      destination=request_entry.destination)
                        request_pool.put(r)

                # If the Request is about a VM that just wants to get started/created
                else:
                    # assign_host raises NoSuitableHostFound when we can't
                    # assign a host to a VM because of resource constraints
                    try:
                        assign_host(vm_entry)
                    except NoSuitableHostFound:
                        vm_entry.log.append("Can't schedule VM. No Resource Left.")
                        vm_pool.put(vm_entry)

                        pending_vms.append(vm_entry)
                        logging.info("No Resource Left. Emailing admin....")


logging.info("%s SESSION STARTED %s", '*' * 5, '*' * 5)
main()
214
scheduler/tests/test_basics.py
Executable file
@@ -0,0 +1,214 @@
import unittest
import sys
import json
import multiprocessing
import time

from datetime import datetime
from os.path import dirname


BASE_DIR = dirname(dirname(__file__))
sys.path.insert(0, BASE_DIR)

from main import (
    accumulated_specs,
    remaining_resources,
    VmPool,
    dead_host_detection,
    dead_host_mitigation,
    main,
)

from config import etcd_client


class TestFunctions(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.client = etcd_client
        cls.host_prefix = "/test/host"
        cls.vm_prefix = "/test/vm"

        # These deletions could also be in
        # tearDown(), but it is more appropriate here
        # as it enables us to check the etcd store
        # even after the test is run
        cls.client.client.delete_prefix(cls.host_prefix)
        cls.client.client.delete_prefix(cls.vm_prefix)
        cls.create_hosts(cls)
        cls.create_vms(cls)

        cls.p = multiprocessing.Process(
            target=main, args=[cls.vm_prefix, cls.host_prefix]
        )
        cls.p.start()

    @classmethod
    def tearDownClass(cls):
        cls.p.terminate()

    def create_hosts(self):
        host1 = {
            "cpu": 32,
            "ram": 128,
            "hdd": 1024,
            "sdd": 0,
            "status": "ALIVE",
            "last_heartbeat": datetime.utcnow().isoformat(),
        }
        host2 = {
            "cpu": 16,
            "ram": 64,
            "hdd": 512,
            "sdd": 0,
            "status": "ALIVE",
            "last_heartbeat": datetime.utcnow().isoformat(),
        }

        host3 = {
            "cpu": 16,
            "ram": 32,
            "hdd": 256,
            "sdd": 256,
            "status": "ALIVE",
            "last_heartbeat": datetime.utcnow().isoformat(),
        }
        with self.client.client.lock("lock"):
            self.client.put(f"{self.host_prefix}/1", host1, value_in_json=True)
            self.client.put(f"{self.host_prefix}/2", host2, value_in_json=True)
            self.client.put(f"{self.host_prefix}/3", host3, value_in_json=True)

    def create_vms(self):
        vm1 = json.dumps(
            {
                "owner": "meow",
                "specs": {"cpu": 4, "ram": 8, "hdd": 100, "sdd": 256},
                "hostname": "",
                "status": "REQUESTED_NEW",
            }
        )
        vm2 = json.dumps(
            {
                "owner": "meow",
                "specs": {"cpu": 16, "ram": 64, "hdd": 512, "sdd": 0},
                "hostname": "",
                "status": "REQUESTED_NEW",
            }
        )
        vm3 = json.dumps(
            {
                "owner": "meow",
                "specs": {"cpu": 16, "ram": 32, "hdd": 128, "sdd": 0},
                "hostname": "",
                "status": "REQUESTED_NEW",
            }
        )
        vm4 = json.dumps(
            {
                "owner": "meow",
                "specs": {"cpu": 16, "ram": 64, "hdd": 512, "sdd": 0},
                "hostname": "",
                "status": "REQUESTED_NEW",
            }
        )
        vm5 = json.dumps(
            {
                "owner": "meow",
                "specs": {"cpu": 2, "ram": 2, "hdd": 10, "sdd": 0},
                "hostname": "",
                "status": "REQUESTED_NEW",
            }
        )
        vm6 = json.dumps(
            {
                "owner": "meow",
                "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
                "hostname": "",
                "status": "REQUESTED_NEW",
            }
        )
        vm7 = json.dumps(
            {
                "owner": "meow",
                "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
                "hostname": "",
                "status": "REQUESTED_NEW",
            }
        )
        self.client.put(f"{self.vm_prefix}/1", vm1)
        self.client.put(f"{self.vm_prefix}/2", vm2)
        self.client.put(f"{self.vm_prefix}/3", vm3)
        self.client.put(f"{self.vm_prefix}/4", vm4)
        self.client.put(f"{self.vm_prefix}/5", vm5)
        self.client.put(f"{self.vm_prefix}/6", vm6)
        self.client.put(f"{self.vm_prefix}/7", vm7)

    def test_accumulated_specs(self):
        vms = [
            {"ssd": 10, "cpu": 4, "ram": 8},
            {"hdd": 10, "cpu": 4, "ram": 8},
            {"cpu": 8, "ram": 32},
        ]
        self.assertEqual(
            accumulated_specs(vms), {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
        )

    def test_remaining_resources(self):
        host_specs = {"ssd": 10, "cpu": 16, "ram": 48, "hdd": 10}
        vms_specs = {"ssd": 10, "cpu": 32, "ram": 12, "hdd": 0}
        resultant_specs = {"ssd": 0, "cpu": -16, "ram": 36, "hdd": 10}
        self.assertEqual(remaining_resources(host_specs, vms_specs),
                         resultant_specs)

    def test_vmpool(self):
        self.p.join(1)
        vm_pool = VmPool(self.client, self.vm_prefix)

        # vm_pool by host
        actual = vm_pool.by_host(vm_pool.vms, f"{self.host_prefix}/3")
        ground_truth = [
            (
                f"{self.vm_prefix}/1",
                {
                    "owner": "meow",
                    "specs": {"cpu": 4, "ram": 8, "hdd": 100, "sdd": 256},
                    "hostname": f"{self.host_prefix}/3",
                    "status": "SCHEDULED_DEPLOY",
                },
            )
        ]
        self.assertEqual(actual[0], ground_truth[0])

        # vm_pool by status
        actual = vm_pool.by_status(vm_pool.vms, "REQUESTED_NEW")
        ground_truth = [
            (
                f"{self.vm_prefix}/7",
                {
                    "owner": "meow",
                    "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
                    "hostname": "",
                    "status": "REQUESTED_NEW",
                },
            )
        ]
        self.assertEqual(actual[0], ground_truth[0])

        # vm_pool by except status
        actual = vm_pool.except_status(vm_pool.vms, "SCHEDULED_DEPLOY")
        ground_truth = [
            (
                f"{self.vm_prefix}/7",
                {
                    "owner": "meow",
                    "specs": {"cpu": 10, "ram": 22, "hdd": 146, "sdd": 0},
                    "hostname": "",
                    "status": "REQUESTED_NEW",
                },
            )
        ]
        self.assertEqual(actual[0], ground_truth[0])


if __name__ == "__main__":
    unittest.main()
81
scheduler/tests/test_dead_host_mechanism.py
Executable file
@@ -0,0 +1,81 @@
import unittest
import sys
import json
import multiprocessing
import time

from datetime import datetime
from os.path import dirname

BASE_DIR = dirname(dirname(__file__))
sys.path.insert(0, BASE_DIR)

from main import (
    accumulated_specs,
    remaining_resources,
    VmPool,
    dead_host_detection,
    dead_host_mitigation,
    main,
    config
)


class TestDeadHostMechanism(unittest.TestCase):
    def setUp(self):
        self.client = config.etcd_client
        self.host_prefix = "/test/host"
        self.vm_prefix = "/test/vm"

        self.client.client.delete_prefix(self.host_prefix)
        self.client.client.delete_prefix(self.vm_prefix)

        self.create_hosts()

    def create_hosts(self):
        host1 = {
            "cpu": 32,
            "ram": 128,
            "hdd": 1024,
            "sdd": 0,
            "status": "ALIVE",
            "last_heartbeat": datetime.utcnow().isoformat(),
        }
        host2 = {
            "cpu": 16,
            "ram": 64,
            "hdd": 512,
            "sdd": 0,
            "status": "ALIVE",
            "last_heartbeat": datetime(2011, 1, 1).isoformat(),
        }

        host3 = {"cpu": 16, "ram": 32, "hdd": 256, "sdd": 256}
        host4 = {
            "cpu": 16,
            "ram": 32,
            "hdd": 256,
            "sdd": 256,
            "status": "DEAD",
            "last_heartbeat": datetime(2011, 1, 1).isoformat(),
        }
        with self.client.client.lock("lock"):
            self.client.put(f"{self.host_prefix}/1", host1, value_in_json=True)
            self.client.put(f"{self.host_prefix}/2", host2, value_in_json=True)
            self.client.put(f"{self.host_prefix}/3", host3, value_in_json=True)
            self.client.put(f"{self.host_prefix}/4", host4, value_in_json=True)

    def test_dead_host_detection(self):
        hosts = self.client.get_prefix(self.host_prefix, value_in_json=True)
        deads = dead_host_detection(hosts)
        self.assertEqual(deads, ["/test/host/2", "/test/host/3"])
        return deads

    def test_dead_host_mitigation(self):
        deads = self.test_dead_host_detection()
        dead_host_mitigation(self.client, deads)
        hosts = self.client.get_prefix(self.host_prefix, value_in_json=True)
        deads = dead_host_detection(hosts)
        self.assertEqual(deads, [])


if __name__ == "__main__":
    unittest.main()
16
ucloud.py
Normal file
@@ -0,0 +1,16 @@
import argparse
import subprocess as sp

arg_parser = argparse.ArgumentParser(prog='ucloud',
                                     description='Open Source Cloud Management Software')
arg_parser.add_argument('component',
                        choices=['api', 'scheduler', 'host',
                                 'filescanner', 'imagescanner',
                                 'metadata'])
arg_parser.add_argument('component_args', nargs='*')
args = arg_parser.parse_args()

try:
    command = ['pipenv', 'run', 'python', 'main.py', *args.component_args]
    sp.run(command, cwd=args.component)
except Exception as error:
    print(error)
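So a component is launched by name, with any extra arguments forwarded to that component's main.py inside its own pipenv; for example (invocation illustrative):

    python ucloud.py scheduler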