@@ -51,3 +51,39 @@ Digging into ceph, seeing the actual image:
csi-vol-d3c96f79-c7ba-11eb-8e52-1ed2f2d63451
[20:11] server47.place7:~#
```
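
The command that produced this listing is not part of the shown hunk; as a rough
sketch, inspecting such a CSI-provisioned image from the rook-ceph toolbox could
look like this (the pool name `replicapool` is an assumption, not taken from the
output above):

```
# Hypothetical inspection commands; substitute the pool your StorageClass
# actually points at for "replicapool".
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- rbd ls -p replicapool
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- \
    rbd info replicapool/csi-vol-d3c96f79-c7ba-11eb-8e52-1ed2f2d63451
```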
## Filesystem
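
The two MDS pods below belong to a CephFilesystem named `myfs`. The manifest that
created it is not included in this hunk; a minimal sketch of what it presumably
looks like (replication sizes and the standby setting are assumptions) is:

```
# Hypothetical CephFilesystem manifest -- the real one used for myfs is not
# shown here; pool sizes and metadataServer settings are assumed defaults.
cat <<EOF | kubectl apply -f -
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    - replicated:
        size: 3
  metadataServer:
    activeCount: 1
    activeStandby: true
EOF
```

With `activeCount: 1` and `activeStandby: true`, Rook starts the two MDS pods seen
below: one active (`myfs-a`) and one hot standby (`myfs-b`), which is what `ceph -s`
later reports as "1/1 daemons up, 1 hot standby".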
```
[21:06] server47.place7:~/ungleich-k8s/rook# kubectl -n rook-ceph get pod -l app=rook-ceph-mds
NAME                                    READY   STATUS            RESTARTS   AGE
rook-ceph-mds-myfs-a-5f547fd7c6-qmp2r   1/1     Running           0          16s
rook-ceph-mds-myfs-b-dd78b444b-49h5h    0/1     PodInitializing   0          14s
[21:06] server47.place7:~/ungleich-k8s/rook# kubectl -n rook-ceph get pod -l app=rook-ceph-mds
NAME                                    READY   STATUS    RESTARTS   AGE
rook-ceph-mds-myfs-a-5f547fd7c6-qmp2r   1/1     Running   0          20s
rook-ceph-mds-myfs-b-dd78b444b-49h5h    1/1     Running   0          18s
[21:06] server47.place7:~/ungleich-k8s/rook# kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph -s
  cluster:
    id:     049110d9-9368-4750-b3d3-6ca9a80553d7
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim

  services:
    mon: 3 daemons, quorum a,b,d (age 98m)
    mgr: a(active, since 97m), standbys: b
    mds: 1/1 daemons up, 1 hot standby
    osd: 6 osds: 6 up (since 66m), 6 in (since 67m)

  data:
    volumes: 1/1 healthy
    pools:   4 pools, 97 pgs
    objects: 31 objects, 27 KiB
    usage:   40 MiB used, 45 GiB / 45 GiB avail
    pgs:     97 active+clean

  io:
    client:   3.3 KiB/s rd, 2.8 KiB/s wr, 2 op/s rd, 1 op/s wr

[21:07] server47.place7:~/ungleich-k8s/rook#
```
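
The remaining HEALTH_WARN ("mons are allowing insecure global_id reclaim") is the
post-CVE-2021-20288 notice. Once all clients and daemons run patched versions, one
way to clear it is to disable insecure reclaim on the mons; a sketch, assuming the
toolbox deployment is still available:

```
# Only do this after every client is updated -- unpatched clients will be
# refused by the mons once insecure global_id reclaim is disallowed.
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- \
    ceph config set mon auth_allow_insecure_global_id_reclaim false
```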