KubeVirt Virtual Machine Snapshot and Restore with Ceph RBD

SuKai October 25, 2024

  1. Install the Ceph RBD snapshot-controller and CSI snapshotter
  2. Install the RBD VolumeSnapshotClass
  3. Create a KubeVirt virtual machine snapshot
  4. Restore a virtual machine from a snapshot

Install the Ceph RBD snapshot-controller and CSI snapshotter

root123@rack1-master34:~$ scp 10.10.102.26:/home/root123/v8.1.0.tar.gz .
v8.1.0.tar.gz                                                                                                                                                    100% 7774KB 137.8MB/s   00:00
root123@rack1-master34:~$ scp 10.10.102.26:/home/root123/kustomize .
kustomize                                                                                                                                                        100%   14MB 143.7MB/s   00:00
root123@rack1-master34:~$ sudo cp kustomize /usr/local/bin/
root123@rack1-master34:~$ ls /usr/local/bin/
containerd       containerd-shim-runc-v1  containerd-stress  ctr   etcdctl     etcd-scripts  helm                kubeadm  kubectl-ko  kubernetes-scripts  nerdctl  runc
containerd-shim  containerd-shim-runc-v2  crictl             etcd  etcdctl.sh  etcdutl       k8s-certs-renew.sh  kubectl  kubelet     kustomize           netaddr  virtctl

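The two files copied above are the external-snapshotter v8.1.0 source archive and a kustomize binary. The extraction step is not captured in the transcript; it would be roughly the following, yielding the external-snapshotter-8.1.0 directory used below:

tar xzf v8.1.0.tar.gz
cd external-snapshotter-8.1.0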
root123@rack1-master34:~/external-snapshotter-8.1.0$ kubectl kustomize client/config/crd | kubectl create -f -
customresourcedefinition.apiextensions.k8s.io/volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumegroupsnapshots.groupsnapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotclasses.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshotcontents.snapshot.storage.k8s.io created
customresourcedefinition.apiextensions.k8s.io/volumesnapshots.snapshot.storage.k8s.io created
root123@rack1-master34:~/external-snapshotter-8.1.0$
root123@rack1-master34:~/external-snapshotter-8.1.0$ kubectl -n kube-system kustomize deploy/kubernetes/snapshot-controller | kubectl create -f -
serviceaccount/snapshot-controller created
role.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
clusterrole.rbac.authorization.k8s.io/snapshot-controller-runner created
rolebinding.rbac.authorization.k8s.io/snapshot-controller-leaderelection created
clusterrolebinding.rbac.authorization.k8s.io/snapshot-controller-role created
deployment.apps/snapshot-controller created
root123@rack1-master34:~/external-snapshotter-8.1.0$
root123@rack1-master34:~/external-snapshotter-8.1.0$ kubectl kustomize deploy/kubernetes/csi-snapshotter | kubectl create -f -
serviceaccount/csi-provisioner created
serviceaccount/csi-snapshotter created
role.rbac.authorization.k8s.io/external-provisioner-cfg created
role.rbac.authorization.k8s.io/external-snapshotter-leaderelection created
clusterrole.rbac.authorization.k8s.io/external-provisioner-runner created
clusterrole.rbac.authorization.k8s.io/external-snapshotter-runner created
rolebinding.rbac.authorization.k8s.io/csi-provisioner-role-cfg created
rolebinding.rbac.authorization.k8s.io/csi-snapshotter-provisioner-role-cfg created
rolebinding.rbac.authorization.k8s.io/external-snapshotter-leaderelection created
clusterrolebinding.rbac.authorization.k8s.io/csi-provisioner-role created
clusterrolebinding.rbac.authorization.k8s.io/csi-snapshotter-provisioner-role created
clusterrolebinding.rbac.authorization.k8s.io/csi-snapshotter-role created
service/csi-snapshotter created
statefulset.apps/csi-snapshotter created
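Before moving on, it is worth confirming that the snapshot CRDs and the snapshot-controller deployment created above are in place. A minimal check (commands assumed, output omitted):

kubectl get crd | grep snapshot.storage.k8s.io
kubectl -n kube-system get deployment snapshot-controller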

Install the RBD VolumeSnapshotClass

root123@rack1-master34:~/rook-1.15.0$ kubectl create -f deploy/examples/csi/rbd/snapshotclass.yaml
volumesnapshotclass.snapshot.storage.k8s.io/csi-rbdplugin-snapclass created
root123@rack1-master34:~/rook-1.15.0$ kubectl get sc
NAME                        PROVISIONER                  RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
rook-ceph-block (default)   rook-ceph.rbd.csi.ceph.com   Delete          Immediate           true                   36d
root123@rack1-master34:~/rook-1.15.0$ kubectl get volumesnapshotclass
NAME                      DRIVER                       DELETIONPOLICY   AGE
csi-rbdplugin-snapclass   rook-ceph.rbd.csi.ceph.com   Delete           38s
root123@rack1-master34:~/rook-1.15.0$
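The snapshotclass.yaml used above is the example shipped with Rook. For reference, it looks roughly like the following; the clusterID and snapshotter-secret parameters shown here are assumptions that match a default Rook deployment in the rook-ceph namespace and must be adjusted to your cluster:

apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-rbdplugin-snapclass
driver: rook-ceph.rbd.csi.ceph.com
parameters:
  # Assumed defaults for a Rook Ceph cluster running in the rook-ceph namespace.
  clusterID: rook-ceph
  csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
deletionPolicy: Delete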

Create a KubeVirt virtual machine snapshot

root123@rack1-master34:~/beijing-tyht$ cat bj-cloud05-snapshot.yaml
apiVersion: snapshot.kubevirt.io/v1beta1
kind: VirtualMachineSnapshot
metadata:
  name: snap-bj-cloud05
spec:
  source:
    apiGroup: kubevirt.io
    kind: VirtualMachine
    name: bj-cloud05

root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing apply -f bj-cloud05-snapshot.yaml
virtualmachinesnapshot.snapshot.kubevirt.io/snap-bj-cloud05 created
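Applying the manifest makes KubeVirt create a VirtualMachineSnapshotContent object and, through the CSI snapshot machinery, one VolumeSnapshot per VM volume (the OS disk and the data disk here). A quick readiness check on the VM snapshot itself (command assumed; example output appears in a listing further below):

kubectl -n beijing get virtualmachinesnapshot snap-bj-cloud05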

root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing get volumesnapshot
NAME                                                              READYTOUSE   SOURCEPVC        SOURCESNAPSHOTCONTENT   RESTORESIZE   SNAPSHOTCLASS             SNAPSHOTCONTENT                                    CREATIONTIME   AGE
vmsnapshot-432493df-9401-4e95-9354-d41ae9f32698-volume-datadisk   true         bj-cloud05-data                                         csi-rbdplugin-snapclass   snapcontent-5dbd28c3-d754-4782-9dfb-059c1a61459e   18s            19s
vmsnapshot-432493df-9401-4e95-9354-d41ae9f32698-volume-osdisk     true         bj-cloud05-os                                           csi-rbdplugin-snapclass   snapcontent-207ec89d-6181-4d9b-ace4-1e16803d8f67   18s            19s


root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing describe volumesnapshot vmsnapshot-432493df-9401-4e95-9354-d41ae9f32698-volume-osdisk
Name:         vmsnapshot-432493df-9401-4e95-9354-d41ae9f32698-volume-osdisk
Namespace:    beijing
Labels:       snapshot.kubevirt.io/source-vm-name=bj-cloud05
              snapshot.kubevirt.io/source-vm-namespace=beijing
Annotations:  <none>
API Version:  snapshot.storage.k8s.io/v1
Kind:         VolumeSnapshot
Metadata:
  Creation Timestamp:  2024-10-25T06:22:43Z
  Finalizers:
    snapshot.storage.kubernetes.io/volumesnapshot-as-source-protection
    snapshot.storage.kubernetes.io/volumesnapshot-bound-protection
  Generation:  1
  Owner References:
    API Version:           snapshot.kubevirt.io/v1beta1
    Block Owner Deletion:  true
    Controller:            true
    Kind:                  VirtualMachineSnapshotContent
    Name:                  vmsnapshot-content-432493df-9401-4e95-9354-d41ae9f32698
    UID:                   784d3788-9686-4c06-8bd0-d6d15e8709a8
  Resource Version:        32872933
  UID:                     207ec89d-6181-4d9b-ace4-1e16803d8f67
Spec:
  Source:
    Persistent Volume Claim Name:  bj-cloud05-os
  Volume Snapshot Class Name:      csi-rbdplugin-snapclass
Status:
  Bound Volume Snapshot Content Name:  snapcontent-207ec89d-6181-4d9b-ace4-1e16803d8f67
  Creation Time:                       2024-10-25T06:22:44Z
  Ready To Use:                        true
Events:
  Type    Reason            Age   From                 Message
  ----    ------            ----  ----                 -------
  Normal  CreatingSnapshot  62s   snapshot-controller  Waiting for a snapshot beijing/vmsnapshot-432493df-9401-4e95-9354-d41ae9f32698-volume-osdisk to be created by the CSI driver.
  Normal  SnapshotCreated   59s   snapshot-controller  Snapshot beijing/vmsnapshot-432493df-9401-4e95-9354-d41ae9f32698-volume-osdisk was successfully created by the CSI driver.
  Normal  SnapshotReady     59s   snapshot-controller  Snapshot beijing/vmsnapshot-432493df-9401-4e95-9354-d41ae9f32698-volume-osdisk is ready to use.


root123@rack1-master34:~$ kubectl -n rook-ceph exec -it rook-ceph-tools-5fc85f966c-qnlkn -- /bin/bash
bash-5.1$ rados lspools
.mgr
replicapool
bash-5.1$ rbd ls -p replicapool
csi-snap-0a8b0f3d-a31b-492a-8b27-3b47783b0f1a
csi-snap-7c996347-53af-411f-b522-0a69bfc49ca5
csi-vol-02590aa0-1dbb-4076-9591-034d0c4d8728
csi-vol-04c2fbee-1fcf-4efd-b2de-b0a71276ba23
csi-vol-107b34f7-4a24-4ffa-9a0f-eb333e9a7657
csi-vol-198b056b-7586-4c4e-ace5-e80578b6ffd5
csi-vol-363dad7f-ea50-4c79-a9a3-3a6930fd0e25
csi-vol-5e51a9b3-4d84-44e6-8af6-7f0fb7272441
csi-vol-66b761b5-6526-4fe8-b83b-0f75c9f58e16
csi-vol-829274c6-6859-4049-8fa0-1fca2d295e42
csi-vol-863f9ee9-96a3-4d4a-9d77-1bfe789082fa
csi-vol-9605f49a-e195-4337-8034-bd52adbcdbf4
csi-vol-9cac1eaa-cbfb-492a-96dc-e8357d32f1f6
csi-vol-9e50f259-7f66-4072-9553-d23e22e8ab41
csi-vol-9fa7e0dc-cacd-439e-a65a-f87167893877
csi-vol-a01ad3c6-ac49-438b-b6f9-839d60387347
csi-vol-b0fb55fd-1d40-4666-b094-b2e7f6748bf7
csi-vol-bee03269-8694-4cc2-a238-ccbddcc278ea
csi-vol-ce2c449a-199a-43a8-b807-488691ad139c
csi-vol-d00ae3e8-dad9-4ea1-ac71-d7ac7de05fcc
csi-vol-d4cc5fc1-7c39-4754-be69-641099a25848
csi-vol-fc7c2988-65f3-4e90-982c-66b5d4859d47
bash-5.1$ rbd info replicapool/csi-snap-0a8b0f3d-a31b-492a-8b27-3b47783b0f1a
rbd image 'csi-snap-0a8b0f3d-a31b-492a-8b27-3b47783b0f1a':
        size 200 GiB in 51200 objects
        order 22 (4 MiB objects)
        snapshot_count: 1
        id: afcacc9dc903a
        block_name_prefix: rbd_data.afcacc9dc903a
        format: 2
        features: layering, deep-flatten, operations
        op_features: clone-child
        flags:
        create_timestamp: Fri Oct 25 06:22:44 2024
        access_timestamp: Fri Oct 25 06:22:44 2024
        modify_timestamp: Fri Oct 25 06:22:44 2024
        parent: replicapool/csi-vol-b0fb55fd-1d40-4666-b094-b2e7f6748bf7@8de9e2eb-4d3f-4576-9e16-fa477400d552
        overlap: 200 GiB
bash-5.1$
exit
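The parent field above shows how the RBD CSI driver implements the snapshot: it takes an RBD snapshot of the source volume csi-vol-b0fb55fd-1d40-4666-b094-b2e7f6748bf7 and clones it into the csi-snap image. The underlying RBD snapshot can be listed from the toolbox as well (command assumed, output omitted):

rbd snap ls replicapool/csi-vol-b0fb55fd-1d40-4666-b094-b2e7f6748bf7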

Restore a virtual machine from a snapshot

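The restore target below is vm-w2k12-clone, which was snapshotted beforehand in the same way as bj-cloud05. The manifest for that snapshot is not shown in the transcript; it would look roughly like this, with the name and target taken from the objects listed below:

apiVersion: snapshot.kubevirt.io/v1beta1
kind: VirtualMachineSnapshot
metadata:
  name: snap-vm-w2k12-clone-20241025
  namespace: beijing
spec:
  source:
    apiGroup: kubevirt.io
    kind: VirtualMachine
    name: vm-w2k12-clone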
root123@rack1-master34:~/beijing-tyht$ kubectl get volumesnapshot -n beijing
NAME                                                              READYTOUSE   SOURCEPVC           SOURCESNAPSHOTCONTENT   RESTORESIZE   SNAPSHOTCLASS             SNAPSHOTCONTENT                                    CREATIONTIME   AGE
vmsnapshot-432493df-9401-4e95-9354-d41ae9f32698-volume-datadisk   true         bj-cloud05-data                                            csi-rbdplugin-snapclass   snapcontent-5dbd28c3-d754-4782-9dfb-059c1a61459e   12m            12m
vmsnapshot-432493df-9401-4e95-9354-d41ae9f32698-volume-osdisk     true         bj-cloud05-os                                              csi-rbdplugin-snapclass   snapcontent-207ec89d-6181-4d9b-ace4-1e16803d8f67   12m            12m
vmsnapshot-495e6ec3-acad-404b-a20f-663282c526af-volume-osdisk     true         vm-w2k12-clone-os                                         csi-rbdplugin-snapclass   snapcontent-eda4ef4e-c1c3-4bc7-b69f-3423c0ef20fd   26s            27s

root123@rack1-master34:~/beijing-tyht$ kubectl get virtualmachinesnapshot -n beijing
NAME                           SOURCEKIND       SOURCENAME       PHASE       READYTOUSE   CREATIONTIME   ERROR
snap-bj-cloud05                 VirtualMachine   bj-cloud05        Succeeded   true         12m
snap-vm-w2k12-clone-20241025   VirtualMachine   vm-w2k12-clone   Succeeded   true         76s


root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing get pods -o wide
NAME                                 READY   STATUS    RESTARTS   AGE     IP            NODE              NOMINATED NODE   READINESS GATES
virt-launcher-bj-cloud05-wkmbq        2/2     Running   0          2d2h    10.11.1.146   rack1-worker40    <none>           1/1
virt-launcher-bj-cloud06-9ggrs        2/2     Running   0          2d      10.11.1.130   rack1-worker36    <none>           1/1
virt-launcher-bj-cloud07-wnw6n        2/2     Running   0          2d2h    10.11.1.131   rack2-worker92    <none>           1/1
virt-launcher-bj-cloud08-hhjzj        2/2     Running   0          2d      10.11.1.139   rack2-worker93    <none>           1/1
virt-launcher-bj-ubuntu-98tx9        2/2     Running   0          6d23h   10.11.1.115   rack3-worker127   <none>           1/1
virt-launcher-cloudw2k12-01-ktqsk    3/3     Running   0          3d4h    10.11.1.141   rack2-worker89    <none>           1/1
virt-launcher-cloudw2k12-02-pj2r2    3/3     Running   0          2d3h    10.11.1.143   rack3-worker124   <none>           1/1
virt-launcher-vm-w2k12-clone-xjnps   2/2     Running   0          46h     10.11.1.106   rack1-worker39    <none>           1/1
root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing get vms
NAME             AGE    STATUS    READY
bj-test01        35d    Stopped   False
bj-cloud05        2d2h   Running   True
bj-cloud06        2d2h   Running   True
bj-cloud07        2d2h   Running   True
bj-cloud08        3d5h   Running   True
bj-ubuntu        34d    Running   True
cloudw2k12-01    3d4h   Running   True
cloudw2k12-02    3d4h   Running   True
vm-w2k12-clone   46h    Running   True
root123@rack1-master34:~/beijing-tyht$ sudo virtctl -n beijing stop vm-w2k12-clone
VM vm-w2k12-clone was scheduled to stop
root123@rack1-master34:~/beijing-tyht$



root123@rack1-master34:~/beijing-tyht$ cat vm-w2k12-clone-restore.yaml
apiVersion: snapshot.kubevirt.io/v1beta1
kind: VirtualMachineRestore
metadata:
  name: restore-vm-w2k12-clone
  namespace: beijing
spec:
  target:
    apiGroup: kubevirt.io
    kind: VirtualMachine
    name: vm-w2k12-clone
  virtualMachineSnapshotName: snap-vm-w2k12-clone-20241025
root123@rack1-master34:~/beijing-tyht$
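Because the target VM was stopped above, this is an offline restore: the restore controller replaces the VM's disks with PVCs restored from the VolumeSnapshots. The apply step is not captured in the transcript; it would simply be:

kubectl -n beijing apply -f vm-w2k12-clone-restore.yaml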
root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing get virtualmachinerestore
NAME                     TARGETKIND       TARGETNAME       COMPLETE   RESTORETIME   ERROR
restore-vm-w2k12-clone   VirtualMachine   vm-w2k12-clone   true       39s
root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing describe virtualmachinerestore
Name:         restore-vm-w2k12-clone
Namespace:    beijing
Labels:       <none>
Annotations:  <none>
API Version:  snapshot.kubevirt.io/v1beta1
Kind:         VirtualMachineRestore
Metadata:
  Creation Timestamp:  2024-10-25T06:44:03Z
  Generation:          8
  Owner References:
    API Version:           kubevirt.io/v1
    Block Owner Deletion:  true
    Controller:            true
    Kind:                  VirtualMachine
    Name:                  vm-w2k12-clone
    UID:                   ed8d827d-669d-4cf9-9f2e-e3a562731084
  Resource Version:        32886085
  UID:                     f9edcc02-84a1-40fb-a2b4-44ce56ba42fe
Spec:
  Target:
    API Group:                    kubevirt.io
    Kind:                         VirtualMachine
    Name:                         vm-w2k12-clone
  Virtual Machine Snapshot Name:  snap-vm-w2k12-clone-20241025
Status:
  Complete:  true
  Conditions:
    Last Probe Time:       <nil>
    Last Transition Time:  2024-10-25T06:44:04Z
    Reason:                Operation complete
    Status:                False
    Type:                  Progressing
    Last Probe Time:       <nil>
    Last Transition Time:  2024-10-25T06:44:04Z
    Reason:                Operation complete
    Status:                True
    Type:                  Ready
  Deleted Data Volumes:
    vm-w2k12-clone-os
  Restore Time:  2024-10-25T06:44:04Z
  Restores:
    Data Volume Name:         restore-f9edcc02-84a1-40fb-a2b4-44ce56ba42fe-osdisk
    Persistent Volume Claim:  restore-f9edcc02-84a1-40fb-a2b4-44ce56ba42fe-osdisk
    Volume Name:              osdisk
    Volume Snapshot Name:     vmsnapshot-495e6ec3-acad-404b-a20f-663282c526af-volume-osdisk
Events:
  Type     Reason                         Age   From                Message
  ----     ------                         ----  ----                -------
  Warning  VirtualMachineRestoreError     47s   restore-controller  VirtualMachineRestore encountered error Operation cannot be fulfilled on virtualmachines.kubevirt.io "vm-w2k12-clone": the object has been modified; please apply your changes to the latest version and try again
  Warning  VirtualMachineRestoreError     47s   restore-controller  VirtualMachineRestore encountered error Failed to create restore DataVolume: datavolumes.cdi.kubevirt.io "restore-f9edcc02-84a1-40fb-a2b4-44ce56ba42fe-osdisk" already exists
  Normal   VirtualMachineRestoreComplete  47s   restore-controller  Successfully completed VirtualMachineRestore restore-vm-w2k12-clone


root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing get pvc
NAME                                                  STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS      VOLUMEATTRIBUTESCLASS   AGE
bj-test01-os                                          Bound    pvc-e90b1fb3-983c-4761-96d3-93549b40c14a   187Gi      RWO            rook-ceph-block   <unset>                 35d
bj-cloud05-data                                        Bound    pvc-2ec6a99b-8cc3-4f42-b910-3cc6b911bc6a   200Gi      RWO            rook-ceph-block   <unset>                 3d22h
bj-cloud05-os                                          Bound    pvc-64034956-06d0-498d-bb24-82f79617772b   40Gi       RWO            rook-ceph-block   <unset>                 2d2h
bj-cloud06-data                                        Bound    pvc-9a23fedc-05f6-404b-aafb-440e429497fc   200Gi      RWO            rook-ceph-block   <unset>                 3d21h
bj-cloud06-os                                          Bound    pvc-62a4ebfa-973c-490d-8ca9-e3a96cfb1237   40Gi       RWO            rook-ceph-block   <unset>                 2d2h
bj-cloud07-data                                        Bound    pvc-0aeb8df6-4993-411e-b44e-7812c759fc57   200Gi      RWO            rook-ceph-block   <unset>                 3d21h
bj-cloud07-os                                          Bound    pvc-2575bf83-c924-4b53-98ce-38ea592df0a6   40Gi       RWO            rook-ceph-block   <unset>                 2d2h
bj-cloud08-data                                        Bound    pvc-41ea765f-4627-4ac0-8155-a48cbbabd89e   200Gi      RWO            rook-ceph-block   <unset>                 3d21h
bj-cloud08-os                                          Bound    pvc-9728b2e5-45df-4eb6-8366-e83fd4e167ae   40Gi       RWO            rook-ceph-block   <unset>                 2d3h
bj-ubuntu-os                                          Bound    pvc-db7ee2d1-91e7-45c1-a477-6ae598e92734   187Gi      RWO            rook-ceph-block   <unset>                 34d
iso-centos77                                          Bound    pvc-e2f00a7b-0b91-4ead-84be-da36ffa1651a   5Gi        RWO            rook-ceph-block   <unset>                 3d5h
iso-win2k12                                           Bound    pvc-6a31f599-0277-4ac1-b787-fff91c2b3173   5Gi        RWO            rook-ceph-block   <unset>                 3d4h
iso-win2k12-02                                        Bound    pvc-22075538-1dca-4661-9db0-effed89b9c02   5Gi        RWO            rook-ceph-block   <unset>                 3d4h
restore-f9edcc02-84a1-40fb-a2b4-44ce56ba42fe-osdisk   Bound    pvc-8c7c8500-b824-4053-9801-a788634d0e47   40Gi       RWO            rook-ceph-block   <unset>                 71s
cloudw2k12-01-data                                    Bound    pvc-b9fd9ed7-ce73-4ec2-b40c-0596633e88c7   2Ti        RWO            rook-ceph-block   <unset>                 3d22h
cloudw2k12-01-os                                      Bound    pvc-e7f60bf1-cfe3-4630-b5dc-b1fd28777b6b   40Gi       RWO            rook-ceph-block   <unset>                 3d4h
cloudw2k12-02-data                                    Bound    pvc-4404ddc1-7f2b-4627-b07c-0adf474f1a5d   200Gi      RWO            rook-ceph-block   <unset>                 3d2h
cloudw2k12-02-os                                      Bound    pvc-c1acb3e5-63cb-437f-808d-5f8511496bf5   40Gi       RWO            rook-ceph-block   <unset>                 3d4h
root123@rack1-master34:~/beijing-tyht$
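The original vm-w2k12-clone-os DataVolume has been deleted and replaced by the restore-f9edcc02-84a1-40fb-a2b4-44ce56ba42fe-osdisk PVC listed above; the VM's osdisk volume now points at the restored DataVolume. One way to confirm this before starting the VM (command assumed, output omitted):

kubectl -n beijing get vm vm-w2k12-clone -o jsonpath='{.spec.template.spec.volumes}'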
root123@rack1-master34:~/beijing-tyht$ sudo virtctl -n beijing start vm-w2k12-clone
VM vm-w2k12-clone was scheduled to start

root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing get vms
NAME             AGE    STATUS    READY
bj-test01        35d    Stopped   False
bj-cloud05        2d2h   Running   True
bj-cloud06        2d2h   Running   True
bj-cloud07        2d2h   Running   True
bj-cloud08        3d5h   Running   True
bj-ubuntu        34d    Running   True
cloudw2k12-01    3d4h   Running   True
cloudw2k12-02    3d4h   Running   True
vm-w2k12-clone   46h    Running   True
root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing get pods
NAME                                 READY   STATUS    RESTARTS   AGE
virt-launcher-bj-cloud05-wkmbq        2/2     Running   0          2d2h
virt-launcher-bj-cloud06-9ggrs        2/2     Running   0          2d
virt-launcher-bj-cloud07-wnw6n        2/2     Running   0          2d2h
virt-launcher-bj-cloud08-hhjzj        2/2     Running   0          2d
virt-launcher-bj-ubuntu-98tx9        2/2     Running   0          6d23h
virt-launcher-cloudw2k12-01-ktqsk    3/3     Running   0          3d4h
virt-launcher-cloudw2k12-02-pj2r2    3/3     Running   0          2d3h
virt-launcher-vm-w2k12-clone-s8b7j   2/2     Running   0          47s
root123@rack1-master34:~/beijing-tyht$ kubectl -n beijing get pods -o wide
NAME                                 READY   STATUS    RESTARTS   AGE     IP            NODE              NOMINATED NODE   READINESS GATES
virt-launcher-bj-cloud05-wkmbq        2/2     Running   0          2d2h    10.11.1.146   rack1-worker40    <none>           1/1
virt-launcher-bj-cloud06-9ggrs        2/2     Running   0          2d      10.11.1.130   rack1-worker36    <none>           1/1
virt-launcher-bj-cloud07-wnw6n        2/2     Running   0          2d2h    10.11.1.131   rack2-worker92    <none>           1/1
virt-launcher-bj-cloud08-hhjzj        2/2     Running   0          2d      10.11.1.139   rack2-worker93    <none>           1/1
virt-launcher-bj-ubuntu-98tx9        2/2     Running   0          6d23h   10.11.1.115   rack3-worker127   <none>           1/1
virt-launcher-cloudw2k12-01-ktqsk    3/3     Running   0          3d4h    10.11.1.141   rack2-worker89    <none>           1/1
virt-launcher-cloudw2k12-02-pj2r2    3/3     Running   0          2d3h    10.11.1.143   rack3-worker124   <none>           1/1
virt-launcher-vm-w2k12-clone-s8b7j   2/2     Running   0          112s    10.11.1.106   rack1-worker39    <none>           1/1