@willzhang
Created March 13, 2022 01:46
Rook-Log-Collection.log
This file has been truncated.
Last login: Sun Mar 13 09:02:20 2022 from 113.118.79.224
Authorized users only. All activity may be monitored and reported
[root@Ops-repo ~]# for p in $(kubectl -n rook-ceph get pods -o jsonpath='{.items[*].metadata.name}')
> do
> for c in $(kubectl -n rook-ceph get pod ${p} -o jsonpath='{.spec.containers[*].name}')
> do
> echo "BEGIN logs from pod: ${p} ${c}"
> kubectl -n rook-ceph logs -c ${c} ${p}
> echo "END logs from pod: ${p} ${c}"
> done
> done
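A variant of the loop above (a sketch, not part of the original capture) writes each container's log to its own file and also grabs logs from containers that have restarted; the rook-ceph-logs directory name is an arbitrary choice.
mkdir -p rook-ceph-logs
for p in $(kubectl -n rook-ceph get pods -o jsonpath='{.items[*].metadata.name}'); do
  for c in $(kubectl -n rook-ceph get pod "${p}" -o jsonpath='{.spec.containers[*].name}'); do
    # current logs for this container
    kubectl -n rook-ceph logs -c "${c}" "${p}" > "rook-ceph-logs/${p}_${c}.log"
    # logs from the previous instance, if the container restarted (ignore the error when there is none)
    kubectl -n rook-ceph logs --previous -c "${c}" "${p}" > "rook-ceph-logs/${p}_${c}.previous.log" 2>/dev/null || true
  done
done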
BEGIN logs from pod: csi-cephfsplugin-9s5tz driver-registrar
I0307 07:08:22.958715 1 main.go:166] Version: v2.5.0
I0307 07:08:22.958747 1 main.go:167] Running node-driver-registrar in mode=registration
I0307 07:08:23.963966 1 node_register.go:53] Starting Registration Server at: /registration/rook-ceph.cephfs.csi.ceph.com-reg.sock
I0307 07:08:23.964108 1 node_register.go:62] Registration Server started at: /registration/rook-ceph.cephfs.csi.ceph.com-reg.sock
I0307 07:08:23.964248 1 node_register.go:92] Skipping HTTP server because endpoint is set to: ""
I0307 07:08:24.522245 1 main.go:102] Received GetInfo call: &InfoRequest{}
I0307 07:08:24.522491 1 main.go:109] "Kubelet registration probe created" path="/var/lib/kubelet/plugins/rook-ceph.cephfs.csi.ceph.com/registration"
I0307 07:08:28.410978 1 main.go:120] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:true,Error:,}
END logs from pod: csi-cephfsplugin-9s5tz driver-registrar
BEGIN logs from pod: csi-cephfsplugin-9s5tz csi-cephfsplugin
END logs from pod: csi-cephfsplugin-9s5tz csi-cephfsplugin
BEGIN logs from pod: csi-cephfsplugin-9s5tz liveness-prometheus
END logs from pod: csi-cephfsplugin-9s5tz liveness-prometheus
BEGIN logs from pod: csi-cephfsplugin-j88wj driver-registrar
I0307 07:08:18.564711 1 main.go:166] Version: v2.5.0
I0307 07:08:18.564777 1 main.go:167] Running node-driver-registrar in mode=registration
I0307 07:08:21.840888 1 node_register.go:53] Starting Registration Server at: /registration/rook-ceph.cephfs.csi.ceph.com-reg.sock
I0307 07:08:21.865930 1 node_register.go:62] Registration Server started at: /registration/rook-ceph.cephfs.csi.ceph.com-reg.sock
I0307 07:08:21.866463 1 node_register.go:92] Skipping HTTP server because endpoint is set to: ""
I0307 07:08:22.883311 1 main.go:102] Received GetInfo call: &InfoRequest{}
I0307 07:08:22.883817 1 main.go:109] "Kubelet registration probe created" path="/var/lib/kubelet/plugins/rook-ceph.cephfs.csi.ceph.com/registration"
I0307 07:08:28.652612 1 main.go:120] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:true,Error:,}
E0308 14:38:52.811807 1 connection.go:132] Lost connection to unix:///csi/csi.sock.
END logs from pod: csi-cephfsplugin-j88wj driver-registrar
BEGIN logs from pod: csi-cephfsplugin-j88wj csi-cephfsplugin
END logs from pod: csi-cephfsplugin-j88wj csi-cephfsplugin
BEGIN logs from pod: csi-cephfsplugin-j88wj liveness-prometheus
END logs from pod: csi-cephfsplugin-j88wj liveness-prometheus
BEGIN logs from pod: csi-cephfsplugin-jhqcz driver-registrar
I0307 07:08:30.132627 1 main.go:166] Version: v2.5.0
I0307 07:08:30.132665 1 main.go:167] Running node-driver-registrar in mode=registration
I0307 07:08:30.136389 1 node_register.go:53] Starting Registration Server at: /registration/rook-ceph.cephfs.csi.ceph.com-reg.sock
I0307 07:08:30.136512 1 node_register.go:62] Registration Server started at: /registration/rook-ceph.cephfs.csi.ceph.com-reg.sock
I0307 07:08:30.136590 1 node_register.go:92] Skipping HTTP server because endpoint is set to: ""
I0307 07:08:32.836021 1 main.go:102] Received GetInfo call: &InfoRequest{}
I0307 07:08:32.836262 1 main.go:109] "Kubelet registration probe created" path="/var/lib/kubelet/plugins/rook-ceph.cephfs.csi.ceph.com/registration"
I0307 07:08:34.834806 1 main.go:120] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:true,Error:,}
END logs from pod: csi-cephfsplugin-jhqcz driver-registrar
BEGIN logs from pod: csi-cephfsplugin-jhqcz csi-cephfsplugin
END logs from pod: csi-cephfsplugin-jhqcz csi-cephfsplugin
BEGIN logs from pod: csi-cephfsplugin-jhqcz liveness-prometheus
END logs from pod: csi-cephfsplugin-jhqcz liveness-prometheus
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-attacher
I0312 18:01:18.732986 1 main.go:92] Version: v3.4.0
I0312 18:01:18.750353 1 common.go:111] Probing CSI driver for readiness
I0312 18:01:18.756442 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-attacher-leader-rook-ceph-cephfs-csi-ceph-com...
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-attacher
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-snapshotter
I0310 20:00:41.875302 1 main.go:104] Version: v5.0.0-rc3-webhook
I0310 20:00:42.036711 1 common.go:111] Probing CSI driver for readiness
I0310 20:00:42.044250 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-snapshotter-leader-rook-ceph-cephfs-csi-ceph-com...
I0312 18:00:57.009493 1 leaderelection.go:258] successfully acquired lease rook-ceph/external-snapshotter-leader-rook-ceph-cephfs-csi-ceph-com
I0312 18:00:57.025979 1 snapshot_controller_base.go:115] Starting CSI snapshotter
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-snapshotter
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-resizer
I0312 18:01:22.521753 1 main.go:93] Version : v1.4.0
I0312 18:01:22.785240 1 common.go:111] Probing CSI driver for readiness
I0312 18:01:22.808661 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-resizer-rook-ceph-cephfs-csi-ceph-com...
I0312 18:01:22.984569 1 leaderelection.go:258] successfully acquired lease rook-ceph/external-resizer-rook-ceph-cephfs-csi-ceph-com
I0312 18:01:22.985176 1 controller.go:255] Starting external resizer rook-ceph.cephfs.csi.ceph.com
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-resizer
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-provisioner
I0312 18:01:23.941799 1 csi-provisioner.go:139] Version: v3.1.0
I0312 18:01:23.941868 1 csi-provisioner.go:162] Building kube configs for running in cluster...
I0312 18:01:23.961324 1 common.go:111] Probing CSI driver for readiness
I0312 18:01:23.965230 1 csi-provisioner.go:278] CSI driver does not support PUBLISH_UNPUBLISH_VOLUME, not watching VolumeAttachments
I0312 18:01:23.965874 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/rook-ceph-cephfs-csi-ceph-com...
I0312 18:01:24.247931 1 leaderelection.go:258] successfully acquired lease rook-ceph/rook-ceph-cephfs-csi-ceph-com
I0312 18:01:24.349291 1 controller.go:811] Starting provisioner controller rook-ceph.cephfs.csi.ceph.com_csi-cephfsplugin-provisioner-6f54f6c477-blcxw_f953e454-d056-4fd9-8b05-5cd315b37f68!
I0312 18:01:24.349343 1 volume_store.go:97] Starting save volume queue
I0312 18:01:24.349306 1 clone_controller.go:66] Starting CloningProtection controller
I0312 18:01:24.349501 1 clone_controller.go:82] Started CloningProtection controller
I0312 18:01:24.450118 1 controller.go:860] Started provisioner controller rook-ceph.cephfs.csi.ceph.com_csi-cephfsplugin-provisioner-6f54f6c477-blcxw_f953e454-d056-4fd9-8b05-5cd315b37f68!
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-provisioner
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-cephfsplugin
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw csi-cephfsplugin
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw liveness-prometheus
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-blcxw liveness-prometheus
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-attacher
I0310 20:00:52.130694 1 main.go:92] Version: v3.4.0
I0310 20:00:52.313917 1 common.go:111] Probing CSI driver for readiness
I0310 20:00:52.360525 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-attacher-leader-rook-ceph-cephfs-csi-ceph-com...
I0312 18:01:02.898225 1 leaderelection.go:258] successfully acquired lease rook-ceph/external-attacher-leader-rook-ceph-cephfs-csi-ceph-com
I0312 18:01:03.483353 1 controller.go:128] Starting CSI attacher
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-attacher
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-snapshotter
I0312 18:01:02.045611 1 main.go:104] Version: v5.0.0-rc3-webhook
I0312 18:01:02.342741 1 common.go:111] Probing CSI driver for readiness
I0312 18:01:02.364325 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-snapshotter-leader-rook-ceph-cephfs-csi-ceph-com...
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-snapshotter
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-resizer
I0310 20:00:54.946653 1 main.go:93] Version : v1.4.0
I0310 20:00:55.020885 1 common.go:111] Probing CSI driver for readiness
I0310 20:00:55.075925 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-resizer-rook-ceph-cephfs-csi-ceph-com...
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-resizer
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-provisioner
I0311 16:00:54.771720 1 csi-provisioner.go:139] Version: v3.1.0
I0311 16:00:54.771774 1 csi-provisioner.go:162] Building kube configs for running in cluster...
I0311 16:00:54.897888 1 common.go:111] Probing CSI driver for readiness
I0311 16:00:54.926823 1 csi-provisioner.go:278] CSI driver does not support PUBLISH_UNPUBLISH_VOLUME, not watching VolumeAttachments
I0311 16:00:54.936684 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/rook-ceph-cephfs-csi-ceph-com...
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-provisioner
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-cephfsplugin
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw csi-cephfsplugin
BEGIN logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw liveness-prometheus
END logs from pod: csi-cephfsplugin-provisioner-6f54f6c477-g49tw liveness-prometheus
BEGIN logs from pod: csi-rbdplugin-jjhmz driver-registrar
I0307 07:08:33.176432 27582 main.go:166] Version: v2.5.0
I0307 07:08:33.176467 27582 main.go:167] Running node-driver-registrar in mode=registration
I0307 07:08:33.178931 27582 node_register.go:53] Starting Registration Server at: /registration/rook-ceph.rbd.csi.ceph.com-reg.sock
I0307 07:08:33.179058 27582 node_register.go:62] Registration Server started at: /registration/rook-ceph.rbd.csi.ceph.com-reg.sock
I0307 07:08:33.179144 27582 node_register.go:92] Skipping HTTP server because endpoint is set to: ""
I0307 07:08:37.839495 27582 main.go:102] Received GetInfo call: &InfoRequest{}
I0307 07:08:37.846324 27582 main.go:109] "Kubelet registration probe created" path="/var/lib/kubelet/plugins/rook-ceph.rbd.csi.ceph.com/registration"
I0307 07:08:38.185740 27582 main.go:120] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:true,Error:,}
END logs from pod: csi-rbdplugin-jjhmz driver-registrar
BEGIN logs from pod: csi-rbdplugin-jjhmz csi-rbdplugin
E0307 07:08:14.994665 26692 rbd_healer.go:141] list volumeAttachments failed, err: Get "https://10.233.0.1:443/apis/storage.k8s.io/v1/volumeattachments": dial tcp 10.233.0.1:443: connect: connection refused
E0307 07:08:14.994744 26692 driver.go:196] healer had failures, err Get "https://10.233.0.1:443/apis/storage.k8s.io/v1/volumeattachments": dial tcp 10.233.0.1:443: connect: connection refused
END logs from pod: csi-rbdplugin-jjhmz csi-rbdplugin
BEGIN logs from pod: csi-rbdplugin-jjhmz liveness-prometheus
END logs from pod: csi-rbdplugin-jjhmz liveness-prometheus
BEGIN logs from pod: csi-rbdplugin-nt6sn driver-registrar
I0307 07:08:16.849170 65088 main.go:166] Version: v2.5.0
I0307 07:08:16.849220 65088 main.go:167] Running node-driver-registrar in mode=registration
I0307 07:08:20.050008 65088 node_register.go:53] Starting Registration Server at: /registration/rook-ceph.rbd.csi.ceph.com-reg.sock
I0307 07:08:20.050181 65088 node_register.go:62] Registration Server started at: /registration/rook-ceph.rbd.csi.ceph.com-reg.sock
I0307 07:08:20.051100 65088 node_register.go:92] Skipping HTTP server because endpoint is set to: ""
I0307 07:08:20.856343 65088 main.go:102] Received GetInfo call: &InfoRequest{}
I0307 07:08:20.857318 65088 main.go:109] "Kubelet registration probe created" path="/var/lib/kubelet/plugins/rook-ceph.rbd.csi.ceph.com/registration"
I0307 07:08:28.415443 65088 main.go:120] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:true,Error:,}
E0308 14:38:52.824155 65088 connection.go:132] Lost connection to unix:///csi/csi.sock.
END logs from pod: csi-rbdplugin-nt6sn driver-registrar
BEGIN logs from pod: csi-rbdplugin-nt6sn csi-rbdplugin
END logs from pod: csi-rbdplugin-nt6sn csi-rbdplugin
BEGIN logs from pod: csi-rbdplugin-nt6sn liveness-prometheus
END logs from pod: csi-rbdplugin-nt6sn liveness-prometheus
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-provisioner
I0307 07:09:07.443025 1 csi-provisioner.go:139] Version: v3.1.0
I0307 07:09:07.443086 1 csi-provisioner.go:162] Building kube configs for running in cluster...
I0307 07:09:10.662483 1 common.go:111] Probing CSI driver for readiness
I0307 07:09:10.664929 1 csi-provisioner.go:278] CSI driver does not support PUBLISH_UNPUBLISH_VOLUME, not watching VolumeAttachments
I0307 07:09:10.667596 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/rook-ceph-rbd-csi-ceph-com...
I0312 18:01:18.280342 1 leaderelection.go:258] successfully acquired lease rook-ceph/rook-ceph-rbd-csi-ceph-com
I0312 18:01:18.547188 1 volume_store.go:97] Starting save volume queue
I0312 18:01:18.549752 1 clone_controller.go:66] Starting CloningProtection controller
I0312 18:01:18.549831 1 clone_controller.go:82] Started CloningProtection controller
I0312 18:01:18.550963 1 controller.go:811] Starting provisioner controller rook-ceph.rbd.csi.ceph.com_csi-rbdplugin-provisioner-6d765b47d5-hslsf_b67855fd-ffbd-4152-a24c-fdf7135d51b0!
I0312 18:01:18.651546 1 controller.go:860] Started provisioner controller rook-ceph.rbd.csi.ceph.com_csi-rbdplugin-provisioner-6d765b47d5-hslsf_b67855fd-ffbd-4152-a24c-fdf7135d51b0!
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-provisioner
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-resizer
I0308 18:01:04.401533 1 main.go:93] Version : v1.4.0
I0308 18:01:04.473551 1 common.go:111] Probing CSI driver for readiness
I0308 18:01:05.002382 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-resizer-rook-ceph-rbd-csi-ceph-com...
I0312 18:01:08.597884 1 leaderelection.go:258] successfully acquired lease rook-ceph/external-resizer-rook-ceph-rbd-csi-ceph-com
I0312 18:01:08.638100 1 controller.go:255] Starting external resizer rook-ceph.rbd.csi.ceph.com
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-resizer
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-attacher
I0312 18:01:05.420188 1 main.go:92] Version: v3.4.0
I0312 18:01:05.521242 1 common.go:111] Probing CSI driver for readiness
I0312 18:01:05.584657 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-attacher-leader-rook-ceph-rbd-csi-ceph-com...
I0312 18:01:06.684482 1 leaderelection.go:258] successfully acquired lease rook-ceph/external-attacher-leader-rook-ceph-rbd-csi-ceph-com
I0312 18:01:06.684645 1 controller.go:128] Starting CSI attacher
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-attacher
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-snapshotter
I0312 18:01:14.141805 1 main.go:104] Version: v5.0.0-rc3-webhook
I0312 18:01:14.144998 1 common.go:111] Probing CSI driver for readiness
I0312 18:01:14.146705 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-snapshotter-leader-rook-ceph-rbd-csi-ceph-com...
I0312 18:01:14.699206 1 leaderelection.go:258] successfully acquired lease rook-ceph/external-snapshotter-leader-rook-ceph-rbd-csi-ceph-com
I0312 18:01:14.700580 1 snapshot_controller_base.go:115] Starting CSI snapshotter
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-snapshotter
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-rbdplugin
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf csi-rbdplugin
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf liveness-prometheus
E0311 15:00:14.267396 1 liveness.go:47] health check failed: rpc error: code = DeadlineExceeded desc = context deadline exceeded
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-hslsf liveness-prometheus
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-provisioner
I0312 18:01:19.486431 1 csi-provisioner.go:139] Version: v3.1.0
I0312 18:01:19.493906 1 csi-provisioner.go:162] Building kube configs for running in cluster...
I0312 18:01:19.517529 1 common.go:111] Probing CSI driver for readiness
I0312 18:01:19.541255 1 csi-provisioner.go:278] CSI driver does not support PUBLISH_UNPUBLISH_VOLUME, not watching VolumeAttachments
I0312 18:01:19.542739 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/rook-ceph-rbd-csi-ceph-com...
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-provisioner
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-resizer
I0312 18:01:22.521157 1 main.go:93] Version : v1.4.0
I0312 18:01:22.785445 1 common.go:111] Probing CSI driver for readiness
I0312 18:01:22.810998 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-resizer-rook-ceph-rbd-csi-ceph-com...
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-resizer
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-attacher
I0310 20:00:48.459680 1 main.go:92] Version: v3.4.0
I0310 20:00:48.709482 1 common.go:111] Probing CSI driver for readiness
I0310 20:00:48.728309 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-attacher-leader-rook-ceph-rbd-csi-ceph-com...
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-attacher
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-snapshotter
I0311 16:00:49.947899 1 main.go:104] Version: v5.0.0-rc3-webhook
I0311 16:00:49.989072 1 common.go:111] Probing CSI driver for readiness
I0311 16:00:49.991388 1 leaderelection.go:248] attempting to acquire leader lease rook-ceph/external-snapshotter-leader-rook-ceph-rbd-csi-ceph-com...
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-snapshotter
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-rbdplugin
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp csi-rbdplugin
BEGIN logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp liveness-prometheus
END logs from pod: csi-rbdplugin-provisioner-6d765b47d5-vjwcp liveness-prometheus
BEGIN logs from pod: csi-rbdplugin-zzm7m driver-registrar
I0307 07:08:17.583384 51937 main.go:166] Version: v2.5.0
I0307 07:08:17.583419 51937 main.go:167] Running node-driver-registrar in mode=registration
I0307 07:08:19.613138 51937 node_register.go:53] Starting Registration Server at: /registration/rook-ceph.rbd.csi.ceph.com-reg.sock
I0307 07:08:19.613251 51937 node_register.go:62] Registration Server started at: /registration/rook-ceph.rbd.csi.ceph.com-reg.sock
I0307 07:08:19.613396 51937 node_register.go:92] Skipping HTTP server because endpoint is set to: ""
I0307 07:08:20.518637 51937 main.go:102] Received GetInfo call: &InfoRequest{}
I0307 07:08:20.518864 51937 main.go:109] "Kubelet registration probe created" path="/var/lib/kubelet/plugins/rook-ceph.rbd.csi.ceph.com/registration"
I0307 07:08:28.399857 51937 main.go:120] Received NotifyRegistrationStatus call: &RegistrationStatus{PluginRegistered:true,Error:,}
END logs from pod: csi-rbdplugin-zzm7m driver-registrar
BEGIN logs from pod: csi-rbdplugin-zzm7m csi-rbdplugin
END logs from pod: csi-rbdplugin-zzm7m csi-rbdplugin
BEGIN logs from pod: csi-rbdplugin-zzm7m liveness-prometheus
END logs from pod: csi-rbdplugin-zzm7m liveness-prometheus
BEGIN logs from pod: rook-ceph-crashcollector-node1-54b78567c5-fhj2p ceph-crash
INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s
END logs from pod: rook-ceph-crashcollector-node1-54b78567c5-fhj2p ceph-crash
BEGIN logs from pod: rook-ceph-crashcollector-node2-69d4577554-dmwhw ceph-crash
INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s
END logs from pod: rook-ceph-crashcollector-node2-69d4577554-dmwhw ceph-crash
BEGIN logs from pod: rook-ceph-crashcollector-node3-f4758c5df-7wxgj ceph-crash
INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s
END logs from pod: rook-ceph-crashcollector-node3-f4758c5df-7wxgj ceph-crash
BEGIN logs from pod: rook-ceph-mgr-a-b557c664-92bpn mgr
debug 2022-03-13T01:18:40.455+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083637 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:40.455+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:40.835+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4089: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.6 MiB/s rd, 2.9 MiB/s wr, 114 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:42.095+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083661 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:42.095+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:42.835+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4090: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.2 MiB/s rd, 3.5 MiB/s wr, 162 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:43.147+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:18:44.043+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083685 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:44.043+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:44.835+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4091: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.6 MiB/s rd, 1.6 MiB/s wr, 121 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:45.695+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083709 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:45.695+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:46.839+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4092: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.6 MiB/s rd, 1.6 MiB/s wr, 121 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:47.387+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083733 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:47.387+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:48.183+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:18:48.859+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4093: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.1 MiB/s rd, 4.7 MiB/s wr, 183 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:49.099+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083757 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:49.099+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:50.791+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083781 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:50.791+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:50.895+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4094: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.1 MiB/s rd, 3.6 MiB/s wr, 108 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:52.567+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5857712 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:52.567+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:52.899+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4095: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.6 MiB/s rd, 4.9 MiB/s wr, 156 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:53.207+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:18:54.387+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083823 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:54.387+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:54.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4096: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.0 MiB/s rd, 4.3 MiB/s wr, 108 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:56.143+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083847 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:56.143+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:56.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4097: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.6 MiB/s rd, 5.1 MiB/s wr, 138 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:57.879+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083868 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:57.879+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:18:58.207+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:18:58.931+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4098: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.0 MiB/s rd, 6.0 MiB/s wr, 173 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:18:59.475+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083892 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:18:59.475+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:00.935+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4099: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.5 MiB/s rd, 3.0 MiB/s wr, 112 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:01.079+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083916 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:01.079+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:01.559+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:19:01
debug 2022-03-13T01:19:01.559+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:19:01.559+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:19:02.935+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4100: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.0 MiB/s rd, 4.3 MiB/s wr, 166 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:02.987+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083961 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:02.987+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:03.211+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:03.643+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:19:03.659+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:19:03.659+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:19:03.659+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:19:03.659+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:19:03.955+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:19:03.955+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:19:04.675+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6083982 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:04.675+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:04.939+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4101: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.5 MiB/s rd, 3.1 MiB/s wr, 118 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:04.999+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:19:04.999+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:19:06.259+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084006 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:06.259+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:06.943+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4102: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.7 MiB/s rd, 3.7 MiB/s wr, 127 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:08.023+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084027 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:08.023+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:08.215+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:08.975+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4103: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.3 MiB/s rd, 3.7 MiB/s wr, 108 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:09.663+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:19:09.759+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084045 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:09.763+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:09.911+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:19:11.023+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4104: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.9 MiB/s rd, 2.8 MiB/s wr, 73 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:11.695+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084063 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:11.695+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:12.783+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:19:12.799+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:19:13.027+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4105: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.9 MiB/s rd, 4.3 MiB/s wr, 140 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:13.235+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:13.399+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084087 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:13.399+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:15.055+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4106: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.4 MiB/s rd, 3.0 MiB/s wr, 87 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:15.295+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084111 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:15.295+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:16.907+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084135 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:16.911+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:17.067+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4107: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.3 MiB/s rd, 5.8 MiB/s wr, 179 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:18.235+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:18.771+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084171 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:18.771+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:19.063+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4108: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.1 MiB/s rd, 5.3 MiB/s wr, 170 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:20.395+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084195 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:20.395+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:21.079+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4109: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.9 MiB/s rd, 4.4 MiB/s wr, 160 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:22.275+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084219 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:22.275+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:23.079+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4110: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.7 MiB/s rd, 6.8 MiB/s wr, 225 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:23.239+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:23.947+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084243 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:23.947+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:25.087+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4111: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.7 MiB/s rd, 5.2 MiB/s wr, 158 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:25.623+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084267 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:25.623+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:27.091+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4112: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.2 MiB/s rd, 5.9 MiB/s wr, 202 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:27.303+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084291 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:27.303+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:28.247+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:29.087+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4113: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.3 MiB/s rd, 3.0 MiB/s wr, 109 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:29.107+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084315 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:29.107+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:30.787+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084339 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:30.787+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:31.095+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4114: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.3 MiB/s rd, 3.0 MiB/s wr, 109 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:32.435+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084363 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:32.435+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:33.095+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4115: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.9 MiB/s rd, 6.2 MiB/s wr, 181 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:33.251+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:33.659+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:19:33.659+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:19:33.955+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:19:33.955+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:19:34.015+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084390 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:34.015+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:35.055+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:19:35.055+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:19:35.103+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4116: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.1 MiB/s rd, 3.8 MiB/s wr, 116 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:35.727+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084411 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:35.727+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:37.091+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4117: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.1 MiB/s rd, 6.6 MiB/s wr, 168 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:37.423+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084435 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:37.423+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:38.251+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:39.099+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084459 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:39.099+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:39.103+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4118: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.6 MiB/s rd, 5.9 MiB/s wr, 124 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:40.767+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084486 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:40.767+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:41.175+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4119: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.6 MiB/s rd, 5.9 MiB/s wr, 123 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:42.455+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084510 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:42.455+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:43.175+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4120: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.6 MiB/s rd, 7.1 MiB/s wr, 186 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:43.283+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:44.203+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084531 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:44.203+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:45.175+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4121: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.0 MiB/s rd, 3.9 MiB/s wr, 115 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:45.879+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084555 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:45.879+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:47.179+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4122: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.5 MiB/s rd, 6.5 MiB/s wr, 196 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:47.607+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084585 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:47.607+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:48.295+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:49.175+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4123: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.5 MiB/s rd, 3.8 MiB/s wr, 144 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:49.283+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084609 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:49.283+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:50.911+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084633 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:50.911+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:51.187+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4124: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.5 MiB/s rd, 3.8 MiB/s wr, 144 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:52.707+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084657 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:52.707+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:53.191+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4125: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.2 MiB/s rd, 6.4 MiB/s wr, 185 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:53.331+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:54.479+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084681 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:54.479+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:55.191+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4126: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.2 MiB/s rd, 5.3 MiB/s wr, 121 op/s; 13806/41418 objects degraded (33.333%)
debug 2022-03-13T01:19:56.419+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084705 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:56.419+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:57.195+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4127: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.9 MiB/s rd, 7.5 MiB/s wr, 220 op/s; 13811/41433 objects degraded (33.333%)
debug 2022-03-13T01:19:58.075+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084729 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:58.075+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:19:58.331+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:19:59.195+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4128: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.3 MiB/s rd, 4.9 MiB/s wr, 139 op/s; 13811/41433 objects degraded (33.333%)
debug 2022-03-13T01:19:59.731+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084753 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:19:59.731+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:01.195+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4129: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.3 MiB/s rd, 4.9 MiB/s wr, 139 op/s; 13811/41433 objects degraded (33.333%)
debug 2022-03-13T01:20:01.307+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084777 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:01.307+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:01.663+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:20:01
debug 2022-03-13T01:20:01.663+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:20:01.663+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:20:03.051+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084807 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:03.051+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:03.199+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4130: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.0 MiB/s rd, 5.3 MiB/s wr, 180 op/s; 13811/41433 objects degraded (33.333%)
debug 2022-03-13T01:20:03.335+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:03.675+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:20:03.675+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:20:03.675+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:20:03.679+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:20:03.679+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:20:03.951+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:20:03.951+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:20:04.735+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084843 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:04.735+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:05.055+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:20:05.055+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:20:05.203+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4131: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.4 MiB/s rd, 2.7 MiB/s wr, 140 op/s; 13811/41433 objects degraded (33.333%)
debug 2022-03-13T01:20:06.467+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084867 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:06.467+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:07.207+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4132: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 8.8 MiB/s rd, 7.7 MiB/s wr, 262 op/s; 13813/41439 objects degraded (33.333%)
debug 2022-03-13T01:20:08.235+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084891 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:08.235+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:08.335+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:09.207+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4133: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.1 MiB/s rd, 5.4 MiB/s wr, 162 op/s; 13813/41439 objects degraded (33.333%)
debug 2022-03-13T01:20:09.851+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084915 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:09.851+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:09.919+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:20:09.931+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:20:11.211+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4134: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.1 MiB/s rd, 5.4 MiB/s wr, 162 op/s; 13813/41439 objects degraded (33.333%)
debug 2022-03-13T01:20:11.519+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084939 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:11.519+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:12.803+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:20:12.811+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:20:13.211+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4135: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.6 MiB/s rd, 6.0 MiB/s wr, 206 op/s; 13813/41439 objects degraded (33.333%)
debug 2022-03-13T01:20:13.275+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084963 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:13.275+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:13.339+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:14.887+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6084987 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:14.887+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:15.219+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4136: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.9 MiB/s rd, 5.5 MiB/s wr, 165 op/s; 13813/41439 objects degraded (33.333%)
debug 2022-03-13T01:20:16.567+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085011 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:16.567+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:17.219+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4137: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 8.0 MiB/s rd, 9.0 MiB/s wr, 229 op/s; 13813/41439 objects degraded (33.333%)
debug 2022-03-13T01:20:18.267+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085035 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:18.267+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:18.343+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:19.235+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4138: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.6 MiB/s rd, 4.0 MiB/s wr, 107 op/s; 13813/41439 objects degraded (33.333%)
debug 2022-03-13T01:20:19.955+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085068 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:19.959+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:21.235+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4139: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.6 MiB/s rd, 4.0 MiB/s wr, 107 op/s; 13813/41439 objects degraded (33.333%)
debug 2022-03-13T01:20:21.651+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085092 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:21.651+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:23.239+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4140: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.6 MiB/s rd, 5.9 MiB/s wr, 189 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:23.347+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:23.415+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085116 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:23.415+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:25.235+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4141: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.0 MiB/s rd, 5.4 MiB/s wr, 145 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:25.243+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085140 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:25.243+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:26.991+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085164 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:26.991+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:27.243+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4142: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 8.4 MiB/s rd, 8.7 MiB/s wr, 220 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:28.355+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:28.647+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085188 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:28.647+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:29.243+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4143: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.3 MiB/s rd, 5.2 MiB/s wr, 156 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:30.259+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085212 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:30.259+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:31.247+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4144: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.3 MiB/s rd, 5.2 MiB/s wr, 156 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:31.935+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085236 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:31.935+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:33.247+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4145: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.0 MiB/s rd, 6.0 MiB/s wr, 207 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:33.355+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:33.675+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:20:33.675+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:20:33.811+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5858306 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:33.811+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:33.951+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:20:33.951+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:20:35.059+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:20:35.059+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:20:35.247+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4146: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.1 MiB/s rd, 4.0 MiB/s wr, 125 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:35.519+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085290 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:35.519+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:37.219+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085314 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:37.219+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:37.247+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4147: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.2 MiB/s rd, 4.9 MiB/s wr, 165 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:38.359+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:39.003+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085335 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:39.015+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:39.251+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4148: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.9 MiB/s rd, 1.6 MiB/s wr, 91 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:40.819+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085359 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:40.819+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:41.251+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4149: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.9 MiB/s rd, 1.6 MiB/s wr, 91 op/s; 13814/41442 objects degraded (33.333%)
debug 2022-03-13T01:20:42.527+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085383 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:42.527+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:43.255+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4150: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.5 MiB/s rd, 5.7 MiB/s wr, 204 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:20:43.363+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:44.167+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085407 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:44.167+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:45.255+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4151: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.7 MiB/s rd, 5.0 MiB/s wr, 154 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:20:45.899+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085431 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:45.903+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:47.259+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4152: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.5 MiB/s rd, 6.6 MiB/s wr, 209 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:20:47.643+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085455 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:47.643+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:48.367+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:49.259+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4153: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.4 MiB/s rd, 5.7 MiB/s wr, 168 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:20:49.335+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085479 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:49.335+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:50.987+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085500 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:50.987+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:51.259+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4154: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.4 MiB/s rd, 5.7 MiB/s wr, 168 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:20:52.687+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085524 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:52.687+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:53.263+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4155: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 8.2 MiB/s rd, 6.5 MiB/s wr, 228 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:20:53.371+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:54.431+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085545 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:54.431+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:55.263+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4156: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.6 MiB/s rd, 2.4 MiB/s wr, 114 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:20:56.179+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085569 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:56.179+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:57.267+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4157: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.4 MiB/s rd, 5.4 MiB/s wr, 210 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:20:57.911+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085590 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:57.911+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:20:58.371+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:20:59.267+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4158: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.6 MiB/s rd, 3.8 MiB/s wr, 154 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:20:59.575+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085614 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:20:59.575+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:01.159+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5858462 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:01.159+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:01.271+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4159: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.6 MiB/s rd, 3.8 MiB/s wr, 154 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:01.695+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:21:01
debug 2022-03-13T01:21:01.695+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:21:01.695+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:21:02.887+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085659 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:02.891+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:03.283+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4160: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.3 MiB/s rd, 5.9 MiB/s wr, 208 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:03.639+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:03.687+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:21:03.687+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:21:03.687+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:21:03.691+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:21:03.691+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:21:03.963+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:21:03.963+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:21:04.939+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085698 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:04.939+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:05.075+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:21:05.075+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:21:05.291+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4161: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.5 MiB/s rd, 5.1 MiB/s wr, 148 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:06.731+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085722 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:06.731+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:07.279+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4162: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 10 MiB/s rd, 9.5 MiB/s wr, 264 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:08.495+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085746 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:08.495+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:08.663+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:09.295+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4163: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.6 MiB/s rd, 6.4 MiB/s wr, 169 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:09.955+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:21:09.971+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:21:10.135+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085770 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:10.135+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:11.299+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4164: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.6 MiB/s rd, 6.4 MiB/s wr, 169 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:12.091+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085794 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:12.091+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:12.819+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:21:12.843+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:21:13.303+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4165: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.0 MiB/s rd, 7.9 MiB/s wr, 185 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:13.667+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:13.799+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085818 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:13.799+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:15.343+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4166: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.2 MiB/s rd, 5.8 MiB/s wr, 131 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:15.623+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085842 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:15.623+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:17.351+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4167: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 8.9 MiB/s rd, 8.7 MiB/s wr, 227 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:17.411+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085866 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:17.411+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:18.719+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:19.103+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085896 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:19.103+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:19.351+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4168: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.1 MiB/s rd, 4.3 MiB/s wr, 111 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:21.171+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085926 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:21.171+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:21.355+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4169: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.1 MiB/s rd, 4.3 MiB/s wr, 111 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:22.831+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6085956 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:22.831+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:23.355+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4170: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.1 MiB/s rd, 6.3 MiB/s wr, 160 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:23.719+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:24.827+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5858588 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:24.827+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:25.359+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4171: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.7 MiB/s rd, 4.9 MiB/s wr, 144 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:26.547+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086004 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:26.547+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:27.359+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4172: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.7 MiB/s rd, 6.3 MiB/s wr, 225 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:28.407+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5858621 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:28.407+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:28.723+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:29.359+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4173: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.0 MiB/s rd, 3.4 MiB/s wr, 129 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:30.063+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086052 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:30.063+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:31.375+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4174: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.0 MiB/s rd, 3.4 MiB/s wr, 129 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:31.699+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086073 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:31.699+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:33.299+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086097 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:33.299+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:33.383+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4175: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.0 MiB/s rd, 6.3 MiB/s wr, 233 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:33.687+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:21:33.687+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:21:33.723+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:33.955+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:21:33.955+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:21:34.967+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086121 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:34.967+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:35.083+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:21:35.083+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:21:35.383+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4176: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.0 MiB/s rd, 4.3 MiB/s wr, 184 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:36.587+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086151 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:36.587+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:37.391+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4177: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.5 MiB/s rd, 6.7 MiB/s wr, 246 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:38.347+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086175 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:38.347+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:38.727+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:39.519+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4178: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.5 MiB/s rd, 5.3 MiB/s wr, 165 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:39.951+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086199 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:39.951+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:41.555+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4179: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.4 MiB/s rd, 5.2 MiB/s wr, 163 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:43.151+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086223 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:43.151+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:43.559+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4180: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.4 MiB/s rd, 7.3 MiB/s wr, 265 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:43.731+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:44.807+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086247 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:44.807+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:45.559+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4181: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.4 MiB/s rd, 4.5 MiB/s wr, 161 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:46.447+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086271 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:46.447+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:47.559+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4182: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 9.1 MiB/s rd, 8.2 MiB/s wr, 260 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:48.107+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086295 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:48.107+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:48.739+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:49.575+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4183: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.7 MiB/s rd, 5.8 MiB/s wr, 200 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:49.771+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086319 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:49.771+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:51.371+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086343 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:51.371+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:51.579+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4184: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.7 MiB/s rd, 5.8 MiB/s wr, 200 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:53.099+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086367 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:53.099+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:53.579+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4185: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 9.4 MiB/s rd, 6.5 MiB/s wr, 248 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:53.759+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:54.879+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086391 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:54.879+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:55.583+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4186: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.4 MiB/s rd, 4.5 MiB/s wr, 146 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:56.527+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086415 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:56.527+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:57.615+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4187: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.9 MiB/s rd, 7.0 MiB/s wr, 215 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:58.427+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5858813 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:58.427+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:21:58.763+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:21:59.615+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4188: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.1 MiB/s rd, 3.2 MiB/s wr, 114 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:21:59.995+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086457 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:21:59.995+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:01.367+0000 7fc22987b700 0 [devicehealth INFO root] Check health
debug 2022-03-13T01:22:01.567+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086481 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:01.567+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:01.623+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4189: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.1 MiB/s rd, 3.2 MiB/s wr, 114 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:01.699+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:22:01
debug 2022-03-13T01:22:01.699+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:22:01.699+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:22:03.259+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086505 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:03.259+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:03.623+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4190: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.6 MiB/s rd, 4.3 MiB/s wr, 132 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:03.695+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:22:03.699+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:22:03.699+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:22:03.711+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:22:03.711+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:22:03.771+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:03.987+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:22:03.991+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:22:05.095+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:22:05.095+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:22:05.395+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086541 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:05.395+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:05.663+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4191: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.9 MiB/s rd, 3.6 MiB/s wr, 86 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:07.099+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086577 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:07.099+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:07.671+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4192: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.8 MiB/s rd, 5.6 MiB/s wr, 185 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:08.771+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:09.215+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086601 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:09.215+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:09.667+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4193: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.2 MiB/s rd, 3.1 MiB/s wr, 116 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:09.999+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:22:10.019+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:22:10.979+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086619 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:10.979+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:11.699+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4194: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.2 MiB/s rd, 3.1 MiB/s wr, 116 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:12.859+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:22:12.887+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086643 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:12.887+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:12.927+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:22:13.703+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4195: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.1 MiB/s rd, 5.4 MiB/s wr, 206 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:13.775+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:14.667+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086667 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:14.667+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:15.703+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4196: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.7 MiB/s rd, 4.4 MiB/s wr, 188 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:16.319+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086691 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:16.319+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:17.707+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4197: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 8.6 MiB/s rd, 5.6 MiB/s wr, 242 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:18.079+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086715 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:18.079+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:18.763+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:19.707+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4198: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.8 MiB/s rd, 3.6 MiB/s wr, 143 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:20.055+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086739 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:20.055+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:21.711+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4199: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 5.8 MiB/s rd, 3.6 MiB/s wr, 144 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:21.735+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5858951 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:21.735+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:23.483+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086799 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:23.483+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:23.719+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4200: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 7.9 MiB/s rd, 6.9 MiB/s wr, 210 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:23.955+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:25.187+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086823 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:25.187+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:25.723+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4201: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.0 MiB/s rd, 4.5 MiB/s wr, 119 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:26.779+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086847 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:26.779+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:27.727+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4202: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 8.7 MiB/s rd, 8.0 MiB/s wr, 224 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:28.699+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086871 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:28.699+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:28.959+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:29.727+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4203: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.8 MiB/s rd, 6.8 MiB/s wr, 170 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:30.315+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086895 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:30.315+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:31.727+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4204: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.8 MiB/s rd, 6.8 MiB/s wr, 170 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:31.991+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086919 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:31.991+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:33.663+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086943 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:33.663+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:33.711+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:22:33.711+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:22:33.731+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4205: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 8.0 MiB/s rd, 8.3 MiB/s wr, 208 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:33.959+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:33.991+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:22:33.991+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:22:35.095+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:22:35.095+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:22:35.335+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086967 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:35.335+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:35.735+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4206: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 6.0 MiB/s rd, 5.0 MiB/s wr, 142 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:37.007+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6086997 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:37.007+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:37.739+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4207: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 8.2 MiB/s rd, 6.8 MiB/s wr, 208 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:38.703+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087021 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:38.703+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:38.967+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:39.743+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4208: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.5 MiB/s rd, 3.2 MiB/s wr, 103 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:40.367+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087045 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:40.367+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:41.755+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4209: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.5 MiB/s rd, 3.2 MiB/s wr, 103 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:42.151+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087069 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:42.151+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:43.759+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4210: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 4.2 MiB/s rd, 3.5 MiB/s wr, 128 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:43.847+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087093 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:43.847+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:43.971+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:45.635+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087117 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:45.635+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:45.759+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4211: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 3.0 MiB/s rd, 2.0 MiB/s wr, 90 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:47.615+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087138 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:47.615+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:47.763+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4212: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 7.2 MiB/s rd, 7.0 MiB/s wr, 185 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:48.967+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:49.255+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087162 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:49.255+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:49.763+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4213: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 4.9 MiB/s rd, 5.3 MiB/s wr, 119 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:50.935+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087192 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:50.935+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:51.767+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4214: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 4.9 MiB/s rd, 5.3 MiB/s wr, 119 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:52.543+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087216 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:52.543+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:53.791+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4215: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 5.8 MiB/s rd, 6.5 MiB/s wr, 169 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:53.975+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:54.191+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087240 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:54.191+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:55.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4216: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 5.1 MiB/s rd, 6.2 MiB/s wr, 145 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:55.939+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087264 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:55.939+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:57.619+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087285 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:57.619+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:57.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4217: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 6.1 MiB/s rd, 7.3 MiB/s wr, 187 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:22:58.979+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:22:59.327+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087309 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:22:59.327+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:22:59.803+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4218: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.9 MiB/s rd, 2.3 MiB/s wr, 92 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:01.011+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087333 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:01.011+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:01.703+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:23:01
debug 2022-03-13T01:23:01.703+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:23:01.703+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
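As the three balancer lines above show, the upmap balancer skips its optimization pass while a third of the objects are degraded and will only resume producing plans once recovery completes. A hedged way to confirm this from the same assumed toolbox deployment:

# Shows the active balancer mode and whether an optimization plan is currently being executed.
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph balancer status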
debug 2022-03-13T01:23:01.803+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4219: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.9 MiB/s rd, 2.3 MiB/s wr, 92 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:02.879+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087357 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:02.879+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:03.719+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:23:03.735+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:23:03.739+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
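The pg_autoscaler pass above evaluates an effective target ratio of 0.0 against the 644245094400-byte (600 GiB) raw capacity and makes no changes. Its per-pool view can be inspected the same way, assuming the toolbox deployment used in the earlier sketch:

# Lists each pool's size, target ratio and current vs. suggested PG count as seen by the autoscaler.
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd pool autoscale-status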
debug 2022-03-13T01:23:03.739+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:23:03.739+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:23:03.807+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4220: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 3.7 MiB/s rd, 4.5 MiB/s wr, 145 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:03.983+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:03.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:23:03.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:23:04.491+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087381 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:04.491+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:05.115+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:23:05.115+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:23:05.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4221: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 2.8 MiB/s rd, 3.3 MiB/s wr, 95 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:06.255+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087411 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:06.255+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:07.811+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4222: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 3.7 MiB/s rd, 4.1 MiB/s wr, 135 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:08.095+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087444 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:08.095+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:08.991+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:09.735+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087468 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:09.735+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:09.819+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4223: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 2.7 MiB/s rd, 3.0 MiB/s wr, 93 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:10.019+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:23:10.031+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:23:11.567+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087489 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:11.567+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:11.823+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4224: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 2.7 MiB/s rd, 3.0 MiB/s wr, 93 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:12.951+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:23:12.971+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:23:13.159+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087513 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:13.159+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:13.823+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4225: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 4.2 MiB/s rd, 4.9 MiB/s wr, 136 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:13.991+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:14.775+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087537 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:14.775+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:15.823+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4226: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 2.4 MiB/s rd, 2.7 MiB/s wr, 83 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:16.419+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087561 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:16.419+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:17.823+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4227: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 2.6 MiB/s rd, 3.4 MiB/s wr, 116 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:18.151+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087585 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:18.151+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:18.991+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:19.827+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4228: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.6 MiB/s wr, 76 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:19.935+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087609 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:19.935+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:21.775+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087633 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:21.775+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:21.827+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4229: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.6 MiB/s wr, 76 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:23.451+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087663 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:23.451+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:23.827+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4230: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 4.5 MiB/s rd, 4.2 MiB/s wr, 159 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:24.007+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:25.043+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087693 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:25.043+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:25.827+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4231: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 3.1 MiB/s rd, 2.2 MiB/s wr, 116 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:26.655+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087717 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:26.655+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:27.831+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4232: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 3.4 MiB/s rd, 3.1 MiB/s wr, 155 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:28.303+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087741 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:28.303+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:29.019+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:29.839+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4233: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 3.2 MiB/s rd, 2.5 MiB/s wr, 122 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:30.071+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087765 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:30.071+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:31.839+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4234: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 3.2 MiB/s rd, 2.5 MiB/s wr, 122 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:31.907+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087789 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:31.907+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:33.519+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087813 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:33.519+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:33.739+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:23:33.739+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:23:33.839+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4235: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 4.4 MiB/s rd, 4.2 MiB/s wr, 175 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:33.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:23:33.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:23:34.023+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:35.099+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5859371 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:35.099+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:35.115+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:23:35.115+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:23:35.839+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4236: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.6 MiB/s wr, 92 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:36.747+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087864 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:36.747+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:37.835+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4237: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.7 MiB/s rd, 3.4 MiB/s wr, 125 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:38.807+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087894 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:38.807+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:39.023+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:39.843+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4238: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.5 MiB/s wr, 85 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:40.431+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087918 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:40.431+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:41.847+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4239: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.5 MiB/s wr, 86 op/s; 13816/41448 objects degraded (33.333%)
debug 2022-03-13T01:23:42.099+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087942 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:42.099+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:43.811+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087966 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:43.811+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:43.847+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4240: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 4.0 MiB/s rd, 4.6 MiB/s wr, 158 op/s; 13818/41454 objects degraded (33.333%)
debug 2022-03-13T01:23:44.027+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:45.499+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6087990 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:45.499+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:45.847+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4241: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 2.8 MiB/s rd, 2.9 MiB/s wr, 105 op/s; 13818/41454 objects degraded (33.333%)
debug 2022-03-13T01:23:47.115+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088014 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:47.115+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:47.847+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4242: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 5.1 MiB/s rd, 5.0 MiB/s wr, 167 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:23:48.819+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088038 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:48.819+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:49.035+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:49.851+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4243: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 4.9 MiB/s rd, 4.1 MiB/s wr, 134 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:23:50.423+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088062 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:50.423+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:51.859+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4244: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 4.9 MiB/s rd, 4.1 MiB/s wr, 134 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:23:52.035+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088086 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:52.035+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:53.659+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5859461 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:53.659+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:53.879+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4245: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 5.3 MiB/s rd, 4.4 MiB/s wr, 159 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:23:54.043+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:55.279+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088131 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:55.279+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:55.891+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4246: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 2.7 MiB/s rd, 2.3 MiB/s wr, 86 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:23:56.935+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088155 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:56.935+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:57.895+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4247: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 2.8 MiB/s rd, 2.6 MiB/s wr, 110 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:23:58.675+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088179 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:23:58.675+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:23:59.043+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:23:59.899+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4248: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 519 KiB/s rd, 570 KiB/s wr, 48 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:00.323+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088203 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:00.323+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:01.707+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:24:01
debug 2022-03-13T01:24:01.707+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:24:01.707+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:24:01.899+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4249: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 519 KiB/s rd, 570 KiB/s wr, 48 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:01.975+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088227 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:01.975+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:03.739+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:24:03.751+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:24:03.751+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:24:03.767+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:24:03.767+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:24:03.879+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088251 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:03.879+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:03.931+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4250: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 548 KiB/s rd, 801 KiB/s wr, 67 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:03.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:24:03.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:24:04.043+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:05.155+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:24:05.155+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:24:05.515+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088275 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:05.515+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:05.951+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4251: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 136 KiB/s rd, 499 KiB/s wr, 42 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:07.299+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088302 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:07.299+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:07.951+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4252: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 151 KiB/s rd, 816 KiB/s wr, 67 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:09.079+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:09.191+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088335 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:09.191+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:09.955+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4253: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 44 KiB/s rd, 549 KiB/s wr, 43 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:10.039+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:24:10.047+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:24:10.823+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088359 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:10.823+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:11.959+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4254: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 44 KiB/s rd, 754 KiB/s wr, 54 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:12.611+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088380 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:12.611+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:12.983+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:24:13.003+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:24:13.963+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4255: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 46 KiB/s rd, 787 KiB/s wr, 59 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:14.079+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:14.343+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088404 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:14.343+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:15.963+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4256: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 16 KiB/s rd, 556 KiB/s wr, 40 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:16.055+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088428 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:16.055+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:17.715+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088452 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:17.715+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:17.971+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4257: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 43 KiB/s rd, 1.0 MiB/s wr, 68 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:19.083+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:19.335+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088476 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:19.335+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:19.975+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4258: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 29 KiB/s rd, 722 KiB/s wr, 43 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:21.175+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088503 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:21.175+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:21.975+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4259: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 29 KiB/s rd, 958 KiB/s wr, 56 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:22.935+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088527 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:22.935+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:24.023+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4260: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 30 KiB/s rd, 817 KiB/s wr, 53 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:24.087+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:24.611+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088551 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:24.611+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:26.023+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4261: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 28 KiB/s rd, 782 KiB/s wr, 47 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:26.231+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088587 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:26.231+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:27.995+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088611 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:27.999+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:28.099+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4262: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 30 KiB/s rd, 1.4 MiB/s wr, 78 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:29.091+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:29.707+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088635 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:29.707+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:30.099+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4263: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 3.3 KiB/s rd, 984 KiB/s wr, 51 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:31.339+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088659 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:31.339+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:32.099+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4264: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 3.6 KiB/s rd, 1.1 MiB/s wr, 60 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:32.983+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088683 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:32.983+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:33.755+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:24:33.755+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:24:33.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:24:33.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:24:34.095+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4265: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 4.0 KiB/s rd, 968 KiB/s wr, 55 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:34.095+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:34.635+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088707 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:34.635+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:35.155+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:24:35.155+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:24:36.107+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4266: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 3.3 KiB/s rd, 904 KiB/s wr, 47 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:36.287+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088728 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:36.287+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:37.967+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088752 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:37.967+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:38.111+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4267: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 26 KiB/s rd, 1.2 MiB/s wr, 68 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:39.111+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:39.579+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088782 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:39.579+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:40.111+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4268: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 24 KiB/s rd, 537 KiB/s wr, 37 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:41.207+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088806 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:41.207+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:42.147+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4269: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 30 KiB/s rd, 607 KiB/s wr, 45 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:42.827+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088830 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:42.827+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:44.111+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:44.151+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4270: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 37 KiB/s rd, 699 KiB/s wr, 46 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:44.475+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088854 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:44.475+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:46.155+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4271: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 37 KiB/s rd, 594 KiB/s wr, 39 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:46.195+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088878 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:46.195+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:47.979+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088902 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:47.979+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:48.159+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4272: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 42 KiB/s rd, 902 KiB/s wr, 60 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:49.115+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:49.723+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088926 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:49.723+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:50.163+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4273: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 19 KiB/s rd, 586 KiB/s wr, 39 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:51.323+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088950 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:51.323+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:52.167+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4274: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 30 KiB/s rd, 854 KiB/s wr, 59 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:53.083+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5859845 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:53.083+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:54.135+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:54.167+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4275: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 24 KiB/s rd, 785 KiB/s wr, 50 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:54.695+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6088995 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:54.695+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:56.179+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4276: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 17 KiB/s rd, 578 KiB/s wr, 40 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:56.371+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089019 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:56.371+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:58.071+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089043 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:58.071+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:24:58.195+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4277: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 26 KiB/s rd, 847 KiB/s wr, 62 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:24:59.143+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:24:59.723+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089067 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:24:59.723+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:00.199+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4278: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 20 KiB/s rd, 538 KiB/s wr, 41 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:01.339+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089091 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:01.339+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:01.707+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:25:01
debug 2022-03-13T01:25:01.707+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:25:01.707+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:25:02.199+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4279: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 949 KiB/s rd, 3.0 MiB/s wr, 85 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:03.007+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089112 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:03.007+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:03.755+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:25:03.755+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:25:03.767+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:25:03.775+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:25:03.775+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:25:03.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:25:03.995+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:25:04.147+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:04.199+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4280: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 938 KiB/s rd, 2.8 MiB/s wr, 65 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:04.735+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089136 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:04.735+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:05.163+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:25:05.163+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:25:06.191+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4281: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 938 KiB/s rd, 2.8 MiB/s wr, 65 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:06.359+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089157 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:06.359+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:08.195+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089193 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:08.195+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:08.211+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4282: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 3.1 MiB/s wr, 99 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:09.151+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:09.951+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089226 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:09.951+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:10.051+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:25:10.059+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:25:10.215+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4283: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 2.8 MiB/s wr, 78 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:11.971+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089250 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:11.971+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:12.219+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4284: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 3.1 MiB/s wr, 95 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:13.027+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:25:13.039+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:25:13.619+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089274 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:13.619+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:14.183+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:14.219+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4285: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 187 KiB/s rd, 573 KiB/s wr, 50 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:15.431+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089295 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:15.431+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:16.219+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4286: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 187 KiB/s rd, 573 KiB/s wr, 50 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:17.035+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089319 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:17.035+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:18.223+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4287: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 206 KiB/s rd, 860 KiB/s wr, 72 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:18.811+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089343 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:18.811+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:19.191+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:20.227+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4288: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 38 KiB/s rd, 547 KiB/s wr, 39 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:20.519+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089367 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:20.519+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:22.231+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4289: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 69 KiB/s rd, 995 KiB/s wr, 62 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:22.299+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089391 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:22.299+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:23.963+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089415 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:23.963+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:24.183+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:24.239+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4290: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 50 KiB/s rd, 735 KiB/s wr, 45 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:25.567+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089439 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:25.567+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:26.243+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4291: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 50 KiB/s rd, 735 KiB/s wr, 45 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:27.191+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089475 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:27.191+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:28.247+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4292: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 113 KiB/s rd, 1.3 MiB/s wr, 77 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:28.875+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089499 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:28.875+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:29.203+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:30.251+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4293: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 94 KiB/s rd, 1016 KiB/s wr, 55 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:30.519+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089523 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:30.519+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:32.247+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4294: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 98 KiB/s rd, 1.7 MiB/s wr, 75 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:32.247+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089544 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:32.247+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:33.759+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:25:33.759+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:25:33.955+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089568 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:33.955+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:33.999+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:25:33.999+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:25:34.203+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:34.263+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4295: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 67 KiB/s rd, 1.3 MiB/s wr, 52 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:35.167+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:25:35.167+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:25:35.911+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089592 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:35.911+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:36.267+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4296: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 68 KiB/s rd, 1.3 MiB/s wr, 52 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:37.547+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089616 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:37.547+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:38.271+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4297: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 80 KiB/s rd, 1.8 MiB/s wr, 82 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:39.191+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089640 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:39.191+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:39.211+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:40.271+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4298: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 17 KiB/s rd, 1.2 MiB/s wr, 50 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:40.851+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089670 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:40.851+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:42.279+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4299: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 22 KiB/s rd, 1.5 MiB/s wr, 67 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:42.523+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089694 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:42.523+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:44.175+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089718 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:44.175+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:44.215+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:44.279+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4300: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 17 KiB/s rd, 792 KiB/s wr, 47 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:45.827+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089742 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:45.827+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:46.279+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4301: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 17 KiB/s rd, 792 KiB/s wr, 47 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:47.451+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089766 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:47.451+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:48.279+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4302: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 19 KiB/s rd, 1.1 MiB/s wr, 65 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:49.155+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089787 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:49.155+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:49.219+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:50.283+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4303: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 6.3 KiB/s rd, 550 KiB/s wr, 35 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:50.803+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089811 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:50.803+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:52.283+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4304: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 12 KiB/s rd, 780 KiB/s wr, 52 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:52.495+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089841 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:52.495+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:54.199+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089865 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:54.199+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:54.223+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:25:54.295+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4305: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 7.3 KiB/s rd, 531 KiB/s wr, 35 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:55.803+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089889 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:55.803+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:56.295+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4306: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 7.3 KiB/s rd, 531 KiB/s wr, 35 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:57.467+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089913 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:57.467+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:58.299+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4307: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 17 KiB/s rd, 820 KiB/s wr, 58 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:25:59.191+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089937 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:25:59.191+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:25:59.223+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:00.295+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4308: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 15 KiB/s rd, 519 KiB/s wr, 40 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:00.811+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089961 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:00.811+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:01.711+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:26:01
debug 2022-03-13T01:26:01.711+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:26:01.711+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:26:02.303+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4309: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 15 KiB/s rd, 776 KiB/s wr, 54 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:02.443+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6089985 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:02.443+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:03.759+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:26:03.759+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:26:03.775+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:26:03.779+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:26:03.779+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:26:04.011+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:26:04.011+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:26:04.199+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090009 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:04.199+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:04.243+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:04.303+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4310: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 10 KiB/s rd, 546 KiB/s wr, 37 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:05.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:26:05.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:26:06.311+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4311: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 10 KiB/s rd, 546 KiB/s wr, 37 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:06.343+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090033 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:06.343+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:08.055+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090057 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:08.055+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:08.311+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4312: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 64 KiB/s rd, 1.3 MiB/s wr, 74 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:09.243+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:09.823+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090093 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:09.823+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:10.067+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:26:10.071+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:26:10.315+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4313: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 54 KiB/s rd, 1.1 MiB/s wr, 51 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:11.487+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090123 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:11.487+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:12.351+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4314: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 95 KiB/s rd, 1.3 MiB/s wr, 75 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:13.047+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:26:13.059+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:26:13.127+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090147 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:13.127+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:14.251+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:14.355+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4315: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 95 KiB/s rd, 1.1 MiB/s wr, 60 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:14.855+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090171 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:14.855+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:16.363+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4316: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 95 KiB/s rd, 1.1 MiB/s wr, 60 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:16.547+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090195 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:16.547+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:18.223+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090219 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:18.223+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:18.363+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4317: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 97 KiB/s rd, 1.4 MiB/s wr, 81 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:19.259+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:19.863+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090243 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:19.863+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:20.363+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4318: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 43 KiB/s rd, 571 KiB/s wr, 44 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:21.599+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090267 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:21.599+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:22.363+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4319: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 76 KiB/s rd, 1.3 MiB/s wr, 79 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:23.175+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090291 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:23.175+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:24.255+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:24.443+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4320: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 35 KiB/s rd, 1007 KiB/s wr, 55 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:24.795+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090315 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:24.795+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:26.491+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090339 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:26.491+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:26.491+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4321: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 35 KiB/s rd, 1003 KiB/s wr, 55 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:28.119+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090375 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:28.119+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:28.491+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4322: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 39 KiB/s rd, 1.6 MiB/s wr, 80 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:29.267+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:29.795+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090399 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:29.795+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:30.495+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4323: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 37 KiB/s rd, 1.3 MiB/s wr, 60 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:31.387+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090423 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:31.387+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:32.495+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4324: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 38 KiB/s rd, 1.6 MiB/s wr, 80 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:33.023+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090447 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:33.023+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:33.759+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:26:33.759+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:26:34.019+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:26:34.019+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:26:34.275+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:34.479+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4325: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 5.6 KiB/s rd, 944 KiB/s wr, 46 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:34.651+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090471 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:34.651+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:35.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:26:35.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:26:36.263+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090495 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:36.263+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:36.523+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4326: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 5.6 KiB/s rd, 948 KiB/s wr, 46 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:37.955+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5860418 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:37.955+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:38.523+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4327: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 71 KiB/s rd, 1.2 MiB/s wr, 66 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:39.279+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:39.587+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090546 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:39.587+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:40.563+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4328: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 66 KiB/s rd, 616 KiB/s wr, 40 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:41.283+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090576 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:41.283+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:42.563+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4329: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 121 KiB/s rd, 1.4 MiB/s wr, 87 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:42.907+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090600 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:42.907+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:44.303+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:44.519+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090624 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:44.519+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:44.567+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4330: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 120 KiB/s rd, 1.1 MiB/s wr, 66 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:46.355+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090648 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:46.355+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:46.571+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4331: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 120 KiB/s rd, 1.1 MiB/s wr, 66 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:48.159+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090672 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:48.159+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:48.575+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4332: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 243 KiB/s rd, 1.6 MiB/s wr, 97 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:49.311+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:49.807+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090696 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:49.807+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:50.579+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4333: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 178 KiB/s rd, 1.3 MiB/s wr, 77 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:51.407+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090720 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:51.407+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:52.583+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4334: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 285 KiB/s rd, 1.6 MiB/s wr, 106 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:53.067+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090744 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:53.067+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:54.319+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:54.579+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4335: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 230 KiB/s rd, 807 KiB/s wr, 60 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:54.819+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090768 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:54.819+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:56.455+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090792 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:56.455+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:56.583+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4336: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 230 KiB/s rd, 808 KiB/s wr, 60 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:58.223+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090816 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:58.223+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:26:58.591+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4337: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 311 KiB/s rd, 1.0 MiB/s wr, 84 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:26:59.319+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:26:59.927+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090840 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:26:59.927+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:00.595+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4338: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 188 KiB/s rd, 539 KiB/s wr, 53 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:01.603+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090864 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:01.603+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:01.707+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:27:01
debug 2022-03-13T01:27:01.707+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:27:01.707+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:27:02.595+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4339: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 200 KiB/s rd, 759 KiB/s wr, 68 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:03.183+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090888 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:03.183+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:03.759+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:27:03.759+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:27:03.779+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:27:03.783+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:27:03.783+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:27:04.023+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:27:04.023+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:27:04.359+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:04.595+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4340: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 94 KiB/s rd, 482 KiB/s wr, 39 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:04.783+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090912 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:04.783+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:05.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:27:05.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:27:06.435+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090936 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:06.439+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:06.595+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4341: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 94 KiB/s rd, 482 KiB/s wr, 39 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:08.343+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090960 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:08.343+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:08.603+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4342: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 174 KiB/s rd, 1.4 MiB/s wr, 76 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:09.367+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:10.011+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6090990 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:10.011+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:10.079+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:27:10.095+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:27:10.603+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4343: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 93 KiB/s rd, 1.1 MiB/s wr, 51 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:11.827+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091026 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:11.827+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:12.615+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4344: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 199 KiB/s rd, 1.8 MiB/s wr, 90 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:13.063+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:27:13.075+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:27:13.455+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091050 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:13.455+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:14.383+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:14.631+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4345: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 186 KiB/s rd, 1.6 MiB/s wr, 75 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:15.071+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091074 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:15.071+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:16.635+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4346: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 186 KiB/s rd, 1.6 MiB/s wr, 75 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:16.695+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091098 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:16.695+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:18.427+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091122 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:18.427+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:18.635+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4347: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 286 KiB/s rd, 1.9 MiB/s wr, 101 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:19.387+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:20.055+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091146 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:20.055+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:20.639+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4348: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 206 KiB/s rd, 1012 KiB/s wr, 64 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:21.635+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091170 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:21.635+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:22.651+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4349: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 288 KiB/s rd, 1.3 MiB/s wr, 92 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:23.427+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091191 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:23.427+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:24.391+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:24.659+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4350: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 182 KiB/s rd, 555 KiB/s wr, 54 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:25.051+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091221 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:25.051+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:26.619+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091245 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:26.619+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:26.671+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4351: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 182 KiB/s rd, 555 KiB/s wr, 54 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:28.391+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091275 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:28.391+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:28.675+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4352: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 317 KiB/s rd, 1.0 MiB/s wr, 87 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:29.391+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:30.027+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091305 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:30.027+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:30.679+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4353: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 217 KiB/s rd, 766 KiB/s wr, 60 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:31.599+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091329 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:31.599+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:32.683+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4354: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 407 KiB/s rd, 1.0 MiB/s wr, 94 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:33.231+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091350 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:33.231+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:33.763+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:27:33.763+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:27:34.023+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:27:34.023+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:27:34.395+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:34.683+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4355: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 326 KiB/s rd, 794 KiB/s wr, 66 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:34.855+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091374 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:34.855+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:35.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:27:35.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:27:36.455+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091398 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:36.455+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:36.687+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4356: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 326 KiB/s rd, 794 KiB/s wr, 66 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:38.455+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091416 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:38.455+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:38.691+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4357: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 451 KiB/s rd, 1.6 MiB/s wr, 97 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:39.563+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:40.043+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091440 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:40.043+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:40.695+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4358: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 316 KiB/s rd, 1.1 MiB/s wr, 64 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:41.683+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091464 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:41.683+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:42.699+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4359: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 426 KiB/s rd, 1.3 MiB/s wr, 91 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:43.271+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091491 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:43.275+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:44.571+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:44.707+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4360: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 235 KiB/s rd, 1.0 MiB/s wr, 57 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:44.855+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091515 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:44.855+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:46.679+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091539 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:46.679+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:46.707+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4361: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 235 KiB/s rd, 1.0 MiB/s wr, 57 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:48.451+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091563 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:48.451+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:48.711+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4362: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 362 KiB/s rd, 1.3 MiB/s wr, 84 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:49.575+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:50.103+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091587 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:50.103+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:50.711+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4363: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 237 KiB/s rd, 564 KiB/s wr, 53 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:51.755+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091611 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:51.755+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:52.715+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4364: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 373 KiB/s rd, 1.2 MiB/s wr, 86 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:53.411+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091635 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:53.411+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:54.579+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:27:54.723+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4365: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 263 KiB/s rd, 995 KiB/s wr, 59 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:55.083+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091659 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:55.083+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:56.723+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4366: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 263 KiB/s rd, 996 KiB/s wr, 59 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:56.807+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091683 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:56.807+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:58.527+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091704 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:27:58.527+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:27:58.723+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4367: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 508 KiB/s rd, 1.5 MiB/s wr, 109 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:27:59.579+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:00.111+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091728 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:00.111+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:00.723+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4368: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 381 KiB/s rd, 1.2 MiB/s wr, 82 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:01.763+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091752 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:01.763+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:01.763+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:28:01
debug 2022-03-13T01:28:01.763+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:28:01.763+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:28:02.731+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4369: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 503 KiB/s rd, 1.5 MiB/s wr, 107 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:03.379+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091776 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:03.379+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:03.763+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:28:03.763+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:28:03.783+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:28:03.787+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:28:03.787+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:28:04.027+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:28:04.027+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:28:04.583+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:04.731+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4370: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 367 KiB/s rd, 829 KiB/s wr, 75 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:04.991+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091800 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:04.991+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:05.187+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:28:05.187+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:28:06.743+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4371: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 367 KiB/s rd, 829 KiB/s wr, 75 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:06.787+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091824 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:06.787+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:08.567+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091848 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:08.567+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:08.747+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4372: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 607 KiB/s rd, 1.5 MiB/s wr, 120 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:09.615+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:10.107+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:28:10.135+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:28:10.607+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091878 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:10.607+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:10.767+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4373: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 362 KiB/s rd, 938 KiB/s wr, 69 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:12.759+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4374: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 611 KiB/s rd, 1.7 MiB/s wr, 109 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:12.835+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091914 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:12.835+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:13.139+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:28:13.147+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:28:14.527+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091938 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:14.527+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:14.619+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:14.771+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4375: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 490 KiB/s rd, 1.4 MiB/s wr, 84 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:16.147+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6091962 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:16.147+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:16.775+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4376: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 490 KiB/s rd, 1.4 MiB/s wr, 84 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:17.867+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5860988 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:17.867+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:18.779+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4377: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 589 KiB/s rd, 1.7 MiB/s wr, 107 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:19.479+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092010 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:19.479+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:19.635+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:20.787+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4378: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 349 KiB/s rd, 1.0 MiB/s wr, 62 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:21.199+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092031 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:21.199+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:22.791+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4379: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 465 KiB/s rd, 1.3 MiB/s wr, 85 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:23.011+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092055 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:23.011+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:24.639+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:24.691+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092079 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:24.691+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:24.791+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4380: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 215 KiB/s rd, 507 KiB/s wr, 46 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:26.387+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092103 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:26.387+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:26.791+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4381: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 215 KiB/s rd, 507 KiB/s wr, 46 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:28.075+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092127 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:28.075+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:28.795+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4382: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 362 KiB/s rd, 821 KiB/s wr, 73 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:29.639+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:29.755+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092154 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:29.755+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:30.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4383: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 262 KiB/s rd, 577 KiB/s wr, 50 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:31.323+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092184 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:31.323+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:32.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4384: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 432 KiB/s rd, 1.5 MiB/s wr, 100 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:32.991+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092208 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:32.991+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:33.759+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:28:33.759+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:28:33.999+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:28:33.999+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:28:34.607+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5861081 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:34.607+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:34.679+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:34.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4385: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 316 KiB/s rd, 1.3 MiB/s wr, 77 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:35.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:28:35.183+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:28:36.199+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092253 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:36.199+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:36.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4386: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 316 KiB/s rd, 1.3 MiB/s wr, 77 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:37.967+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092277 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:37.967+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:38.847+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4387: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 500 KiB/s rd, 1.5 MiB/s wr, 103 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:39.623+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092301 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:39.623+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:39.691+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:40.847+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4388: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 354 KiB/s rd, 1.2 MiB/s wr, 76 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:41.159+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092325 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:41.159+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:42.791+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5861108 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:42.791+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:42.847+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4389: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 564 KiB/s rd, 1.9 MiB/s wr, 114 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:44.431+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092379 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:44.431+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:44.715+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:44.839+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4390: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 395 KiB/s rd, 981 KiB/s wr, 64 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:46.139+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092403 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:46.139+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:46.851+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4391: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 395 KiB/s rd, 980 KiB/s wr, 64 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:47.851+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092424 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:47.851+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:48.851+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4392: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 796 KiB/s rd, 1.5 MiB/s wr, 101 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:49.459+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092448 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:49.459+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:49.715+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:50.883+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4393: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 612 KiB/s rd, 1.2 MiB/s wr, 74 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:51.123+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092472 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:51.123+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:52.775+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092496 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:52.775+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:52.883+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4394: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 790 KiB/s rd, 1.4 MiB/s wr, 104 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:54.471+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092520 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:54.471+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:54.719+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:28:54.883+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4395: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 580 KiB/s rd, 757 KiB/s wr, 66 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:56.147+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092544 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:56.147+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:56.887+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4396: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 579 KiB/s rd, 757 KiB/s wr, 66 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:57.915+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092562 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:57.915+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:58.887+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4397: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 845 KiB/s rd, 1.5 MiB/s wr, 108 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:28:59.599+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092586 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:28:59.599+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:28:59.723+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:00.891+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4398: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 444 KiB/s rd, 981 KiB/s wr, 72 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:01.163+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092610 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:01.163+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:01.767+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:29:01
debug 2022-03-13T01:29:01.767+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:29:01.767+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:29:02.895+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4399: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 741 KiB/s rd, 1.3 MiB/s wr, 103 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:02.899+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092634 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:02.899+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:03.763+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:29:03.763+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:29:03.787+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:29:03.791+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:29:03.791+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:29:04.031+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:29:04.031+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:29:04.555+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092658 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:04.555+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:04.727+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:04.899+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4400: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 562 KiB/s rd, 1.0 MiB/s wr, 73 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:05.219+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:29:05.219+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:29:06.523+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092682 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:06.523+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:06.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4401: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 562 KiB/s rd, 1.0 MiB/s wr, 73 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:08.303+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092706 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:08.303+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:08.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4402: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 639 KiB/s rd, 1.3 MiB/s wr, 105 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:09.763+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:09.935+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092730 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:09.935+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:10.151+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:29:10.219+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:29:10.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4403: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 373 KiB/s rd, 631 KiB/s wr, 63 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:11.679+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092754 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:11.679+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:12.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4404: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 777 KiB/s rd, 1.3 MiB/s wr, 110 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:13.155+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:29:13.163+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:29:13.395+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092790 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:13.395+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:14.775+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:14.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4405: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 481 KiB/s rd, 999 KiB/s wr, 79 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:15.039+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092820 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:15.039+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:16.651+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092844 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:16.651+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:16.907+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4406: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 481 KiB/s rd, 999 KiB/s wr, 79 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:18.331+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092868 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:18.331+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:18.907+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4407: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 596 KiB/s rd, 1.8 MiB/s wr, 115 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:19.855+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:19.939+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092892 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:19.939+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:20.911+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4408: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 520 KiB/s rd, 1.5 MiB/s wr, 83 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:21.519+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5861279 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:21.519+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:22.919+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4409: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 526 KiB/s rd, 1.8 MiB/s wr, 105 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:23.139+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092940 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:23.139+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:24.811+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092964 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:24.811+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:24.855+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:24.919+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4410: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 122 KiB/s rd, 1.1 MiB/s wr, 58 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:26.539+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6092988 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:26.539+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:26.919+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4411: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 122 KiB/s rd, 1.1 MiB/s wr, 58 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:28.264+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093009 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:28.264+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:28.924+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4412: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 127 KiB/s rd, 1.4 MiB/s wr, 75 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:29.856+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:29.952+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093033 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:29.952+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:30.924+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4413: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 12 KiB/s rd, 569 KiB/s wr, 39 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:31.608+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5861342 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:31.608+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:32.928+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4414: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 22 KiB/s rd, 978 KiB/s wr, 63 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:33.264+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093090 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:33.264+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:33.764+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:29:33.764+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:29:34.036+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:29:34.036+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:29:34.864+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:34.924+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093111 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:34.924+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:34.932+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4415: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 16 KiB/s rd, 642 KiB/s wr, 41 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:35.220+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:29:35.220+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:29:36.584+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093135 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:36.584+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:36.932+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4416: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 16 KiB/s rd, 642 KiB/s wr, 41 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:38.276+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093159 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:38.276+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:38.940+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4417: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 58 KiB/s rd, 917 KiB/s wr, 63 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:39.824+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093183 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:39.824+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:39.864+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:40.944+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4418: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 53 KiB/s rd, 685 KiB/s wr, 45 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:41.492+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093213 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:41.492+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:42.944+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4419: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 59 KiB/s rd, 1.3 MiB/s wr, 66 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:43.084+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093237 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:43.084+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:44.664+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093267 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:44.664+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:44.864+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:44.960+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4420: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 49 KiB/s rd, 943 KiB/s wr, 43 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:46.396+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093291 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:46.396+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:46.960+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4421: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 49 KiB/s rd, 943 KiB/s wr, 43 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:48.140+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093315 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:48.140+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:48.964+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4422: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 53 KiB/s rd, 1.2 MiB/s wr, 60 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:49.812+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093339 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:49.812+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:49.872+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:50.964+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4423: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 11 KiB/s rd, 929 KiB/s wr, 38 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:51.452+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093363 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:51.452+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:52.968+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4424: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 23 KiB/s rd, 1.2 MiB/s wr, 57 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:53.084+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093387 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:53.084+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:54.808+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093411 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:54.808+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:54.884+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:29:54.972+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4425: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 17 KiB/s rd, 518 KiB/s wr, 36 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:56.440+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093435 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:56.440+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:56.972+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4426: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 17 KiB/s rd, 647 KiB/s wr, 46 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:58.124+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093459 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:58.124+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:58.972+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4427: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 17 KiB/s rd, 806 KiB/s wr, 55 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:29:59.772+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093483 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:29:59.772+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:29:59.888+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:00.972+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4428: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 13 KiB/s rd, 545 KiB/s wr, 38 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:01.580+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093507 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:01.580+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:01.776+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:30:01
debug 2022-03-13T01:30:01.776+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:30:01.776+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:30:02.976+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4429: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 13 KiB/s rd, 842 KiB/s wr, 57 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:03.436+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093531 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:03.436+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:03.768+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:30:03.768+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:30:03.792+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:30:03.792+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:30:03.792+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:30:04.036+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:30:04.036+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:30:04.928+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:04.976+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4430: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1023 B/s rd, 585 KiB/s wr, 37 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:05.056+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093555 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:05.056+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:05.228+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:30:05.228+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:30:06.672+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093579 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:06.672+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:06.980+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4431: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1023 B/s rd, 585 KiB/s wr, 37 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:08.544+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093603 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:08.544+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:08.980+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4432: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 102 KiB/s rd, 890 KiB/s wr, 63 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:09.960+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:10.176+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093627 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:10.176+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:10.236+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:30:10.244+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:30:10.992+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4433: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 102 KiB/s rd, 730 KiB/s wr, 54 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:12.304+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093651 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:12.304+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:12.992+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4434: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 131 KiB/s rd, 1.1 MiB/s wr, 83 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:13.244+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:30:13.260+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:30:14.264+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093684 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:14.264+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:14.964+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:14.996+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4435: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 130 KiB/s rd, 795 KiB/s wr, 64 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:15.964+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093714 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:15.964+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:17.004+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4436: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 131 KiB/s rd, 1.2 MiB/s wr, 79 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:17.732+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093738 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:17.732+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:19.004+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4437: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 134 KiB/s rd, 1.6 MiB/s wr, 97 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:19.328+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093762 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:19.328+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:19.968+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:21.008+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4438: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 33 KiB/s rd, 1.2 MiB/s wr, 61 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:21.080+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093786 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:21.080+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:22.812+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093810 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:22.812+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:23.020+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4439: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 86 KiB/s rd, 1.7 MiB/s wr, 102 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:24.432+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093834 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:24.432+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:24.980+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:25.020+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4440: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 57 KiB/s rd, 1.4 MiB/s wr, 73 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:26.092+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093864 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:26.092+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:27.032+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4441: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 69 KiB/s rd, 1.6 MiB/s wr, 88 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:27.828+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5861651 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:27.828+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:29.032+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4442: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 98 KiB/s rd, 1.4 MiB/s wr, 87 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:29.544+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093912 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:29.544+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:29.984+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:31.040+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4443: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 95 KiB/s rd, 913 KiB/s wr, 69 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:31.172+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093936 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:31.172+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:32.780+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093972 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:32.780+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:33.044+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4444: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 116 KiB/s rd, 1.4 MiB/s wr, 103 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:33.788+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:30:33.788+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:30:34.040+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:30:34.040+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:30:34.384+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6093996 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:34.384+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:34.992+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:35.048+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4445: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 63 KiB/s rd, 923 KiB/s wr, 63 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:35.228+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:30:35.228+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:30:35.996+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094017 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:35.996+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:37.048+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4446: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 65 KiB/s rd, 1.0 MiB/s wr, 75 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:37.700+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094041 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:37.700+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:39.052+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4447: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 77 KiB/s rd, 1.0 MiB/s wr, 73 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:39.328+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094065 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:39.328+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:39.996+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:40.932+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094089 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:40.932+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:41.056+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4448: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 47 KiB/s rd, 835 KiB/s wr, 59 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:42.624+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5861717 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:42.624+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:43.056+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4449: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 56 KiB/s rd, 1.1 MiB/s wr, 80 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:44.276+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094137 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:44.276+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:45.000+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:45.056+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4450: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 35 KiB/s rd, 567 KiB/s wr, 46 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:45.876+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094167 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:45.876+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:47.056+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4451: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 35 KiB/s rd, 617 KiB/s wr, 55 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:47.668+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094191 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:47.668+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:49.056+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4452: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 45 KiB/s rd, 723 KiB/s wr, 59 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:49.320+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094215 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:49.320+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:50.004+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:50.936+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094239 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:50.936+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:51.060+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4453: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 22 KiB/s rd, 556 KiB/s wr, 46 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:52.596+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094263 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:52.596+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:53.060+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4454: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 46 KiB/s rd, 837 KiB/s wr, 69 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:54.316+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094287 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:54.316+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:55.012+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:30:55.076+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4455: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 37 KiB/s rd, 581 KiB/s wr, 48 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:55.944+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094311 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:55.944+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:57.080+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4456: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 38 KiB/s rd, 734 KiB/s wr, 59 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:57.584+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094335 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:57.584+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:30:59.080+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4457: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 43 KiB/s rd, 828 KiB/s wr, 61 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:30:59.180+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094359 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:30:59.180+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:00.012+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:00.876+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094383 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:00.876+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:01.084+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4458: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 31 KiB/s rd, 577 KiB/s wr, 45 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:01.780+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:31:01
debug 2022-03-13T01:31:01.780+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:31:01.780+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:31:02.628+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094407 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:02.628+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:03.116+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4459: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 51 KiB/s rd, 818 KiB/s wr, 68 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:03.788+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:31:03.788+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:31:03.796+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:31:03.800+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:31:03.800+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:31:04.040+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:31:04.040+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:31:04.236+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094431 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:04.236+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:05.016+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:05.128+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4460: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 27 KiB/s rd, 538 KiB/s wr, 45 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:05.232+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:31:05.232+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:31:05.852+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094455 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:05.852+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:07.128+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4461: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 28 KiB/s rd, 786 KiB/s wr, 59 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:07.516+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094479 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:07.516+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:09.128+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4462: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 36 KiB/s rd, 686 KiB/s wr, 58 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:09.172+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094503 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:09.172+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:10.020+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:10.252+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:31:10.264+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:31:11.192+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4463: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 30 KiB/s rd, 542 KiB/s wr, 46 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:11.416+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094527 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:11.416+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:13.232+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4464: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 49 KiB/s rd, 804 KiB/s wr, 70 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:13.304+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:31:13.384+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:31:13.384+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094557 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:13.384+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:15.024+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:15.220+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4465: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 28 KiB/s rd, 562 KiB/s wr, 48 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:15.320+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094587 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:15.320+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:17.236+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4466: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 42 KiB/s rd, 846 KiB/s wr, 67 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:17.476+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094614 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:17.476+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:19.188+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094638 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:19.188+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:19.236+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4467: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 41 KiB/s rd, 600 KiB/s wr, 53 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:20.144+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:21.132+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094662 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:21.132+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:21.244+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4468: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 32 KiB/s rd, 547 KiB/s wr, 43 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:22.996+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094686 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:22.996+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:23.256+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4469: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 61 KiB/s rd, 890 KiB/s wr, 68 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:24.704+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094710 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:24.708+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:25.152+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:25.260+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4470: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 42 KiB/s rd, 628 KiB/s wr, 44 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:26.336+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094734 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:26.336+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:27.264+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4471: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 167 KiB/s rd, 1.6 MiB/s wr, 93 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:28.048+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094758 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:28.048+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:29.264+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4472: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 154 KiB/s rd, 1.3 MiB/s wr, 74 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:29.700+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094782 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:29.700+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:30.152+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:31.264+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4473: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 154 KiB/s rd, 1.3 MiB/s wr, 74 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:31.392+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094806 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:31.392+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:33.056+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094836 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:33.056+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:33.268+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4474: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 254 KiB/s rd, 1.7 MiB/s wr, 106 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:33.792+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:31:33.792+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:31:34.032+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:31:34.032+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:31:34.688+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094860 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:34.688+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:35.232+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:35.232+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:31:35.232+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:31:35.260+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4475: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 225 KiB/s rd, 1.3 MiB/s wr, 81 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:36.320+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094881 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:36.320+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:37.268+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4476: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 273 KiB/s rd, 1.6 MiB/s wr, 104 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:37.980+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094905 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:37.980+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:39.272+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4477: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 148 KiB/s rd, 665 KiB/s wr, 55 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:39.608+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094929 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:39.608+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:40.232+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:41.216+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094953 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:41.216+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:41.272+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4478: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 148 KiB/s rd, 664 KiB/s wr, 55 op/s; 13823/41469 objects degraded (33.333%)
debug 2022-03-13T01:31:42.952+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6094977 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:42.952+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:43.276+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4479: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 213 KiB/s rd, 1.4 MiB/s wr, 104 op/s; 13825/41475 objects degraded (33.333%)
debug 2022-03-13T01:31:44.604+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095001 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:44.604+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:45.236+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:45.292+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4480: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 113 KiB/s rd, 1.0 MiB/s wr, 71 op/s; 13825/41475 objects degraded (33.333%)
debug 2022-03-13T01:31:46.172+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095025 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:46.172+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:47.292+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4481: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 232 KiB/s rd, 1.4 MiB/s wr, 108 op/s; 13828/41484 objects degraded (33.333%)
debug 2022-03-13T01:31:48.028+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095055 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:48.028+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:49.296+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4482: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 184 KiB/s rd, 1.1 MiB/s wr, 86 op/s; 13828/41484 objects degraded (33.333%)
debug 2022-03-13T01:31:49.720+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095079 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:49.720+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:50.244+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:51.296+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4483: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 184 KiB/s rd, 1.1 MiB/s wr, 86 op/s; 13828/41484 objects degraded (33.333%)
debug 2022-03-13T01:31:51.348+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095103 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:51.348+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:53.184+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095127 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:53.184+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:53.296+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4484: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 186 KiB/s rd, 1.4 MiB/s wr, 105 op/s; 13829/41487 objects degraded (33.333%)
debug 2022-03-13T01:31:54.792+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095151 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:54.792+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:55.248+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:31:55.296+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4485: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 121 KiB/s rd, 612 KiB/s wr, 55 op/s; 13829/41487 objects degraded (33.333%)
debug 2022-03-13T01:31:56.448+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095175 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:56.448+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:57.300+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4486: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 183 KiB/s rd, 1.2 MiB/s wr, 89 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:31:58.180+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095205 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:58.180+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:31:59.297+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4487: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 64 KiB/s rd, 876 KiB/s wr, 52 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:31:59.797+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095229 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:31:59.797+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:00.249+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:01.301+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4488: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 64 KiB/s rd, 876 KiB/s wr, 52 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:01.369+0000 7fc22987b700 0 [devicehealth INFO root] Check health
debug 2022-03-13T01:32:01.401+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095253 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:01.401+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:01.785+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:32:01
debug 2022-03-13T01:32:01.785+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:32:01.785+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:32:03.045+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095277 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:03.045+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:03.301+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4489: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 267 KiB/s rd, 1.5 MiB/s wr, 86 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:03.789+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:32:03.789+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:32:03.801+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:32:03.805+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:32:03.805+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:32:04.033+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:32:04.033+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:32:04.637+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095301 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:04.637+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:05.261+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:32:05.261+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:32:05.261+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:05.293+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4490: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 265 KiB/s rd, 1.3 MiB/s wr, 67 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:06.253+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095325 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:06.253+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:07.305+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4491: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 370 KiB/s rd, 1.5 MiB/s wr, 101 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:08.045+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095349 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:08.045+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:09.301+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4492: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 308 KiB/s rd, 961 KiB/s wr, 67 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:09.717+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095373 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:09.717+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:10.269+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:10.277+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:32:10.289+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:32:11.309+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4493: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 308 KiB/s rd, 961 KiB/s wr, 67 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:11.325+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5862176 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:11.325+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:12.993+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095421 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:12.993+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:13.309+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4494: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 405 KiB/s rd, 1.6 MiB/s wr, 101 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:13.393+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:32:13.405+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:32:14.645+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095445 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:14.645+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:15.273+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:15.309+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4495: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 202 KiB/s rd, 958 KiB/s wr, 67 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:16.381+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095478 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:16.381+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:17.313+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4496: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 317 KiB/s rd, 1.2 MiB/s wr, 91 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:18.133+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095508 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:18.133+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:19.313+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4497: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 212 KiB/s rd, 899 KiB/s wr, 57 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:19.761+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095532 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:19.765+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:20.289+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:21.317+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4498: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 212 KiB/s rd, 899 KiB/s wr, 57 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:21.669+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095556 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:21.669+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:23.289+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095580 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:23.289+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:23.325+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4499: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 253 KiB/s rd, 1.3 MiB/s wr, 89 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:24.889+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095604 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:24.889+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:25.289+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:25.329+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4500: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 156 KiB/s rd, 627 KiB/s wr, 55 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:26.537+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095628 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:26.537+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:27.337+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4501: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 394 KiB/s rd, 1.3 MiB/s wr, 102 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:28.225+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095652 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:28.225+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:29.337+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4502: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 279 KiB/s rd, 1.1 MiB/s wr, 78 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:29.837+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5862299 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:29.857+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:30.297+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:31.369+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4503: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 278 KiB/s rd, 1.1 MiB/s wr, 78 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:31.441+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095700 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:31.441+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:33.009+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095721 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:33.009+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:33.373+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4504: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 418 KiB/s rd, 1.4 MiB/s wr, 111 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:33.789+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:32:33.789+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:32:34.041+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:32:34.041+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:32:34.625+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095751 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:34.625+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:35.265+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:32:35.265+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:32:35.309+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:35.381+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4505: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 378 KiB/s rd, 1007 KiB/s wr, 80 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:36.305+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095781 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:36.305+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:37.381+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4506: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 483 KiB/s rd, 1.7 MiB/s wr, 119 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:38.041+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095805 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:38.041+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:39.385+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4507: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 246 KiB/s rd, 985 KiB/s wr, 72 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:39.777+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095829 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:39.777+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:40.313+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:41.385+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4508: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 246 KiB/s rd, 985 KiB/s wr, 72 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:41.417+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095853 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:41.417+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:43.101+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095880 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:43.101+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:43.385+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4509: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 485 KiB/s rd, 1.4 MiB/s wr, 112 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:44.773+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095904 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:44.773+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:45.309+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:45.501+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4510: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 342 KiB/s rd, 1.1 MiB/s wr, 77 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:46.581+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095928 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:46.581+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:47.497+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4511: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 449 KiB/s rd, 1.4 MiB/s wr, 102 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:48.273+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095958 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:48.273+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:49.529+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4512: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 344 KiB/s rd, 727 KiB/s wr, 63 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:49.937+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6095982 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:49.937+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:50.317+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:51.533+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4513: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 343 KiB/s rd, 726 KiB/s wr, 63 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:51.565+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096006 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:51.565+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:53.241+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096030 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:53.241+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:53.533+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4514: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 706 KiB/s rd, 1.6 MiB/s wr, 127 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:54.837+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096054 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:54.845+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:55.317+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:32:55.545+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4515: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 470 KiB/s rd, 1.1 MiB/s wr, 88 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:56.489+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096078 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:56.489+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:57.545+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4516: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 668 KiB/s rd, 1.4 MiB/s wr, 118 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:58.157+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096096 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:58.157+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:32:59.553+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4517: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 560 KiB/s rd, 1.2 MiB/s wr, 93 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:32:59.793+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096120 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:32:59.793+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:00.325+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:01.409+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096144 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:01.409+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:01.553+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4518: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 560 KiB/s rd, 1.2 MiB/s wr, 94 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:01.781+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:33:01
debug 2022-03-13T01:33:01.781+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:33:01.781+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:33:03.161+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096168 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:03.165+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:03.557+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4519: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 795 KiB/s rd, 1.9 MiB/s wr, 139 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:03.793+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:33:03.793+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:33:03.805+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:33:03.809+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:33:03.809+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:33:04.045+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:33:04.045+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:33:04.753+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096192 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:04.753+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:05.265+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:33:05.265+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:33:05.341+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:05.545+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4520: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 428 KiB/s rd, 996 KiB/s wr, 75 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:06.425+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096216 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:06.425+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:07.605+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4521: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 616 KiB/s rd, 1.6 MiB/s wr, 110 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:08.161+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096240 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:08.161+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:09.605+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4522: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 422 KiB/s rd, 1.3 MiB/s wr, 80 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:09.721+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096264 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:09.721+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:10.309+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:33:10.317+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:33:10.337+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:11.413+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096288 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:11.413+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:11.605+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4523: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 422 KiB/s rd, 1.3 MiB/s wr, 80 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:13.017+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096312 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:13.017+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:13.417+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:33:13.425+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:33:13.609+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4524: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 654 KiB/s rd, 1.6 MiB/s wr, 110 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:14.613+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5862569 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:14.613+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:15.341+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:15.609+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4525: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 421 KiB/s rd, 877 KiB/s wr, 65 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:16.485+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096363 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:16.485+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:17.609+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4526: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 566 KiB/s rd, 1.1 MiB/s wr, 87 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:18.329+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096399 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:18.329+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:19.613+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4527: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 378 KiB/s rd, 489 KiB/s wr, 52 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:20.049+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096423 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:20.049+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:20.341+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:21.617+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4528: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 378 KiB/s rd, 489 KiB/s wr, 52 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:21.729+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096447 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:21.729+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:23.397+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096471 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:23.397+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:23.617+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4529: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 662 KiB/s rd, 1.1 MiB/s wr, 99 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:25.065+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096495 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:25.065+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:25.349+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:25.621+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4530: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 429 KiB/s rd, 848 KiB/s wr, 69 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:26.633+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096519 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:26.637+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:27.621+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4531: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 576 KiB/s rd, 1.2 MiB/s wr, 100 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:28.297+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096549 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:28.297+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:29.637+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4532: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 430 KiB/s rd, 1015 KiB/s wr, 77 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:29.921+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096573 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:29.921+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:30.353+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:31.633+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4533: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 431 KiB/s rd, 1015 KiB/s wr, 77 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:31.645+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096597 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:31.645+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:33.281+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096621 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:33.281+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:33.645+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4534: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1015 KiB/s rd, 2.0 MiB/s wr, 144 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:33.825+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:33:33.825+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:33:34.041+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:33:34.045+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:33:34.877+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096645 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:34.877+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:35.265+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:33:35.265+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:33:35.353+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:35.649+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4535: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 732 KiB/s rd, 1.3 MiB/s wr, 97 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:36.429+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096681 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:36.429+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:37.657+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4536: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 857 KiB/s rd, 1.6 MiB/s wr, 119 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:38.189+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096705 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:38.189+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:39.661+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4537: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 710 KiB/s rd, 1.2 MiB/s wr, 89 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:39.821+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096729 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:39.821+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:40.361+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:41.497+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096753 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:41.497+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:41.661+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4538: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 710 KiB/s rd, 1.2 MiB/s wr, 89 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:43.093+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096777 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:43.093+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:43.685+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4539: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 808 KiB/s rd, 1.4 MiB/s wr, 116 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:44.873+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096801 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:44.873+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:45.373+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:45.685+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4540: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 224 KiB/s rd, 471 KiB/s wr, 50 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:46.505+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096825 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:46.505+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:47.689+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4541: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 684 KiB/s rd, 1.1 MiB/s wr, 95 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:48.285+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096849 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:48.285+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:49.689+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4542: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 559 KiB/s rd, 897 KiB/s wr, 72 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:49.961+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096879 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:49.961+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:50.377+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:51.557+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096903 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:51.557+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:51.693+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4543: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 559 KiB/s rd, 897 KiB/s wr, 72 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:53.317+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096927 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:53.317+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:53.693+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4544: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 884 KiB/s rd, 1.6 MiB/s wr, 114 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:54.929+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096951 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:54.929+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:55.385+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:33:55.697+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4545: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 786 KiB/s rd, 1.3 MiB/s wr, 87 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:56.505+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096975 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:56.509+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:57.697+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4546: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 979 KiB/s rd, 1.7 MiB/s wr, 125 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:58.237+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6096999 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:58.237+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:33:59.705+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4547: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 518 KiB/s rd, 1.1 MiB/s wr, 80 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:33:59.973+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097023 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:33:59.973+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:00.393+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:01.569+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097047 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:01.569+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:01.709+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4548: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 518 KiB/s rd, 1.1 MiB/s wr, 80 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:01.781+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:34:01
debug 2022-03-13T01:34:01.781+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:34:01.781+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:34:03.221+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097071 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:03.221+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:03.709+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4549: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.2 MiB/s wr, 127 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:03.809+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:34:03.813+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:34:03.813+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:34:03.829+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:34:03.829+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:34:04.045+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:34:04.045+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:34:04.865+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097095 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:04.865+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:05.293+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:34:05.293+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:34:05.393+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:05.709+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4550: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 974 KiB/s rd, 1.5 MiB/s wr, 86 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:06.445+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097119 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:06.449+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:07.725+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4551: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 2.0 MiB/s wr, 112 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:08.229+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097143 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:08.229+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:09.733+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4552: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1020 KiB/s rd, 1.6 MiB/s wr, 74 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:09.849+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097161 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:09.849+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:10.321+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:34:10.325+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:34:10.405+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:11.721+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4553: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1020 KiB/s rd, 1.6 MiB/s wr, 74 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:11.789+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5862893 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:11.789+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:13.449+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:34:13.449+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097209 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:13.449+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:13.469+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:34:13.733+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4554: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 2.2 MiB/s wr, 108 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:15.057+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097239 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:15.057+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:15.405+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:15.745+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4555: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 448 KiB/s rd, 1.1 MiB/s wr, 61 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:16.697+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097263 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:16.697+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:17.745+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4556: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 965 KiB/s rd, 1.5 MiB/s wr, 91 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:18.633+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5862950 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:18.633+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:19.765+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4557: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 726 KiB/s rd, 997 KiB/s wr, 64 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:20.417+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:20.457+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097323 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:20.457+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:21.777+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4558: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 725 KiB/s rd, 997 KiB/s wr, 64 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:22.285+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097347 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:22.285+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:23.777+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4559: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 936 KiB/s rd, 1.4 MiB/s wr, 92 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:23.901+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097368 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:23.901+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:25.417+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:25.541+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097392 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:25.541+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:25.777+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4560: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 728 KiB/s rd, 786 KiB/s wr, 57 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:27.221+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097416 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:27.221+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:27.781+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4561: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.5 MiB/s wr, 106 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:28.965+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097434 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:28.965+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:29.801+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4562: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 947 KiB/s rd, 1.2 MiB/s wr, 76 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:30.421+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:30.585+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097458 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:30.585+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:31.801+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4563: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 947 KiB/s rd, 1.2 MiB/s wr, 77 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:32.249+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097482 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:32.249+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:33.805+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4564: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 2.1 MiB/s wr, 114 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:33.829+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:34:33.829+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:34:33.901+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097506 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:33.901+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:34.049+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:34:34.049+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:34:35.301+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:34:35.301+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:34:35.425+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:35.561+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097530 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:35.561+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:35.809+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4565: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.7 MiB/s wr, 86 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:37.245+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097563 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:37.245+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:37.813+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4566: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 2.0 MiB/s wr, 103 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:39.049+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097584 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:39.049+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:39.817+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4567: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 480 KiB/s rd, 1.2 MiB/s wr, 54 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:40.425+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:40.657+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097608 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:40.657+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:41.829+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4568: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 480 KiB/s rd, 1.2 MiB/s wr, 54 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:42.297+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097632 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:42.297+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:43.833+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4569: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 2.3 MiB/s wr, 101 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:43.889+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097656 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:43.889+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:45.429+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:45.553+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097680 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:45.553+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:45.845+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4570: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 614 KiB/s rd, 1.4 MiB/s wr, 63 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:47.341+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097704 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:47.341+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:47.849+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4571: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 956 KiB/s rd, 1.8 MiB/s wr, 90 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:49.849+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4572: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 942 KiB/s rd, 1.5 MiB/s wr, 73 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:49.901+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097734 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:49.901+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:50.429+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:51.477+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097758 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:51.477+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:51.849+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4573: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 942 KiB/s rd, 1.5 MiB/s wr, 73 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:53.089+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097782 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:53.089+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:53.853+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4574: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.2 MiB/s rd, 2.0 MiB/s wr, 109 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:54.705+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097806 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:54.705+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:55.453+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:34:55.853+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4575: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 614 KiB/s rd, 912 KiB/s wr, 62 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:56.325+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097830 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:56.325+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:57.853+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4576: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.0 MiB/s rd, 1.5 MiB/s wr, 98 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:34:58.009+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097854 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:58.009+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:59.645+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097884 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:34:59.645+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:34:59.861+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4577: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 692 KiB/s rd, 1.2 MiB/s wr, 72 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:00.453+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:01.273+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097908 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:01.273+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:01.793+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:35:01
debug 2022-03-13T01:35:01.793+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:35:01.793+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:35:01.861+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4578: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 693 KiB/s rd, 1.2 MiB/s wr, 72 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:02.925+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097932 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:02.925+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:03.817+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:35:03.817+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:35:03.817+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:35:03.849+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4579: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.9 MiB/s wr, 116 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:03.881+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:35:03.881+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:35:04.057+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:35:04.057+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:35:04.593+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097956 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:04.593+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:05.301+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:35:05.301+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:35:05.465+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:05.869+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4580: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 902 KiB/s rd, 1.3 MiB/s wr, 79 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:06.198+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6097980 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:06.198+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:07.878+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4581: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1017 KiB/s rd, 1.7 MiB/s wr, 107 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:07.886+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098004 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:07.886+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:09.678+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098028 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:09.678+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:09.882+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4582: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 597 KiB/s rd, 1.1 MiB/s wr, 71 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:10.358+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:35:10.362+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:35:10.490+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:11.318+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5863274 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:11.318+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:11.882+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4583: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 680 KiB/s rd, 1.3 MiB/s wr, 86 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:12.910+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098073 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:12.910+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:13.474+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:35:13.490+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:35:13.886+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4584: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 716 KiB/s rd, 1.7 MiB/s wr, 102 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:14.622+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098097 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:14.622+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:15.494+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:15.890+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4585: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 234 KiB/s rd, 1.0 MiB/s wr, 59 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:16.238+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098121 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:16.238+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:17.922+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4586: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 572 KiB/s rd, 1.3 MiB/s wr, 83 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:17.930+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098145 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:17.930+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:19.678+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098181 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:19.682+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:19.930+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4587: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 457 KiB/s rd, 915 KiB/s wr, 55 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:20.518+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:21.594+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098202 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:21.594+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:21.934+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4588: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 600 KiB/s rd, 1.1 MiB/s wr, 70 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:23.326+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098226 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:23.326+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:23.938+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4589: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 771 KiB/s rd, 1.2 MiB/s wr, 80 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:24.974+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098250 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:24.974+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:25.526+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:25.938+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4590: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 736 KiB/s rd, 803 KiB/s wr, 64 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:26.566+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5863364 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:26.566+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:27.982+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4591: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 829 KiB/s rd, 1.1 MiB/s wr, 87 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:28.342+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098298 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:28.342+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:29.990+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4592: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 491 KiB/s rd, 917 KiB/s wr, 62 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:30.122+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098322 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:30.122+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:30.530+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:31.826+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098346 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:31.826+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:31.998+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4593: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.0 MiB/s rd, 1.9 MiB/s wr, 114 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:33.446+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098370 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:33.446+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:33.882+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:35:33.882+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:35:34.006+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4594: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 887 KiB/s rd, 1.7 MiB/s wr, 100 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:34.058+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:35:34.058+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:35:35.086+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098394 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:35.086+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:35.306+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:35:35.310+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:35:35.534+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:36.006+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4595: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 633 KiB/s rd, 1.3 MiB/s wr, 74 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:36.686+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098418 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:36.686+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:38.010+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4596: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 888 KiB/s rd, 2.3 MiB/s wr, 113 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:38.482+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098448 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:38.482+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:40.010+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4597: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 796 KiB/s rd, 1.9 MiB/s wr, 91 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:40.078+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098472 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:40.078+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:40.546+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:41.686+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098496 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:41.686+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:42.014+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4598: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 910 KiB/s rd, 2.2 MiB/s wr, 123 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:43.262+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098520 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:43.262+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:44.014+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4599: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 369 KiB/s rd, 1.2 MiB/s wr, 71 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:44.878+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098550 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:44.878+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:45.546+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:46.018+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4600: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 369 KiB/s rd, 1.2 MiB/s wr, 71 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:46.466+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098574 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:46.466+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:48.054+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4601: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 625 KiB/s rd, 1.8 MiB/s wr, 100 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:48.190+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098595 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:48.190+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:49.886+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098619 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:49.886+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:50.062+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4602: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 369 KiB/s rd, 845 KiB/s wr, 60 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:50.550+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:51.590+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098649 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:51.590+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:52.062+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4603: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 596 KiB/s rd, 1.2 MiB/s wr, 96 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:53.290+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098673 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:53.290+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:54.066+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4604: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 484 KiB/s rd, 931 KiB/s wr, 64 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:54.890+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098697 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:54.890+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:55.554+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:35:56.070+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4605: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 484 KiB/s rd, 931 KiB/s wr, 64 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:56.514+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098721 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:56.514+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:58.074+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4606: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 880 KiB/s rd, 1.8 MiB/s wr, 106 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:35:58.390+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098745 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:58.390+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:35:59.986+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098769 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:35:59.986+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:00.078+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4607: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 625 KiB/s rd, 1.2 MiB/s wr, 78 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:00.594+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:01.738+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5863589 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:01.738+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:01.798+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:36:01
debug 2022-03-13T01:36:01.798+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:36:01.798+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:36:02.078+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4608: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.7 MiB/s wr, 124 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:03.322+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098817 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:03.322+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:03.822+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:36:03.826+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:36:03.826+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:36:03.882+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:36:03.882+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:36:04.062+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:36:04.062+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:36:04.082+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4609: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 898 KiB/s rd, 1.3 MiB/s wr, 88 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:05.014+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098838 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:05.014+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:05.314+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:36:05.314+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:36:05.594+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:06.082+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4610: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 898 KiB/s rd, 1.3 MiB/s wr, 88 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:06.594+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098862 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:06.598+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:08.082+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4611: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.0 MiB/s wr, 133 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:08.314+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098886 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:08.314+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:09.894+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098910 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:09.902+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:10.086+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4612: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 886 KiB/s rd, 1.2 MiB/s wr, 91 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:10.374+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:36:10.394+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:36:10.594+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:11.554+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098934 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:11.554+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:12.090+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4613: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.8 MiB/s wr, 133 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:13.154+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6098958 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:13.154+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:13.510+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:36:13.522+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:36:14.098+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4614: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 768 KiB/s rd, 1.3 MiB/s wr, 87 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:14.854+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5863667 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:14.854+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:15.598+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:16.106+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4615: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 768 KiB/s rd, 1.3 MiB/s wr, 86 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:16.398+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099003 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:16.398+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:18.106+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4616: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 2.0 MiB/s wr, 121 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:18.138+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099027 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:18.138+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:19.930+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5863694 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:19.930+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:20.106+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4617: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 733 KiB/s rd, 1.2 MiB/s wr, 76 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:20.602+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:21.650+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099072 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:21.650+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:22.110+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4618: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 1.9 MiB/s wr, 122 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:23.358+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099099 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:23.358+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:24.114+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4619: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 914 KiB/s rd, 1.4 MiB/s wr, 80 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:25.022+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099123 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:25.022+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:25.606+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:26.122+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4620: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 914 KiB/s rd, 1.4 MiB/s wr, 80 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:26.726+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099147 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:26.726+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:28.122+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4621: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.1 MiB/s wr, 117 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:28.450+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099171 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:28.450+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:30.070+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099201 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:30.070+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:30.138+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4622: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 935 KiB/s rd, 1.5 MiB/s wr, 83 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:30.614+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:31.730+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099225 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:31.730+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:32.150+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4623: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.9 MiB/s wr, 117 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:33.306+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099249 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:33.306+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:33.894+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:36:33.894+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:36:34.066+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:36:34.066+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:36:34.150+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4624: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 707 KiB/s rd, 1.1 MiB/s wr, 71 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:34.878+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099273 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:34.882+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:35.314+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:36:35.314+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:36:35.622+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:36.158+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4625: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 707 KiB/s rd, 1.1 MiB/s wr, 71 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:36.482+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099297 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:36.482+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:38.174+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4626: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.2 MiB/s wr, 132 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:38.206+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099321 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:38.206+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:39.846+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099357 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:39.846+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:40.174+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4627: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 982 KiB/s rd, 1.5 MiB/s wr, 96 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:40.634+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:41.418+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099381 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:41.418+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:42.178+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4628: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 1.9 MiB/s wr, 135 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:43.322+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099405 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:43.322+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:44.182+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4629: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1004 KiB/s rd, 1.5 MiB/s wr, 100 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:44.934+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099429 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:44.934+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:45.638+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:46.186+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4630: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1004 KiB/s rd, 1.5 MiB/s wr, 100 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:46.542+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099453 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:46.542+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:48.214+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4631: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.3 MiB/s wr, 139 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:48.266+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099477 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:48.266+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:50.222+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4632: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 673 KiB/s rd, 1.2 MiB/s wr, 77 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:50.222+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099501 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:50.222+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:50.638+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:51.834+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099525 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:51.834+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:52.222+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4633: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.4 MiB/s wr, 128 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:53.550+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099552 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:53.550+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:54.226+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4634: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 948 KiB/s rd, 1.9 MiB/s wr, 89 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:55.286+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099576 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:55.286+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:55.642+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:36:56.246+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4635: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 946 KiB/s rd, 1.9 MiB/s wr, 89 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:56.914+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099600 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:56.914+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:36:58.250+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4636: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.2 MiB/s rd, 2.2 MiB/s wr, 120 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:36:58.726+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5863871 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:36:58.726+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:00.254+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4637: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 956 KiB/s rd, 1.5 MiB/s wr, 81 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:00.366+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099648 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:00.366+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:00.642+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:01.798+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:37:01
debug 2022-03-13T01:37:01.798+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:37:01.798+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:37:01.962+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099672 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:01.962+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:02.262+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4638: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.0 MiB/s rd, 2.0 MiB/s wr, 106 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:03.730+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099696 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:03.730+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:03.826+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:37:03.826+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:37:03.826+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:37:03.926+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:37:03.926+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:37:04.066+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:37:04.066+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:37:04.262+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4639: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 412 KiB/s rd, 838 KiB/s wr, 55 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:05.314+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:37:05.314+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:37:05.406+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099720 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:05.406+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:05.646+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:06.266+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4640: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 413 KiB/s rd, 838 KiB/s wr, 55 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:07.042+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099744 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:07.042+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:08.258+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4641: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.0 MiB/s rd, 1.8 MiB/s wr, 112 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:08.842+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099768 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:08.842+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:10.270+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4642: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 705 KiB/s rd, 1.5 MiB/s wr, 81 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:10.390+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099792 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:10.390+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:10.398+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:37:10.410+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:37:10.650+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:12.274+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4643: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 2.2 MiB/s wr, 119 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:12.330+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099816 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:12.342+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:13.538+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:37:13.558+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:37:14.002+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099837 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:14.002+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:14.278+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4644: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.0 MiB/s rd, 1.7 MiB/s wr, 95 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:15.654+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:15.698+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099867 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:15.702+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:16.318+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4645: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.0 MiB/s rd, 1.6 MiB/s wr, 94 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:17.326+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099891 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:17.326+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:18.322+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4646: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 2.1 MiB/s wr, 112 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:19.034+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099915 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:19.034+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:20.390+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4647: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 488 KiB/s rd, 1.1 MiB/s wr, 55 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:20.666+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:21.222+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099948 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:21.222+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:22.414+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4648: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.3 MiB/s wr, 107 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:23.134+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6099972 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:23.146+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:24.418+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4649: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 850 KiB/s rd, 1.7 MiB/s wr, 68 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:25.098+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100002 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:25.098+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:25.666+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:26.418+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4650: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 850 KiB/s rd, 1.7 MiB/s wr, 68 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:26.746+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100026 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:26.746+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:28.422+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4651: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.2 MiB/s wr, 108 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:28.482+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100050 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:28.482+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:30.178+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100074 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:30.178+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:30.422+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4652: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 1.8 MiB/s wr, 90 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:30.670+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:31.802+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100098 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:31.806+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:32.422+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4653: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.6 MiB/s rd, 2.7 MiB/s wr, 131 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:33.482+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100122 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:33.482+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:33.926+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:37:33.926+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:37:34.074+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:37:34.074+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:37:34.426+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4654: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 870 KiB/s rd, 1.5 MiB/s wr, 79 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:35.126+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100146 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:35.126+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:35.346+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:37:35.346+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:37:35.682+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:36.430+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4655: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 870 KiB/s rd, 1.5 MiB/s wr, 79 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:36.754+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100170 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:36.754+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:38.430+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4656: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.3 MiB/s wr, 118 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:38.486+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100194 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:38.486+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:40.178+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100218 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:40.178+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:40.430+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4657: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 808 KiB/s rd, 1.7 MiB/s wr, 79 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:40.686+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:41.798+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100254 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:41.798+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:42.434+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4658: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.8 MiB/s wr, 132 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:43.450+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100278 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:43.450+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:44.438+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4659: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.9 MiB/s wr, 92 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:45.034+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100302 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:45.034+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:45.686+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:46.438+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4660: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.9 MiB/s wr, 92 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:46.586+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100326 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:46.586+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:48.294+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100350 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:48.294+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:48.442+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4661: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.2 MiB/s wr, 114 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:49.946+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100374 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:49.946+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:50.442+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4662: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 902 KiB/s rd, 1.4 MiB/s wr, 75 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:50.690+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:51.622+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100398 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:51.626+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:52.442+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4663: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.8 MiB/s rd, 2.5 MiB/s wr, 126 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:53.330+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5864141 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:53.330+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:54.446+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4664: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.4 MiB/s wr, 72 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:54.914+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100452 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:54.914+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:55.694+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:37:56.450+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4665: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.4 MiB/s wr, 72 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:56.566+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100476 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:56.566+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:58.210+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100500 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:58.210+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:37:58.450+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4666: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.2 MiB/s wr, 112 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:37:59.866+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100524 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:37:59.866+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:00.454+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4667: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.8 MiB/s wr, 89 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:00.706+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:01.518+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100554 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:01.518+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:01.806+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:38:01
debug 2022-03-13T01:38:01.806+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:38:01.806+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:38:02.458+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4668: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.8 MiB/s rd, 3.0 MiB/s wr, 127 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:03.202+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100578 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:03.202+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:03.842+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:38:03.850+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:38:03.850+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:38:03.926+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:38:03.926+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:38:04.114+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:38:04.114+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:38:04.482+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4669: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 930 KiB/s rd, 1.9 MiB/s wr, 76 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:04.862+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100602 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:04.862+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:05.342+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:38:05.342+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:38:05.762+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:06.462+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100626 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:06.462+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:06.482+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4670: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 930 KiB/s rd, 1.9 MiB/s wr, 76 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:08.226+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100650 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:08.226+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:08.482+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4671: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.8 MiB/s rd, 3.1 MiB/s wr, 134 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:09.858+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100674 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:09.858+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:10.414+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:38:10.434+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:38:10.482+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4672: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.4 MiB/s wr, 95 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:10.770+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:11.450+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100698 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:11.450+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:12.486+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4673: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.5 MiB/s rd, 3.1 MiB/s wr, 118 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:13.082+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100722 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:13.082+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:13.582+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:38:13.590+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:38:14.498+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4674: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.0 MiB/s rd, 1.9 MiB/s wr, 80 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:14.758+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100746 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:14.758+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:15.774+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:16.486+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100770 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:16.486+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:16.498+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4675: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.0 MiB/s rd, 1.9 MiB/s wr, 81 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:18.198+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100794 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:18.198+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:18.498+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4676: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.6 MiB/s wr, 118 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:19.834+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100818 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:19.834+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:20.502+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4677: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 635 KiB/s rd, 1.3 MiB/s wr, 60 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:20.798+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:21.770+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100845 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:21.770+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:22.506+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4678: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.6 MiB/s rd, 2.4 MiB/s wr, 118 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:23.430+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100869 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:23.434+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:24.514+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4679: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.8 MiB/s wr, 95 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:25.074+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100896 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:25.074+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:25.798+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:26.518+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4680: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.8 MiB/s wr, 95 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:26.674+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100920 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:26.674+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:28.382+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100944 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:28.382+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:28.518+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4681: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.6 MiB/s rd, 2.0 MiB/s wr, 116 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:30.094+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100968 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:30.094+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:30.522+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4682: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 104 GiB used, 496 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.3 MiB/s wr, 79 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:30.798+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:31.726+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6100992 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:31.726+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:32.522+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4683: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.2 MiB/s rd, 2.1 MiB/s wr, 141 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:33.462+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101016 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:33.462+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:33.926+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:38:33.926+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:38:34.122+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:38:34.122+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:38:34.526+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4684: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.1 MiB/s wr, 83 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:35.058+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101040 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:35.058+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:35.390+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:38:35.390+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:38:35.802+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:36.526+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4685: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.1 MiB/s wr, 83 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:36.670+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101064 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:36.670+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:38.686+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101088 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:38.690+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:38.702+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4686: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 1.7 MiB/s wr, 127 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:40.314+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101112 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:40.326+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:40.702+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4687: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 1.5 MiB/s wr, 106 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:40.810+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:42.346+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101145 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:42.350+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:42.702+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4688: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.2 MiB/s rd, 2.7 MiB/s wr, 159 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:44.034+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101169 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:44.034+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:44.714+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4689: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.9 MiB/s wr, 97 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:45.678+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101193 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:45.678+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:45.810+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:46.718+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4690: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.9 MiB/s wr, 97 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:47.362+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101223 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:47.362+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:48.722+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4691: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.7 MiB/s wr, 147 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:49.254+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101244 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:49.258+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:50.734+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4692: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 2.1 MiB/s wr, 103 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:50.818+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:50.906+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101268 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:50.906+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:52.550+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101292 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:52.550+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:52.738+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4693: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 2.9 MiB/s wr, 138 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:54.142+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101313 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:54.142+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:54.746+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4694: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1004 KiB/s rd, 1.7 MiB/s wr, 85 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:55.822+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:38:55.958+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101343 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:55.958+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:56.746+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4695: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1004 KiB/s rd, 1.7 MiB/s wr, 85 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:57.646+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101367 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:57.646+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:38:58.766+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4696: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 2.1 MiB/s wr, 121 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:38:59.258+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101391 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:38:59.258+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:00.770+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4697: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 812 KiB/s rd, 1.3 MiB/s wr, 71 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:39:00.826+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:00.874+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101415 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:00.874+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:01.810+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:39:01
debug 2022-03-13T01:39:01.810+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:39:01.810+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:39:02.539+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101439 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:02.539+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:02.771+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4698: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.1 MiB/s wr, 116 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:39:03.855+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:39:03.859+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:39:03.859+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:39:03.931+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:39:03.931+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:39:04.191+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:39:04.191+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:39:04.195+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101463 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:04.195+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:04.771+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4699: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 938 KiB/s rd, 1.3 MiB/s wr, 81 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:39:05.415+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:39:05.415+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:39:05.827+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:05.871+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101487 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:05.871+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:06.775+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4700: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 939 KiB/s rd, 1.3 MiB/s wr, 81 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:39:07.555+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101511 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:07.555+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:08.779+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4701: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 2.2 MiB/s wr, 136 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:39:09.219+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101535 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:09.219+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:10.455+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:39:10.467+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:39:10.779+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4702: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.7 MiB/s wr, 100 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:39:10.811+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101559 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:10.811+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:10.835+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:12.783+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4703: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 2.0 MiB/s wr, 128 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:39:13.039+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101580 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:13.039+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:13.599+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:39:13.607+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:39:14.647+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101604 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:14.647+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:14.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4704: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.2 MiB/s wr, 82 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:39:15.839+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:16.331+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101628 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:16.331+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:16.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4705: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.2 MiB/s wr, 82 op/s; 13831/41493 objects degraded (33.333%)
debug 2022-03-13T01:39:18.087+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101652 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:18.087+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:18.799+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4706: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 2.2 MiB/s wr, 122 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:19.723+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101676 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:19.723+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:20.803+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4707: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 784 KiB/s rd, 1.3 MiB/s wr, 66 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:20.839+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:21.391+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101697 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:21.391+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:22.807+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4708: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.7 MiB/s wr, 97 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:23.347+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101727 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:23.407+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:24.811+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4709: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 920 KiB/s rd, 1.4 MiB/s wr, 70 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:25.127+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101751 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:25.127+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:25.843+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:26.763+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101775 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:26.763+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:26.811+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4710: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 921 KiB/s rd, 1.4 MiB/s wr, 70 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:28.515+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101799 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:28.515+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:28.815+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4711: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 2.3 MiB/s wr, 110 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:30.123+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101823 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:30.123+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:30.819+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4712: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 652 KiB/s rd, 1.3 MiB/s wr, 70 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:30.843+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:31.803+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101847 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:31.803+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:32.819+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4713: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.8 MiB/s wr, 137 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:33.459+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101877 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:33.459+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:33.931+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:39:33.931+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:39:34.191+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:39:34.191+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:39:34.823+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4714: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.4 MiB/s wr, 106 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:35.071+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101901 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:35.071+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:35.431+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:39:35.431+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:39:35.847+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:36.655+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101925 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:36.655+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:36.823+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4715: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 2.4 MiB/s wr, 106 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:38.327+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101949 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:38.327+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:38.823+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4716: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.7 MiB/s wr, 145 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:39.867+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101973 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:39.867+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:40.823+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4717: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.7 MiB/s wr, 105 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:40.851+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:41.507+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6101997 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:41.507+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:42.827+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4718: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.1 MiB/s wr, 140 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:43.091+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102033 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:43.091+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:44.695+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102057 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:44.695+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:44.827+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4719: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 693 KiB/s rd, 643 KiB/s wr, 73 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:45.843+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:46.299+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102081 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:46.299+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:46.831+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4720: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 692 KiB/s rd, 643 KiB/s wr, 73 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:47.991+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102105 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:47.991+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:48.855+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4721: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.3 MiB/s wr, 116 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:49.647+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102129 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:49.647+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:50.863+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:50.871+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4722: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 758 KiB/s rd, 1.0 MiB/s wr, 77 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:51.259+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102153 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:51.259+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:52.871+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4723: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 2.2 MiB/s wr, 129 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:53.027+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102171 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:53.027+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:54.667+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102192 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:54.667+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:54.879+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4724: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.9 MiB/s wr, 95 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:55.859+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:39:56.363+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102222 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:56.363+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:56.883+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4725: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.9 MiB/s wr, 94 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:58.063+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102246 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:58.063+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:39:58.883+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4726: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.2 MiB/s wr, 126 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:39:59.699+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102270 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:39:59.699+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:00.867+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:40:00.887+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4727: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.5 MiB/s wr, 84 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:01.307+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102294 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:01.307+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:01.843+0000 7fc22b07e700 0 [balancer INFO root] Optimize plan auto_2022-03-13_01:40:01
debug 2022-03-13T01:40:01.843+0000 7fc22b07e700 0 [balancer INFO root] Mode upmap, max misplaced 0.050000
debug 2022-03-13T01:40:01.843+0000 7fc22b07e700 0 [balancer INFO root] Some objects (0.333333) are degraded; try again later
debug 2022-03-13T01:40:02.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4728: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.9 MiB/s rd, 2.6 MiB/s wr, 144 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:02.939+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102318 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:02.939+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:03.871+0000 7fc227877700 0 [pg_autoscaler INFO root] _maybe_adjust
debug 2022-03-13T01:40:03.871+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:40:03.871+0000 7fc227877700 0 [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 644245094400
debug 2022-03-13T01:40:03.943+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:40:03.943+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:40:04.195+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:40:04.195+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:40:04.547+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102342 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:04.547+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:04.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4729: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1002 KiB/s rd, 1.4 MiB/s wr, 92 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:05.443+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:40:05.443+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:40:05.871+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:40:06.139+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102366 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:06.139+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:06.903+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4730: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1002 KiB/s rd, 1.4 MiB/s wr, 92 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:07.855+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102390 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:07.855+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:08.963+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4731: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 2.5 MiB/s wr, 156 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:09.535+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102414 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:09.535+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:10.483+0000 7fc21d5e3700 0 [rbd_support INFO root] MirrorSnapshotScheduleHandler: load_schedules
debug 2022-03-13T01:40:10.567+0000 7fc21d5e3700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:40:10.871+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:40:10.967+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4732: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.3 MiB/s wr, 124 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:11.567+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5864912 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:11.567+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:12.967+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4733: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 3.0 MiB/s wr, 157 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:13.135+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102462 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:13.135+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:13.619+0000 7fc21ad5e700 0 [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
debug 2022-03-13T01:40:13.627+0000 7fc21ad5e700 0 [rbd_support INFO root] load_schedules: replicapool, start_after=
debug 2022-03-13T01:40:14.795+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102486 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:14.795+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:14.983+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4734: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.9 MiB/s wr, 96 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:15.871+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:40:16.395+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102510 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:16.395+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:16.987+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4735: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 2.3 MiB/s wr, 115 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:18.191+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.5864942 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:18.191+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:18.991+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4736: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.8 MiB/s wr, 137 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:19.859+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102561 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:19.859+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:20.887+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:40:21.019+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4737: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 615 KiB/s rd, 1.7 MiB/s wr, 73 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:21.575+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102585 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:21.575+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:23.023+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4738: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 3.0 MiB/s wr, 144 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:23.243+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102615 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:23.243+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:24.955+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102642 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:24.967+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:25.023+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4739: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 2.3 MiB/s wr, 111 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:25.895+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:40:26.615+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102669 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:26.615+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:27.023+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4740: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.7 MiB/s wr, 128 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:28.375+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102696 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:28.375+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:29.035+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4741: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 2.3 MiB/s wr, 123 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:30.003+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102720 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:30.003+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:30.899+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:40:31.039+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4742: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 1.8 MiB/s wr, 101 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:31.667+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102744 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:31.667+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:33.035+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4743: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 2.8 MiB/s wr, 149 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:33.323+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102768 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:33.323+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:33.943+0000 7fc218559700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:40:33.943+0000 7fc218559700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:40:34.199+0000 7fc20eb46700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:40:34.199+0000 7fc20eb46700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:40:34.975+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102792 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:34.975+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:35.055+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4744: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 785 KiB/s rd, 1.4 MiB/s wr, 77 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:35.467+0000 7fc20bb40700 0 [volumes INFO mgr_util] scanning for idle connections..
debug 2022-03-13T01:40:35.467+0000 7fc20bb40700 0 [volumes INFO mgr_util] cleaning up connections: []
debug 2022-03-13T01:40:35.903+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:40:36.631+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102816 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:36.631+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:37.099+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4745: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 2.2 MiB/s wr, 115 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:38.311+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102840 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:38.311+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:39.115+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4746: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.8 MiB/s wr, 99 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:40.051+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102864 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:40.051+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
debug 2022-03-13T01:40:40.911+0000 7fc227076700 0 [progress INFO root] Processing OSDMap change 690..690
debug 2022-03-13T01:40:41.115+0000 7fc22c080700 0 log_channel(cluster) log [DBG] : pgmap v4747: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 980 KiB/s rd, 1.8 MiB/s wr, 85 op/s; 13832/41496 objects degraded (33.333%)
debug 2022-03-13T01:40:41.739+0000 7fc251840700 0 log_channel(audit) log [DBG] : from='client.6102888 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:40:41.739+0000 7fc251840700 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (33 PGs are or would become offline)
END logs from pod: rook-ceph-mgr-a-b557c664-92bpn mgr
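The mgr entries above repeat one pattern: every "osd ok-to-stop" probe for osd.0 is rejected with EBUSY because all 33 PGs are undersized/degraded and would go offline if that OSD stopped. A minimal sketch of running the same checks by hand, assuming the optional rook-ceph-tools toolbox deployment exists in this cluster (its name here is an assumption, not taken from these logs):

# hypothetical manual check from the Rook toolbox pod; deployment name is assumed
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd tree
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd ok-to-stop 0

The last command would be expected to keep answering "unsafe to stop osd(s) at this time" until the degraded PGs recover.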
BEGIN logs from pod: rook-ceph-mon-a-c4484c877-msrjn mon
audit 2022-03-13T01:03:24.551894+0000 mgr.a (mgr.5974105) 7889 : audit [DBG] from='client.6070410 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:24.986670+0000 mon.c (mon.2) 7499 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:24.986811+0000 mon.c (mon.2) 7500 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:25.008524+0000 mon.a (mon.0) 234470 : audit [DBG] from='client.? 10.233.92.240:0/408190576' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:25.365404+0000 mon.a (mon.0) 234471 : audit [DBG] from='client.? 10.233.92.240:0/154010604' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:25.744181+0000 mon.a (mon.0) 234472 : audit [DBG] from='client.? 10.233.92.240:0/419347599' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:26.571+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:26.571+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2589476042' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:26.971+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:26.971+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1408247259' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:27.131+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:03:27.135+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
cluster 2022-03-13T01:03:25.233279+0000 mgr.a (mgr.5974105) 7890 : cluster [DBG] pgmap v3633: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.9 MiB/s rd, 2.0 MiB/s wr, 103 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:26.110632+0000 mgr.a (mgr.5974105) 7891 : audit [DBG] from='client.6070434 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:26.575845+0000 mon.a (mon.0) 234473 : audit [DBG] from='client.? 10.233.92.240:0/2589476042' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:26.974141+0000 mon.a (mon.0) 234474 : audit [DBG] from='client.? 10.233.92.240:0/1408247259' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:27.137281+0000 mon.a (mon.0) 234475 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:27.137467+0000 mon.a (mon.0) 234476 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:03:27.387+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:27.387+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2274386870' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:28.075+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:03:28.379+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:28.379+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/676717738' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:27.236331+0000 mgr.a (mgr.5974105) 7892 : cluster [DBG] pgmap v3634: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.1 MiB/s rd, 2.4 MiB/s wr, 132 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:27.392350+0000 mon.a (mon.0) 234477 : audit [DBG] from='client.? 10.233.92.240:0/2274386870' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:27.890999+0000 mgr.a (mgr.5974105) 7893 : audit [DBG] from='client.6070458 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:03:28.811+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:28.811+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3501527121' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:29.219+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:29.219+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2932860092' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:28.381507+0000 mon.a (mon.0) 234478 : audit [DBG] from='client.? 10.233.92.240:0/676717738' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:28.815277+0000 mon.a (mon.0) 234479 : audit [DBG] from='client.? 10.233.92.240:0/3501527121' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:29.224925+0000 mon.a (mon.0) 234480 : audit [DBG] from='client.? 10.233.92.240:0/2932860092' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:30.095+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:30.095+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2587072466' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:30.467+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:30.467+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4102200499' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:30.835+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:30.835+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1823564864' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:29.240788+0000 mgr.a (mgr.5974105) 7894 : cluster [DBG] pgmap v3635: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 710 KiB/s rd, 1.1 MiB/s wr, 68 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:29.651315+0000 mgr.a (mgr.5974105) 7895 : audit [DBG] from='client.6070482 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:30.099806+0000 mon.a (mon.0) 234481 : audit [DBG] from='client.? 10.233.92.240:0/2587072466' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:30.470478+0000 mon.a (mon.0) 234482 : audit [DBG] from='client.? 10.233.92.240:0/4102200499' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:30.839736+0000 mon.a (mon.0) 234483 : audit [DBG] from='client.? 10.233.92.240:0/1823564864' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:32.095+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:32.095+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/574920663' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:31.686425+0000 mon.c (mon.2) 7501 : audit [DBG] from='client.? 10.233.92.240:0/1482276955' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:32.459+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:32.459+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3826458229' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:33.095+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
audit 2022-03-13T01:03:31.214903+0000 mgr.a (mgr.5974105) 7896 : audit [DBG] from='client.6070506 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:03:31.241385+0000 mgr.a (mgr.5974105) 7897 : cluster [DBG] pgmap v3636: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 710 KiB/s rd, 1.1 MiB/s wr, 68 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:32.097754+0000 mon.a (mon.0) 234484 : audit [DBG] from='client.? 10.233.92.240:0/574920663' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:32.462347+0000 mon.a (mon.0) 234485 : audit [DBG] from='client.? 10.233.92.240:0/3826458229' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:33.323+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:33.327+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/255337466' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:33.679+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:33.679+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/766180235' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:34.047+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:34.047+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1460562164' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:32.831410+0000 mgr.a (mgr.5974105) 7898 : audit [DBG] from='client.6070527 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:03:33.245221+0000 mgr.a (mgr.5974105) 7899 : cluster [DBG] pgmap v3637: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.5 MiB/s rd, 2.6 MiB/s wr, 139 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:33.329501+0000 mon.a (mon.0) 234486 : audit [DBG] from='client.? 10.233.92.240:0/255337466' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:33.684162+0000 mon.a (mon.0) 234487 : audit [DBG] from='client.? 10.233.92.240:0/766180235' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:34.051015+0000 mon.a (mon.0) 234488 : audit [DBG] from='client.? 10.233.92.240:0/1460562164' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:34.875+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:34.875+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1059723361' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:35.283+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:35.283+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2961084695' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:34.258889+0000 mon.b (mon.1) 1146 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:34.259118+0000 mon.b (mon.1) 1147 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:34.427901+0000 mgr.a (mgr.5974105) 7900 : audit [DBG] from='client.6070551 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:34.880535+0000 mon.a (mon.0) 234489 : audit [DBG] from='client.? 10.233.92.240:0/1059723361' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:34.988233+0000 mon.c (mon.2) 7502 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:34.988415+0000 mon.c (mon.2) 7503 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:35.285950+0000 mon.a (mon.0) 234490 : audit [DBG] from='client.? 10.233.92.240:0/2961084695' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:35.659+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:35.659+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2260214550' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:36.499+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:36.499+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3896005476' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:35.256289+0000 mgr.a (mgr.5974105) 7901 : cluster [DBG] pgmap v3638: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.0 MiB/s rd, 1.8 MiB/s wr, 100 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:35.665291+0000 mon.a (mon.0) 234491 : audit [DBG] from='client.? 10.233.92.240:0/2260214550' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:36.027895+0000 mgr.a (mgr.5974105) 7902 : audit [DBG] from='client.6070575 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:36.504946+0000 mon.a (mon.0) 234492 : audit [DBG] from='client.? 10.233.92.240:0/3896005476' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:36.883+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:36.883+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2993526403' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:37.115+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:03:37.115+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:03:38.115+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:03:38.231+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:38.231+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2150845709' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:36.889092+0000 mon.a (mon.0) 234493 : audit [DBG] from='client.? 10.233.92.240:0/2993526403' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:37.118424+0000 mon.a (mon.0) 234494 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:37.118723+0000 mon.a (mon.0) 234495 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:37.288226+0000 mon.c (mon.2) 7504 : audit [DBG] from='client.? 10.233.92.240:0/4046782646' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:38.631+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:38.631+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3043458364' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:38.995+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:38.995+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3630252746' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:37.258208+0000 mgr.a (mgr.5974105) 7903 : cluster [DBG] pgmap v3639: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.5 MiB/s rd, 2.5 MiB/s wr, 135 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:37.770764+0000 mgr.a (mgr.5974105) 7904 : audit [DBG] from='client.5852561 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:38.233867+0000 mon.a (mon.0) 234496 : audit [DBG] from='client.? 10.233.92.240:0/2150845709' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:38.637190+0000 mon.a (mon.0) 234497 : audit [DBG] from='client.? 10.233.92.240:0/3043458364' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:39.000595+0000 mon.a (mon.0) 234498 : audit [DBG] from='client.? 10.233.92.240:0/3630252746' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:39.863+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:39.863+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1295764320' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:40.227+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:40.227+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1543114160' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:40.595+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:40.595+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1556631159' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:39.258896+0000 mgr.a (mgr.5974105) 7905 : cluster [DBG] pgmap v3640: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.2 MiB/s rd, 2.2 MiB/s wr, 106 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:39.394444+0000 mgr.a (mgr.5974105) 7906 : audit [DBG] from='client.6070623 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:39.869428+0000 mon.a (mon.0) 234499 : audit [DBG] from='client.? 10.233.92.240:0/1295764320' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:40.232025+0000 mon.a (mon.0) 234500 : audit [DBG] from='client.? 10.233.92.240:0/1543114160' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:40.600481+0000 mon.a (mon.0) 234501 : audit [DBG] from='client.? 10.233.92.240:0/1556631159' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:41.463+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:41.463+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3169047306' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:41.855+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:41.855+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2853702486' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:40.961943+0000 mgr.a (mgr.5974105) 7907 : audit [DBG] from='client.6070647 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:41.466180+0000 mon.a (mon.0) 234502 : audit [DBG] from='client.? 10.233.92.240:0/3169047306' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:42.211+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:42.211+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1986501864' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:41.301914+0000 mgr.a (mgr.5974105) 7908 : cluster [DBG] pgmap v3641: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.2 MiB/s rd, 2.2 MiB/s wr, 106 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:41.857963+0000 mon.a (mon.0) 234503 : audit [DBG] from='client.? 10.233.92.240:0/2853702486' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:42.216017+0000 mon.a (mon.0) 234504 : audit [DBG] from='client.? 10.233.92.240:0/1986501864' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:43.075+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:43.075+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1776862721' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:43.115+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:03:43.451+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:43.451+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3843094123' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:43.855+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:43.855+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2413612736' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:42.621262+0000 mgr.a (mgr.5974105) 7909 : audit [DBG] from='client.6070668 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:43.079638+0000 mon.a (mon.0) 234505 : audit [DBG] from='client.? 10.233.92.240:0/1776862721' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:43.454757+0000 mon.a (mon.0) 234506 : audit [DBG] from='client.? 10.233.92.240:0/3843094123' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:43.860694+0000 mon.a (mon.0) 234507 : audit [DBG] from='client.? 10.233.92.240:0/2413612736' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:44.703+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:44.703+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3670668214' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:44.799+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
debug 2022-03-13T01:03:44.799+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/653684328' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
debug 2022-03-13T01:03:45.091+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:45.091+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/661305268' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:43.303337+0000 mgr.a (mgr.5974105) 7910 : cluster [DBG] pgmap v3642: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.5 MiB/s rd, 3.0 MiB/s wr, 138 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:44.241710+0000 mon.b (mon.1) 1148 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:44.242037+0000 mon.b (mon.1) 1149 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:44.705736+0000 mon.a (mon.0) 234508 : audit [DBG] from='client.? 10.233.92.240:0/3670668214' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:44.803377+0000 mon.a (mon.0) 234509 : audit [DBG] from='client.? 10.233.92.240:0/653684328' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
audit 2022-03-13T01:03:44.980696+0000 mon.c (mon.2) 7505 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:44.980850+0000 mon.c (mon.2) 7506 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:45.095749+0000 mon.a (mon.0) 234510 : audit [DBG] from='client.? 10.233.92.240:0/661305268' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:45.471+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:45.471+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3476807851' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:46.375+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:46.375+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/46837099' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:44.223936+0000 mgr.a (mgr.5974105) 7911 : audit [DBG] from='client.6070692 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:03:45.303926+0000 mgr.a (mgr.5974105) 7912 : cluster [DBG] pgmap v3643: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 751 KiB/s rd, 1.5 MiB/s wr, 67 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:45.475073+0000 mon.a (mon.0) 234511 : audit [DBG] from='client.? 10.233.92.240:0/3476807851' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:45.855133+0000 mgr.a (mgr.5974105) 7913 : audit [DBG] from='client.6070722 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:03:46.847+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:46.851+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/324802672' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:46.995+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd dump", "format": "json"} v 0) v1
debug 2022-03-13T01:03:46.995+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1807424484' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
debug 2022-03-13T01:03:47.119+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:03:47.119+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:03:48.135+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
audit 2022-03-13T01:03:46.378222+0000 mon.a (mon.0) 234512 : audit [DBG] from='client.? 10.233.92.240:0/46837099' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:46.853353+0000 mon.a (mon.0) 234513 : audit [DBG] from='client.? 10.233.92.240:0/324802672' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:46.998525+0000 mon.a (mon.0) 234514 : audit [DBG] from='client.? 10.233.92.240:0/1807424484' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
audit 2022-03-13T01:03:47.121470+0000 mon.a (mon.0) 234515 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:47.121616+0000 mon.a (mon.0) 234516 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:47.254358+0000 mon.c (mon.2) 7507 : audit [DBG] from='client.? 10.233.92.240:0/667526281' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:47.465216+0000 mon.b (mon.1) 1150 : audit [DBG] from='client.? 10.233.92.240:0/1292331005' entity='client.admin' cmd=[{"prefix": "osd crush class ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:48.259+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:48.259+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2321223013' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:48.643+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:48.643+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1819112118' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:49.059+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:49.059+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/518770653' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:47.325715+0000 mgr.a (mgr.5974105) 7914 : cluster [DBG] pgmap v3644: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.2 MiB/s wr, 113 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:47.713554+0000 mgr.a (mgr.5974105) 7915 : audit [DBG] from='client.6070755 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:48.264951+0000 mon.a (mon.0) 234517 : audit [DBG] from='client.? 10.233.92.240:0/2321223013' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:48.649054+0000 mon.a (mon.0) 234518 : audit [DBG] from='client.? 10.233.92.240:0/1819112118' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:49.064660+0000 mon.a (mon.0) 234519 : audit [DBG] from='client.? 10.233.92.240:0/518770653' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:49.875+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "quorum_status", "format": "json"} v 0) v1
debug 2022-03-13T01:03:49.875+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2334005988' entity='client.admin' cmd=[{"prefix": "quorum_status", "format": "json"}]: dispatch
debug 2022-03-13T01:03:49.923+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:49.923+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1712440470' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:50.299+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:50.299+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1734437157' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:49.879243+0000 mon.a (mon.0) 234520 : audit [DBG] from='client.? 10.233.92.240:0/2334005988' entity='client.admin' cmd=[{"prefix": "quorum_status", "format": "json"}]: dispatch
audit 2022-03-13T01:03:49.925993+0000 mon.a (mon.0) 234521 : audit [DBG] from='client.? 10.233.92.240:0/1712440470' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:50.687+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:50.687+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2272898939' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:49.326364+0000 mgr.a (mgr.5974105) 7916 : cluster [DBG] pgmap v3645: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.5 MiB/s wr, 77 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:49.444223+0000 mgr.a (mgr.5974105) 7917 : audit [DBG] from='client.6070779 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:50.305250+0000 mon.a (mon.0) 234522 : audit [DBG] from='client.? 10.233.92.240:0/1734437157' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:50.692269+0000 mon.a (mon.0) 234523 : audit [DBG] from='client.? 10.233.92.240:0/2272898939' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:51.110809+0000 mgr.a (mgr.5974105) 7918 : audit [DBG] from='client.6070809 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:03:51.575+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:51.575+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/81840525' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:51.995+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:51.995+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/497216414' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:52.375+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:52.375+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/276520526' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:51.330282+0000 mgr.a (mgr.5974105) 7919 : cluster [DBG] pgmap v3646: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.1 MiB/s rd, 1.5 MiB/s wr, 77 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:51.579804+0000 mon.a (mon.0) 234524 : audit [DBG] from='client.? 10.233.92.240:0/81840525' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:51.998371+0000 mon.a (mon.0) 234525 : audit [DBG] from='client.? 10.233.92.240:0/497216414' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:52.380251+0000 mon.a (mon.0) 234526 : audit [DBG] from='client.? 10.233.92.240:0/276520526' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:53.139+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:03:53.259+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:53.259+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1454218619' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:53.647+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:53.647+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2244772778' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:54.039+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:54.039+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2750753497' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:52.774814+0000 mgr.a (mgr.5974105) 7920 : audit [DBG] from='client.6070833 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:53.265401+0000 mon.a (mon.0) 234527 : audit [DBG] from='client.? 10.233.92.240:0/1454218619' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:53.652281+0000 mon.a (mon.0) 234528 : audit [DBG] from='client.? 10.233.92.240:0/2244772778' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:54.042772+0000 mon.a (mon.0) 234529 : audit [DBG] from='client.? 10.233.92.240:0/2750753497' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:54.947+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:54.947+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1484872051' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:55.299+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:55.299+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3485503967' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:55.675+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:55.675+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/212714418' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:53.334315+0000 mgr.a (mgr.5974105) 7921 : cluster [DBG] pgmap v3647: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.4 MiB/s wr, 122 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:54.454882+0000 mgr.a (mgr.5974105) 7922 : audit [DBG] from='client.6070857 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:54.727200+0000 mon.b (mon.1) 1151 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:54.727451+0000 mon.b (mon.1) 1152 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:54.949633+0000 mon.a (mon.0) 234530 : audit [DBG] from='client.? 10.233.92.240:0/1484872051' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:54.980202+0000 mon.c (mon.2) 7508 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:54.980363+0000 mon.c (mon.2) 7509 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:55.302680+0000 mon.a (mon.0) 234531 : audit [DBG] from='client.? 10.233.92.240:0/3485503967' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:55.678644+0000 mon.a (mon.0) 234532 : audit [DBG] from='client.? 10.233.92.240:0/212714418' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:56.515+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:56.515+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1072971169' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:56.899+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:56.899+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1890817849' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:55.334910+0000 mgr.a (mgr.5974105) 7923 : cluster [DBG] pgmap v3648: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.6 MiB/s wr, 90 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:56.067899+0000 mgr.a (mgr.5974105) 7924 : audit [DBG] from='client.6070881 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:56.521279+0000 mon.a (mon.0) 234533 : audit [DBG] from='client.? 10.233.92.240:0/1072971169' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:56.903114+0000 mon.a (mon.0) 234534 : audit [DBG] from='client.? 10.233.92.240:0/1890817849' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:57.115+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:03:57.115+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:03:57.315+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:57.315+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/809113834' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:57.987+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
debug 2022-03-13T01:03:57.987+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/854785479' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
debug 2022-03-13T01:03:58.159+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:03:58.243+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:58.243+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3268534079' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:57.120141+0000 mon.a (mon.0) 234535 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:03:57.120366+0000 mon.a (mon.0) 234536 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:03:57.320068+0000 mon.a (mon.0) 234537 : audit [DBG] from='client.? 10.233.92.240:0/809113834' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:57.992031+0000 mon.a (mon.0) 234538 : audit [DBG] from='client.? 10.233.92.240:0/854785479' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
debug 2022-03-13T01:03:58.647+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:03:58.647+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2428716037' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:03:58.739+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "versions", "format": "json"} v 0) v1
debug 2022-03-13T01:03:58.739+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3905295498' entity='client.admin' cmd=[{"prefix": "versions", "format": "json"}]: dispatch
debug 2022-03-13T01:03:59.007+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:59.007+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/594192207' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:57.351174+0000 mgr.a (mgr.5974105) 7925 : cluster [DBG] pgmap v3649: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.3 MiB/s rd, 2.9 MiB/s wr, 148 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:57.758569+0000 mgr.a (mgr.5974105) 7926 : audit [DBG] from='client.6070905 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:58.249251+0000 mon.a (mon.0) 234539 : audit [DBG] from='client.? 10.233.92.240:0/3268534079' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:03:58.649563+0000 mon.a (mon.0) 234540 : audit [DBG] from='client.? 10.233.92.240:0/2428716037' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:03:58.743142+0000 mon.a (mon.0) 234541 : audit [DBG] from='client.? 10.233.92.240:0/3905295498' entity='client.admin' cmd=[{"prefix": "versions", "format": "json"}]: dispatch
audit 2022-03-13T01:03:59.012285+0000 mon.a (mon.0) 234542 : audit [DBG] from='client.? 10.233.92.240:0/594192207' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:03:59.903+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:03:59.903+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/853925897' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:00.287+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:00.287+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/8636247' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:00.651+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:00.651+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1587847269' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:03:59.364421+0000 mgr.a (mgr.5974105) 7927 : cluster [DBG] pgmap v3650: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.2 MiB/s wr, 102 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:03:59.438406+0000 mgr.a (mgr.5974105) 7928 : audit [DBG] from='client.6070938 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:03:59.907358+0000 mon.a (mon.0) 234543 : audit [DBG] from='client.? 10.233.92.240:0/853925897' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:00.291925+0000 mon.a (mon.0) 234544 : audit [DBG] from='client.? 10.233.92.240:0/8636247' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:00.656228+0000 mon.a (mon.0) 234545 : audit [DBG] from='client.? 10.233.92.240:0/1587847269' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:01.523+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:01.523+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3735226242' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:01.943+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:01.943+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3026135544' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:02.315+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:02.315+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2735035409' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:01.039901+0000 mgr.a (mgr.5974105) 7929 : audit [DBG] from='client.6070962 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:01.529088+0000 mon.a (mon.0) 234546 : audit [DBG] from='client.? 10.233.92.240:0/3735226242' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:03.159+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:03.203+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:03.203+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3939759328' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:03.591+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:03.591+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3623352315' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:01.365907+0000 mgr.a (mgr.5974105) 7930 : cluster [DBG] pgmap v3651: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.2 MiB/s wr, 102 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:01.948574+0000 mon.a (mon.0) 234547 : audit [DBG] from='client.? 10.233.92.240:0/3026135544' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:02.321248+0000 mon.a (mon.0) 234548 : audit [DBG] from='client.? 10.233.92.240:0/2735035409' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:02.705308+0000 mgr.a (mgr.5974105) 7931 : audit [DBG] from='client.6070986 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:03.207863+0000 mon.a (mon.0) 234549 : audit [DBG] from='client.? 10.233.92.240:0/3939759328' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:03.959+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:03.959+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3940075987' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:04.771+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:04.771+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1871826656' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:05.159+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:05.159+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1078281575' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:03.386971+0000 mgr.a (mgr.5974105) 7932 : cluster [DBG] pgmap v3652: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.1 MiB/s rd, 2.6 MiB/s wr, 138 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:03.595148+0000 mon.a (mon.0) 234550 : audit [DBG] from='client.? 10.233.92.240:0/3623352315' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:03.963436+0000 mon.a (mon.0) 234551 : audit [DBG] from='client.? 10.233.92.240:0/3940075987' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:04.284460+0000 mon.b (mon.1) 1153 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:04.285133+0000 mon.b (mon.1) 1154 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:04.776430+0000 mon.a (mon.0) 234552 : audit [DBG] from='client.? 10.233.92.240:0/1871826656' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:05.523+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:05.523+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4067681156' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:04.324287+0000 mgr.a (mgr.5974105) 7933 : audit [DBG] from='client.6071010 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:05.035669+0000 mon.c (mon.2) 7510 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:05.035825+0000 mon.c (mon.2) 7511 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:05.161849+0000 mon.a (mon.0) 234553 : audit [DBG] from='client.? 10.233.92.240:0/1078281575' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:05.527928+0000 mon.a (mon.0) 234554 : audit [DBG] from='client.? 10.233.92.240:0/4067681156' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:06.411+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:06.415+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/558481922' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:06.791+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:06.791+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2317742334' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:07.115+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:04:07.115+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:07.207+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:07.207+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2727486232' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:05.392646+0000 mgr.a (mgr.5974105) 7934 : cluster [DBG] pgmap v3653: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 1.6 MiB/s wr, 93 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:05.923375+0000 mgr.a (mgr.5974105) 7935 : audit [DBG] from='client.6071034 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:06.417697+0000 mon.a (mon.0) 234555 : audit [DBG] from='client.? 10.233.92.240:0/558481922' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:06.795872+0000 mon.a (mon.0) 234556 : audit [DBG] from='client.? 10.233.92.240:0/2317742334' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:07.121005+0000 mon.a (mon.0) 234557 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:07.121160+0000 mon.a (mon.0) 234558 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:07.211229+0000 mon.a (mon.0) 234559 : audit [DBG] from='client.? 10.233.92.240:0/2727486232' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:08.123+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:08.123+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2789055221' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:08.179+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:08.503+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:08.503+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2156115788' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:07.394538+0000 mgr.a (mgr.5974105) 7936 : cluster [DBG] pgmap v3654: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.5 MiB/s rd, 2.9 MiB/s wr, 146 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:07.629726+0000 mgr.a (mgr.5974105) 7937 : audit [DBG] from='client.6071058 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:08.128208+0000 mon.a (mon.0) 234560 : audit [DBG] from='client.? 10.233.92.240:0/2789055221' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:08.879+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"} v 0) v1
debug 2022-03-13T01:04:08.879+0000 7fb3a9c76700 0 log_channel(audit) log [INF] : from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
debug 2022-03-13T01:04:08.895+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:08.895+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3274821164' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:08.507407+0000 mon.a (mon.0) 234561 : audit [DBG] from='client.? 10.233.92.240:0/2156115788' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:08.881568+0000 mon.a (mon.0) 234562 : audit [INF] from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
audit 2022-03-13T01:04:08.900399+0000 mon.a (mon.0) 234563 : audit [DBG] from='client.? 10.233.92.240:0/3274821164' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:09.289113+0000 mgr.a (mgr.5974105) 7938 : audit [DBG] from='client.6071082 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:04:09.807+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:09.807+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2014472322' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:10.179+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:10.179+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3221825945' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:10.551+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:10.551+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/919064481' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:09.397467+0000 mgr.a (mgr.5974105) 7939 : cluster [DBG] pgmap v3655: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.6 MiB/s wr, 88 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:09.811427+0000 mon.a (mon.0) 234564 : audit [DBG] from='client.? 10.233.92.240:0/2014472322' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:10.182401+0000 mon.a (mon.0) 234565 : audit [DBG] from='client.? 10.233.92.240:0/3221825945' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:10.555672+0000 mon.a (mon.0) 234566 : audit [DBG] from='client.? 10.233.92.240:0/919064481' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:11.979+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:11.979+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2195030395' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:12.119+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"} v 0) v1
debug 2022-03-13T01:04:12.119+0000 7fb3a9c76700 0 log_channel(audit) log [INF] : from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
debug 2022-03-13T01:04:12.375+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:12.375+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/505215619' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:10.928039+0000 mgr.a (mgr.5974105) 7940 : audit [DBG] from='client.6071106 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:11.439522+0000 mgr.a (mgr.5974105) 7941 : cluster [DBG] pgmap v3656: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.6 MiB/s wr, 88 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:11.582421+0000 mon.c (mon.2) 7512 : audit [DBG] from='client.? 10.233.92.240:0/2575495480' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:11.985193+0000 mon.a (mon.0) 234567 : audit [DBG] from='client.? 10.233.92.240:0/2195030395' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:13.215+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:13.359+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:13.359+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1005993025' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:13.731+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:13.731+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2119220292' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:12.123751+0000 mon.a (mon.0) 234568 : audit [INF] from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
audit 2022-03-13T01:04:12.379056+0000 mon.a (mon.0) 234569 : audit [DBG] from='client.? 10.233.92.240:0/505215619' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:13.362596+0000 mon.a (mon.0) 234570 : audit [DBG] from='client.? 10.233.92.240:0/1005993025' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:14.103+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:14.103+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2332883505' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:15.023+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:15.023+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/149834596' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:12.846066+0000 mgr.a (mgr.5974105) 7942 : audit [DBG] from='client.6071130 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:13.448007+0000 mgr.a (mgr.5974105) 7943 : cluster [DBG] pgmap v3657: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.2 MiB/s rd, 2.3 MiB/s wr, 130 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:13.734074+0000 mon.a (mon.0) 234571 : audit [DBG] from='client.? 10.233.92.240:0/2119220292' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:14.107897+0000 mon.a (mon.0) 234572 : audit [DBG] from='client.? 10.233.92.240:0/2332883505' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:14.829857+0000 mon.b (mon.1) 1155 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:14.830102+0000 mon.b (mon.1) 1156 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:15.295+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
debug 2022-03-13T01:04:15.295+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/631649415' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
debug 2022-03-13T01:04:15.399+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:15.399+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3587815082' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:15.775+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:15.775+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4003573135' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:14.549475+0000 mgr.a (mgr.5974105) 7944 : audit [DBG] from='client.6071154 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:15.013591+0000 mon.c (mon.2) 7513 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:15.013839+0000 mon.c (mon.2) 7514 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:15.026173+0000 mon.a (mon.0) 234573 : audit [DBG] from='client.? 10.233.92.240:0/149834596' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:15.300206+0000 mon.a (mon.0) 234574 : audit [DBG] from='client.? 10.233.92.240:0/631649415' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
audit 2022-03-13T01:04:15.402104+0000 mon.a (mon.0) 234575 : audit [DBG] from='client.? 10.233.92.240:0/3587815082' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:15.453674+0000 mgr.a (mgr.5974105) 7945 : cluster [DBG] pgmap v3658: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 2.0 MiB/s wr, 95 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:15.779738+0000 mon.a (mon.0) 234576 : audit [DBG] from='client.? 10.233.92.240:0/4003573135' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:16.647+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:16.647+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3399164259' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:17.023+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:17.023+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2099618504' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:17.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:04:17.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:17.423+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:17.423+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/263605177' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:16.159068+0000 mgr.a (mgr.5974105) 7946 : audit [DBG] from='client.6071184 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:16.652934+0000 mon.a (mon.0) 234577 : audit [DBG] from='client.? 10.233.92.240:0/3399164259' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:17.027759+0000 mon.a (mon.0) 234578 : audit [DBG] from='client.? 10.233.92.240:0/2099618504' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:17.101344+0000 mon.a (mon.0) 234579 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:17.101641+0000 mon.a (mon.0) 234580 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:18.227+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:18.435+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:18.435+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/401118230' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:17.427849+0000 mon.a (mon.0) 234581 : audit [DBG] from='client.? 10.233.92.240:0/263605177' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:17.454420+0000 mgr.a (mgr.5974105) 7947 : cluster [DBG] pgmap v3659: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.4 MiB/s rd, 2.9 MiB/s wr, 137 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:18.438845+0000 mon.a (mon.0) 234582 : audit [DBG] from='client.? 10.233.92.240:0/401118230' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:18.831+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:18.831+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2310528944' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:19.207+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:19.207+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2952203844' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:20.115+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:20.115+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/357836279' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:17.879041+0000 mgr.a (mgr.5974105) 7948 : audit [DBG] from='client.5852795 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:18.833698+0000 mon.a (mon.0) 234583 : audit [DBG] from='client.? 10.233.92.240:0/2310528944' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:19.209803+0000 mon.a (mon.0) 234584 : audit [DBG] from='client.? 10.233.92.240:0/2952203844' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:19.462251+0000 mgr.a (mgr.5974105) 7949 : cluster [DBG] pgmap v3660: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 1.6 MiB/s wr, 84 op/s; 13794/41382 objects degraded (33.333%)
debug 2022-03-13T01:04:20.539+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:20.539+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2730117700' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:20.915+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:20.919+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1323783304' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:19.604545+0000 mgr.a (mgr.5974105) 7950 : audit [DBG] from='client.6071229 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:20.121097+0000 mon.a (mon.0) 234585 : audit [DBG] from='client.? 10.233.92.240:0/357836279' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:20.541818+0000 mon.a (mon.0) 234586 : audit [DBG] from='client.? 10.233.92.240:0/2730117700' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:20.921633+0000 mon.a (mon.0) 234587 : audit [DBG] from='client.? 10.233.92.240:0/1323783304' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:21.911+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:21.911+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1238931040' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:22.275+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:22.275+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2691373056' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:21.329149+0000 mgr.a (mgr.5974105) 7951 : audit [DBG] from='client.6071253 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:21.469625+0000 mgr.a (mgr.5974105) 7952 : cluster [DBG] pgmap v3661: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 1.6 MiB/s wr, 84 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:21.913579+0000 mon.a (mon.0) 234588 : audit [DBG] from='client.? 10.233.92.240:0/1238931040' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:22.281041+0000 mon.a (mon.0) 234589 : audit [DBG] from='client.? 10.233.92.240:0/2691373056' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:22.703+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:22.703+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2576985823' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:23.435+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:23.551+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_write.cc:1668] [default] New memtable created with log file: #16935. Immutable memtables: 0.
debug 2022-03-13T01:04:23.551+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:23.555638) [db_impl/db_impl_compaction_flush.cc:2198] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
debug 2022-03-13T01:04:23.551+0000 7fb3af481700 4 rocksdb: [flush_job.cc:321] [default] [JOB 2403] Flushing memtable with next log file: 16935
debug 2022-03-13T01:04:23.551+0000 7fb3af481700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133463555679, "job": 2403, "event": "flush_started", "num_memtables": 1, "num_entries": 971, "num_deletes": 252, "total_data_size": 3252092, "memory_usage": 3269944, "flush_reason": "Manual Compaction"}
debug 2022-03-13T01:04:23.551+0000 7fb3af481700 4 rocksdb: [flush_job.cc:350] [default] [JOB 2403] Level-0 flush table #16936: started
debug 2022-03-13T01:04:23.587+0000 7fb3af481700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133463591537, "cf_name": "default", "job": 2403, "event": "table_file_creation", "file_number": 16936, "file_size": 3042603, "table_properties": {"data_size": 3035507, "index_size": 5028, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 0, "index_value_is_delta_encoded": 0, "filter_size": 1221, "raw_key_size": 9939, "raw_average_key_size": 21, "raw_value_size": 3025530, "raw_average_value_size": 6577, "num_data_blocks": 148, "num_entries": 460, "num_deletions": 252, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1647133402, "oldest_key_time": 1647133402, "file_creation_time": 1647133463}}
debug 2022-03-13T01:04:23.587+0000 7fb3af481700 4 rocksdb: [flush_job.cc:401] [default] [JOB 2403] Level-0 flush table #16936: 3042603 bytes OK
debug 2022-03-13T01:04:23.599+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:23.599+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1517896161' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:23.631+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:23.612082) [memtable_list.cc:447] [default] Level-0 commit table #16936 started
debug 2022-03-13T01:04:23.631+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:23.634695) [memtable_list.cc:503] [default] Level-0 commit table #16936: memtable #1 done
debug 2022-03-13T01:04:23.631+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:23.634722) EVENT_LOG_v1 {"time_micros": 1647133463634714, "job": 2403, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
debug 2022-03-13T01:04:23.631+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:23.634849) [db_impl/db_impl_compaction_flush.cc:205] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
debug 2022-03-13T01:04:23.631+0000 7fb3af481700 4 rocksdb: [db_impl/db_impl_files.cc:353] [JOB 2403] Try to delete WAL files size 3247311, prev total WAL file size 3247636, number of live WAL files 2.
debug 2022-03-13T01:04:23.631+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:23.631+0000 7fb3afc82700 4 rocksdb: (Original Log Time 2022/03/13-01:04:23.636290) [db_impl/db_impl_compaction_flush.cc:2516] [default] Manual compaction from level-0 to level-6 from 'mgrstat .. 'mgrstat; will stop at (end)
debug 2022-03-13T01:04:23.631+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1676] [default] [JOB 2404] Compacting 1@0 + 1@6 files to L6, score -1.00
debug 2022-03-13T01:04:23.631+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1680] [default] Compaction start summary: Base version 2403 Base level 0, inputs: [16936(2971KB)], [16934(56MB)]
debug 2022-03-13T01:04:23.631+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133463636313, "job": 2404, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [16936], "files_L6": [16934], "score": -1, "input_data_size": 62279747}
debug 2022-03-13T01:04:23.967+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:23.967+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1415030358' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:24.159+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1349] [default] [JOB 2404] Generated table #16937: 4247 keys, 61473589 bytes
debug 2022-03-13T01:04:24.163+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133464165302, "cf_name": "default", "job": 2404, "event": "table_file_creation", "file_number": 16937, "file_size": 61473589, "table_properties": {"data_size": 61368303, "index_size": 93749, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 0, "index_value_is_delta_encoded": 0, "filter_size": 10693, "raw_key_size": 86236, "raw_average_key_size": 20, "raw_value_size": 61242147, "raw_average_value_size": 14420, "num_data_blocks": 2755, "num_entries": 4247, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1646580773, "oldest_key_time": 0, "file_creation_time": 1647133463}}
debug 2022-03-13T01:04:24.175+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1415] [default] [JOB 2404] Compacted 1@0 + 1@6 files to L6 => 61473589 bytes
debug 2022-03-13T01:04:24.211+0000 7fb3afc82700 4 rocksdb: (Original Log Time 2022/03/13-01:04:24.215997) [compaction/compaction_job.cc:760] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 117.7 rd, 116.2 wr, level 6, files in(1, 1) out(1) MB in(2.9, 56.5) out(58.6), read-write-amplify(40.7) write-amplify(20.2) OK, records in: 4740, records dropped: 493 output_compression: NoCompression
debug 2022-03-13T01:04:24.211+0000 7fb3afc82700 4 rocksdb: (Original Log Time 2022/03/13-01:04:24.216021) EVENT_LOG_v1 {"time_micros": 1647133464216012, "job": 2404, "event": "compaction_finished", "compaction_time_micros": 529113, "compaction_time_cpu_micros": 90168, "output_level": 6, "num_output_files": 1, "total_output_size": 61473589, "num_input_records": 4740, "num_output_records": 4247, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
debug 2022-03-13T01:04:24.211+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133464216778, "job": 2404, "event": "table_file_deletion", "file_number": 16936}
debug 2022-03-13T01:04:24.223+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133464227930, "job": 2404, "event": "table_file_deletion", "file_number": 16934}
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1437] [default] Manual compaction waiting for all other scheduled background compactions to finish
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.223+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:24.335+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:24.335+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3419411929' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:22.706152+0000 mon.a (mon.0) 234590 : audit [DBG] from='client.? 10.233.92.240:0/2576985823' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:23.130094+0000 mgr.a (mgr.5974105) 7953 : audit [DBG] from='client.6071277 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:23.470436+0000 mgr.a (mgr.5974105) 7954 : cluster [DBG] pgmap v3662: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.4 MiB/s rd, 2.5 MiB/s wr, 134 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:23.604989+0000 mon.a (mon.0) 234591 : audit [DBG] from='client.? 10.233.92.240:0/1517896161' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:25.207+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:25.207+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1947253504' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:23.972481+0000 mon.a (mon.0) 234592 : audit [DBG] from='client.? 10.233.92.240:0/1415030358' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:24.339552+0000 mon.a (mon.0) 234593 : audit [DBG] from='client.? 10.233.92.240:0/3419411929' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:24.761798+0000 mgr.a (mgr.5974105) 7955 : audit [DBG] from='client.6071301 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:24.991979+0000 mon.c (mon.2) 7515 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:24.992117+0000 mon.c (mon.2) 7516 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:25.162302+0000 mon.b (mon.1) 1157 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:25.162549+0000 mon.b (mon.1) 1158 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:25.209752+0000 mon.a (mon.0) 234594 : audit [DBG] from='client.? 10.233.92.240:0/1947253504' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:25.475207+0000 mgr.a (mgr.5974105) 7956 : cluster [DBG] pgmap v3663: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 1.8 MiB/s wr, 91 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:25.576171+0000 mon.c (mon.2) 7517 : audit [DBG] from='client.? 10.233.92.240:0/738619272' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:25.951+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:25.951+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2674136045' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:26.823+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:26.823+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2922354880' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:25.956591+0000 mon.a (mon.0) 234595 : audit [DBG] from='client.? 10.233.92.240:0/2674136045' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:26.364873+0000 mgr.a (mgr.5974105) 7957 : audit [DBG] from='client.6071322 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:26.828551+0000 mon.a (mon.0) 234596 : audit [DBG] from='client.? 10.233.92.240:0/2922354880' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:27.091+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:04:27.095+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:27.219+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:27.219+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3515393907' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:27.635+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:27.635+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2938672737' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:27.096942+0000 mon.a (mon.0) 234597 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:27.097760+0000 mon.a (mon.0) 234598 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:27.223753+0000 mon.a (mon.0) 234599 : audit [DBG] from='client.? 10.233.92.240:0/3515393907' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:27.476045+0000 mgr.a (mgr.5974105) 7958 : cluster [DBG] pgmap v3664: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.5 MiB/s rd, 2.9 MiB/s wr, 143 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:27.638013+0000 mon.a (mon.0) 234600 : audit [DBG] from='client.? 10.233.92.240:0/2938672737' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:28.459+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:28.511+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:28.511+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/699760582' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:28.895+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:28.899+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1239297477' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:29.303+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:29.303+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/257990004' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:28.044096+0000 mgr.a (mgr.5974105) 7959 : audit [DBG] from='client.6071346 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:28.515586+0000 mon.a (mon.0) 234601 : audit [DBG] from='client.? 10.233.92.240:0/699760582' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:28.901765+0000 mon.a (mon.0) 234602 : audit [DBG] from='client.? 10.233.92.240:0/1239297477' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:29.306439+0000 mon.a (mon.0) 234603 : audit [DBG] from='client.? 10.233.92.240:0/257990004' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:29.481443+0000 mgr.a (mgr.5974105) 7960 : cluster [DBG] pgmap v3665: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.0 MiB/s wr, 101 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:30.160056+0000 mon.c (mon.2) 7518 : audit [DBG] from='client.? 10.233.92.240:0/63137417' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:30.575+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:30.575+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2704689243' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:30.983+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:30.983+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2767679945' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:29.698035+0000 mgr.a (mgr.5974105) 7961 : audit [DBG] from='client.6071370 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:30.580201+0000 mon.a (mon.0) 234604 : audit [DBG] from='client.? 10.233.92.240:0/2704689243' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:30.987469+0000 mon.a (mon.0) 234605 : audit [DBG] from='client.? 10.233.92.240:0/2767679945' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:31.875+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:31.875+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1251605085' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:32.247+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:32.247+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/265538716' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:32.615+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:32.615+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2114708264' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:31.394160+0000 mgr.a (mgr.5974105) 7962 : audit [DBG] from='client.6071388 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:31.482333+0000 mgr.a (mgr.5974105) 7963 : cluster [DBG] pgmap v3666: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.0 MiB/s wr, 101 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:31.878452+0000 mon.a (mon.0) 234606 : audit [DBG] from='client.? 10.233.92.240:0/1251605085' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:32.251051+0000 mon.a (mon.0) 234607 : audit [DBG] from='client.? 10.233.92.240:0/265538716' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:32.619619+0000 mon.a (mon.0) 234608 : audit [DBG] from='client.? 10.233.92.240:0/2114708264' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:33.487+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:33.507+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:33.507+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1678390555' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:33.907+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:33.907+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2576877591' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:33.024846+0000 mgr.a (mgr.5974105) 7964 : audit [DBG] from='client.5852876 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:33.485586+0000 mgr.a (mgr.5974105) 7965 : cluster [DBG] pgmap v3667: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 3.3 MiB/s wr, 124 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:33.512323+0000 mon.a (mon.0) 234609 : audit [DBG] from='client.? 10.233.92.240:0/1678390555' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:34.323+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:34.323+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2016311036' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:33.911972+0000 mon.a (mon.0) 234610 : audit [DBG] from='client.? 10.233.92.240:0/2576877591' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:34.327655+0000 mon.a (mon.0) 234611 : audit [DBG] from='client.? 10.233.92.240:0/2016311036' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:34.330850+0000 mon.b (mon.1) 1159 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:34.331087+0000 mon.b (mon.1) 1160 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:35.255+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:35.255+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3642296326' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:35.635+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:35.635+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2580576345' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:36.031+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:36.031+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4226622037' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:34.737257+0000 mgr.a (mgr.5974105) 7966 : audit [DBG] from='client.6071436 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:35.035228+0000 mon.c (mon.2) 7519 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:35.035391+0000 mon.c (mon.2) 7520 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:35.258059+0000 mon.a (mon.0) 234612 : audit [DBG] from='client.? 10.233.92.240:0/3642296326' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:35.487273+0000 mgr.a (mgr.5974105) 7967 : cluster [DBG] pgmap v3668: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 962 KiB/s rd, 2.4 MiB/s wr, 74 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:35.491351+0000 mon.b (mon.1) 1161 : audit [DBG] from='client.? 10.233.92.240:0/2420427530' entity='client.admin' cmd=[{"prefix": "quorum_status", "format": "json"}]: dispatch
audit 2022-03-13T01:04:35.641105+0000 mon.a (mon.0) 234613 : audit [DBG] from='client.? 10.233.92.240:0/2580576345' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:36.037057+0000 mon.a (mon.0) 234614 : audit [DBG] from='client.? 10.233.92.240:0/4226622037' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:36.903+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:36.903+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4221734137' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:37.119+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:04:37.119+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:37.291+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:37.291+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3986482566' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:36.437043+0000 mgr.a (mgr.5974105) 7968 : audit [DBG] from='client.6071466 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:36.906266+0000 mon.a (mon.0) 234615 : audit [DBG] from='client.? 10.233.92.240:0/4221734137' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:37.124632+0000 mon.a (mon.0) 234616 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:37.124842+0000 mon.a (mon.0) 234617 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:37.294148+0000 mon.a (mon.0) 234618 : audit [DBG] from='client.? 10.233.92.240:0/3986482566' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:37.751+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:37.751+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/74973158' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:38.507+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:38.675+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:38.675+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/107258405' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:37.492023+0000 mgr.a (mgr.5974105) 7969 : cluster [DBG] pgmap v3669: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 3.9 MiB/s wr, 121 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:37.754413+0000 mon.a (mon.0) 234619 : audit [DBG] from='client.? 10.233.92.240:0/74973158' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:39.055+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:39.055+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4140619296' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:39.471+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:39.471+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1463077106' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:38.195422+0000 mgr.a (mgr.5974105) 7970 : audit [DBG] from='client.6071490 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:38.680111+0000 mon.a (mon.0) 234620 : audit [DBG] from='client.? 10.233.92.240:0/107258405' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:39.058918+0000 mon.a (mon.0) 234621 : audit [DBG] from='client.? 10.233.92.240:0/4140619296' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:39.475359+0000 mon.a (mon.0) 234622 : audit [DBG] from='client.? 10.233.92.240:0/1463077106' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:39.497974+0000 mgr.a (mgr.5974105) 7971 : cluster [DBG] pgmap v3670: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 471 KiB/s rd, 2.9 MiB/s wr, 69 op/s; 13794/41382 objects degraded (33.333%)
debug 2022-03-13T01:04:40.307+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:40.311+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/959478824' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:40.707+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:40.707+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1845045052' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:41.071+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:41.071+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1417029248' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:39.863946+0000 mgr.a (mgr.5974105) 7972 : audit [DBG] from='client.6071514 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:40.313410+0000 mon.a (mon.0) 234623 : audit [DBG] from='client.? 10.233.92.240:0/959478824' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:40.709719+0000 mon.a (mon.0) 234624 : audit [DBG] from='client.? 10.233.92.240:0/1845045052' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:41.947+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:41.947+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2472123463' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:42.335+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:42.335+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/357738520' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:41.076110+0000 mon.a (mon.0) 234625 : audit [DBG] from='client.? 10.233.92.240:0/1417029248' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:41.489886+0000 mgr.a (mgr.5974105) 7973 : audit [DBG] from='client.6071538 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:41.501830+0000 mgr.a (mgr.5974105) 7974 : cluster [DBG] pgmap v3671: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 471 KiB/s rd, 2.9 MiB/s wr, 69 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:41.949963+0000 mon.a (mon.0) 234626 : audit [DBG] from='client.? 10.233.92.240:0/2472123463' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:42.711+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:42.711+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3071658359' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:43.507+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:43.547+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:43.547+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3879162025' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:42.339842+0000 mon.a (mon.0) 234627 : audit [DBG] from='client.? 10.233.92.240:0/357738520' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:42.714708+0000 mon.a (mon.0) 234628 : audit [DBG] from='client.? 10.233.92.240:0/3071658359' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:43.935+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:43.939+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3652223924' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:44.323+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:44.323+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1764222399' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:43.095318+0000 mgr.a (mgr.5974105) 7975 : audit [DBG] from='client.5852933 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:43.504777+0000 mgr.a (mgr.5974105) 7976 : cluster [DBG] pgmap v3672: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.0 MiB/s rd, 3.9 MiB/s wr, 110 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:43.553191+0000 mon.a (mon.0) 234629 : audit [DBG] from='client.? 10.233.92.240:0/3879162025' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:43.941430+0000 mon.a (mon.0) 234630 : audit [DBG] from='client.? 10.233.92.240:0/3652223924' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:44.326854+0000 mon.a (mon.0) 234631 : audit [DBG] from='client.? 10.233.92.240:0/1764222399' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:44.432444+0000 mon.b (mon.1) 1162 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:44.432683+0000 mon.b (mon.1) 1163 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:45.179+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:45.179+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/597223792' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:45.555+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:45.555+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3501430355' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:44.727894+0000 mgr.a (mgr.5974105) 7977 : audit [DBG] from='client.6071583 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:44.983272+0000 mon.c (mon.2) 7521 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:44.983510+0000 mon.c (mon.2) 7522 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:45.184125+0000 mon.a (mon.0) 234632 : audit [DBG] from='client.? 10.233.92.240:0/597223792' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:45.506051+0000 mgr.a (mgr.5974105) 7978 : cluster [DBG] pgmap v3673: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 926 KiB/s rd, 2.6 MiB/s wr, 87 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:45.558653+0000 mon.a (mon.0) 234633 : audit [DBG] from='client.? 10.233.92.240:0/3501430355' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:45.931+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:45.931+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3829496371' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:45.987+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
debug 2022-03-13T01:04:45.987+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3543277622' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
debug 2022-03-13T01:04:46.767+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:46.767+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3446525891' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:45.936298+0000 mon.a (mon.0) 234634 : audit [DBG] from='client.? 10.233.92.240:0/3829496371' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:45.990457+0000 mon.a (mon.0) 234635 : audit [DBG] from='client.? 10.233.92.240:0/3543277622' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
audit 2022-03-13T01:04:46.307034+0000 mgr.a (mgr.5974105) 7979 : audit [DBG] from='client.6071613 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:46.772908+0000 mon.a (mon.0) 234636 : audit [DBG] from='client.? 10.233.92.240:0/3446525891' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:47.131+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:04:47.131+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:47.263+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:47.263+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2911545363' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:47.723+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:47.723+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1801823550' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:47.951+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd dump", "format": "json"} v 0) v1
debug 2022-03-13T01:04:47.951+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3100942673' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
audit 2022-03-13T01:04:47.136186+0000 mon.a (mon.0) 234637 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:47.136339+0000 mon.a (mon.0) 234638 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:47.268666+0000 mon.a (mon.0) 234639 : audit [DBG] from='client.? 10.233.92.240:0/2911545363' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:47.532395+0000 mgr.a (mgr.5974105) 7980 : cluster [DBG] pgmap v3674: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 3.4 MiB/s wr, 125 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:47.727472+0000 mon.a (mon.0) 234640 : audit [DBG] from='client.? 10.233.92.240:0/1801823550' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:47.955561+0000 mon.a (mon.0) 234641 : audit [DBG] from='client.? 10.233.92.240:0/3100942673' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
debug 2022-03-13T01:04:48.351+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd crush class ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:48.351+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/931721306' entity='client.admin' cmd=[{"prefix": "osd crush class ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:48.507+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:04:48.651+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:48.651+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/505054536' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:48.743+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_write.cc:1668] [default] New memtable created with log file: #16938. Immutable memtables: 0.
debug 2022-03-13T01:04:48.747+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:48.749292) [db_impl/db_impl_compaction_flush.cc:2198] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
debug 2022-03-13T01:04:48.747+0000 7fb3af481700 4 rocksdb: [flush_job.cc:321] [default] [JOB 2405] Flushing memtable with next log file: 16938
debug 2022-03-13T01:04:48.747+0000 7fb3af481700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133488749368, "job": 2405, "event": "flush_started", "num_memtables": 1, "num_entries": 824, "num_deletes": 514, "total_data_size": 1412545, "memory_usage": 1429224, "flush_reason": "Manual Compaction"}
debug 2022-03-13T01:04:48.747+0000 7fb3af481700 4 rocksdb: [flush_job.cc:350] [default] [JOB 2405] Level-0 flush table #16939: started
debug 2022-03-13T01:04:48.783+0000 7fb3af481700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133488786984, "cf_name": "default", "job": 2405, "event": "table_file_creation", "file_number": 16939, "file_size": 1361193, "table_properties": {"data_size": 1356360, "index_size": 2386, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 0, "index_value_is_delta_encoded": 0, "filter_size": 1605, "raw_key_size": 13353, "raw_average_key_size": 21, "raw_value_size": 1346255, "raw_average_value_size": 2167, "num_data_blocks": 71, "num_entries": 621, "num_deletions": 514, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1647133463, "oldest_key_time": 1647133463, "file_creation_time": 1647133488}}
debug 2022-03-13T01:04:48.783+0000 7fb3af481700 4 rocksdb: [flush_job.cc:401] [default] [JOB 2405] Level-0 flush table #16939: 1361193 bytes OK
debug 2022-03-13T01:04:48.831+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:48.801375) [memtable_list.cc:447] [default] Level-0 commit table #16939 started
debug 2022-03-13T01:04:48.831+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:48.834285) [memtable_list.cc:503] [default] Level-0 commit table #16939: memtable #1 done
debug 2022-03-13T01:04:48.831+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:48.834310) EVENT_LOG_v1 {"time_micros": 1647133488834304, "job": 2405, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
debug 2022-03-13T01:04:48.831+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:04:48.834330) [db_impl/db_impl_compaction_flush.cc:205] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
debug 2022-03-13T01:04:48.831+0000 7fb3af481700 4 rocksdb: [db_impl/db_impl_files.cc:353] [JOB 2405] Try to delete WAL files size 1407223, prev total WAL file size 1407223, number of live WAL files 2.
debug 2022-03-13T01:04:48.831+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:48.831+0000 7fb3afc82700 4 rocksdb: (Original Log Time 2022/03/13-01:04:48.835766) [db_impl/db_impl_compaction_flush.cc:2516] [default] Manual compaction from level-0 to level-6 from 'logm .. 'logm; will stop at (end)
debug 2022-03-13T01:04:48.831+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1676] [default] [JOB 2406] Compacting 1@0 + 1@6 files to L6, score -1.00
debug 2022-03-13T01:04:48.831+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1680] [default] Compaction start summary: Base version 2405 Base level 0, inputs: [16939(1329KB)], [16937(58MB)]
debug 2022-03-13T01:04:48.831+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133488835793, "job": 2406, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [16939], "files_L6": [16937], "score": -1, "input_data_size": 62834782}
debug 2022-03-13T01:04:49.099+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:49.099+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3157076289' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:49.391+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1349] [default] [JOB 2406] Generated table #16940: 3826 keys, 55676437 bytes
debug 2022-03-13T01:04:49.391+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133489394400, "cf_name": "default", "job": 2406, "event": "table_file_creation", "file_number": 16940, "file_size": 55676437, "table_properties": {"data_size": 55581745, "index_size": 84179, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 0, "index_value_is_delta_encoded": 0, "filter_size": 9669, "raw_key_size": 77134, "raw_average_key_size": 20, "raw_value_size": 55468367, "raw_average_value_size": 14497, "num_data_blocks": 2496, "num_entries": 3826, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1646580773, "oldest_key_time": 0, "file_creation_time": 1647133488}}
debug 2022-03-13T01:04:49.403+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1415] [default] [JOB 2406] Compacted 1@0 + 1@6 files to L6 => 55676437 bytes
debug 2022-03-13T01:04:49.503+0000 7fb3afc82700 4 rocksdb: (Original Log Time 2022/03/13-01:04:49.506917) [compaction/compaction_job.cc:760] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 112.5 rd, 99.7 wr, level 6, files in(1, 1) out(1) MB in(1.3, 58.6) out(53.1), read-write-amplify(87.1) write-amplify(40.9) OK, records in: 4868, records dropped: 1042 output_compression: NoCompression
debug 2022-03-13T01:04:49.503+0000 7fb3afc82700 4 rocksdb: (Original Log Time 2022/03/13-01:04:49.506943) EVENT_LOG_v1 {"time_micros": 1647133489506934, "job": 2406, "event": "compaction_finished", "compaction_time_micros": 558700, "compaction_time_cpu_micros": 80301, "output_level": 6, "num_output_files": 1, "total_output_size": 55676437, "num_input_records": 4868, "num_output_records": 3826, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
debug 2022-03-13T01:04:49.503+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133489507420, "job": 2406, "event": "table_file_deletion", "file_number": 16939}
debug 2022-03-13T01:04:49.511+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133489516131, "job": 2406, "event": "table_file_deletion", "file_number": 16937}
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.511+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:04:49.643+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:49.643+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4045267426' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:48.178368+0000 mgr.a (mgr.5974105) 7981 : audit [DBG] from='client.6071643 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:48.355789+0000 mon.a (mon.0) 234642 : audit [DBG] from='client.? 10.233.92.240:0/931721306' entity='client.admin' cmd=[{"prefix": "osd crush class ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:48.654349+0000 mon.a (mon.0) 234643 : audit [DBG] from='client.? 10.233.92.240:0/505054536' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:49.103667+0000 mon.a (mon.0) 234644 : audit [DBG] from='client.? 10.233.92.240:0/3157076289' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:50.615+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:50.615+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1036672117' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:49.535212+0000 mgr.a (mgr.5974105) 7982 : cluster [DBG] pgmap v3675: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.9 MiB/s wr, 78 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:49.647661+0000 mon.a (mon.0) 234645 : audit [DBG] from='client.? 10.233.92.240:0/4045267426' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:50.137984+0000 mgr.a (mgr.5974105) 7983 : audit [DBG] from='client.6071673 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:50.618166+0000 mon.a (mon.0) 234646 : audit [DBG] from='client.? 10.233.92.240:0/1036672117' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:51.031+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:51.031+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1362351783' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:51.415+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:51.415+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/339341830' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:52.299+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:52.299+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/731871248' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:52.679+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:52.679+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4015930727' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:51.036691+0000 mon.a (mon.0) 234647 : audit [DBG] from='client.? 10.233.92.240:0/1362351783' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:51.418021+0000 mon.a (mon.0) 234648 : audit [DBG] from='client.? 10.233.92.240:0/339341830' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:51.535802+0000 mgr.a (mgr.5974105) 7984 : cluster [DBG] pgmap v3676: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.9 MiB/s wr, 78 op/s; 13794/41382 objects degraded (33.333%)
debug 2022-03-13T01:04:53.091+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:53.091+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1518334448' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:53.515+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
audit 2022-03-13T01:04:51.845278+0000 mgr.a (mgr.5974105) 7985 : audit [DBG] from='client.5852990 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:52.304684+0000 mon.a (mon.0) 234649 : audit [DBG] from='client.? 10.233.92.240:0/731871248' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:52.682910+0000 mon.a (mon.0) 234650 : audit [DBG] from='client.? 10.233.92.240:0/4015930727' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:53.096723+0000 mon.a (mon.0) 234651 : audit [DBG] from='client.? 10.233.92.240:0/1518334448' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:53.532323+0000 mgr.a (mgr.5974105) 7986 : audit [DBG] from='client.6071715 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:53.547002+0000 mgr.a (mgr.5974105) 7987 : cluster [DBG] pgmap v3677: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.3 MiB/s rd, 3.6 MiB/s wr, 128 op/s; 13794/41382 objects degraded (33.333%)
debug 2022-03-13T01:04:54.007+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:54.007+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3609277565' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:54.367+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:54.367+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1965385618' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:54.739+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:54.739+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1057501706' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:54.009572+0000 mon.a (mon.0) 234652 : audit [DBG] from='client.? 10.233.92.240:0/3609277565' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:54.213132+0000 mon.b (mon.1) 1164 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:54.213443+0000 mon.b (mon.1) 1165 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:54.370503+0000 mon.a (mon.0) 234653 : audit [DBG] from='client.? 10.233.92.240:0/1965385618' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:54.744298+0000 mon.a (mon.0) 234654 : audit [DBG] from='client.? 10.233.92.240:0/1057501706' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:55.563+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:55.563+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1413654048' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:55.947+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:55.947+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1216545317' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:56.339+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:56.339+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2544561585' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:55.000718+0000 mon.c (mon.2) 7523 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:55.000895+0000 mon.c (mon.2) 7524 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:55.115194+0000 mgr.a (mgr.5974105) 7988 : audit [DBG] from='client.6071739 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:55.553925+0000 mgr.a (mgr.5974105) 7989 : cluster [DBG] pgmap v3678: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 2.5 MiB/s wr, 87 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:55.568976+0000 mon.a (mon.0) 234655 : audit [DBG] from='client.? 10.233.92.240:0/1413654048' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:55.951667+0000 mon.a (mon.0) 234656 : audit [DBG] from='client.? 10.233.92.240:0/1216545317' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:57.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:04:57.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:04:57.223+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:57.223+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/122064147' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:56.343917+0000 mon.a (mon.0) 234657 : audit [DBG] from='client.? 10.233.92.240:0/2544561585' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:57.104912+0000 mon.a (mon.0) 234658 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:04:57.105150+0000 mon.a (mon.0) 234659 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:04:57.225918+0000 mon.a (mon.0) 234660 : audit [DBG] from='client.? 10.233.92.240:0/122064147' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:57.667+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:57.667+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1071957279' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:58.055+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:58.055+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/352226312' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:58.547+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
audit 2022-03-13T01:04:56.747443+0000 mgr.a (mgr.5974105) 7990 : audit [DBG] from='client.6071763 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:04:57.556398+0000 mgr.a (mgr.5974105) 7991 : cluster [DBG] pgmap v3679: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.3 MiB/s rd, 4.1 MiB/s wr, 129 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:57.670999+0000 mon.a (mon.0) 234661 : audit [DBG] from='client.? 10.233.92.240:0/1071957279' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:58.057714+0000 mon.a (mon.0) 234662 : audit [DBG] from='client.? 10.233.92.240:0/352226312' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:58.915+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:58.915+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3271863834' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:59.279+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:04:59.279+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1964105501' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:04:59.367+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
debug 2022-03-13T01:04:59.367+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/693394672' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
debug 2022-03-13T01:04:59.679+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:04:59.679+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/789651441' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:58.450093+0000 mgr.a (mgr.5974105) 7992 : audit [DBG] from='client.6071787 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:04:58.918197+0000 mon.a (mon.0) 234663 : audit [DBG] from='client.? 10.233.92.240:0/3271863834' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:04:59.285061+0000 mon.a (mon.0) 234664 : audit [DBG] from='client.? 10.233.92.240:0/1964105501' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:59.373179+0000 mon.a (mon.0) 234665 : audit [DBG] from='client.? 10.233.92.240:0/693394672' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
cluster 2022-03-13T01:04:59.557221+0000 mgr.a (mgr.5974105) 7993 : cluster [DBG] pgmap v3680: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 3.3 MiB/s wr, 91 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:04:59.684511+0000 mon.a (mon.0) 234666 : audit [DBG] from='client.? 10.233.92.240:0/789651441' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:04:59.875+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "versions", "format": "json"} v 0) v1
debug 2022-03-13T01:04:59.879+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1560205642' entity='client.admin' cmd=[{"prefix": "versions", "format": "json"}]: dispatch
debug 2022-03-13T01:05:00.515+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:00.515+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2557943288' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:00.899+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:00.899+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1283804287' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:04:59.881562+0000 mon.a (mon.0) 234667 : audit [DBG] from='client.? 10.233.92.240:0/1560205642' entity='client.admin' cmd=[{"prefix": "versions", "format": "json"}]: dispatch
audit 2022-03-13T01:05:00.053344+0000 mgr.a (mgr.5974105) 7994 : audit [DBG] from='client.6071823 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:00.520893+0000 mon.a (mon.0) 234668 : audit [DBG] from='client.? 10.233.92.240:0/2557943288' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:01.263+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:01.263+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4191457000' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:02.123+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:02.123+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2324299197' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:00.901995+0000 mon.a (mon.0) 234669 : audit [DBG] from='client.? 10.233.92.240:0/1283804287' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:01.265678+0000 mon.a (mon.0) 234670 : audit [DBG] from='client.? 10.233.92.240:0/4191457000' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:01.558348+0000 mgr.a (mgr.5974105) 7995 : cluster [DBG] pgmap v3681: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 3.3 MiB/s wr, 91 op/s; 13794/41382 objects degraded (33.333%)
debug 2022-03-13T01:05:02.535+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:02.535+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/327160064' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:02.915+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:02.915+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/441911643' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:01.667173+0000 mgr.a (mgr.5974105) 7996 : audit [DBG] from='client.6071847 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:02.127908+0000 mon.a (mon.0) 234671 : audit [DBG] from='client.? 10.233.92.240:0/2324299197' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:02.540207+0000 mon.a (mon.0) 234672 : audit [DBG] from='client.? 10.233.92.240:0/327160064' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:02.920868+0000 mon.a (mon.0) 234673 : audit [DBG] from='client.? 10.233.92.240:0/441911643' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:03.547+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:03.795+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:03.795+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4133723117' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:04.151+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:04.151+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2515492382' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:04.539+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:04.539+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1465483388' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:03.316389+0000 mgr.a (mgr.5974105) 7997 : audit [DBG] from='client.6071871 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:03.559460+0000 mgr.a (mgr.5974105) 7998 : cluster [DBG] pgmap v3682: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.4 MiB/s rd, 4.5 MiB/s wr, 135 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:03.800104+0000 mon.a (mon.0) 234674 : audit [DBG] from='client.? 10.233.92.240:0/4133723117' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:04.155389+0000 mon.a (mon.0) 234675 : audit [DBG] from='client.? 10.233.92.240:0/2515492382' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:04.543461+0000 mon.a (mon.0) 234676 : audit [DBG] from='client.? 10.233.92.240:0/1465483388' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:05.387+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:05.387+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/377830233' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:05.763+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:05.763+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3820858290' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:06.143+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:06.143+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2025038649' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:04.922918+0000 mgr.a (mgr.5974105) 7999 : audit [DBG] from='client.6071895 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:04.953886+0000 mon.b (mon.1) 1166 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:04.954284+0000 mon.b (mon.1) 1167 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:05.125571+0000 mon.c (mon.2) 7525 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:05.125710+0000 mon.c (mon.2) 7526 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:05.390979+0000 mon.a (mon.0) 234677 : audit [DBG] from='client.? 10.233.92.240:0/377830233' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:05.567806+0000 mgr.a (mgr.5974105) 8000 : cluster [DBG] pgmap v3683: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 2.8 MiB/s wr, 85 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:05.769295+0000 mon.a (mon.0) 234678 : audit [DBG] from='client.? 10.233.92.240:0/3820858290' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:06.995+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:06.995+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3391092302' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:07.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:05:07.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:06.148896+0000 mon.a (mon.0) 234679 : audit [DBG] from='client.? 10.233.92.240:0/2025038649' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:06.531644+0000 mgr.a (mgr.5974105) 8001 : audit [DBG] from='client.6071919 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:06.999799+0000 mon.a (mon.0) 234680 : audit [DBG] from='client.? 10.233.92.240:0/3391092302' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:07.102279+0000 mon.a (mon.0) 234681 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:07.102525+0000 mon.a (mon.0) 234682 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:05:07.387+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:07.387+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2690309490' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:07.831+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:07.831+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3934271593' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:07.389924+0000 mon.a (mon.0) 234683 : audit [DBG] from='client.? 10.233.92.240:0/2690309490' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:07.568517+0000 mgr.a (mgr.5974105) 8002 : cluster [DBG] pgmap v3684: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.4 MiB/s rd, 4.0 MiB/s wr, 135 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:07.834598+0000 mon.a (mon.0) 234684 : audit [DBG] from='client.? 10.233.92.240:0/3934271593' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:08.575+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:08.731+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:08.731+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1638055316' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:08.943+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"} v 0) v1
debug 2022-03-13T01:05:08.943+0000 7fb3a9c76700 0 log_channel(audit) log [INF] : from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
debug 2022-03-13T01:05:09.091+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:09.091+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/751268482' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:09.459+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:09.459+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/324038860' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:08.257731+0000 mgr.a (mgr.5974105) 8003 : audit [DBG] from='client.6071943 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:08.733867+0000 mon.a (mon.0) 234685 : audit [DBG] from='client.? 10.233.92.240:0/1638055316' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:08.949244+0000 mon.a (mon.0) 234686 : audit [INF] from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
audit 2022-03-13T01:05:09.097358+0000 mon.a (mon.0) 234687 : audit [DBG] from='client.? 10.233.92.240:0/751268482' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:09.464483+0000 mon.a (mon.0) 234688 : audit [DBG] from='client.? 10.233.92.240:0/324038860' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:10.291+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:10.291+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2702082781' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:10.695+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:10.695+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/162045746' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:09.569267+0000 mgr.a (mgr.5974105) 8004 : cluster [DBG] pgmap v3685: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.9 MiB/s rd, 2.4 MiB/s wr, 93 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:10.296645+0000 mon.a (mon.0) 234689 : audit [DBG] from='client.? 10.233.92.240:0/2702082781' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:11.091+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:11.091+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2565628137' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:11.943+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:11.943+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1020898306' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:09.832109+0000 mgr.a (mgr.5974105) 8005 : audit [DBG] from='client.6071967 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:10.698081+0000 mon.a (mon.0) 234690 : audit [DBG] from='client.? 10.233.92.240:0/162045746' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:11.096610+0000 mon.a (mon.0) 234691 : audit [DBG] from='client.? 10.233.92.240:0/2565628137' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:11.493917+0000 mgr.a (mgr.5974105) 8006 : audit [DBG] from='client.6071991 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:11.571339+0000 mgr.a (mgr.5974105) 8007 : cluster [DBG] pgmap v3686: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.9 MiB/s rd, 2.4 MiB/s wr, 93 op/s; 13794/41382 objects degraded (33.333%)
debug 2022-03-13T01:05:12.147+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"} v 0) v1
debug 2022-03-13T01:05:12.147+0000 7fb3a9c76700 0 log_channel(audit) log [INF] : from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
debug 2022-03-13T01:05:12.307+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:12.307+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2970019023' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:12.671+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:12.671+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3545734745' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:11.949303+0000 mon.a (mon.0) 234692 : audit [DBG] from='client.? 10.233.92.240:0/1020898306' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:12.151918+0000 mon.a (mon.0) 234693 : audit [INF] from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
audit 2022-03-13T01:05:12.312112+0000 mon.a (mon.0) 234694 : audit [DBG] from='client.? 10.233.92.240:0/2970019023' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:12.675640+0000 mon.a (mon.0) 234695 : audit [DBG] from='client.? 10.233.92.240:0/3545734745' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:13.543+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:13.543+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4190640790' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:13.599+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:13.943+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:13.943+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3245297808' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:14.375+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:14.375+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3900367001' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:13.062634+0000 mgr.a (mgr.5974105) 8008 : audit [DBG] from='client.6072015 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:13.548814+0000 mon.a (mon.0) 234696 : audit [DBG] from='client.? 10.233.92.240:0/4190640790' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:13.572138+0000 mgr.a (mgr.5974105) 8009 : cluster [DBG] pgmap v3687: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.3 MiB/s rd, 2.7 MiB/s wr, 121 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:13.948310+0000 mon.a (mon.0) 234697 : audit [DBG] from='client.? 10.233.92.240:0/3245297808' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:15.219+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:15.219+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/399678197' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:15.599+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:15.599+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1914765353' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:14.379010+0000 mon.a (mon.0) 234698 : audit [DBG] from='client.? 10.233.92.240:0/3900367001' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:14.643993+0000 mon.b (mon.1) 1168 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:14.652581+0000 mon.b (mon.1) 1169 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:15.119094+0000 mon.c (mon.2) 7527 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:15.119316+0000 mon.c (mon.2) 7528 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:15.222304+0000 mon.a (mon.0) 234699 : audit [DBG] from='client.? 10.233.92.240:0/399678197' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:15.995+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:15.999+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1216144839' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:16.583+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
debug 2022-03-13T01:05:16.583+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1857433579' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
audit 2022-03-13T01:05:14.765124+0000 mgr.a (mgr.5974105) 8010 : audit [DBG] from='client.6072039 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:15.572745+0000 mgr.a (mgr.5974105) 8011 : cluster [DBG] pgmap v3688: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.4 MiB/s rd, 1.5 MiB/s wr, 78 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:15.604926+0000 mon.a (mon.0) 234700 : audit [DBG] from='client.? 10.233.92.240:0/1914765353' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:16.001572+0000 mon.a (mon.0) 234701 : audit [DBG] from='client.? 10.233.92.240:0/1216144839' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:16.588462+0000 mon.a (mon.0) 234702 : audit [DBG] from='client.? 10.233.92.240:0/1857433579' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
debug 2022-03-13T01:05:16.835+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:16.835+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3172741508' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:17.103+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:05:17.107+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:05:17.227+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:17.227+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3407127241' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:17.627+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:17.627+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/223841295' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:16.376407+0000 mgr.a (mgr.5974105) 8012 : audit [DBG] from='client.6072063 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:16.841306+0000 mon.a (mon.0) 234703 : audit [DBG] from='client.? 10.233.92.240:0/3172741508' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:17.109207+0000 mon.a (mon.0) 234704 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:17.109675+0000 mon.a (mon.0) 234705 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:17.233116+0000 mon.a (mon.0) 234706 : audit [DBG] from='client.? 10.233.92.240:0/3407127241' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:17.576697+0000 mgr.a (mgr.5974105) 8013 : cluster [DBG] pgmap v3689: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.3 MiB/s rd, 2.9 MiB/s wr, 127 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:17.629533+0000 mon.a (mon.0) 234707 : audit [DBG] from='client.? 10.233.92.240:0/223841295' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:18.495+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:18.495+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3619673957' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:18.715+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:18.879+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:18.879+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3007365180' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:19.255+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:19.255+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1947589923' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:18.016288+0000 mgr.a (mgr.5974105) 8014 : audit [DBG] from='client.6072093 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:18.499424+0000 mon.a (mon.0) 234708 : audit [DBG] from='client.? 10.233.92.240:0/3619673957' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:18.883967+0000 mon.a (mon.0) 234709 : audit [DBG] from='client.? 10.233.92.240:0/3007365180' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:20.199+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:20.199+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/591436731' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:19.257660+0000 mon.a (mon.0) 234710 : audit [DBG] from='client.? 10.233.92.240:0/1947589923' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:19.578314+0000 mgr.a (mgr.5974105) 8015 : cluster [DBG] pgmap v3690: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 1.7 MiB/s wr, 77 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:19.645330+0000 mgr.a (mgr.5974105) 8016 : audit [DBG] from='client.6072117 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:20.202875+0000 mon.a (mon.0) 234711 : audit [DBG] from='client.? 10.233.92.240:0/591436731' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:20.627+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:20.627+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/15050984' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:21.055+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:21.055+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4272299292' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:21.163+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "quorum_status", "format": "json"} v 0) v1
debug 2022-03-13T01:05:21.163+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/441484592' entity='client.admin' cmd=[{"prefix": "quorum_status", "format": "json"}]: dispatch
audit 2022-03-13T01:05:20.631657+0000 mon.a (mon.0) 234712 : audit [DBG] from='client.? 10.233.92.240:0/15050984' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:21.059082+0000 mon.a (mon.0) 234713 : audit [DBG] from='client.? 10.233.92.240:0/4272299292' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:21.168314+0000 mon.a (mon.0) 234714 : audit [DBG] from='client.? 10.233.92.240:0/441484592' entity='client.admin' cmd=[{"prefix": "quorum_status", "format": "json"}]: dispatch
debug 2022-03-13T01:05:21.943+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:21.943+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/353341032' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:22.363+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:22.363+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3261167668' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:22.747+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:22.747+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3088210198' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:21.443071+0000 mgr.a (mgr.5974105) 8017 : audit [DBG] from='client.6072147 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:21.580098+0000 mgr.a (mgr.5974105) 8018 : cluster [DBG] pgmap v3691: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 1.7 MiB/s wr, 77 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:21.947490+0000 mon.a (mon.0) 234715 : audit [DBG] from='client.? 10.233.92.240:0/353341032' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:22.369365+0000 mon.a (mon.0) 234716 : audit [DBG] from='client.? 10.233.92.240:0/3261167668' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:23.671+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:23.671+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1934761507' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:23.751+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:24.047+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:24.047+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/88902968' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:24.447+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:24.447+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2257255593' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:22.752066+0000 mon.a (mon.0) 234717 : audit [DBG] from='client.? 10.233.92.240:0/3088210198' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:23.195415+0000 mgr.a (mgr.5974105) 8019 : audit [DBG] from='client.6072168 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:23.585423+0000 mgr.a (mgr.5974105) 8020 : cluster [DBG] pgmap v3692: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.5 MiB/s rd, 2.7 MiB/s wr, 137 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:23.673763+0000 mon.a (mon.0) 234718 : audit [DBG] from='client.? 10.233.92.240:0/1934761507' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:25.663+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:25.663+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1492530494' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:24.052331+0000 mon.a (mon.0) 234719 : audit [DBG] from='client.? 10.233.92.240:0/88902968' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:24.451654+0000 mon.a (mon.0) 234720 : audit [DBG] from='client.? 10.233.92.240:0/2257255593' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:24.649521+0000 mon.b (mon.1) 1170 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:24.649937+0000 mon.b (mon.1) 1171 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:24.996829+0000 mon.c (mon.2) 7529 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:24.997081+0000 mon.c (mon.2) 7530 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:25.287054+0000 mon.c (mon.2) 7531 : audit [DBG] from='client.? 10.233.92.240:0/2771353840' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:25.667133+0000 mon.a (mon.0) 234721 : audit [DBG] from='client.? 10.233.92.240:0/1492530494' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:26.063+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:26.063+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/791380697' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:26.927+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:26.927+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3207716358' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:24.819120+0000 mgr.a (mgr.5974105) 8021 : audit [DBG] from='client.6072192 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:25.598080+0000 mgr.a (mgr.5974105) 8022 : cluster [DBG] pgmap v3693: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.2 MiB/s rd, 2.4 MiB/s wr, 108 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:26.066663+0000 mon.a (mon.0) 234722 : audit [DBG] from='client.? 10.233.92.240:0/791380697' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:26.457048+0000 mgr.a (mgr.5974105) 8023 : audit [DBG] from='client.6072216 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:05:27.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:05:27.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:05:27.335+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:27.335+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/954129750' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:27.779+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:27.779+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/343029586' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:26.930895+0000 mon.a (mon.0) 234723 : audit [DBG] from='client.? 10.233.92.240:0/3207716358' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:27.102625+0000 mon.a (mon.0) 234724 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:27.102768+0000 mon.a (mon.0) 234725 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:27.340764+0000 mon.a (mon.0) 234726 : audit [DBG] from='client.? 10.233.92.240:0/954129750' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:27.612764+0000 mgr.a (mgr.5974105) 8024 : cluster [DBG] pgmap v3694: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.7 MiB/s rd, 3.2 MiB/s wr, 150 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:27.784165+0000 mon.a (mon.0) 234727 : audit [DBG] from='client.? 10.233.92.240:0/343029586' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:28.671+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:28.671+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2508261515' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:28.783+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:29.051+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:29.051+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/655358307' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:28.201283+0000 mgr.a (mgr.5974105) 8025 : audit [DBG] from='client.6072240 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:28.677154+0000 mon.a (mon.0) 234728 : audit [DBG] from='client.? 10.233.92.240:0/2508261515' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:29.056446+0000 mon.a (mon.0) 234729 : audit [DBG] from='client.? 10.233.92.240:0/655358307' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:29.451+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:29.451+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2920139951' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:30.379+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:30.379+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1288802597' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:29.455000+0000 mon.a (mon.0) 234730 : audit [DBG] from='client.? 10.233.92.240:0/2920139951' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:29.613447+0000 mgr.a (mgr.5974105) 8026 : cluster [DBG] pgmap v3695: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 1.8 MiB/s wr, 101 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:30.385238+0000 mon.a (mon.0) 234731 : audit [DBG] from='client.? 10.233.92.240:0/1288802597' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:30.751+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:30.751+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1849903676' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:31.139+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:31.139+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1413881959' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:29.901915+0000 mgr.a (mgr.5974105) 8027 : audit [DBG] from='client.6072264 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:30.754296+0000 mon.a (mon.0) 234732 : audit [DBG] from='client.? 10.233.92.240:0/1849903676' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:31.143636+0000 mon.a (mon.0) 234733 : audit [DBG] from='client.? 10.233.92.240:0/1413881959' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:32.123+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:32.123+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3856384348' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:32.523+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:32.523+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4182227548' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:32.899+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:32.899+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4157029945' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:31.587997+0000 mgr.a (mgr.5974105) 8028 : audit [DBG] from='client.6072288 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:31.632600+0000 mgr.a (mgr.5974105) 8029 : cluster [DBG] pgmap v3696: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 1.8 MiB/s wr, 100 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:32.127678+0000 mon.a (mon.0) 234734 : audit [DBG] from='client.? 10.233.92.240:0/3856384348' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:32.526919+0000 mon.a (mon.0) 234735 : audit [DBG] from='client.? 10.233.92.240:0/4182227548' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:32.901657+0000 mon.a (mon.0) 234736 : audit [DBG] from='client.? 10.233.92.240:0/4157029945' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:33.739+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:33.739+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1670803385' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:33.811+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:34.115+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:34.115+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1536504156' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:33.269841+0000 mgr.a (mgr.5974105) 8030 : audit [DBG] from='client.6072312 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
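The mgr.a audit entries carrying cmd 'osd ok-to-stop' with "ids": ["0"] recur every second or two throughout this window: an admin client, most likely the Rook operator preparing to restart osd.0, keeps asking whether osd.0 can be taken down without leaving any PG unable to serve I/O, and the steady polling suggests the answer has not yet turned positive. A minimal way to run the same check by hand is through the Rook toolbox; 'rook-ceph-tools' below is the toolbox's default deployment name and an assumption here, since the toolbox itself does not appear in this log:

  kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd ok-to-stop 0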
cluster 2022-03-13T01:05:33.635262+0000 mgr.a (mgr.5974105) 8031 : cluster [DBG] pgmap v3697: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.9 MiB/s rd, 3.2 MiB/s wr, 161 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:33.741542+0000 mon.a (mon.0) 234737 : audit [DBG] from='client.? 10.233.92.240:0/1670803385' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:34.495+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:34.495+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/777182039' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:35.339+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:35.339+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1443483082' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:34.120659+0000 mon.a (mon.0) 234738 : audit [DBG] from='client.? 10.233.92.240:0/1536504156' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:34.467746+0000 mon.b (mon.1) 1172 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:34.467970+0000 mon.b (mon.1) 1173 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:34.498023+0000 mon.a (mon.0) 234739 : audit [DBG] from='client.? 10.233.92.240:0/777182039' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:34.998515+0000 mon.c (mon.2) 7532 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:34.998829+0000 mon.c (mon.2) 7533 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:05:35.711+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:35.711+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1880368020' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:36.083+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:36.087+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4049517840' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:34.884777+0000 mgr.a (mgr.5974105) 8032 : audit [DBG] from='client.6072336 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:35.344599+0000 mon.a (mon.0) 234740 : audit [DBG] from='client.? 10.233.92.240:0/1443483082' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:35.635912+0000 mgr.a (mgr.5974105) 8033 : cluster [DBG] pgmap v3698: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 2.2 MiB/s wr, 102 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:35.714482+0000 mon.a (mon.0) 234741 : audit [DBG] from='client.? 10.233.92.240:0/1880368020' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:36.089654+0000 mon.a (mon.0) 234742 : audit [DBG] from='client.? 10.233.92.240:0/4049517840' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:36.979+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:36.979+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1556995122' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:37.107+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:05:37.107+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:05:37.343+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:37.343+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/595580157' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:36.488937+0000 mgr.a (mgr.5974105) 8034 : audit [DBG] from='client.6072360 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:36.984933+0000 mon.a (mon.0) 234743 : audit [DBG] from='client.? 10.233.92.240:0/1556995122' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:37.111810+0000 mon.a (mon.0) 234744 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:37.111999+0000 mon.a (mon.0) 234745 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:37.348620+0000 mon.a (mon.0) 234746 : audit [DBG] from='client.? 10.233.92.240:0/595580157' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:37.847+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:37.847+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/203096118' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:38.715+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:38.715+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3912916074' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:37.642556+0000 mgr.a (mgr.5974105) 8035 : cluster [DBG] pgmap v3699: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.2 MiB/s rd, 2.5 MiB/s wr, 138 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:37.852559+0000 mon.a (mon.0) 234747 : audit [DBG] from='client.? 10.233.92.240:0/203096118' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:38.835+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:39.459+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:39.459+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3792343687' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:38.227377+0000 mgr.a (mgr.5974105) 8036 : audit [DBG] from='client.6072384 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:38.719839+0000 mon.a (mon.0) 234748 : audit [DBG] from='client.? 10.233.92.240:0/3912916074' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:39.084768+0000 mon.c (mon.2) 7534 : audit [DBG] from='client.? 10.233.92.240:0/90079970' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:39.461689+0000 mon.a (mon.0) 234749 : audit [DBG] from='client.? 10.233.92.240:0/3792343687' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:39.643213+0000 mgr.a (mgr.5974105) 8037 : cluster [DBG] pgmap v3700: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 1.7 MiB/s wr, 96 op/s; 13794/41382 objects degraded (33.333%)
debug 2022-03-13T01:05:40.335+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:40.335+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1613941882' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:40.727+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:40.727+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1997186840' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:41.099+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:41.099+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1144535851' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:39.884478+0000 mgr.a (mgr.5974105) 8038 : audit [DBG] from='client.6072408 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:40.340554+0000 mon.a (mon.0) 234750 : audit [DBG] from='client.? 10.233.92.240:0/1613941882' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:40.730838+0000 mon.a (mon.0) 234751 : audit [DBG] from='client.? 10.233.92.240:0/1997186840' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:41.104827+0000 mon.a (mon.0) 234752 : audit [DBG] from='client.? 10.233.92.240:0/1144535851' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:42.327+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:42.327+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3856596437' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:42.727+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:42.727+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/839739383' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:41.650041+0000 mgr.a (mgr.5974105) 8039 : cluster [DBG] pgmap v3701: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 1.7 MiB/s wr, 96 op/s; 13794/41382 objects degraded (33.333%)
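Every pgmap line in this stretch reports the same ratio: 13794 of 41382 object copies degraded, exactly one third (13794 x 3 = 41382). Combined with all 33 PGs sitting in active+undersized+degraded and 103 GiB used for 51 GiB of data (roughly two copies on disk instead of three), this is consistent with one replica of a size-3 pool being offline while the remaining two keep serving I/O. The same summary can be pulled on demand from the toolbox (again assuming the default 'rook-ceph-tools' deployment):

  kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status
  kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph pg stat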
audit 2022-03-13T01:05:42.332088+0000 mon.a (mon.0) 234753 : audit [DBG] from='client.? 10.233.92.240:0/3856596437' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:43.107+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:43.107+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4009807057' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:43.855+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:43.999+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:43.999+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/739528086' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:41.801525+0000 mgr.a (mgr.5974105) 8040 : audit [DBG] from='client.6072432 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:42.732749+0000 mon.a (mon.0) 234754 : audit [DBG] from='client.? 10.233.92.240:0/839739383' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:43.110263+0000 mon.a (mon.0) 234755 : audit [DBG] from='client.? 10.233.92.240:0/4009807057' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:43.529806+0000 mgr.a (mgr.5974105) 8041 : audit [DBG] from='client.6072456 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:43.651323+0000 mgr.a (mgr.5974105) 8042 : cluster [DBG] pgmap v3702: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.8 MiB/s rd, 3.2 MiB/s wr, 161 op/s; 13794/41382 objects degraded (33.333%)
debug 2022-03-13T01:05:44.363+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:44.363+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2542224671' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:44.767+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:44.767+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1728469951' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:44.001703+0000 mon.a (mon.0) 234756 : audit [DBG] from='client.? 10.233.92.240:0/739528086' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:44.368172+0000 mon.a (mon.0) 234757 : audit [DBG] from='client.? 10.233.92.240:0/2542224671' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:44.377950+0000 mon.b (mon.1) 1174 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:44.378262+0000 mon.b (mon.1) 1175 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:44.771429+0000 mon.a (mon.0) 234758 : audit [DBG] from='client.? 10.233.92.240:0/1728469951' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:44.999436+0000 mon.c (mon.2) 7535 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:44.999583+0000 mon.c (mon.2) 7536 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:05:45.655+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:45.655+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3608785441' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:46.027+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:46.027+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3785987211' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:46.431+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:46.431+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2081318730' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:45.194082+0000 mgr.a (mgr.5974105) 8043 : audit [DBG] from='client.6072480 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:45.652496+0000 mgr.a (mgr.5974105) 8044 : cluster [DBG] pgmap v3703: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.7 MiB/s rd, 1.8 MiB/s wr, 100 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:45.660846+0000 mon.a (mon.0) 234759 : audit [DBG] from='client.? 10.233.92.240:0/3608785441' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:46.032217+0000 mon.a (mon.0) 234760 : audit [DBG] from='client.? 10.233.92.240:0/3785987211' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:46.435475+0000 mon.a (mon.0) 234761 : audit [DBG] from='client.? 10.233.92.240:0/2081318730' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:47.051+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
debug 2022-03-13T01:05:47.051+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/989251394' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
debug 2022-03-13T01:05:47.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:05:47.099+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:05:47.347+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:47.347+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2930314838' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:47.831+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:47.835+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4103657647' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:46.889128+0000 mgr.a (mgr.5974105) 8045 : audit [DBG] from='client.6072504 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:47.055162+0000 mon.a (mon.0) 234762 : audit [DBG] from='client.? 10.233.92.240:0/989251394' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
audit 2022-03-13T01:05:47.102360+0000 mon.a (mon.0) 234763 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:47.102546+0000 mon.a (mon.0) 234764 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:47.350327+0000 mon.a (mon.0) 234765 : audit [DBG] from='client.? 10.233.92.240:0/2930314838' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:47.657184+0000 mgr.a (mgr.5974105) 8046 : cluster [DBG] pgmap v3704: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 2.0 MiB/s wr, 121 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:47.837585+0000 mon.a (mon.0) 234766 : audit [DBG] from='client.? 10.233.92.240:0/4103657647' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:48.279+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:48.279+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3582562456' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:48.887+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:49.503+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:49.503+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1585928208' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:49.519+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd crush class ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:49.519+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1628545272' entity='client.admin' cmd=[{"prefix": "osd crush class ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:48.282900+0000 mon.a (mon.0) 234767 : audit [DBG] from='client.? 10.233.92.240:0/3582562456' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:48.723375+0000 mgr.a (mgr.5974105) 8047 : audit [DBG] from='client.6072531 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:48.754755+0000 mon.c (mon.2) 7537 : audit [DBG] from='client.? 10.233.92.240:0/1086650747' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch
debug 2022-03-13T01:05:49.879+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:49.879+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1396290585' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:50.263+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:50.263+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2114420526' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:49.508542+0000 mon.a (mon.0) 234768 : audit [DBG] from='client.? 10.233.92.240:0/1585928208' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:49.522756+0000 mon.a (mon.0) 234769 : audit [DBG] from='client.? 10.233.92.240:0/1628545272' entity='client.admin' cmd=[{"prefix": "osd crush class ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:49.658281+0000 mgr.a (mgr.5974105) 8048 : cluster [DBG] pgmap v3705: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 1.7 MiB/s wr, 85 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:49.885077+0000 mon.a (mon.0) 234770 : audit [DBG] from='client.? 10.233.92.240:0/1396290585' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:50.266863+0000 mon.a (mon.0) 234771 : audit [DBG] from='client.? 10.233.92.240:0/2114420526' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:51.119+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:51.119+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/281566546' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:51.511+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:51.511+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/958841549' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:52.423+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:52.423+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2970039797' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:50.654503+0000 mgr.a (mgr.5974105) 8049 : audit [DBG] from='client.6072567 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:51.124043+0000 mon.a (mon.0) 234772 : audit [DBG] from='client.? 10.233.92.240:0/281566546' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:51.513978+0000 mon.a (mon.0) 234773 : audit [DBG] from='client.? 10.233.92.240:0/958841549' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:53.339+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:53.339+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3928910797' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:53.711+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:53.711+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3221937967' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:51.660401+0000 mgr.a (mgr.5974105) 8050 : cluster [DBG] pgmap v3706: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 1.7 MiB/s wr, 85 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:52.429313+0000 mon.a (mon.0) 234774 : audit [DBG] from='client.? 10.233.92.240:0/2970039797' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:52.830238+0000 mgr.a (mgr.5974105) 8051 : audit [DBG] from='client.6072591 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:53.342628+0000 mon.a (mon.0) 234775 : audit [DBG] from='client.? 10.233.92.240:0/3928910797' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:53.717381+0000 mon.a (mon.0) 234776 : audit [DBG] from='client.? 10.233.92.240:0/3221937967' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:54.095+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:54.095+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2371326022' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:54.259+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
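The recurring _set_new_cache_sizes lines are routine housekeeping: about every five seconds the leader mon re-splits its cache budget between incremental osdmaps, full osdmaps and the RocksDB cache. Here cache_size 1020054731 bytes (~0.95 GiB) is carved into 71303168 bytes (68 MiB) each for the inc/full osdmap caches and 876609536 bytes (~836 MiB) for the KV cache. The overall budget follows the monitor memory target when mon cache autotuning is enabled (the default); it can be read back from the toolbox (same 'rook-ceph-tools' assumption as above) with, for example:

  kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph config get mon mon_memory_target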
debug 2022-03-13T01:05:54.915+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:54.915+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1847144537' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:53.663331+0000 mgr.a (mgr.5974105) 8052 : cluster [DBG] pgmap v3707: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.4 MiB/s rd, 3.2 MiB/s wr, 140 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:54.098844+0000 mon.a (mon.0) 234777 : audit [DBG] from='client.? 10.233.92.240:0/2371326022' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:54.397159+0000 mon.b (mon.1) 1176 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:54.397419+0000 mon.b (mon.1) 1177 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:54.470266+0000 mgr.a (mgr.5974105) 8053 : audit [DBG] from='client.6072615 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:54.920110+0000 mon.a (mon.0) 234778 : audit [DBG] from='client.? 10.233.92.240:0/1847144537' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:55.001542+0000 mon.c (mon.2) 7538 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:55.001680+0000 mon.c (mon.2) 7539 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:05:55.287+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:55.287+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1205518442' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:55.691+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:55.691+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/208313861' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:55.290696+0000 mon.a (mon.0) 234779 : audit [DBG] from='client.? 10.233.92.240:0/1205518442' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:55.673456+0000 mgr.a (mgr.5974105) 8054 : cluster [DBG] pgmap v3708: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.2 MiB/s rd, 1.8 MiB/s wr, 75 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:55.697260+0000 mon.a (mon.0) 234780 : audit [DBG] from='client.? 10.233.92.240:0/208313861' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:56.551+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:56.551+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2737580136' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:56.939+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:56.939+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1896374253' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:57.095+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:05:57.095+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:56.076991+0000 mgr.a (mgr.5974105) 8055 : audit [DBG] from='client.6072639 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:56.554293+0000 mon.a (mon.0) 234781 : audit [DBG] from='client.? 10.233.92.240:0/2737580136' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:56.942680+0000 mon.a (mon.0) 234782 : audit [DBG] from='client.? 10.233.92.240:0/1896374253' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:05:57.098974+0000 mon.a (mon.0) 234783 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:05:57.099198+0000 mon.a (mon.0) 234784 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:05:57.355787+0000 mon.c (mon.2) 7540 : audit [DBG] from='client.? 10.233.92.240:0/1800533617' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:58.283+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:58.283+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2438444536' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:58.659+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:05:58.659+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1437672851' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:05:57.682779+0000 mgr.a (mgr.5974105) 8056 : cluster [DBG] pgmap v3709: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.9 MiB/s rd, 2.4 MiB/s wr, 113 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:57.805230+0000 mgr.a (mgr.5974105) 8057 : audit [DBG] from='client.6072654 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:05:58.289540+0000 mon.a (mon.0) 234785 : audit [DBG] from='client.? 10.233.92.240:0/2438444536' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:58.663954+0000 mon.a (mon.0) 234786 : audit [DBG] from='client.? 10.233.92.240:0/1437672851' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:05:59.063+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:59.063+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1807177024' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:05:59.295+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:05:59.927+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:05:59.931+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1316692874' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:05:59.066111+0000 mon.a (mon.0) 234787 : audit [DBG] from='client.? 10.233.92.240:0/1807177024' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:00.347+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:00.347+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1118197466' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:00.719+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:00.719+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2523101109' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:01.083+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "versions", "format": "json"} v 0) v1
debug 2022-03-13T01:06:01.083+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2164827855' entity='client.admin' cmd=[{"prefix": "versions", "format": "json"}]: dispatch
audit 2022-03-13T01:05:59.468831+0000 mgr.a (mgr.5974105) 8058 : audit [DBG] from='client.6072678 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:05:59.693343+0000 mgr.a (mgr.5974105) 8059 : cluster [DBG] pgmap v3710: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 2.2 MiB/s wr, 92 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:05:59.933405+0000 mon.a (mon.0) 234788 : audit [DBG] from='client.? 10.233.92.240:0/1316692874' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:00.352403+0000 mon.a (mon.0) 234789 : audit [DBG] from='client.? 10.233.92.240:0/1118197466' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:00.609571+0000 mon.c (mon.2) 7541 : audit [DBG] from='client.? 10.233.92.240:0/1358124660' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
audit 2022-03-13T01:06:00.724396+0000 mon.a (mon.0) 234790 : audit [DBG] from='client.? 10.233.92.240:0/2523101109' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:01.591+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:01.591+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2325159288' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:01.983+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:01.983+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1741858533' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:02.355+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:02.355+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1565633631' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:01.087110+0000 mon.a (mon.0) 234791 : audit [DBG] from='client.? 10.233.92.240:0/2164827855' entity='client.admin' cmd=[{"prefix": "versions", "format": "json"}]: dispatch
audit 2022-03-13T01:06:01.099009+0000 mgr.a (mgr.5974105) 8060 : audit [DBG] from='client.6072714 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:01.595842+0000 mon.a (mon.0) 234792 : audit [DBG] from='client.? 10.233.92.240:0/2325159288' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:01.699227+0000 mgr.a (mgr.5974105) 8061 : cluster [DBG] pgmap v3711: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 2.2 MiB/s wr, 92 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:01.989431+0000 mon.a (mon.0) 234793 : audit [DBG] from='client.? 10.233.92.240:0/1741858533' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:03.271+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:03.271+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2946067707' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:02.358164+0000 mon.a (mon.0) 234794 : audit [DBG] from='client.? 10.233.92.240:0/1565633631' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:02.795153+0000 mgr.a (mgr.5974105) 8062 : audit [DBG] from='client.6072738 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:03.275017+0000 mon.a (mon.0) 234795 : audit [DBG] from='client.? 10.233.92.240:0/2946067707' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:03.639+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:03.639+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1758352383' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:03.999+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:03.999+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4098379123' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:04.319+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
audit 2022-03-13T01:06:03.644496+0000 mon.a (mon.0) 234796 : audit [DBG] from='client.? 10.233.92.240:0/1758352383' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:03.707362+0000 mgr.a (mgr.5974105) 8063 : cluster [DBG] pgmap v3712: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.6 MiB/s rd, 3.1 MiB/s wr, 139 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:04.002915+0000 mon.a (mon.0) 234797 : audit [DBG] from='client.? 10.233.92.240:0/4098379123' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:04.280800+0000 mon.b (mon.1) 1178 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:04.281123+0000 mon.b (mon.1) 1179 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:06:05.015+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:05.015+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1871478180' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:05.827+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:05.827+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2272200944' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:04.385197+0000 mgr.a (mgr.5974105) 8064 : audit [DBG] from='client.6072762 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:04.988363+0000 mon.c (mon.2) 7542 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:04.988501+0000 mon.c (mon.2) 7543 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:05.020586+0000 mon.a (mon.0) 234798 : audit [DBG] from='client.? 10.233.92.240:0/1871478180' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:05.443737+0000 mon.c (mon.2) 7544 : audit [DBG] from='client.? 10.233.92.240:0/1062588450' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:05.829885+0000 mon.a (mon.0) 234799 : audit [DBG] from='client.? 10.233.92.240:0/2272200944' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:06.727+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:06.727+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2279804362' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:06.927+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "quorum_status", "format": "json"} v 0) v1
debug 2022-03-13T01:06:06.927+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1048976399' entity='client.admin' cmd=[{"prefix": "quorum_status", "format": "json"}]: dispatch
debug 2022-03-13T01:06:07.159+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:06:07.163+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:06:07.163+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:07.163+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/756762283' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:05.708902+0000 mgr.a (mgr.5974105) 8065 : cluster [DBG] pgmap v3713: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 1.6 MiB/s wr, 84 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:06.243796+0000 mgr.a (mgr.5974105) 8066 : audit [DBG] from='client.6072786 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:06.731337+0000 mon.a (mon.0) 234800 : audit [DBG] from='client.? 10.233.92.240:0/2279804362' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:06.931821+0000 mon.a (mon.0) 234801 : audit [DBG] from='client.? 10.233.92.240:0/1048976399' entity='client.admin' cmd=[{"prefix": "quorum_status", "format": "json"}]: dispatch
debug 2022-03-13T01:06:07.563+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:07.563+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1765792945' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:08.471+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:08.471+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2397278571' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:07.165186+0000 mon.a (mon.0) 234802 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:07.165567+0000 mon.a (mon.0) 234803 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:07.165825+0000 mon.a (mon.0) 234804 : audit [DBG] from='client.? 10.233.92.240:0/756762283' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:07.567561+0000 mon.a (mon.0) 234805 : audit [DBG] from='client.? 10.233.92.240:0/1765792945' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:07.711342+0000 mgr.a (mgr.5974105) 8067 : cluster [DBG] pgmap v3714: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.3 MiB/s rd, 2.5 MiB/s wr, 130 op/s; 13794/41382 objects degraded (33.333%)
debug 2022-03-13T01:06:08.847+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:08.847+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3956728528' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:08.991+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"} v 0) v1
debug 2022-03-13T01:06:08.991+0000 7fb3a9c76700 0 log_channel(audit) log [INF] : from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
debug 2022-03-13T01:06:09.227+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:09.227+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3311370572' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:09.343+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
audit 2022-03-13T01:06:07.961210+0000 mgr.a (mgr.5974105) 8068 : audit [DBG] from='client.6072816 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:08.476173+0000 mon.a (mon.0) 234806 : audit [DBG] from='client.? 10.233.92.240:0/2397278571' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:08.851299+0000 mon.a (mon.0) 234807 : audit [DBG] from='client.? 10.233.92.240:0/3956728528' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:08.995859+0000 mon.a (mon.0) 234808 : audit [INF] from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/mirror_snapshot_schedule"}]: dispatch
audit 2022-03-13T01:06:09.231197+0000 mon.a (mon.0) 234809 : audit [DBG] from='client.? 10.233.92.240:0/3311370572' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:10.043+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:10.047+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3895049593' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:10.423+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:10.423+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3618064528' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:10.831+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:10.831+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2245354861' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:09.601971+0000 mgr.a (mgr.5974105) 8069 : audit [DBG] from='client.6072840 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:06:09.713375+0000 mgr.a (mgr.5974105) 8070 : cluster [DBG] pgmap v3715: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 1.8 MiB/s wr, 92 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:10.049565+0000 mon.a (mon.0) 234810 : audit [DBG] from='client.? 10.233.92.240:0/3895049593' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:10.427622+0000 mon.a (mon.0) 234811 : audit [DBG] from='client.? 10.233.92.240:0/3618064528' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:10.834542+0000 mon.a (mon.0) 234812 : audit [DBG] from='client.? 10.233.92.240:0/2245354861' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:12.179+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"} v 0) v1
debug 2022-03-13T01:06:12.179+0000 7fb3a9c76700 0 log_channel(audit) log [INF] : from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
debug 2022-03-13T01:06:12.183+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:12.183+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1977081228' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:12.579+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:12.583+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2936018234' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:11.268785+0000 mgr.a (mgr.5974105) 8071 : audit [DBG] from='client.5853410 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:06:11.718126+0000 mgr.a (mgr.5974105) 8072 : cluster [DBG] pgmap v3716: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 1.8 MiB/s wr, 92 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:11.785607+0000 mon.c (mon.2) 7545 : audit [DBG] from='client.? 10.233.92.240:0/4181990136' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:12.851+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_write.cc:1668] [default] New memtable created with log file: #16941. Immutable memtables: 0.
debug 2022-03-13T01:06:12.855+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:06:12.857316) [db_impl/db_impl_compaction_flush.cc:2198] Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots available 1, flush slots scheduled 1, compaction slots scheduled 0
debug 2022-03-13T01:06:12.855+0000 7fb3af481700 4 rocksdb: [flush_job.cc:321] [default] [JOB 2407] Flushing memtable with next log file: 16941
debug 2022-03-13T01:06:12.855+0000 7fb3af481700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133572857392, "job": 2407, "event": "flush_started", "num_memtables": 1, "num_entries": 1231, "num_deletes": 251, "total_data_size": 4433054, "memory_usage": 4457968, "flush_reason": "Manual Compaction"}
debug 2022-03-13T01:06:12.855+0000 7fb3af481700 4 rocksdb: [flush_job.cc:350] [default] [JOB 2407] Level-0 flush table #16942: started
debug 2022-03-13T01:06:12.903+0000 7fb3af481700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133572909241, "cf_name": "default", "job": 2407, "event": "table_file_creation", "file_number": 16942, "file_size": 4266867, "table_properties": {"data_size": 4257174, "index_size": 7369, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 0, "index_value_is_delta_encoded": 0, "filter_size": 1477, "raw_key_size": 11639, "raw_average_key_size": 20, "raw_value_size": 4244115, "raw_average_value_size": 7525, "num_data_blocks": 215, "num_entries": 564, "num_deletions": 251, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1647133489, "oldest_key_time": 1647133489, "file_creation_time": 1647133572}}
debug 2022-03-13T01:06:12.903+0000 7fb3af481700 4 rocksdb: [flush_job.cc:401] [default] [JOB 2407] Level-0 flush table #16942: 4266867 bytes OK
debug 2022-03-13T01:06:12.955+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:06:12.926251) [memtable_list.cc:447] [default] Level-0 commit table #16942 started
debug 2022-03-13T01:06:12.955+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:06:12.959855) [memtable_list.cc:503] [default] Level-0 commit table #16942: memtable #1 done
debug 2022-03-13T01:06:12.955+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:06:12.959887) EVENT_LOG_v1 {"time_micros": 1647133572959879, "job": 2407, "event": "flush_finished", "output_compression": "NoCompression", "lsm_state": [1, 0, 0, 0, 0, 0, 1], "immutable_memtables": 0}
debug 2022-03-13T01:06:12.955+0000 7fb3af481700 4 rocksdb: (Original Log Time 2022/03/13-01:06:12.959906) [db_impl/db_impl_compaction_flush.cc:205] [default] Level summary: base level 6 level multiplier 10.00 max bytes base 268435456 files[1 0 0 0 0 0 1] max score 0.25
debug 2022-03-13T01:06:12.955+0000 7fb3af481700 4 rocksdb: [db_impl/db_impl_files.cc:353] [JOB 2407] Try to delete WAL files size 4427294, prev total WAL file size 4427294, number of live WAL files 2.
debug 2022-03-13T01:06:12.955+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:06:12.955+0000 7fb3afc82700 4 rocksdb: (Original Log Time 2022/03/13-01:06:12.960782) [db_impl/db_impl_compaction_flush.cc:2516] [default] Manual compaction from level-0 to level-6 from 'paxos .. 'paxos; will stop at (end)
debug 2022-03-13T01:06:12.955+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1676] [default] [JOB 2408] Compacting 1@0 + 1@6 files to L6, score -1.00
debug 2022-03-13T01:06:12.955+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1680] [default] Compaction start summary: Base version 2407 Base level 0, inputs: [16942(4166KB)], [16940(53MB)]
debug 2022-03-13T01:06:12.955+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133572960814, "job": 2408, "event": "compaction_started", "compaction_reason": "ManualCompaction", "files_L0": [16942], "files_L6": [16940], "score": -1, "input_data_size": 59943304}
debug 2022-03-13T01:06:13.475+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:13.475+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/421123837' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:13.715+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1349] [default] [JOB 2408] Generated table #16943: 3874 keys, 55495177 bytes
debug 2022-03-13T01:06:13.715+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133573721026, "cf_name": "default", "job": 2408, "event": "table_file_creation", "file_number": 16943, "file_size": 55495177, "table_properties": {"data_size": 55400246, "index_size": 84290, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 0, "index_value_is_delta_encoded": 0, "filter_size": 9797, "raw_key_size": 78379, "raw_average_key_size": 20, "raw_value_size": 55286067, "raw_average_value_size": 14271, "num_data_blocks": 2491, "num_entries": 3874, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1646580773, "oldest_key_time": 0, "file_creation_time": 1647133572}}
debug 2022-03-13T01:06:13.843+0000 7fb3afc82700 4 rocksdb: [compaction/compaction_job.cc:1415] [default] [JOB 2408] Compacted 1@0 + 1@6 files to L6 => 55495177 bytes
debug 2022-03-13T01:06:13.867+0000 7fb3afc82700 4 rocksdb: (Original Log Time 2022/03/13-01:06:13.873022) [compaction/compaction_job.cc:760] [default] compacted to: base level 6 level multiplier 10.00 max bytes base 268435456 files[0 0 0 0 0 0 1] max score 0.00, MB/sec: 78.8 rd, 73.0 wr, level 6, files in(1, 1) out(1) MB in(4.1, 53.1) out(52.9), read-write-amplify(27.1) write-amplify(13.0) OK, records in: 4390, records dropped: 516 output_compression: NoCompression
debug 2022-03-13T01:06:13.867+0000 7fb3afc82700 4 rocksdb: (Original Log Time 2022/03/13-01:06:13.873050) EVENT_LOG_v1 {"time_micros": 1647133573873036, "job": 2408, "event": "compaction_finished", "compaction_time_micros": 760349, "compaction_time_cpu_micros": 87180, "output_level": 6, "num_output_files": 1, "total_output_size": 55495177, "num_input_records": 4390, "num_output_records": 3874, "num_subcompactions": 1, "output_compression": "NoCompression", "num_single_delete_mismatches": 0, "num_single_delete_fallthrough": 0, "lsm_state": [0, 0, 0, 0, 0, 0, 1]}
debug 2022-03-13T01:06:13.875+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133573880895, "job": 2408, "event": "table_file_deletion", "file_number": 16942}
debug 2022-03-13T01:06:13.883+0000 7fb3afc82700 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1647133573886684, "job": 2408, "event": "table_file_deletion", "file_number": 16940}
debug 2022-03-13T01:06:13.883+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:06:13.883+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:06:13.883+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:06:13.883+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
debug 2022-03-13T01:06:13.883+0000 7fb3a546d700 4 rocksdb: [db_impl/db_impl_compaction_flush.cc:1443] [default] Manual compaction starting
audit 2022-03-13T01:06:12.182849+0000 mon.a (mon.0) 234813 : audit [INF] from='mgr.5974105 10.233.90.35:0/3790109779' entity='mgr.a' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/a/trash_purge_schedule"}]: dispatch
audit 2022-03-13T01:06:12.186303+0000 mon.a (mon.0) 234814 : audit [DBG] from='client.? 10.233.92.240:0/1977081228' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:12.585390+0000 mon.a (mon.0) 234815 : audit [DBG] from='client.? 10.233.92.240:0/2936018234' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:13.479778+0000 mon.a (mon.0) 234816 : audit [DBG] from='client.? 10.233.92.240:0/421123837' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:14.075+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:14.075+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3028018432' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:14.375+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:06:14.447+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:14.447+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1177500962' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:12.978766+0000 mgr.a (mgr.5974105) 8073 : audit [DBG] from='client.6072885 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:06:13.720427+0000 mgr.a (mgr.5974105) 8074 : cluster [DBG] pgmap v3717: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.6 MiB/s rd, 2.9 MiB/s wr, 142 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:14.079807+0000 mon.a (mon.0) 234817 : audit [DBG] from='client.? 10.233.92.240:0/3028018432' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:14.452512+0000 mon.a (mon.0) 234818 : audit [DBG] from='client.? 10.233.92.240:0/1177500962' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:14.729104+0000 mon.b (mon.1) 1180 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:14.729314+0000 mon.b (mon.1) 1181 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:06:15.299+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:15.299+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/704546489' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:15.683+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:15.683+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1443099888' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:16.059+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:16.059+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2310502921' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:14.845860+0000 mgr.a (mgr.5974105) 8075 : audit [DBG] from='client.6072909 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:14.997971+0000 mon.c (mon.2) 7546 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:14.998211+0000 mon.c (mon.2) 7547 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:15.305014+0000 mon.a (mon.0) 234819 : audit [DBG] from='client.? 10.233.92.240:0/704546489' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:15.688850+0000 mon.a (mon.0) 234820 : audit [DBG] from='client.? 10.233.92.240:0/1443099888' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:15.721845+0000 mgr.a (mgr.5974105) 8076 : cluster [DBG] pgmap v3718: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 1.9 MiB/s wr, 95 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:16.062265+0000 mon.a (mon.0) 234821 : audit [DBG] from='client.? 10.233.92.240:0/2310502921' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:16.887+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:16.887+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2024742259' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:17.107+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:06:17.107+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:06:17.275+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:17.275+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1692436320' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:16.433397+0000 mgr.a (mgr.5974105) 8077 : audit [DBG] from='client.6072933 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:16.890062+0000 mon.a (mon.0) 234822 : audit [DBG] from='client.? 10.233.92.240:0/2024742259' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:17.111492+0000 mon.a (mon.0) 234823 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:17.111635+0000 mon.a (mon.0) 234824 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:17.279489+0000 mon.a (mon.0) 234825 : audit [DBG] from='client.? 10.233.92.240:0/1692436320' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:17.639+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "status", "format": "json"} v 0) v1
debug 2022-03-13T01:06:17.639+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2181471597' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
debug 2022-03-13T01:06:17.771+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:17.771+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1929209590' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:18.667+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:18.667+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3774731413' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:17.641754+0000 mon.a (mon.0) 234826 : audit [DBG] from='client.? 10.233.92.240:0/2181471597' entity='client.admin' cmd=[{"prefix": "status", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:17.724263+0000 mgr.a (mgr.5974105) 8078 : cluster [DBG] pgmap v3719: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.3 MiB/s rd, 2.6 MiB/s wr, 132 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:17.775628+0000 mon.a (mon.0) 234827 : audit [DBG] from='client.? 10.233.92.240:0/1929209590' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:19.027+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:19.027+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4007091832' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:19.399+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:06:19.423+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:19.423+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2507638332' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:18.151120+0000 mgr.a (mgr.5974105) 8079 : audit [DBG] from='client.6072963 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:18.672933+0000 mon.a (mon.0) 234828 : audit [DBG] from='client.? 10.233.92.240:0/3774731413' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:19.033202+0000 mon.a (mon.0) 234829 : audit [DBG] from='client.? 10.233.92.240:0/4007091832' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:19.427970+0000 mon.a (mon.0) 234830 : audit [DBG] from='client.? 10.233.92.240:0/2507638332' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:20.331+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:20.331+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1070139740' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:20.763+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:20.767+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/395396533' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:19.726257+0000 mgr.a (mgr.5974105) 8080 : cluster [DBG] pgmap v3720: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 1.7 MiB/s wr, 86 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:19.845929+0000 mgr.a (mgr.5974105) 8081 : audit [DBG] from='client.6072987 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:20.335308+0000 mon.a (mon.0) 234831 : audit [DBG] from='client.? 10.233.92.240:0/1070139740' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:20.769945+0000 mon.a (mon.0) 234832 : audit [DBG] from='client.? 10.233.92.240:0/395396533' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:21.187+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:21.187+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4262315083' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:22.119+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:22.119+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/753185004' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:21.189442+0000 mon.a (mon.0) 234833 : audit [DBG] from='client.? 10.233.92.240:0/4262315083' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:21.577137+0000 mgr.a (mgr.5974105) 8082 : audit [DBG] from='client.6073011 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:06:21.729470+0000 mgr.a (mgr.5974105) 8083 : cluster [DBG] pgmap v3721: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.6 MiB/s rd, 1.7 MiB/s wr, 86 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:22.125210+0000 mon.a (mon.0) 234834 : audit [DBG] from='client.? 10.233.92.240:0/753185004' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:22.503+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:22.503+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3212430967' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:22.879+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:22.879+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1389896349' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:22.506352+0000 mon.a (mon.0) 234835 : audit [DBG] from='client.? 10.233.92.240:0/3212430967' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:22.884426+0000 mon.a (mon.0) 234836 : audit [DBG] from='client.? 10.233.92.240:0/1389896349' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:24.231+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:24.231+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3181724476' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:24.423+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:06:24.647+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:24.647+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/249864121' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:23.348314+0000 mgr.a (mgr.5974105) 8084 : audit [DBG] from='client.6073035 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:06:23.736701+0000 mgr.a (mgr.5974105) 8085 : cluster [DBG] pgmap v3722: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.4 MiB/s rd, 3.0 MiB/s wr, 130 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:23.802803+0000 mon.c (mon.2) 7548 : audit [DBG] from='client.? 10.233.92.240:0/1958937454' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:24.234544+0000 mon.a (mon.0) 234837 : audit [DBG] from='client.? 10.233.92.240:0/3181724476' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:25.567+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:25.567+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1315041559' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:25.947+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:25.951+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1291623286' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:26.335+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:26.335+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/818546022' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:24.649738+0000 mon.a (mon.0) 234838 : audit [DBG] from='client.? 10.233.92.240:0/249864121' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:24.999500+0000 mon.c (mon.2) 7549 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:24.999818+0000 mon.c (mon.2) 7550 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:25.070002+0000 mgr.a (mgr.5974105) 8086 : audit [DBG] from='client.6073059 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:25.256783+0000 mon.b (mon.1) 1182 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:25.257230+0000 mon.b (mon.1) 1183 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:25.571009+0000 mon.a (mon.0) 234839 : audit [DBG] from='client.? 10.233.92.240:0/1315041559' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:25.740534+0000 mgr.a (mgr.5974105) 8087 : cluster [DBG] pgmap v3723: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.3 MiB/s rd, 1.9 MiB/s wr, 80 op/s; 13794/41382 objects degraded (33.333%)
audit 2022-03-13T01:06:25.953660+0000 mon.a (mon.0) 234840 : audit [DBG] from='client.? 10.233.92.240:0/1291623286' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:27.115+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:06:27.115+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:06:27.211+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:27.211+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/746725820' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:26.339889+0000 mon.a (mon.0) 234841 : audit [DBG] from='client.? 10.233.92.240:0/818546022' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:26.720117+0000 mgr.a (mgr.5974105) 8088 : audit [DBG] from='client.6073083 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:27.117725+0000 mon.a (mon.0) 234842 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:27.117914+0000 mon.a (mon.0) 234843 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:27.216064+0000 mon.a (mon.0) 234844 : audit [DBG] from='client.? 10.233.92.240:0/746725820' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:27.659+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:27.659+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2012079345' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:28.031+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:28.031+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2529887003' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:28.635+0000 7fb3ac47b700 0 log_channel(cluster) log [WRN] : Health check update: Degraded data redundancy: 13795/41385 objects degraded (33.333%), 33 pgs degraded, 33 pgs undersized (PG_DEGRADED)
debug 2022-03-13T01:06:28.923+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:28.923+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/929858115' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:27.662404+0000 mon.a (mon.0) 234845 : audit [DBG] from='client.? 10.233.92.240:0/2012079345' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:27.745251+0000 mgr.a (mgr.5974105) 8089 : cluster [DBG] pgmap v3724: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.5 MiB/s rd, 3.7 MiB/s wr, 131 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:28.034334+0000 mon.a (mon.0) 234846 : audit [DBG] from='client.? 10.233.92.240:0/2529887003' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:29.307+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:29.307+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1337309417' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:29.451+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:06:29.707+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:29.707+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/828116658' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:30.563+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:30.563+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1139065489' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:28.457066+0000 mgr.a (mgr.5974105) 8090 : audit [DBG] from='client.6073107 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:06:28.641335+0000 mon.a (mon.0) 234847 : cluster [WRN] Health check update: Degraded data redundancy: 13795/41385 objects degraded (33.333%), 33 pgs degraded, 33 pgs undersized (PG_DEGRADED)
audit 2022-03-13T01:06:28.928825+0000 mon.a (mon.0) 234848 : audit [DBG] from='client.? 10.233.92.240:0/929858115' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:29.309892+0000 mon.a (mon.0) 234849 : audit [DBG] from='client.? 10.233.92.240:0/1337309417' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:29.713132+0000 mon.a (mon.0) 234850 : audit [DBG] from='client.? 10.233.92.240:0/828116658' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:29.769670+0000 mgr.a (mgr.5974105) 8091 : cluster [DBG] pgmap v3725: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.0 MiB/s rd, 3.0 MiB/s wr, 95 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:30.099695+0000 mgr.a (mgr.5974105) 8092 : audit [DBG] from='client.6073131 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
debug 2022-03-13T01:06:30.947+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:30.947+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1546727404' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:31.323+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:31.323+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4291635699' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:30.567252+0000 mon.a (mon.0) 234851 : audit [DBG] from='client.? 10.233.92.240:0/1139065489' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:30.952549+0000 mon.a (mon.0) 234852 : audit [DBG] from='client.? 10.233.92.240:0/1546727404' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:31.329256+0000 mon.a (mon.0) 234853 : audit [DBG] from='client.? 10.233.92.240:0/4291635699' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:32.199+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:32.199+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2569688890' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:32.571+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:32.571+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1439742542' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:32.959+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:32.959+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1016223083' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:31.747567+0000 mgr.a (mgr.5974105) 8093 : audit [DBG] from='client.6073155 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:06:31.776699+0000 mgr.a (mgr.5974105) 8094 : cluster [DBG] pgmap v3726: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.0 MiB/s rd, 3.0 MiB/s wr, 95 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:32.203538+0000 mon.a (mon.0) 234854 : audit [DBG] from='client.? 10.233.92.240:0/2569688890' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:32.575531+0000 mon.a (mon.0) 234855 : audit [DBG] from='client.? 10.233.92.240:0/1439742542' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:33.871+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:33.871+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/866577835' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:32.963184+0000 mon.a (mon.0) 234856 : audit [DBG] from='client.? 10.233.92.240:0/1016223083' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:33.875201+0000 mon.a (mon.0) 234857 : audit [DBG] from='client.? 10.233.92.240:0/866577835' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:34.251+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:34.251+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/702392450' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:34.479+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:06:34.651+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:34.651+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3962634719' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:33.413174+0000 mgr.a (mgr.5974105) 8095 : audit [DBG] from='client.6073179 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:06:33.792446+0000 mgr.a (mgr.5974105) 8096 : cluster [DBG] pgmap v3727: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.7 MiB/s rd, 4.3 MiB/s wr, 129 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:34.254850+0000 mon.a (mon.0) 234858 : audit [DBG] from='client.? 10.233.92.240:0/702392450' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:34.351464+0000 mon.b (mon.1) 1184 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:34.351702+0000 mon.b (mon.1) 1185 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:34.657107+0000 mon.a (mon.0) 234859 : audit [DBG] from='client.? 10.233.92.240:0/3962634719' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:34.995508+0000 mon.c (mon.2) 7551 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:34.995762+0000 mon.c (mon.2) 7552 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:06:35.487+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:35.487+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3779719910' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:35.851+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:35.851+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2972404406' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:36.231+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:36.231+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1581133349' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:35.037156+0000 mgr.a (mgr.5974105) 8097 : audit [DBG] from='client.5853533 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:35.491383+0000 mon.a (mon.0) 234860 : audit [DBG] from='client.? 10.233.92.240:0/3779719910' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:35.793107+0000 mgr.a (mgr.5974105) 8098 : cluster [DBG] pgmap v3728: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.8 MiB/s rd, 3.1 MiB/s wr, 85 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:35.855001+0000 mon.a (mon.0) 234861 : audit [DBG] from='client.? 10.233.92.240:0/2972404406' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:37.111+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:06:37.111+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:06:37.139+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:37.139+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/584111827' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:37.535+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:37.535+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2586993599' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:36.235486+0000 mon.a (mon.0) 234862 : audit [DBG] from='client.? 10.233.92.240:0/1581133349' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:36.650126+0000 mgr.a (mgr.5974105) 8099 : audit [DBG] from='client.5853545 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:37.113811+0000 mon.a (mon.0) 234863 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:37.113953+0000 mon.a (mon.0) 234864 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:37.142521+0000 mon.a (mon.0) 234865 : audit [DBG] from='client.? 10.233.92.240:0/584111827' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:38.011+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:38.011+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3651145124' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:38.939+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:38.939+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/638778433' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:37.537494+0000 mon.a (mon.0) 234866 : audit [DBG] from='client.? 10.233.92.240:0/2586993599' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:37.795607+0000 mgr.a (mgr.5974105) 8100 : cluster [DBG] pgmap v3729: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.6 MiB/s rd, 4.2 MiB/s wr, 151 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:38.016797+0000 mon.a (mon.0) 234867 : audit [DBG] from='client.? 10.233.92.240:0/3651145124' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:39.299+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:39.299+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3204049864' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:39.499+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:06:39.675+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:39.675+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/535845589' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:38.483754+0000 mgr.a (mgr.5974105) 8101 : audit [DBG] from='client.6073248 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:38.941734+0000 mon.a (mon.0) 234868 : audit [DBG] from='client.? 10.233.92.240:0/638778433' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:39.305317+0000 mon.a (mon.0) 234869 : audit [DBG] from='client.? 10.233.92.240:0/3204049864' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:39.681293+0000 mon.a (mon.0) 234870 : audit [DBG] from='client.? 10.233.92.240:0/535845589' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:40.511+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:40.511+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1197668221' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:40.911+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:40.911+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1397627551' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:41.275+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:41.275+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1526358507' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:39.799578+0000 mgr.a (mgr.5974105) 8102 : cluster [DBG] pgmap v3730: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.5 MiB/s wr, 100 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:40.044442+0000 mgr.a (mgr.5974105) 8103 : audit [DBG] from='client.6073272 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:40.514757+0000 mon.a (mon.0) 234871 : audit [DBG] from='client.? 10.233.92.240:0/1197668221' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:40.917253+0000 mon.a (mon.0) 234872 : audit [DBG] from='client.? 10.233.92.240:0/1397627551' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:42.135+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:42.135+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/888946758' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:41.280269+0000 mon.a (mon.0) 234873 : audit [DBG] from='client.? 10.233.92.240:0/1526358507' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:41.673225+0000 mgr.a (mgr.5974105) 8104 : audit [DBG] from='client.6073296 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
cluster 2022-03-13T01:06:41.800498+0000 mgr.a (mgr.5974105) 8105 : cluster [DBG] pgmap v3731: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 1.5 MiB/s rd, 2.5 MiB/s wr, 100 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:42.139679+0000 mon.a (mon.0) 234874 : audit [DBG] from='client.? 10.233.92.240:0/888946758' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:42.515+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:42.515+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3358348167' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:42.883+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:42.883+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/2039914761' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:42.518377+0000 mon.a (mon.0) 234875 : audit [DBG] from='client.? 10.233.92.240:0/3358348167' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:42.889043+0000 mon.a (mon.0) 234876 : audit [DBG] from='client.? 10.233.92.240:0/2039914761' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:43.739+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:43.739+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/91234993' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:44.119+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:44.119+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/311864221' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:44.515+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:44.515+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1869561252' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:44.539+0000 7fb3ac47b700 1 mon.a@0(leader).osd e690 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 71303168 full_alloc: 71303168 kv_alloc: 876609536
debug 2022-03-13T01:06:45.371+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:45.371+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3422698865' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:43.281090+0000 mgr.a (mgr.5974105) 8106 : audit [DBG] from='client.6073320 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:43.744845+0000 mon.a (mon.0) 234877 : audit [DBG] from='client.? 10.233.92.240:0/91234993' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:43.801237+0000 mgr.a (mgr.5974105) 8107 : cluster [DBG] pgmap v3732: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.8 MiB/s rd, 3.9 MiB/s wr, 167 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:44.122962+0000 mon.a (mon.0) 234878 : audit [DBG] from='client.? 10.233.92.240:0/311864221' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
audit 2022-03-13T01:06:44.367299+0000 mon.b (mon.1) 1186 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:44.367546+0000 mon.b (mon.1) 1187 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:44.520027+0000 mon.a (mon.0) 234879 : audit [DBG] from='client.? 10.233.92.240:0/1869561252' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:45.731+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:45.731+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/3745619982' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:46.111+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:46.111+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1040985088' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:44.904166+0000 mgr.a (mgr.5974105) 8108 : audit [DBG] from='client.6073344 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 20, "target": ["mon-mgr", ""], "format": "json"}]: dispatch
audit 2022-03-13T01:06:45.000888+0000 mon.c (mon.2) 7553 : audit [DBG] from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
audit 2022-03-13T01:06:45.001079+0000 mon.c (mon.2) 7554 : audit [DBG] from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
audit 2022-03-13T01:06:45.373725+0000 mon.a (mon.0) 234880 : audit [DBG] from='client.? 10.233.92.240:0/3422698865' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:45.735009+0000 mon.a (mon.0) 234881 : audit [DBG] from='client.? 10.233.92.240:0/3745619982' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
cluster 2022-03-13T01:06:45.802244+0000 mgr.a (mgr.5974105) 8109 : cluster [DBG] pgmap v3733: 33 pgs: 33 active+undersized+degraded; 51 GiB data, 103 GiB used, 497 GiB / 600 GiB avail; 2.1 MiB/s rd, 2.6 MiB/s wr, 133 op/s; 13795/41385 objects degraded (33.333%)
audit 2022-03-13T01:06:46.115073+0000 mon.a (mon.0) 234882 : audit [DBG] from='client.? 10.233.92.240:0/1040985088' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:46.967+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:46.967+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/1513946077' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
debug 2022-03-13T01:06:47.095+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd='mon_status' args=[]: dispatch
debug 2022-03-13T01:06:47.095+0000 7fb3b1502700 0 log_channel(audit) log [DBG] : from='admin socket' entity='admin socket' cmd=mon_status args=[]: finished
debug 2022-03-13T01:06:47.347+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd tree", "format": "json"} v 0) v1
debug 2022-03-13T01:06:47.347+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/4118544420' entity='client.admin' cmd=[{"prefix": "osd tree", "format": "json"}]: dispatch
debug 2022-03-13T01:06:47.771+0000 7fb3a9c76700 0 mon.a@0(leader) e3 handle_command mon_command({"prefix": "osd ls", "format": "json"} v 0) v1
debug 2022-03-13T01:06:47.771+0000 7fb3a9c76700 0 log_channel(audit) log [DBG] : from='client.? 10.233.92.240:0/649945549' entity='client.admin' cmd=[{"prefix": "osd ls", "format": "json"}]: dispatch
audit 2022-03-13T01:06:46.485217+0000 mgr.a (mgr.5974105) 8110 : audit [DBG] from='client.6073368 -' entity='client.admin' cmd=[{"prefix": "osd ok-to-stop", "id