I created a cluster with Ceph 16.2.7 and added OSDs during cluster expansion, then removed some OSDs from the CRUSH map with the steps below. Note that the --all-available-devices option had been set to unmanaged:
# ceph orch apply osd --all-available-devices --unmanaged=true
# ceph osd out osd.x
# ceph osd down osd.x
# systemctl stop ceph-d813d6b4-6d3c-11ec-a97e-000c2992a0d6@osd.x.service
# ceph osd crush rm osd.x
# ceph osd rm osd.x
# ceph auth rm osd.x
# cd /var/lib/ceph/d813d6b4-6d3c-11ec-a97e-000c2992a0d6/
# rm -rf osd.x
# cd /etc/systemd/system/ceph-d813d6b4-6d3c-11ec-a97e-000c2992a0d6.target.wants/
# rm ceph-d813d6b4-6d3c-11ec-a97e-000c2992a0d6@osd.x.service
# lvremove /dev/ceph-*
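
As an aside, I believe the crush rm / osd rm / auth rm steps above can be collapsed into a single purge command on this release; a minimal sketch, using the same osd.x placeholder:

# ceph osd purge osd.x --yes-i-really-mean-it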
But whenever I remove the LVM volumes that belonged to the deleted OSDs, the removed OSDs come back automatically. I don't want that to happen; I want to create OSDs on those disks manually. Can anyone explain this to me? Here is my current service spec:
[root@ceph2-node-01 ~]# ceph orch ls --export --format yaml
service_type: alertmanager
service_name: alertmanager
placement:
  count: 3
  label: mon
---
service_type: crash
service_name: crash
placement:
  host_pattern: '*'
--- !!python/object:ceph.deployment.service_spec.MonitoringSpec
config: null
networks: []
placement: !!python/object:ceph.deployment.service_spec.PlacementSpec
  count: 3
  count_per_host: null
  host_pattern: null
  hosts:
  - !!python/object/new:ceph.deployment.service_spec.HostPlacementSpec
    - ceph2-node-02
    - ''
    - ''
  - !!python/object/new:ceph.deployment.service_spec.HostPlacementSpec
    - ceph2-node-03
    - ''
    - ''
  label: null
port: null
preview_only: false
service_id: null
service_type: grafana
unmanaged: false
---
service_type: mgr
service_name: mgr
placement:
  count: 2
---
service_type: mon
service_name: mon
placement:
  count: 5
--- !!python/object:ceph.deployment.service_spec.MonitoringSpec
config: null
networks: []
placement: !!python/object:ceph.deployment.service_spec.PlacementSpec
  count: null
  count_per_host: null
  host_pattern: '*'
  hosts: []
  label: null
port: null
preview_only: false
service_id: null
service_type: node-exporter
unmanaged: false
---
service_type: osd
service_id: all-available-devices
service_name: osd.all-available-devices
placement:
  host_pattern: '*'
unmanaged: true
spec:
  data_devices:
    all: true
  filter_logic: AND
  objectstore: bluestore
---
service_type: osd
service_id: dashboard-admin-1642344788791
service_name: osd.dashboard-admin-1642344788791
placement:
  host_pattern: '*'
spec:
  data_devices:
    rotational: true
  db_devices:
    rotational: false
  db_slots: 2
  filter_logic: AND
  objectstore: bluestore
--- !!python/object:ceph.deployment.service_spec.MonitoringSpec
config: null
networks: []
placement: !!python/object:ceph.deployment.service_spec.PlacementSpec
  count: 3
  count_per_host: null
  host_pattern: null
  hosts: []
  label: mon
port: null
preview_only: false
service_id: null
service_type: prometheus
unmanaged: false
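
If I am reading this export correctly, my guess is that the osd.dashboard-admin-1642344788791 spec is what brings the OSDs back: unlike osd.all-available-devices it is still managed, and its host_pattern: '*' matches the disks again as soon as lvremove makes them look like available devices. A sketch of what I am considering, assuming that diagnosis is right (osd-specs.yaml and /dev/sdb are placeholders I made up for this example):

# ceph orch ls osd --export > osd-specs.yaml
# vi osd-specs.yaml    <- set "unmanaged: true" on osd.dashboard-admin-1642344788791
# ceph orch apply -i osd-specs.yaml
# ceph orch daemon add osd ceph2-node-01:/dev/sdb

The last command is the documented way to create a single OSD on a specific disk by hand, which seems to be what I want instead of the automatic placement.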