Configurare Ceph come backend per Glance, Cinder e Nova in OpenStack 2025.1.
glance-api.conf
# glance-api.conf — register the Ceph RBD store as the default image
# backend, alongside the http and cinder stores. Read by glance-api
# via oslo.config.

[DEFAULT]
enabled_backends = rbd:rbd,http:http,cinder:cinder

[glance_store]
default_backend = rbd

[rbd]
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
# chunk size for striping images into RADOS objects (MB, per glance_store docs)
rbd_store_chunk_size = 8
cinder.conf
# cinder.conf — single Ceph RBD volume backend. Read by cinder-volume
# via oslo.config; key order within a section is not significant.

[DEFAULT]
enabled_backends = rbd

[rbd]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = rbd
rbd_pool = volumes
rbd_user = cinder
rbd_ceph_conf = /etc/ceph/ceph.conf
# must match the libvirt secret UUID registered on every compute node
rbd_secret_uuid = ad44c82d-e905-4d13-bfad-8fe47567a8e0
rbd_store_chunk_size = 4
rbd_flatten_volume_from_snapshot = False
rbd_max_clone_depth = 5
# negative value: no explicit timeout, librados default applies
rados_connect_timeout = -1
report_discard_supported = True
# upload images straight through the Ceph backend instead of via glance
image_upload_use_cinder_backend = True
image_upload_use_internal_tenant = True
Per verificare che i secret corrispondano, si legge il secret registrato in libvirt, che deve coincidere con la chiave del keyring definito in Ceph per il client cinder:
root@os-worker-2:~# virsh secret-list
 UUID                                   Usage
-------------------------------------------------------------------
 ad44c82d-e905-4d13-bfad-8fe47567a8e0   ceph client.cinder secret

root@os-worker-2:~# virsh secret-get-value ad44c82d-e905-4d13-bfad-8fe47567a8e0
AQCg0eRoZJzYJxAAvXBnsXlzg7L5zPMk6U4sUg==
nova.conf
# nova.conf — [libvirt] section for nova-compute: Ceph RBD-backed
# ephemeral disks and Cinder RBD volume attachment, plus TLS live migration.
[libvirt]
# -2 disables file injection into guest images entirely (nova docs);
# injection is not usable with RBD-backed disks anyway
inject_partition = -2
inject_password = False
inject_key = False
virt_type = kvm
# ceph rbd support
# Ceph client and libvirt secret used when attaching Cinder RBD volumes;
# the UUID must match the secret defined with `virsh secret-define`
# (see the virsh secret-list output above in this document)
rbd_user = cinder
rbd_secret_uuid = ad44c82d-e905-4d13-bfad-8fe47567a8e0
# store ephemeral instance disks directly in the Ceph "vms" pool
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
# QEMU-native TLS for the migration data stream
live_migration_with_native_tls = true
# NOTE(review): with native TLS enabled, scheme=tls (libvirt-tunnelled TLS)
# may be redundant or ignored — confirm against the nova [libvirt] option docs
live_migration_scheme = tls
# address this host listens on for incoming migrations
live_migration_inbound_addr = 10.224.10.34
# pass discard/TRIM through so freed guest blocks are reclaimed in the RBD pool
hw_disk_discard = unmap
# NOTE(review): writeback cache for network (rbd) disks — assumes librbd
# honors guest flushes; verify against the Ceph block-device cache docs
disk_cachemodes = network=writeback