From 9149dc1d7f44b37b0dfa82a0505875a5d36b0087 Mon Sep 17 00:00:00 2001 From: "a.pivkin" Date: Mon, 6 Apr 2026 07:08:17 +0300 Subject: [PATCH] deep_scrub_choked --- .deepsource.toml | 18 + .fuse_hidden0000003200000001 | 0 .fuse_hidden0000010600000002 | 0 .fuse_hidden0000011600000001 | 0 .mergify.yml | 63 + .readthedocs.yaml | 10 + CONTRIBUTING.md | 101 ++ LICENSE | 201 +++ Makefile | 113 ++ README.rst | 10 + Vagrantfile | 605 +++++++ ansible.cfg | 43 + ceph-ansible.spec.in | 62 + contrib/backport_to_stable_branch.sh | 96 + contrib/push-roles-to-ansible-galaxy.sh | 105 ++ contrib/rundep.sample | 44 + contrib/rundep_installer.sh | 27 + contrib/snapshot_vms.sh | 73 + contrib/vagrant_variables.yml.atomic | 30 + contrib/vagrant_variables.yml.linode | 36 + contrib/vagrant_variables.yml.openstack | 49 + dashboard.yml | 148 ++ docs/.gitignore | 1 + docs/Makefile | 20 + docs/source/_static/.empty | 0 docs/source/_templates/.empty | 0 docs/source/conf.py | 155 ++ docs/source/day-2/osds.rst | 51 + docs/source/day-2/purge.rst | 15 + docs/source/day-2/upgrade.rst | 17 + docs/source/dev/index.rst | 102 ++ docs/source/glossary.rst | 9 + docs/source/index.rst | 339 ++++ docs/source/installation/containerized.rst | 64 + docs/source/installation/methods.rst | 12 + .../source/installation/non-containerized.rst | 58 + docs/source/osds/scenarios.rst | 221 +++ docs/source/rbdmirror/index.rst | 60 + docs/source/testing/development.rst | 4 + docs/source/testing/glossary.rst | 14 + docs/source/testing/index.rst | 38 + docs/source/testing/layout.rst | 60 + docs/source/testing/modifying.rst | 4 + docs/source/testing/running.rst | 169 ++ docs/source/testing/scenarios.rst | 211 +++ docs/source/testing/tests.rst | 99 ++ docs/source/testing/tox.rst | 75 + docs/tox.ini | 10 + dummy-ansible-hosts | 4 + generate_group_vars_sample.sh | 78 + group_vars/all.yml | 666 +++++++ group_vars/all.yml copy.sample | 667 +++++++ group_vars/clients.yml.sample | 50 + group_vars/exporters.yml.sample | 33 + 
group_vars/mdss.yml.sample | 52 + group_vars/mgrs.yml | 65 + group_vars/mgrs.yml.sample | 66 + group_vars/mons.yml | 77 + group_vars/mons.yml.sample | 78 + group_vars/nfss.yml.sample | 131 ++ group_vars/osds.yml | 225 +++ group_vars/osds.yml copy.sample | 227 +++ group_vars/rbdmirrors.yml.sample | 55 + group_vars/rgwloadbalancers.yml.sample | 35 + group_vars/rgws.yml.sample | 106 ++ infrastructure-playbooks/README.md | 7 + infrastructure-playbooks/add-mon.yml | 128 ++ .../backup-and-restore-ceph-files.yml | 109 ++ infrastructure-playbooks/ceph-keys.yml | 74 + infrastructure-playbooks/cephadm-adopt.yml | 1552 +++++++++++++++++ infrastructure-playbooks/cephadm.yml | 383 ++++ infrastructure-playbooks/docker-to-podman.yml | 236 +++ infrastructure-playbooks/gather-ceph-logs.yml | 39 + infrastructure-playbooks/lv-create.yml | 100 ++ infrastructure-playbooks/lv-teardown.yml | 109 ++ infrastructure-playbooks/purge-cluster.yml | 1175 +++++++++++++ .../purge-container-cluster.yml | 1 + infrastructure-playbooks/purge-dashboard.yml | 222 +++ .../rgw-add-users-buckets.yml | 65 + infrastructure-playbooks/rolling_update.yml | 1359 +++++++++++++++ infrastructure-playbooks/shrink-mds.yml | 177 ++ infrastructure-playbooks/shrink-mgr.yml | 138 ++ infrastructure-playbooks/shrink-mon.yml | 151 ++ infrastructure-playbooks/shrink-osd.yml | 379 ++++ infrastructure-playbooks/shrink-rbdmirror.yml | 128 ++ infrastructure-playbooks/shrink-rgw.yml | 141 ++ .../storage-inventory.yml | 30 + ...inerized-to-containerized-ceph-daemons.yml | 808 +++++++++ .../take-over-existing-cluster.yml | 73 + .../untested-by-ci/cluster-maintenance.yml | 39 + .../untested-by-ci/cluster-os-migration.yml | 552 ++++++ .../untested-by-ci/make-osd-partitions.yml | 97 ++ .../untested-by-ci/migrate-journal-to-ssd.yml | 105 ++ .../untested-by-ci/purge-multisite.yml | 11 + ...recover-osds-after-ssd-journal-failure.yml | 115 ++ .../untested-by-ci/replace-osd.yml | 190 ++ .../vars/lv_vars.yaml.sample | 57 + 
library/__init__.py | 0 .../__pycache__/ceph_config.cpython-314.pyc | Bin 0 -> 6677 bytes .../__pycache__/ceph_crush.cpython-314.pyc | Bin 0 -> 8353 bytes .../ceph_crush_rule_info.cpython-314.pyc | Bin 0 -> 2991 bytes .../ceph_ec_profile.cpython-314.pyc | Bin 0 -> 7263 bytes library/__pycache__/ceph_key.cpython-314.pyc | Bin 0 -> 20192 bytes .../__pycache__/ceph_key_info.cpython-314.pyc | Bin 0 -> 6830 bytes .../ceph_mgr_module.cpython-314.pyc | Bin 0 -> 3222 bytes .../__pycache__/ceph_osd_flag.cpython-314.pyc | Bin 0 -> 3415 bytes library/__pycache__/ceph_pool.cpython-314.pyc | Bin 0 -> 19935 bytes .../__pycache__/ceph_volume.cpython-314.pyc | Bin 0 -> 23951 bytes .../__pycache__/radosgw_user.cpython-314.pyc | Bin 0 -> 19023 bytes library/ceph_add_users_buckets.py | 571 ++++++ library/ceph_authtool.py | 131 ++ library/ceph_config.py | 208 +++ library/ceph_crush.py | 245 +++ library/ceph_crush_rule.py | 256 +++ library/ceph_crush_rule_info.py | 119 ++ library/ceph_dashboard_user.py | 289 +++ library/ceph_ec_profile.py | 256 +++ library/ceph_fs.py | 278 +++ library/ceph_key.py | 683 ++++++++ library/ceph_key_info.py | 265 +++ library/ceph_mgr_module.py | 133 ++ library/ceph_orch_apply.py | 202 +++ library/ceph_osd.py | 146 ++ library/ceph_osd_flag.py | 130 ++ library/ceph_pool.py | 739 ++++++++ library/ceph_volume.py | 732 ++++++++ library/ceph_volume_simple_activate.py | 190 ++ library/ceph_volume_simple_scan.py | 163 ++ library/cephadm_adopt.py | 184 ++ library/cephadm_bootstrap.py | 265 +++ library/radosgw_caps.py | 378 ++++ library/radosgw_realm.py | 339 ++++ library/radosgw_user.py | 581 ++++++ library/radosgw_zone.py | 543 ++++++ library/radosgw_zonegroup.py | 397 +++++ module_utils/__init__.py | 0 module_utils/ca_common.py | 151 ++ .../installer_checkpoint.cpython-310.pyc | Bin 0 -> 3938 bytes .../installer_checkpoint.cpython-314.pyc | Bin 0 -> 5894 bytes plugins/callback/installer_checkpoint.py | 154 ++ plugins/filter/__init__.py | 0 
.../__pycache__/dict2dict.cpython-310.pyc | Bin 0 -> 878 bytes .../__pycache__/dict2dict.cpython-314.pyc | Bin 0 -> 1237 bytes .../ipaddrs_in_ranges.cpython-310.pyc | Bin 0 -> 1149 bytes .../ipaddrs_in_ranges.cpython-314.pyc | Bin 0 -> 1643 bytes plugins/filter/dict2dict.py | 23 + plugins/filter/ipaddrs_in_ranges.py | 33 + profiles/rgw-keystone-v2 | 30 + profiles/rgw-keystone-v3 | 31 + profiles/rgw-radosgw-static-website | 11 + profiles/rgw-usage-log | 15 + raw_install_python.yml | 69 + requirements.txt | 3 + requirements.yml | 10 + roles/ceph-client/LICENSE | 201 +++ roles/ceph-client/README.md | 3 + roles/ceph-client/defaults/main.yml | 41 + roles/ceph-client/meta/main.yml | 14 + roles/ceph-client/tasks/create_users_keys.yml | 85 + roles/ceph-client/tasks/main.yml | 10 + roles/ceph-client/tasks/pre_requisite.yml | 28 + roles/ceph-common/LICENSE | 201 +++ roles/ceph-common/README.md | 3 + roles/ceph-common/defaults/main.yml | 1 + roles/ceph-common/files/cephstable.asc | 29 + roles/ceph-common/files/cephstablerhcs.asc | 34 + roles/ceph-common/meta/main.yml | 14 + .../tasks/configure_cluster_name.yml | 52 + .../tasks/configure_memory_allocator.yml | 36 + .../tasks/configure_repository.yml | 32 + .../tasks/create_rbd_client_dir.yml | 12 + ...nfigure_debian_repository_installation.yml | 16 + .../configure_redhat_local_installation.yml | 45 + ...nfigure_redhat_repository_installation.yml | 34 + ...configure_suse_repository_installation.yml | 4 + .../installs/debian_community_repository.yml | 20 + .../installs/debian_custom_repository.yml | 14 + .../tasks/installs/debian_dev_repository.yml | 12 + .../tasks/installs/debian_uca_repository.yml | 12 + .../installs/install_debian_packages.yml | 9 + .../tasks/installs/install_on_clear.yml | 7 + .../tasks/installs/install_on_debian.yml | 13 + .../installs/install_redhat_packages.yml | 23 + .../tasks/installs/install_suse_packages.yml | 14 + .../installs/redhat_community_repository.yml | 41 + 
.../installs/redhat_custom_repository.yml | 16 + .../tasks/installs/redhat_dev_repository.yml | 25 + .../tasks/installs/suse_obs_repository.yml | 8 + roles/ceph-common/tasks/main.yml | 63 + roles/ceph-common/tasks/selinux.yml | 22 + roles/ceph-common/vars/main.yml | 32 + roles/ceph-config/LICENSE | 201 +++ roles/ceph-config/README.md | 3 + roles/ceph-config/meta/main.yml | 14 + .../tasks/create_ceph_initial_dirs.yml | 25 + roles/ceph-config/tasks/main.yml | 194 +++ .../tasks/rgw_systemd_environment_file.yml | 22 + roles/ceph-config/templates/ceph.conf.j2 | 31 + roles/ceph-container-common/README.md | 3 + roles/ceph-container-common/defaults/main.yml | 1 + roles/ceph-container-common/files/ceph.target | 5 + roles/ceph-container-common/meta/main.yml | 14 + .../tasks/fetch_image.yml | 81 + roles/ceph-container-common/tasks/main.yml | 39 + .../tasks/prerequisites.yml | 52 + .../ceph-container-common/tasks/registry.yml | 11 + roles/ceph-container-common/tasks/release.yml | 50 + roles/ceph-container-engine/README.md | 3 + roles/ceph-container-engine/meta/main.yml | 17 + roles/ceph-container-engine/tasks/main.yml | 4 + .../pre_requisites/debian_prerequisites.yml | 32 + .../tasks/pre_requisites/prerequisites.yml | 77 + .../templates/docker-proxy.conf.j2 | 8 + roles/ceph-container-engine/vars/CentOS-8.yml | 1 + roles/ceph-container-engine/vars/CentOS-9.yml | 3 + roles/ceph-container-engine/vars/Debian.yml | 3 + roles/ceph-container-engine/vars/RedHat-8.yml | 3 + roles/ceph-container-engine/vars/RedHat.yml | 3 + roles/ceph-container-engine/vars/Ubuntu.yml | 3 + roles/ceph-crash/meta/main.yml | 14 + roles/ceph-crash/tasks/main.yml | 67 + roles/ceph-crash/tasks/systemd.yml | 9 + .../templates/ceph-crash.service.j2 | 52 + roles/ceph-dashboard/meta/main.yml | 14 + .../tasks/configure_dashboard.yml | 331 ++++ .../tasks/configure_grafana_layouts.yml | 13 + roles/ceph-dashboard/tasks/main.yml | 8 + roles/ceph-defaults/README.md | 3 + roles/ceph-defaults/defaults/main.yml | 658 
+++++++ roles/ceph-defaults/meta/main.yml | 17 + roles/ceph-defaults/tasks/main.yml | 1 + roles/ceph-defaults/vars/main.yml | 3 + roles/ceph-exporter/defaults/main.yml | 24 + roles/ceph-exporter/meta/main.yml | 14 + roles/ceph-exporter/tasks/main.yml | 58 + roles/ceph-exporter/tasks/systemd.yml | 9 + .../templates/ceph-exporter.service.j2 | 50 + roles/ceph-facts/README.md | 3 + roles/ceph-facts/meta/main.yml | 17 + roles/ceph-facts/tasks/container_binary.yml | 10 + roles/ceph-facts/tasks/devices.yml | 81 + roles/ceph-facts/tasks/facts.yml | 250 +++ .../tasks/get_def_crush_rule_name.yml | 17 + roles/ceph-facts/tasks/grafana.yml | 35 + roles/ceph-facts/tasks/main.yml | 3 + .../ceph-facts/tasks/set_monitor_address.yml | 14 + .../ceph-facts/tasks/set_radosgw_address.yml | 80 + roles/ceph-fetch-keys/LICENSE | 201 +++ roles/ceph-fetch-keys/README.md | 3 + roles/ceph-fetch-keys/defaults/main.yml | 10 + roles/ceph-fetch-keys/meta/main.yml | 14 + roles/ceph-fetch-keys/tasks/main.yml | 28 + roles/ceph-grafana/meta/main.yml | 14 + .../ceph-grafana/tasks/configure_grafana.yml | 117 ++ roles/ceph-grafana/tasks/main.yml | 6 + roles/ceph-grafana/tasks/setup_container.yml | 22 + roles/ceph-grafana/tasks/systemd.yml | 8 + .../dashboards-ceph-dashboard.yml.j2 | 12 + .../datasources-ceph-dashboard.yml.j2 | 26 + .../templates/grafana-server.service.j2 | 61 + roles/ceph-grafana/templates/grafana.ini.j2 | 35 + roles/ceph-handler/LICENSE | 201 +++ roles/ceph-handler/README.md | 2 + roles/ceph-handler/handlers/main.yml | 77 + roles/ceph-handler/meta/main.yml | 14 + .../tasks/check_running_cluster.yml | 8 + .../tasks/check_running_containers.yml | 84 + .../tasks/check_socket_non_container.yml | 234 +++ roles/ceph-handler/tasks/handler_crash.yml | 18 + roles/ceph-handler/tasks/handler_exporter.yml | 18 + roles/ceph-handler/tasks/handler_mdss.yml | 28 + roles/ceph-handler/tasks/handler_mgrs.yml | 28 + roles/ceph-handler/tasks/handler_mons.yml | 32 + roles/ceph-handler/tasks/handler_nfss.yml 
| 28 + roles/ceph-handler/tasks/handler_osds.yml | 122 ++ .../ceph-handler/tasks/handler_rbdmirrors.yml | 28 + roles/ceph-handler/tasks/handler_rgws.yml | 28 + roles/ceph-handler/tasks/main.yml | 61 + .../templates/restart_mds_daemon.sh.j2 | 26 + .../templates/restart_mgr_daemon.sh.j2 | 27 + .../templates/restart_mon_daemon.sh.j2 | 49 + .../templates/restart_nfs_daemon.sh.j2 | 26 + .../templates/restart_osd_daemon.sh.j2 | 82 + .../templates/restart_rbd_mirror_daemon.sh.j2 | 26 + .../templates/restart_rgw_daemon.sh.j2 | 116 ++ roles/ceph-infra/handlers/main.yml | 21 + roles/ceph-infra/meta/main.yml | 14 + roles/ceph-infra/tasks/configure_firewall.yml | 257 +++ roles/ceph-infra/tasks/dashboard_firewall.yml | 70 + roles/ceph-infra/tasks/main.yml | 53 + roles/ceph-infra/tasks/setup_ntp.yml | 68 + roles/ceph-infra/templates/logrotate.conf.j2 | 13 + roles/ceph-mds/LICENSE | 201 +++ roles/ceph-mds/README.md | 3 + roles/ceph-mds/defaults/main.yml | 43 + roles/ceph-mds/files/ceph-mds.target | 9 + roles/ceph-mds/meta/main.yml | 14 + roles/ceph-mds/tasks/common.yml | 65 + roles/ceph-mds/tasks/containerized.yml | 26 + .../ceph-mds/tasks/create_mds_filesystems.yml | 36 + roles/ceph-mds/tasks/main.yml | 17 + roles/ceph-mds/tasks/non_containerized.yml | 48 + roles/ceph-mds/tasks/systemd.yml | 16 + .../templates/ceph-mds.service.d-overrides.j2 | 1 + roles/ceph-mds/templates/ceph-mds.service.j2 | 57 + roles/ceph-mgr/LICENSE | 201 +++ roles/ceph-mgr/README.md | 3 + roles/ceph-mgr/defaults/main.yml | 57 + roles/ceph-mgr/files/ceph-mgr.target | 9 + roles/ceph-mgr/meta/main.yml | 14 + roles/ceph-mgr/tasks/common.yml | 100 ++ roles/ceph-mgr/tasks/main.yml | 26 + roles/ceph-mgr/tasks/mgr_modules.yml | 51 + roles/ceph-mgr/tasks/pre_requisite.yml | 45 + roles/ceph-mgr/tasks/start_mgr.yml | 38 + roles/ceph-mgr/tasks/systemd.yml | 16 + .../templates/ceph-mgr.service.d-overrides.j2 | 1 + roles/ceph-mgr/templates/ceph-mgr.service.j2 | 56 + roles/ceph-mon/LICENSE | 201 +++ 
roles/ceph-mon/README.md | 3 + roles/ceph-mon/defaults/main.yml | 69 + roles/ceph-mon/files/ceph-mon.target | 8 + roles/ceph-mon/meta/main.yml | 14 + roles/ceph-mon/tasks/ceph_keys.yml | 32 + roles/ceph-mon/tasks/deploy_monitors.yml | 201 +++ roles/ceph-mon/tasks/main.yml | 39 + roles/ceph-mon/tasks/secure_cluster.yml | 15 + roles/ceph-mon/tasks/start_monitor.yml | 33 + roles/ceph-mon/tasks/systemd.yml | 23 + .../templates/ceph-mon.service.d-overrides.j2 | 1 + roles/ceph-mon/templates/ceph-mon.service.j2 | 66 + roles/ceph-nfs/LICENSE | 201 +++ roles/ceph-nfs/README.md | 3 + roles/ceph-nfs/defaults/main.yml | 122 ++ roles/ceph-nfs/meta/main.yml | 14 + roles/ceph-nfs/tasks/create_rgw_nfs_user.yml | 23 + roles/ceph-nfs/tasks/main.yml | 96 + .../tasks/pre_requisite_container.yml | 108 ++ .../tasks/pre_requisite_non_container.yml | 96 + .../pre_requisite_non_container_debian.yml | 80 + .../pre_requisite_non_container_red_hat.yml | 43 + roles/ceph-nfs/tasks/start_nfs.yml | 105 ++ roles/ceph-nfs/tasks/systemd.yml | 9 + roles/ceph-nfs/templates/ceph-nfs.service.j2 | 56 + roles/ceph-nfs/templates/ganesha.conf.j2 | 124 ++ roles/ceph-nfs/templates/idmap.conf.j2 | 137 ++ roles/ceph-nfs/templates/systemd-run.j2 | 27 + roles/ceph-node-exporter/meta/main.yml | 14 + roles/ceph-node-exporter/tasks/main.yml | 3 + .../tasks/setup_container.yml | 11 + roles/ceph-node-exporter/tasks/systemd.yml | 8 + .../templates/node_exporter.service.j2 | 51 + roles/ceph-osd/LICENSE | 201 +++ roles/ceph-osd/README.md | 3 + roles/ceph-osd/defaults/main.yml | 218 +++ roles/ceph-osd/files/ceph-osd.target | 9 + roles/ceph-osd/meta/main.yml | 14 + roles/ceph-osd/tasks/common.yml | 46 + roles/ceph-osd/tasks/crush_rules.yml | 98 ++ roles/ceph-osd/tasks/main.yml | 101 ++ roles/ceph-osd/tasks/scenarios/lvm-batch.yml | 21 + roles/ceph-osd/tasks/scenarios/lvm.yml | 21 + roles/ceph-osd/tasks/start_osds.yml | 69 + roles/ceph-osd/tasks/system_tuning.yml | 43 + roles/ceph-osd/tasks/systemd.yml | 23 + 
.../templates/ceph-osd.service.d-overrides.j2 | 1 + roles/ceph-osd/templates/ceph-osd.service.j2 | 42 + roles/ceph-osd/templates/systemd-run.j2 | 58 + roles/ceph-osd/templates/tmpfiles_hugepage.j2 | 3 + roles/ceph-osd/vars/main.yml | 2 + .../ceph-prometheus/files/ceph_dashboard.yml | 115 ++ roles/ceph-prometheus/handlers/main.yml | 13 + roles/ceph-prometheus/meta/main.yml | 14 + roles/ceph-prometheus/tasks/main.yml | 63 + .../ceph-prometheus/tasks/setup_container.yml | 13 + roles/ceph-prometheus/tasks/systemd.yml | 12 + .../templates/alertmanager.service.j2 | 58 + .../templates/alertmanager.yml.j2 | 20 + .../templates/prometheus.service.j2 | 57 + .../templates/prometheus.yml.j2 | 38 + roles/ceph-rbd-mirror/LICENSE | 201 +++ roles/ceph-rbd-mirror/README.md | 3 + roles/ceph-rbd-mirror/defaults/main.yml | 46 + .../files/ceph-rbd-mirror.target | 7 + roles/ceph-rbd-mirror/meta/main.yml | 14 + .../tasks/configure_mirroring.yml | 170 ++ roles/ceph-rbd-mirror/tasks/main.yml | 53 + .../tasks/start_container_rbd_mirror.yml | 12 + roles/ceph-rbd-mirror/tasks/systemd.yml | 23 + .../ceph-rbd-mirror.service.d-overrides.j2 | 1 + .../templates/ceph-rbd-mirror.service.j2 | 57 + roles/ceph-rgw-loadbalancer/defaults/main.yml | 26 + roles/ceph-rgw-loadbalancer/handlers/main.yml | 10 + roles/ceph-rgw-loadbalancer/meta/main.yml | 13 + roles/ceph-rgw-loadbalancer/tasks/main.yml | 6 + .../tasks/pre_requisite.yml | 50 + .../tasks/start_rgw_loadbalancer.yml | 12 + .../templates/haproxy.cfg.j2 | 63 + .../templates/keepalived.conf.j2 | 35 + roles/ceph-rgw/LICENSE | 201 +++ roles/ceph-rgw/README.md | 3 + roles/ceph-rgw/defaults/main.yml | 97 ++ roles/ceph-rgw/files/ceph-radosgw.target | 9 + roles/ceph-rgw/handlers/main.yml | 6 + roles/ceph-rgw/meta/main.yml | 14 + roles/ceph-rgw/tasks/common.yml | 44 + roles/ceph-rgw/tasks/main.yml | 25 + roles/ceph-rgw/tasks/openstack-keystone.yml | 32 + roles/ceph-rgw/tasks/pre_requisite.yml | 64 + roles/ceph-rgw/tasks/rgw_create_pools.yml | 52 + 
roles/ceph-rgw/tasks/start_docker_rgw.yml | 12 + roles/ceph-rgw/tasks/start_radosgw.yml | 29 + roles/ceph-rgw/tasks/systemd.yml | 23 + .../templates/ceph-radosgw.service.j2 | 69 + .../templates/ceph-rgw.service.d-overrides.j2 | 1 + roles/ceph-validate/meta/main.yml | 14 + roles/ceph-validate/tasks/check_devices.yml | 115 ++ roles/ceph-validate/tasks/check_eth_rgw.yml | 24 + .../ceph-validate/tasks/check_ipaddr_mon.yml | 5 + roles/ceph-validate/tasks/check_nfs.yml | 15 + roles/ceph-validate/tasks/check_pools.yml | 10 + roles/ceph-validate/tasks/check_rbdmirror.yml | 12 + .../ceph-validate/tasks/check_repository.yml | 19 + roles/ceph-validate/tasks/check_rgw_pools.yml | 29 + roles/ceph-validate/tasks/check_system.yml | 53 + roles/ceph-validate/tasks/main.yml | 215 +++ site-container.yml.sample | 545 ++++++ site.yml | 520 ++++++ site.yml copy.sample | 524 ++++++ test.yml | 16 + tests/README.md | 6 + tests/README.rst | 50 + .../conftest.cpython-314-pytest-8.4.2.pyc | Bin 0 -> 10225 bytes tests/conftest.py | 226 +++ tests/functional/.gitignore | 3 + tests/functional/add-mdss/Vagrantfile | 1 + tests/functional/add-mdss/ceph-override.json | 1 + .../functional/add-mdss/container/Vagrantfile | 1 + .../add-mdss/container/ceph-override.json | 1 + .../add-mdss/container/group_vars/all | 31 + tests/functional/add-mdss/container/hosts | 5 + tests/functional/add-mdss/container/hosts-2 | 8 + .../add-mdss/container/vagrant_variables.yml | 70 + tests/functional/add-mdss/group_vars/all | 26 + tests/functional/add-mdss/hosts | 5 + tests/functional/add-mdss/hosts-2 | 8 + .../functional/add-mdss/vagrant_variables.yml | 70 + tests/functional/add-mgrs/Vagrantfile | 1 + tests/functional/add-mgrs/ceph-override.json | 1 + .../functional/add-mgrs/container/Vagrantfile | 1 + .../add-mgrs/container/ceph-override.json | 1 + .../add-mgrs/container/group_vars/all | 31 + tests/functional/add-mgrs/container/hosts | 5 + tests/functional/add-mgrs/container/hosts-2 | 8 + 
.../add-mgrs/container/vagrant_variables.yml | 70 + tests/functional/add-mgrs/group_vars/all | 26 + tests/functional/add-mgrs/hosts | 5 + tests/functional/add-mgrs/hosts-2 | 8 + .../functional/add-mgrs/vagrant_variables.yml | 70 + tests/functional/add-mons/Vagrantfile | 1 + tests/functional/add-mons/ceph-override.json | 1 + .../functional/add-mons/container/Vagrantfile | 1 + .../add-mons/container/ceph-override.json | 1 + .../add-mons/container/group_vars/all | 31 + tests/functional/add-mons/container/hosts | 5 + tests/functional/add-mons/container/hosts-2 | 6 + .../add-mons/container/vagrant_variables.yml | 70 + tests/functional/add-mons/group_vars/all | 26 + tests/functional/add-mons/hosts | 5 + tests/functional/add-mons/hosts-2 | 6 + .../functional/add-mons/vagrant_variables.yml | 70 + tests/functional/add-osds/Vagrantfile | 1 + tests/functional/add-osds/ceph-override.json | 1 + .../functional/add-osds/container/Vagrantfile | 1 + .../add-osds/container/ceph-override.json | 1 + .../add-osds/container/group_vars/all | 31 + tests/functional/add-osds/container/hosts | 5 + tests/functional/add-osds/container/hosts-2 | 6 + .../add-osds/container/vagrant_variables.yml | 70 + tests/functional/add-osds/group_vars/all | 26 + tests/functional/add-osds/hosts | 5 + tests/functional/add-osds/hosts-2 | 6 + .../functional/add-osds/vagrant_variables.yml | 70 + tests/functional/add-rbdmirrors/Vagrantfile | 1 + .../add-rbdmirrors/ceph-override.json | 1 + .../add-rbdmirrors/container/Vagrantfile | 1 + .../container/ceph-override.json | 1 + .../add-rbdmirrors/container/group_vars/all | 31 + .../functional/add-rbdmirrors/container/hosts | 5 + .../add-rbdmirrors/container/hosts-2 | 8 + .../container/vagrant_variables.yml | 70 + .../functional/add-rbdmirrors/group_vars/all | 26 + tests/functional/add-rbdmirrors/hosts | 5 + tests/functional/add-rbdmirrors/hosts-2 | 8 + .../add-rbdmirrors/vagrant_variables.yml | 70 + tests/functional/add-rgws/Vagrantfile | 1 + 
tests/functional/add-rgws/ceph-override.json | 1 + .../functional/add-rgws/container/Vagrantfile | 1 + .../add-rgws/container/ceph-override.json | 1 + .../add-rgws/container/group_vars/all | 33 + tests/functional/add-rgws/container/hosts | 5 + tests/functional/add-rgws/container/hosts-2 | 8 + .../add-rgws/container/vagrant_variables.yml | 70 + tests/functional/add-rgws/group_vars/all | 26 + tests/functional/add-rgws/group_vars/rgws | 9 + tests/functional/add-rgws/hosts | 5 + tests/functional/add-rgws/hosts-2 | 8 + .../functional/add-rgws/vagrant_variables.yml | 70 + tests/functional/all-in-one/Vagrantfile | 1 + .../functional/all-in-one/ceph-override.json | 1 + .../all-in-one/container/Vagrantfile | 1 + .../all-in-one/container/ceph-override.json | 1 + .../all-in-one/container/group_vars/all | 46 + tests/functional/all-in-one/container/hosts | 20 + .../container/vagrant_variables.yml | 60 + tests/functional/all-in-one/group_vars/all | 40 + tests/functional/all-in-one/hosts | 20 + .../all-in-one/vagrant_variables.yml | 56 + tests/functional/all_daemons/Vagrantfile | 1 + .../functional/all_daemons/ceph-override.json | 40 + .../all_daemons/container/Vagrantfile | 1 + .../all_daemons/container/ceph-override.json | 1 + .../all_daemons/container/group_vars/all | 32 + .../all_daemons/container/group_vars/clients | 15 + .../all_daemons/container/group_vars/iscsigws | 2 + .../all_daemons/container/group_vars/mons | 11 + .../all_daemons/container/group_vars/osds | 8 + .../all_daemons/container/group_vars/rgws | 8 + tests/functional/all_daemons/container/hosts | 33 + .../container/vagrant_variables.yml | 60 + tests/functional/all_daemons/group_vars/all | 25 + .../functional/all_daemons/group_vars/clients | 13 + .../all_daemons/group_vars/iscsigws | 2 + tests/functional/all_daemons/group_vars/mons | 11 + tests/functional/all_daemons/group_vars/nfss | 10 + tests/functional/all_daemons/group_vars/osds | 10 + tests/functional/all_daemons/group_vars/rgws | 9 + 
tests/functional/all_daemons/hosts | 33 + .../all_daemons/hosts-switch-to-containers | 27 + .../all_daemons/vagrant_variables.yml | 73 + tests/functional/all_daemons_ipv6/Vagrantfile | 1 + .../all_daemons_ipv6/ceph-override.json | 40 + .../all_daemons_ipv6/container/Vagrantfile | 1 + .../container/ceph-override.json | 1 + .../all_daemons_ipv6/container/group_vars/all | 33 + .../container/group_vars/clients | 13 + .../container/group_vars/iscsigws | 2 + .../container/group_vars/mons | 11 + .../container/group_vars/osds | 8 + .../container/group_vars/rgws | 8 + .../all_daemons_ipv6/container/hosts | 33 + .../container/vagrant_variables.yml | 60 + .../all_daemons_ipv6/group_vars/all | 25 + .../all_daemons_ipv6/group_vars/clients | 13 + .../all_daemons_ipv6/group_vars/iscsigws | 2 + .../all_daemons_ipv6/group_vars/mons | 11 + .../all_daemons_ipv6/group_vars/nfss | 10 + .../all_daemons_ipv6/group_vars/osds | 10 + .../all_daemons_ipv6/group_vars/rgws | 9 + tests/functional/all_daemons_ipv6/hosts | 33 + .../all_daemons_ipv6/vagrant_variables.yml | 73 + tests/functional/cephadm/Vagrantfile | 1 + tests/functional/cephadm/group_vars/all | 8 + tests/functional/cephadm/hosts | 28 + .../functional/cephadm/vagrant_variables.yml | 32 + tests/functional/collect-logs.yml | 91 + tests/functional/collocation/Vagrantfile | 1 + .../functional/collocation/ceph-override.json | 1 + .../collocation/container/Vagrantfile | 1 + .../collocation/container/ceph-override.json | 1 + .../collocation/container/group_vars/all | 33 + .../collocation/container/group_vars/clients | 11 + .../collocation/container/group_vars/osds | 9 + .../collocation/container/group_vars/rgws | 7 + tests/functional/collocation/container/hosts | 27 + .../container/vagrant_variables.yml | 60 + tests/functional/collocation/group_vars/all | 28 + .../functional/collocation/group_vars/clients | 11 + tests/functional/collocation/group_vars/osds | 9 + tests/functional/collocation/group_vars/rgws | 7 + 
tests/functional/collocation/hosts | 28 + .../collocation/vagrant_variables.yml | 56 + tests/functional/dev_setup.yml | 38 + tests/functional/docker2podman/Vagrantfile | 1 + .../docker2podman/ceph-override.json | 37 + tests/functional/docker2podman/group_vars/all | 29 + .../docker2podman/group_vars/clients | 12 + .../docker2podman/group_vars/iscsigws | 2 + .../functional/docker2podman/group_vars/mons | 11 + .../functional/docker2podman/group_vars/osds | 9 + .../functional/docker2podman/group_vars/rgws | 7 + tests/functional/docker2podman/hosts | 11 + .../docker2podman/vagrant_variables.yml | 32 + tests/functional/external_clients/Vagrantfile | 1 + .../external_clients/ceph-override.json | 1 + .../external_clients/container/Vagrantfile | 1 + .../container/ceph-override.json | 1 + .../inventory/external_clients-hosts | 3 + .../container/inventory/group_vars/all | 40 + .../container/inventory/group_vars/clients | 10 + .../container/inventory/hosts | 7 + .../container/vagrant_variables.yml | 60 + .../inventory/external_clients-hosts | 3 + .../external_clients/inventory/group_vars/all | 34 + .../inventory/group_vars/clients | 10 + .../external_clients/inventory/hosts | 7 + .../external_clients/vagrant_variables.yml | 56 + .../functional/external_clients_admin_key.yml | 27 + tests/functional/infra_lv_create/Vagrantfile | 1 + .../functional/infra_lv_create/group_vars/all | 33 + tests/functional/infra_lv_create/hosts | 2 + .../infra_lv_create/vagrant_variables.yml | 70 + .../functional/lvm-auto-discovery/Vagrantfile | 1 + .../lvm-auto-discovery/ceph-override.json | 1 + .../lvm-auto-discovery/container/Vagrantfile | 1 + .../container/ceph-override.json | 1 + .../container/group_vars/all | 31 + .../lvm-auto-discovery/container/hosts | 5 + .../container/vagrant_variables.yml | 70 + .../lvm-auto-discovery/group_vars/all | 22 + tests/functional/lvm-auto-discovery/hosts | 5 + .../lvm-auto-discovery/vagrant_variables.yml | 70 + tests/functional/lvm-batch/Vagrantfile | 1 + 
tests/functional/lvm-batch/ceph-override.json | 1 + .../lvm-batch/container/Vagrantfile | 1 + .../lvm-batch/container/ceph-override.json | 1 + .../lvm-batch/container/group_vars/all | 31 + tests/functional/lvm-batch/container/hosts | 5 + .../lvm-batch/container/vagrant_variables.yml | 70 + tests/functional/lvm-batch/group_vars/all | 23 + tests/functional/lvm-batch/hosts | 5 + .../lvm-batch/vagrant_variables.yml | 70 + tests/functional/lvm-osds/Vagrantfile | 1 + tests/functional/lvm-osds/ceph-override.json | 1 + .../functional/lvm-osds/container/Vagrantfile | 1 + .../lvm-osds/container/ceph-override.json | 1 + .../lvm-osds/container/group_vars/all | 26 + tests/functional/lvm-osds/container/hosts | 8 + .../lvm-osds/container/vagrant_variables.yml | 70 + tests/functional/lvm-osds/group_vars/all | 19 + tests/functional/lvm-osds/hosts | 8 + .../functional/lvm-osds/vagrant_variables.yml | 70 + tests/functional/lvm_setup.yml | 83 + .../Vagrantfile | 1 + .../group_vars/all | 25 + .../migrate_ceph_disk_to_ceph_volume/hosts | 10 + .../vagrant_variables.yml | 73 + tests/functional/podman/Vagrantfile | 1 + tests/functional/podman/ceph-override.json | 1 + tests/functional/podman/group_vars/all | 28 + tests/functional/podman/group_vars/clients | 12 + tests/functional/podman/group_vars/iscsigws | 2 + tests/functional/podman/group_vars/mons | 11 + tests/functional/podman/group_vars/osds | 9 + tests/functional/podman/group_vars/rgws | 7 + tests/functional/podman/hosts | 30 + tests/functional/podman/vagrant_variables.yml | 32 + tests/functional/rbd_map_devices.yml | 58 + tests/functional/rbdmirror.yml | 32 + tests/functional/rbdmirror/Vagrantfile | 1 + .../rbdmirror/container/Vagrantfile | 1 + .../rbdmirror/container/group_vars/all | 31 + tests/functional/rbdmirror/container/hosts | 11 + .../rbdmirror/container/secondary/Vagrantfile | 1 + .../container/secondary/group_vars/all | 31 + .../rbdmirror/container/secondary/hosts | 12 + .../container/secondary/vagrant_variables.yml | 70 + 
.../rbdmirror/container/vagrant_variables.yml | 70 + tests/functional/rbdmirror/group_vars/all | 26 + tests/functional/rbdmirror/hosts | 12 + .../rbdmirror/secondary/Vagrantfile | 1 + .../rbdmirror/secondary/group_vars/all | 26 + tests/functional/rbdmirror/secondary/hosts | 12 + .../rbdmirror/secondary/vagrant_variables.yml | 70 + .../rbdmirror/vagrant_variables.yml | 70 + tests/functional/reboot.yml | 9 + tests/functional/setup.yml | 104 ++ tests/functional/shrink_mds/Vagrantfile | 1 + .../functional/shrink_mds/ceph-override.json | 1 + .../shrink_mds/container/Vagrantfile | 1 + .../shrink_mds/container/ceph-override.json | 1 + .../shrink_mds/container/group_vars/all | 19 + .../shrink_mds/container/group_vars/mons | 3 + .../shrink_mds/container/group_vars/osds | 9 + tests/functional/shrink_mds/container/hosts | 8 + .../container/vagrant_variables.yml | 60 + tests/functional/shrink_mds/group_vars/all | 15 + tests/functional/shrink_mds/group_vars/mons | 3 + tests/functional/shrink_mds/group_vars/osds | 11 + tests/functional/shrink_mds/hosts | 8 + .../shrink_mds/vagrant_variables.yml | 65 + tests/functional/shrink_mgr/Vagrantfile | 1 + .../functional/shrink_mgr/ceph-override.json | 1 + .../shrink_mgr/container/Vagrantfile | 1 + .../shrink_mgr/container/ceph-override.json | 1 + .../shrink_mgr/container/group_vars/all | 18 + .../shrink_mgr/container/group_vars/mons | 3 + .../shrink_mgr/container/group_vars/osds | 9 + tests/functional/shrink_mgr/container/hosts | 9 + .../container/vagrant_variables.yml | 60 + tests/functional/shrink_mgr/group_vars/all | 12 + tests/functional/shrink_mgr/group_vars/mons | 3 + tests/functional/shrink_mgr/group_vars/osds | 11 + tests/functional/shrink_mgr/hosts | 9 + .../shrink_mgr/vagrant_variables.yml | 73 + tests/functional/shrink_mon/Vagrantfile | 1 + .../functional/shrink_mon/ceph-override.json | 1 + .../shrink_mon/container/Vagrantfile | 1 + .../shrink_mon/container/ceph-override.json | 1 + .../shrink_mon/container/group_vars/all | 18 
+ .../shrink_mon/container/group_vars/mons | 3 + .../shrink_mon/container/group_vars/osds | 9 + tests/functional/shrink_mon/container/hosts | 7 + .../container/vagrant_variables.yml | 60 + tests/functional/shrink_mon/group_vars/all | 11 + tests/functional/shrink_mon/group_vars/mons | 3 + tests/functional/shrink_mon/group_vars/osds | 11 + tests/functional/shrink_mon/hosts | 7 + .../shrink_mon/hosts-switch-to-containers | 19 + .../shrink_mon/vagrant_variables.yml | 73 + tests/functional/shrink_osd/Vagrantfile | 1 + .../functional/shrink_osd/ceph-override.json | 1 + .../shrink_osd/container/Vagrantfile | 1 + .../shrink_osd/container/ceph-override.json | 1 + .../shrink_osd/container/group_vars/all | 19 + tests/functional/shrink_osd/container/hosts | 6 + .../container/vagrant_variables.yml | 60 + tests/functional/shrink_osd/group_vars/all | 10 + tests/functional/shrink_osd/group_vars/osds | 3 + tests/functional/shrink_osd/hosts | 6 + .../shrink_osd/vagrant_variables.yml | 73 + tests/functional/shrink_rbdmirror/Vagrantfile | 1 + .../shrink_rbdmirror/ceph-override.json | 1 + .../shrink_rbdmirror/container/Vagrantfile | 1 + .../container/ceph-override.json | 1 + .../shrink_rbdmirror/container/group_vars/all | 18 + .../container/group_vars/mons | 3 + .../container/group_vars/osds | 9 + .../shrink_rbdmirror/container/hosts | 8 + .../container/vagrant_variables.yml | 60 + .../shrink_rbdmirror/group_vars/all | 13 + .../shrink_rbdmirror/group_vars/mons | 3 + .../shrink_rbdmirror/group_vars/osds | 11 + tests/functional/shrink_rbdmirror/hosts | 8 + .../shrink_rbdmirror/vagrant_variables.yml | 73 + tests/functional/shrink_rgw/Vagrantfile | 1 + .../functional/shrink_rgw/ceph-override.json | 1 + .../shrink_rgw/container/Vagrantfile | 1 + .../shrink_rgw/container/ceph-override.json | 1 + .../shrink_rgw/container/group_vars/all | 20 + .../shrink_rgw/container/group_vars/mons | 3 + .../shrink_rgw/container/group_vars/osds | 9 + .../shrink_rgw/container/group_vars/rgws | 10 + 
tests/functional/shrink_rgw/container/hosts | 8 + .../container/vagrant_variables.yml | 60 + tests/functional/shrink_rgw/group_vars/all | 14 + tests/functional/shrink_rgw/group_vars/mons | 3 + tests/functional/shrink_rgw/group_vars/osds | 11 + tests/functional/shrink_rgw/group_vars/rgws | 10 + tests/functional/shrink_rgw/hosts | 8 + .../shrink_rgw/vagrant_variables.yml | 73 + tests/functional/subset_update/Vagrantfile | 1 + .../subset_update/ceph-override.json | 15 + .../subset_update/container/Vagrantfile | 1 + .../container/ceph-override.json | 1 + .../subset_update/container/group_vars/all | 34 + .../container/group_vars/clients | 13 + .../container/group_vars/iscsigws | 2 + .../subset_update/container/group_vars/mons | 3 + .../subset_update/container/group_vars/osds | 6 + .../subset_update/container/group_vars/rgws | 8 + .../functional/subset_update/container/hosts | 17 + .../container/vagrant_variables.yml | 60 + tests/functional/subset_update/group_vars/all | 26 + .../subset_update/group_vars/clients | 13 + .../subset_update/group_vars/iscsigws | 2 + .../functional/subset_update/group_vars/mons | 3 + .../functional/subset_update/group_vars/nfss | 10 + .../functional/subset_update/group_vars/osds | 8 + .../functional/subset_update/group_vars/rgws | 9 + tests/functional/subset_update/hosts | 18 + .../subset_update/vagrant_variables.yml | 73 + tests/functional/tests/__init__.py | 0 .../functional/tests/grafana/test_grafana.py | 26 + tests/functional/tests/mds/__init__.py | 0 tests/functional/tests/mds/test_mds.py | 25 + tests/functional/tests/mgr/__init__.py | 0 tests/functional/tests/mgr/test_mgr.py | 39 + tests/functional/tests/mon/__init__.py | 0 tests/functional/tests/mon/test_mons.py | 29 + .../functional/tests/nfs/test_nfs_ganesha.py | 48 + .../tests/node-exporter/test_node_exporter.py | 14 + tests/functional/tests/osd/__init__.py | 0 tests/functional/tests/osd/test_osds.py | 81 + tests/functional/tests/rbd-mirror/__init__.py | 0 
.../tests/rbd-mirror/test_rbd_mirror.py | 33 + tests/functional/tests/rgw/__init__.py | 0 tests/functional/tests/rgw/test_rgw.py | 47 + tests/functional/tests/test_install.py | 56 + tests/inventories/single-machine.yml | 11 + tests/library/ca_test_common.py | 29 + tests/library/test_ceph_crush.py | 112 ++ tests/library/test_ceph_crush_rule.py | 439 +++++ tests/library/test_ceph_dashboard_user.py | 170 ++ tests/library/test_ceph_ec_profile.py | 243 +++ tests/library/test_ceph_fs.py | 107 ++ tests/library/test_ceph_key.py | 589 +++++++ tests/library/test_ceph_mgr_module.py | 162 ++ tests/library/test_ceph_osd.py | 244 +++ tests/library/test_ceph_osd_flag.py | 156 ++ tests/library/test_ceph_pool.py | 763 ++++++++ tests/library/test_ceph_volume.py | 480 +++++ .../test_ceph_volume_simple_activate.py | 174 ++ tests/library/test_ceph_volume_simple_scan.py | 166 ++ tests/library/test_cephadm_adopt.py | 208 +++ tests/library/test_cephadm_bootstrap.py | 304 ++++ tests/library/test_radosgw_caps.py | 101 ++ tests/library/test_radosgw_realm.py | 125 ++ tests/library/test_radosgw_user.py | 205 +++ tests/library/test_radosgw_zone.py | 213 +++ tests/library/test_radosgw_zonegroup.py | 144 ++ tests/module_utils/test_ca_common.py | 144 ++ .../plugins/filter/test_ipaddrs_in_ranges.py | 63 + tests/pytest.ini | 18 + tests/requirements.txt | 11 + tests/scripts/generate_ssh_config.sh | 32 + tests/scripts/vagrant_up.sh | 28 + tests/scripts/workflows/defaults.sh | 34 + tests/scripts/workflows/signed-off.sh | 9 + tox-cephadm.ini | 43 + tox-docker2podman.ini | 53 + tox-external_clients.ini | 85 + tox-podman.ini | 60 + tox-rbdmirror.ini | 97 ++ tox-shrink_osd.ini | 121 ++ tox-subset_update.ini | 123 ++ tox-update.ini | 81 + tox.ini | 400 +++++ vagrant.yaml | 32 + vagrant_variables.yml.sample | 65 + 840 files changed, 52471 insertions(+) create mode 100644 .deepsource.toml create mode 100644 .fuse_hidden0000003200000001 create mode 100644 .fuse_hidden0000010600000002 create mode 100644 
.fuse_hidden0000011600000001 create mode 100644 .mergify.yml create mode 100644 .readthedocs.yaml create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.rst create mode 100644 Vagrantfile create mode 100644 ansible.cfg create mode 100644 ceph-ansible.spec.in create mode 100755 contrib/backport_to_stable_branch.sh create mode 100755 contrib/push-roles-to-ansible-galaxy.sh create mode 100644 contrib/rundep.sample create mode 100755 contrib/rundep_installer.sh create mode 100644 contrib/snapshot_vms.sh create mode 100644 contrib/vagrant_variables.yml.atomic create mode 100644 contrib/vagrant_variables.yml.linode create mode 100644 contrib/vagrant_variables.yml.openstack create mode 100644 dashboard.yml create mode 100644 docs/.gitignore create mode 100644 docs/Makefile create mode 100644 docs/source/_static/.empty create mode 100644 docs/source/_templates/.empty create mode 100644 docs/source/conf.py create mode 100644 docs/source/day-2/osds.rst create mode 100644 docs/source/day-2/purge.rst create mode 100644 docs/source/day-2/upgrade.rst create mode 100644 docs/source/dev/index.rst create mode 100644 docs/source/glossary.rst create mode 100644 docs/source/index.rst create mode 100644 docs/source/installation/containerized.rst create mode 100644 docs/source/installation/methods.rst create mode 100644 docs/source/installation/non-containerized.rst create mode 100644 docs/source/osds/scenarios.rst create mode 100644 docs/source/rbdmirror/index.rst create mode 100644 docs/source/testing/development.rst create mode 100644 docs/source/testing/glossary.rst create mode 100644 docs/source/testing/index.rst create mode 100644 docs/source/testing/layout.rst create mode 100644 docs/source/testing/modifying.rst create mode 100644 docs/source/testing/running.rst create mode 100644 docs/source/testing/scenarios.rst create mode 100644 docs/source/testing/tests.rst create mode 100644 docs/source/testing/tox.rst create mode 
100644 docs/tox.ini create mode 100644 dummy-ansible-hosts create mode 100755 generate_group_vars_sample.sh create mode 100644 group_vars/all.yml create mode 100644 group_vars/all.yml copy.sample create mode 100644 group_vars/clients.yml.sample create mode 100644 group_vars/exporters.yml.sample create mode 100644 group_vars/mdss.yml.sample create mode 100644 group_vars/mgrs.yml create mode 100644 group_vars/mgrs.yml.sample create mode 100644 group_vars/mons.yml create mode 100644 group_vars/mons.yml.sample create mode 100644 group_vars/nfss.yml.sample create mode 100644 group_vars/osds.yml create mode 100644 group_vars/osds.yml copy.sample create mode 100644 group_vars/rbdmirrors.yml.sample create mode 100644 group_vars/rgwloadbalancers.yml.sample create mode 100644 group_vars/rgws.yml.sample create mode 100644 infrastructure-playbooks/README.md create mode 100644 infrastructure-playbooks/add-mon.yml create mode 100644 infrastructure-playbooks/backup-and-restore-ceph-files.yml create mode 100644 infrastructure-playbooks/ceph-keys.yml create mode 100644 infrastructure-playbooks/cephadm-adopt.yml create mode 100644 infrastructure-playbooks/cephadm.yml create mode 100644 infrastructure-playbooks/docker-to-podman.yml create mode 100644 infrastructure-playbooks/gather-ceph-logs.yml create mode 100644 infrastructure-playbooks/lv-create.yml create mode 100644 infrastructure-playbooks/lv-teardown.yml create mode 100644 infrastructure-playbooks/purge-cluster.yml create mode 120000 infrastructure-playbooks/purge-container-cluster.yml create mode 100644 infrastructure-playbooks/purge-dashboard.yml create mode 100644 infrastructure-playbooks/rgw-add-users-buckets.yml create mode 100644 infrastructure-playbooks/rolling_update.yml create mode 100644 infrastructure-playbooks/shrink-mds.yml create mode 100644 infrastructure-playbooks/shrink-mgr.yml create mode 100644 infrastructure-playbooks/shrink-mon.yml create mode 100644 infrastructure-playbooks/shrink-osd.yml create mode 
100644 infrastructure-playbooks/shrink-rbdmirror.yml create mode 100644 infrastructure-playbooks/shrink-rgw.yml create mode 100644 infrastructure-playbooks/storage-inventory.yml create mode 100644 infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml create mode 100644 infrastructure-playbooks/take-over-existing-cluster.yml create mode 100644 infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml create mode 100644 infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml create mode 100644 infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml create mode 100644 infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml create mode 100644 infrastructure-playbooks/untested-by-ci/purge-multisite.yml create mode 100644 infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml create mode 100644 infrastructure-playbooks/untested-by-ci/replace-osd.yml create mode 100644 infrastructure-playbooks/vars/lv_vars.yaml.sample create mode 100644 library/__init__.py create mode 100644 library/__pycache__/ceph_config.cpython-314.pyc create mode 100644 library/__pycache__/ceph_crush.cpython-314.pyc create mode 100644 library/__pycache__/ceph_crush_rule_info.cpython-314.pyc create mode 100644 library/__pycache__/ceph_ec_profile.cpython-314.pyc create mode 100644 library/__pycache__/ceph_key.cpython-314.pyc create mode 100644 library/__pycache__/ceph_key_info.cpython-314.pyc create mode 100644 library/__pycache__/ceph_mgr_module.cpython-314.pyc create mode 100644 library/__pycache__/ceph_osd_flag.cpython-314.pyc create mode 100644 library/__pycache__/ceph_pool.cpython-314.pyc create mode 100644 library/__pycache__/ceph_volume.cpython-314.pyc create mode 100644 library/__pycache__/radosgw_user.cpython-314.pyc create mode 100644 library/ceph_add_users_buckets.py create mode 100644 library/ceph_authtool.py create mode 100644 library/ceph_config.py create mode 100644 library/ceph_crush.py 
create mode 100644 library/ceph_crush_rule.py create mode 100644 library/ceph_crush_rule_info.py create mode 100644 library/ceph_dashboard_user.py create mode 100644 library/ceph_ec_profile.py create mode 100644 library/ceph_fs.py create mode 100644 library/ceph_key.py create mode 100644 library/ceph_key_info.py create mode 100644 library/ceph_mgr_module.py create mode 100644 library/ceph_orch_apply.py create mode 100644 library/ceph_osd.py create mode 100644 library/ceph_osd_flag.py create mode 100644 library/ceph_pool.py create mode 100644 library/ceph_volume.py create mode 100644 library/ceph_volume_simple_activate.py create mode 100644 library/ceph_volume_simple_scan.py create mode 100644 library/cephadm_adopt.py create mode 100644 library/cephadm_bootstrap.py create mode 100644 library/radosgw_caps.py create mode 100644 library/radosgw_realm.py create mode 100644 library/radosgw_user.py create mode 100644 library/radosgw_zone.py create mode 100644 library/radosgw_zonegroup.py create mode 100644 module_utils/__init__.py create mode 100644 module_utils/ca_common.py create mode 100644 plugins/callback/__pycache__/installer_checkpoint.cpython-310.pyc create mode 100644 plugins/callback/__pycache__/installer_checkpoint.cpython-314.pyc create mode 100644 plugins/callback/installer_checkpoint.py create mode 100644 plugins/filter/__init__.py create mode 100644 plugins/filter/__pycache__/dict2dict.cpython-310.pyc create mode 100644 plugins/filter/__pycache__/dict2dict.cpython-314.pyc create mode 100644 plugins/filter/__pycache__/ipaddrs_in_ranges.cpython-310.pyc create mode 100644 plugins/filter/__pycache__/ipaddrs_in_ranges.cpython-314.pyc create mode 100644 plugins/filter/dict2dict.py create mode 100644 plugins/filter/ipaddrs_in_ranges.py create mode 100644 profiles/rgw-keystone-v2 create mode 100644 profiles/rgw-keystone-v3 create mode 100644 profiles/rgw-radosgw-static-website create mode 100644 profiles/rgw-usage-log create mode 100644 raw_install_python.yml 
create mode 100644 requirements.txt create mode 100644 requirements.yml create mode 100644 roles/ceph-client/LICENSE create mode 100644 roles/ceph-client/README.md create mode 100644 roles/ceph-client/defaults/main.yml create mode 100644 roles/ceph-client/meta/main.yml create mode 100644 roles/ceph-client/tasks/create_users_keys.yml create mode 100644 roles/ceph-client/tasks/main.yml create mode 100644 roles/ceph-client/tasks/pre_requisite.yml create mode 100644 roles/ceph-common/LICENSE create mode 100644 roles/ceph-common/README.md create mode 100644 roles/ceph-common/defaults/main.yml create mode 100644 roles/ceph-common/files/cephstable.asc create mode 100644 roles/ceph-common/files/cephstablerhcs.asc create mode 100644 roles/ceph-common/meta/main.yml create mode 100644 roles/ceph-common/tasks/configure_cluster_name.yml create mode 100644 roles/ceph-common/tasks/configure_memory_allocator.yml create mode 100644 roles/ceph-common/tasks/configure_repository.yml create mode 100644 roles/ceph-common/tasks/create_rbd_client_dir.yml create mode 100644 roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml create mode 100644 roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml create mode 100644 roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml create mode 100644 roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml create mode 100644 roles/ceph-common/tasks/installs/debian_community_repository.yml create mode 100644 roles/ceph-common/tasks/installs/debian_custom_repository.yml create mode 100644 roles/ceph-common/tasks/installs/debian_dev_repository.yml create mode 100644 roles/ceph-common/tasks/installs/debian_uca_repository.yml create mode 100644 roles/ceph-common/tasks/installs/install_debian_packages.yml create mode 100644 roles/ceph-common/tasks/installs/install_on_clear.yml create mode 100644 roles/ceph-common/tasks/installs/install_on_debian.yml create mode 100644 
roles/ceph-common/tasks/installs/install_redhat_packages.yml create mode 100644 roles/ceph-common/tasks/installs/install_suse_packages.yml create mode 100644 roles/ceph-common/tasks/installs/redhat_community_repository.yml create mode 100644 roles/ceph-common/tasks/installs/redhat_custom_repository.yml create mode 100644 roles/ceph-common/tasks/installs/redhat_dev_repository.yml create mode 100644 roles/ceph-common/tasks/installs/suse_obs_repository.yml create mode 100644 roles/ceph-common/tasks/main.yml create mode 100644 roles/ceph-common/tasks/selinux.yml create mode 100644 roles/ceph-common/vars/main.yml create mode 100644 roles/ceph-config/LICENSE create mode 100644 roles/ceph-config/README.md create mode 100644 roles/ceph-config/meta/main.yml create mode 100644 roles/ceph-config/tasks/create_ceph_initial_dirs.yml create mode 100644 roles/ceph-config/tasks/main.yml create mode 100644 roles/ceph-config/tasks/rgw_systemd_environment_file.yml create mode 100644 roles/ceph-config/templates/ceph.conf.j2 create mode 100644 roles/ceph-container-common/README.md create mode 100644 roles/ceph-container-common/defaults/main.yml create mode 100644 roles/ceph-container-common/files/ceph.target create mode 100644 roles/ceph-container-common/meta/main.yml create mode 100644 roles/ceph-container-common/tasks/fetch_image.yml create mode 100644 roles/ceph-container-common/tasks/main.yml create mode 100644 roles/ceph-container-common/tasks/prerequisites.yml create mode 100644 roles/ceph-container-common/tasks/registry.yml create mode 100644 roles/ceph-container-common/tasks/release.yml create mode 100644 roles/ceph-container-engine/README.md create mode 100644 roles/ceph-container-engine/meta/main.yml create mode 100644 roles/ceph-container-engine/tasks/main.yml create mode 100644 roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml create mode 100644 roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml create mode 100644 
roles/ceph-container-engine/templates/docker-proxy.conf.j2 create mode 120000 roles/ceph-container-engine/vars/CentOS-8.yml create mode 100644 roles/ceph-container-engine/vars/CentOS-9.yml create mode 100644 roles/ceph-container-engine/vars/Debian.yml create mode 100644 roles/ceph-container-engine/vars/RedHat-8.yml create mode 100644 roles/ceph-container-engine/vars/RedHat.yml create mode 100644 roles/ceph-container-engine/vars/Ubuntu.yml create mode 100644 roles/ceph-crash/meta/main.yml create mode 100644 roles/ceph-crash/tasks/main.yml create mode 100644 roles/ceph-crash/tasks/systemd.yml create mode 100644 roles/ceph-crash/templates/ceph-crash.service.j2 create mode 100644 roles/ceph-dashboard/meta/main.yml create mode 100644 roles/ceph-dashboard/tasks/configure_dashboard.yml create mode 100644 roles/ceph-dashboard/tasks/configure_grafana_layouts.yml create mode 100644 roles/ceph-dashboard/tasks/main.yml create mode 100644 roles/ceph-defaults/README.md create mode 100644 roles/ceph-defaults/defaults/main.yml create mode 100644 roles/ceph-defaults/meta/main.yml create mode 100644 roles/ceph-defaults/tasks/main.yml create mode 100644 roles/ceph-defaults/vars/main.yml create mode 100644 roles/ceph-exporter/defaults/main.yml create mode 100644 roles/ceph-exporter/meta/main.yml create mode 100644 roles/ceph-exporter/tasks/main.yml create mode 100644 roles/ceph-exporter/tasks/systemd.yml create mode 100644 roles/ceph-exporter/templates/ceph-exporter.service.j2 create mode 100644 roles/ceph-facts/README.md create mode 100644 roles/ceph-facts/meta/main.yml create mode 100644 roles/ceph-facts/tasks/container_binary.yml create mode 100644 roles/ceph-facts/tasks/devices.yml create mode 100644 roles/ceph-facts/tasks/facts.yml create mode 100644 roles/ceph-facts/tasks/get_def_crush_rule_name.yml create mode 100644 roles/ceph-facts/tasks/grafana.yml create mode 100644 roles/ceph-facts/tasks/main.yml create mode 100644 roles/ceph-facts/tasks/set_monitor_address.yml create mode 
100644 roles/ceph-facts/tasks/set_radosgw_address.yml create mode 100644 roles/ceph-fetch-keys/LICENSE create mode 100644 roles/ceph-fetch-keys/README.md create mode 100644 roles/ceph-fetch-keys/defaults/main.yml create mode 100644 roles/ceph-fetch-keys/meta/main.yml create mode 100644 roles/ceph-fetch-keys/tasks/main.yml create mode 100644 roles/ceph-grafana/meta/main.yml create mode 100644 roles/ceph-grafana/tasks/configure_grafana.yml create mode 100644 roles/ceph-grafana/tasks/main.yml create mode 100644 roles/ceph-grafana/tasks/setup_container.yml create mode 100644 roles/ceph-grafana/tasks/systemd.yml create mode 100644 roles/ceph-grafana/templates/dashboards-ceph-dashboard.yml.j2 create mode 100644 roles/ceph-grafana/templates/datasources-ceph-dashboard.yml.j2 create mode 100644 roles/ceph-grafana/templates/grafana-server.service.j2 create mode 100644 roles/ceph-grafana/templates/grafana.ini.j2 create mode 100644 roles/ceph-handler/LICENSE create mode 100644 roles/ceph-handler/README.md create mode 100644 roles/ceph-handler/handlers/main.yml create mode 100644 roles/ceph-handler/meta/main.yml create mode 100644 roles/ceph-handler/tasks/check_running_cluster.yml create mode 100644 roles/ceph-handler/tasks/check_running_containers.yml create mode 100644 roles/ceph-handler/tasks/check_socket_non_container.yml create mode 100644 roles/ceph-handler/tasks/handler_crash.yml create mode 100644 roles/ceph-handler/tasks/handler_exporter.yml create mode 100644 roles/ceph-handler/tasks/handler_mdss.yml create mode 100644 roles/ceph-handler/tasks/handler_mgrs.yml create mode 100644 roles/ceph-handler/tasks/handler_mons.yml create mode 100644 roles/ceph-handler/tasks/handler_nfss.yml create mode 100644 roles/ceph-handler/tasks/handler_osds.yml create mode 100644 roles/ceph-handler/tasks/handler_rbdmirrors.yml create mode 100644 roles/ceph-handler/tasks/handler_rgws.yml create mode 100644 roles/ceph-handler/tasks/main.yml create mode 100644 
roles/ceph-handler/templates/restart_mds_daemon.sh.j2 create mode 100644 roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 create mode 100644 roles/ceph-handler/templates/restart_mon_daemon.sh.j2 create mode 100644 roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 create mode 100644 roles/ceph-handler/templates/restart_osd_daemon.sh.j2 create mode 100644 roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 create mode 100644 roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 create mode 100644 roles/ceph-infra/handlers/main.yml create mode 100644 roles/ceph-infra/meta/main.yml create mode 100644 roles/ceph-infra/tasks/configure_firewall.yml create mode 100644 roles/ceph-infra/tasks/dashboard_firewall.yml create mode 100644 roles/ceph-infra/tasks/main.yml create mode 100644 roles/ceph-infra/tasks/setup_ntp.yml create mode 100644 roles/ceph-infra/templates/logrotate.conf.j2 create mode 100644 roles/ceph-mds/LICENSE create mode 100644 roles/ceph-mds/README.md create mode 100644 roles/ceph-mds/defaults/main.yml create mode 100644 roles/ceph-mds/files/ceph-mds.target create mode 100644 roles/ceph-mds/meta/main.yml create mode 100644 roles/ceph-mds/tasks/common.yml create mode 100644 roles/ceph-mds/tasks/containerized.yml create mode 100644 roles/ceph-mds/tasks/create_mds_filesystems.yml create mode 100644 roles/ceph-mds/tasks/main.yml create mode 100644 roles/ceph-mds/tasks/non_containerized.yml create mode 100644 roles/ceph-mds/tasks/systemd.yml create mode 100644 roles/ceph-mds/templates/ceph-mds.service.d-overrides.j2 create mode 100644 roles/ceph-mds/templates/ceph-mds.service.j2 create mode 100644 roles/ceph-mgr/LICENSE create mode 100644 roles/ceph-mgr/README.md create mode 100644 roles/ceph-mgr/defaults/main.yml create mode 100644 roles/ceph-mgr/files/ceph-mgr.target create mode 100644 roles/ceph-mgr/meta/main.yml create mode 100644 roles/ceph-mgr/tasks/common.yml create mode 100644 roles/ceph-mgr/tasks/main.yml create mode 100644 
roles/ceph-mgr/tasks/mgr_modules.yml create mode 100644 roles/ceph-mgr/tasks/pre_requisite.yml create mode 100644 roles/ceph-mgr/tasks/start_mgr.yml create mode 100644 roles/ceph-mgr/tasks/systemd.yml create mode 100644 roles/ceph-mgr/templates/ceph-mgr.service.d-overrides.j2 create mode 100644 roles/ceph-mgr/templates/ceph-mgr.service.j2 create mode 100644 roles/ceph-mon/LICENSE create mode 100644 roles/ceph-mon/README.md create mode 100644 roles/ceph-mon/defaults/main.yml create mode 100644 roles/ceph-mon/files/ceph-mon.target create mode 100644 roles/ceph-mon/meta/main.yml create mode 100644 roles/ceph-mon/tasks/ceph_keys.yml create mode 100644 roles/ceph-mon/tasks/deploy_monitors.yml create mode 100644 roles/ceph-mon/tasks/main.yml create mode 100644 roles/ceph-mon/tasks/secure_cluster.yml create mode 100644 roles/ceph-mon/tasks/start_monitor.yml create mode 100644 roles/ceph-mon/tasks/systemd.yml create mode 100644 roles/ceph-mon/templates/ceph-mon.service.d-overrides.j2 create mode 100644 roles/ceph-mon/templates/ceph-mon.service.j2 create mode 100644 roles/ceph-nfs/LICENSE create mode 100644 roles/ceph-nfs/README.md create mode 100644 roles/ceph-nfs/defaults/main.yml create mode 100644 roles/ceph-nfs/meta/main.yml create mode 100644 roles/ceph-nfs/tasks/create_rgw_nfs_user.yml create mode 100644 roles/ceph-nfs/tasks/main.yml create mode 100644 roles/ceph-nfs/tasks/pre_requisite_container.yml create mode 100644 roles/ceph-nfs/tasks/pre_requisite_non_container.yml create mode 100644 roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml create mode 100644 roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml create mode 100644 roles/ceph-nfs/tasks/start_nfs.yml create mode 100644 roles/ceph-nfs/tasks/systemd.yml create mode 100644 roles/ceph-nfs/templates/ceph-nfs.service.j2 create mode 100644 roles/ceph-nfs/templates/ganesha.conf.j2 create mode 100644 roles/ceph-nfs/templates/idmap.conf.j2 create mode 100644 roles/ceph-nfs/templates/systemd-run.j2 
create mode 100644 roles/ceph-node-exporter/meta/main.yml create mode 100644 roles/ceph-node-exporter/tasks/main.yml create mode 100644 roles/ceph-node-exporter/tasks/setup_container.yml create mode 100644 roles/ceph-node-exporter/tasks/systemd.yml create mode 100644 roles/ceph-node-exporter/templates/node_exporter.service.j2 create mode 100644 roles/ceph-osd/LICENSE create mode 100644 roles/ceph-osd/README.md create mode 100644 roles/ceph-osd/defaults/main.yml create mode 100644 roles/ceph-osd/files/ceph-osd.target create mode 100644 roles/ceph-osd/meta/main.yml create mode 100644 roles/ceph-osd/tasks/common.yml create mode 100644 roles/ceph-osd/tasks/crush_rules.yml create mode 100644 roles/ceph-osd/tasks/main.yml create mode 100644 roles/ceph-osd/tasks/scenarios/lvm-batch.yml create mode 100644 roles/ceph-osd/tasks/scenarios/lvm.yml create mode 100644 roles/ceph-osd/tasks/start_osds.yml create mode 100644 roles/ceph-osd/tasks/system_tuning.yml create mode 100644 roles/ceph-osd/tasks/systemd.yml create mode 100644 roles/ceph-osd/templates/ceph-osd.service.d-overrides.j2 create mode 100644 roles/ceph-osd/templates/ceph-osd.service.j2 create mode 100644 roles/ceph-osd/templates/systemd-run.j2 create mode 100644 roles/ceph-osd/templates/tmpfiles_hugepage.j2 create mode 100644 roles/ceph-osd/vars/main.yml create mode 100644 roles/ceph-prometheus/files/ceph_dashboard.yml create mode 100644 roles/ceph-prometheus/handlers/main.yml create mode 100644 roles/ceph-prometheus/meta/main.yml create mode 100644 roles/ceph-prometheus/tasks/main.yml create mode 100644 roles/ceph-prometheus/tasks/setup_container.yml create mode 100644 roles/ceph-prometheus/tasks/systemd.yml create mode 100644 roles/ceph-prometheus/templates/alertmanager.service.j2 create mode 100644 roles/ceph-prometheus/templates/alertmanager.yml.j2 create mode 100644 roles/ceph-prometheus/templates/prometheus.service.j2 create mode 100644 roles/ceph-prometheus/templates/prometheus.yml.j2 create mode 100644 
roles/ceph-rbd-mirror/LICENSE create mode 100644 roles/ceph-rbd-mirror/README.md create mode 100644 roles/ceph-rbd-mirror/defaults/main.yml create mode 100644 roles/ceph-rbd-mirror/files/ceph-rbd-mirror.target create mode 100644 roles/ceph-rbd-mirror/meta/main.yml create mode 100644 roles/ceph-rbd-mirror/tasks/configure_mirroring.yml create mode 100644 roles/ceph-rbd-mirror/tasks/main.yml create mode 100644 roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml create mode 100644 roles/ceph-rbd-mirror/tasks/systemd.yml create mode 100644 roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.d-overrides.j2 create mode 100644 roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 create mode 100644 roles/ceph-rgw-loadbalancer/defaults/main.yml create mode 100644 roles/ceph-rgw-loadbalancer/handlers/main.yml create mode 100644 roles/ceph-rgw-loadbalancer/meta/main.yml create mode 100644 roles/ceph-rgw-loadbalancer/tasks/main.yml create mode 100644 roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml create mode 100644 roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml create mode 100644 roles/ceph-rgw-loadbalancer/templates/haproxy.cfg.j2 create mode 100644 roles/ceph-rgw-loadbalancer/templates/keepalived.conf.j2 create mode 100644 roles/ceph-rgw/LICENSE create mode 100644 roles/ceph-rgw/README.md create mode 100644 roles/ceph-rgw/defaults/main.yml create mode 100644 roles/ceph-rgw/files/ceph-radosgw.target create mode 100644 roles/ceph-rgw/handlers/main.yml create mode 100644 roles/ceph-rgw/meta/main.yml create mode 100644 roles/ceph-rgw/tasks/common.yml create mode 100644 roles/ceph-rgw/tasks/main.yml create mode 100644 roles/ceph-rgw/tasks/openstack-keystone.yml create mode 100644 roles/ceph-rgw/tasks/pre_requisite.yml create mode 100644 roles/ceph-rgw/tasks/rgw_create_pools.yml create mode 100644 roles/ceph-rgw/tasks/start_docker_rgw.yml create mode 100644 roles/ceph-rgw/tasks/start_radosgw.yml create mode 100644 roles/ceph-rgw/tasks/systemd.yml 
create mode 100644 roles/ceph-rgw/templates/ceph-radosgw.service.j2 create mode 100644 roles/ceph-rgw/templates/ceph-rgw.service.d-overrides.j2 create mode 100644 roles/ceph-validate/meta/main.yml create mode 100644 roles/ceph-validate/tasks/check_devices.yml create mode 100644 roles/ceph-validate/tasks/check_eth_rgw.yml create mode 100644 roles/ceph-validate/tasks/check_ipaddr_mon.yml create mode 100644 roles/ceph-validate/tasks/check_nfs.yml create mode 100644 roles/ceph-validate/tasks/check_pools.yml create mode 100644 roles/ceph-validate/tasks/check_rbdmirror.yml create mode 100644 roles/ceph-validate/tasks/check_repository.yml create mode 100644 roles/ceph-validate/tasks/check_rgw_pools.yml create mode 100644 roles/ceph-validate/tasks/check_system.yml create mode 100644 roles/ceph-validate/tasks/main.yml create mode 100644 site-container.yml.sample create mode 100644 site.yml create mode 100644 site.yml copy.sample create mode 100644 test.yml create mode 100644 tests/README.md create mode 100644 tests/README.rst create mode 100644 tests/__pycache__/conftest.cpython-314-pytest-8.4.2.pyc create mode 100644 tests/conftest.py create mode 100644 tests/functional/.gitignore create mode 120000 tests/functional/add-mdss/Vagrantfile create mode 120000 tests/functional/add-mdss/ceph-override.json create mode 120000 tests/functional/add-mdss/container/Vagrantfile create mode 120000 tests/functional/add-mdss/container/ceph-override.json create mode 100644 tests/functional/add-mdss/container/group_vars/all create mode 100644 tests/functional/add-mdss/container/hosts create mode 100644 tests/functional/add-mdss/container/hosts-2 create mode 100644 tests/functional/add-mdss/container/vagrant_variables.yml create mode 100644 tests/functional/add-mdss/group_vars/all create mode 100644 tests/functional/add-mdss/hosts create mode 100644 tests/functional/add-mdss/hosts-2 create mode 100644 tests/functional/add-mdss/vagrant_variables.yml create mode 120000 
tests/functional/add-mgrs/Vagrantfile create mode 120000 tests/functional/add-mgrs/ceph-override.json create mode 120000 tests/functional/add-mgrs/container/Vagrantfile create mode 120000 tests/functional/add-mgrs/container/ceph-override.json create mode 100644 tests/functional/add-mgrs/container/group_vars/all create mode 100644 tests/functional/add-mgrs/container/hosts create mode 100644 tests/functional/add-mgrs/container/hosts-2 create mode 100644 tests/functional/add-mgrs/container/vagrant_variables.yml create mode 100644 tests/functional/add-mgrs/group_vars/all create mode 100644 tests/functional/add-mgrs/hosts create mode 100644 tests/functional/add-mgrs/hosts-2 create mode 100644 tests/functional/add-mgrs/vagrant_variables.yml create mode 120000 tests/functional/add-mons/Vagrantfile create mode 120000 tests/functional/add-mons/ceph-override.json create mode 120000 tests/functional/add-mons/container/Vagrantfile create mode 120000 tests/functional/add-mons/container/ceph-override.json create mode 100644 tests/functional/add-mons/container/group_vars/all create mode 100644 tests/functional/add-mons/container/hosts create mode 100644 tests/functional/add-mons/container/hosts-2 create mode 100644 tests/functional/add-mons/container/vagrant_variables.yml create mode 100644 tests/functional/add-mons/group_vars/all create mode 100644 tests/functional/add-mons/hosts create mode 100644 tests/functional/add-mons/hosts-2 create mode 100644 tests/functional/add-mons/vagrant_variables.yml create mode 120000 tests/functional/add-osds/Vagrantfile create mode 120000 tests/functional/add-osds/ceph-override.json create mode 120000 tests/functional/add-osds/container/Vagrantfile create mode 120000 tests/functional/add-osds/container/ceph-override.json create mode 100644 tests/functional/add-osds/container/group_vars/all create mode 100644 tests/functional/add-osds/container/hosts create mode 100644 tests/functional/add-osds/container/hosts-2 create mode 100644 
tests/functional/add-osds/container/vagrant_variables.yml create mode 100644 tests/functional/add-osds/group_vars/all create mode 100644 tests/functional/add-osds/hosts create mode 100644 tests/functional/add-osds/hosts-2 create mode 100644 tests/functional/add-osds/vagrant_variables.yml create mode 120000 tests/functional/add-rbdmirrors/Vagrantfile create mode 120000 tests/functional/add-rbdmirrors/ceph-override.json create mode 120000 tests/functional/add-rbdmirrors/container/Vagrantfile create mode 120000 tests/functional/add-rbdmirrors/container/ceph-override.json create mode 100644 tests/functional/add-rbdmirrors/container/group_vars/all create mode 100644 tests/functional/add-rbdmirrors/container/hosts create mode 100644 tests/functional/add-rbdmirrors/container/hosts-2 create mode 100644 tests/functional/add-rbdmirrors/container/vagrant_variables.yml create mode 100644 tests/functional/add-rbdmirrors/group_vars/all create mode 100644 tests/functional/add-rbdmirrors/hosts create mode 100644 tests/functional/add-rbdmirrors/hosts-2 create mode 100644 tests/functional/add-rbdmirrors/vagrant_variables.yml create mode 120000 tests/functional/add-rgws/Vagrantfile create mode 120000 tests/functional/add-rgws/ceph-override.json create mode 120000 tests/functional/add-rgws/container/Vagrantfile create mode 120000 tests/functional/add-rgws/container/ceph-override.json create mode 100644 tests/functional/add-rgws/container/group_vars/all create mode 100644 tests/functional/add-rgws/container/hosts create mode 100644 tests/functional/add-rgws/container/hosts-2 create mode 100644 tests/functional/add-rgws/container/vagrant_variables.yml create mode 100644 tests/functional/add-rgws/group_vars/all create mode 100644 tests/functional/add-rgws/group_vars/rgws create mode 100644 tests/functional/add-rgws/hosts create mode 100644 tests/functional/add-rgws/hosts-2 create mode 100644 tests/functional/add-rgws/vagrant_variables.yml create mode 120000 
tests/functional/all-in-one/Vagrantfile create mode 120000 tests/functional/all-in-one/ceph-override.json create mode 120000 tests/functional/all-in-one/container/Vagrantfile create mode 120000 tests/functional/all-in-one/container/ceph-override.json create mode 100644 tests/functional/all-in-one/container/group_vars/all create mode 100644 tests/functional/all-in-one/container/hosts create mode 100644 tests/functional/all-in-one/container/vagrant_variables.yml create mode 100644 tests/functional/all-in-one/group_vars/all create mode 100644 tests/functional/all-in-one/hosts create mode 100644 tests/functional/all-in-one/vagrant_variables.yml create mode 120000 tests/functional/all_daemons/Vagrantfile create mode 100644 tests/functional/all_daemons/ceph-override.json create mode 120000 tests/functional/all_daemons/container/Vagrantfile create mode 120000 tests/functional/all_daemons/container/ceph-override.json create mode 100644 tests/functional/all_daemons/container/group_vars/all create mode 100644 tests/functional/all_daemons/container/group_vars/clients create mode 100644 tests/functional/all_daemons/container/group_vars/iscsigws create mode 100644 tests/functional/all_daemons/container/group_vars/mons create mode 100644 tests/functional/all_daemons/container/group_vars/osds create mode 100644 tests/functional/all_daemons/container/group_vars/rgws create mode 100644 tests/functional/all_daemons/container/hosts create mode 100644 tests/functional/all_daemons/container/vagrant_variables.yml create mode 100644 tests/functional/all_daemons/group_vars/all create mode 100644 tests/functional/all_daemons/group_vars/clients create mode 100644 tests/functional/all_daemons/group_vars/iscsigws create mode 100644 tests/functional/all_daemons/group_vars/mons create mode 100644 tests/functional/all_daemons/group_vars/nfss create mode 100644 tests/functional/all_daemons/group_vars/osds create mode 100644 tests/functional/all_daemons/group_vars/rgws create mode 100644 
tests/functional/all_daemons/hosts create mode 100644 tests/functional/all_daemons/hosts-switch-to-containers create mode 100644 tests/functional/all_daemons/vagrant_variables.yml create mode 120000 tests/functional/all_daemons_ipv6/Vagrantfile create mode 100644 tests/functional/all_daemons_ipv6/ceph-override.json create mode 120000 tests/functional/all_daemons_ipv6/container/Vagrantfile create mode 120000 tests/functional/all_daemons_ipv6/container/ceph-override.json create mode 100644 tests/functional/all_daemons_ipv6/container/group_vars/all create mode 100644 tests/functional/all_daemons_ipv6/container/group_vars/clients create mode 100644 tests/functional/all_daemons_ipv6/container/group_vars/iscsigws create mode 100644 tests/functional/all_daemons_ipv6/container/group_vars/mons create mode 100644 tests/functional/all_daemons_ipv6/container/group_vars/osds create mode 100644 tests/functional/all_daemons_ipv6/container/group_vars/rgws create mode 100644 tests/functional/all_daemons_ipv6/container/hosts create mode 100644 tests/functional/all_daemons_ipv6/container/vagrant_variables.yml create mode 100644 tests/functional/all_daemons_ipv6/group_vars/all create mode 100644 tests/functional/all_daemons_ipv6/group_vars/clients create mode 100644 tests/functional/all_daemons_ipv6/group_vars/iscsigws create mode 100644 tests/functional/all_daemons_ipv6/group_vars/mons create mode 100644 tests/functional/all_daemons_ipv6/group_vars/nfss create mode 100644 tests/functional/all_daemons_ipv6/group_vars/osds create mode 100644 tests/functional/all_daemons_ipv6/group_vars/rgws create mode 100644 tests/functional/all_daemons_ipv6/hosts create mode 100644 tests/functional/all_daemons_ipv6/vagrant_variables.yml create mode 120000 tests/functional/cephadm/Vagrantfile create mode 100644 tests/functional/cephadm/group_vars/all create mode 100644 tests/functional/cephadm/hosts create mode 100644 tests/functional/cephadm/vagrant_variables.yml create mode 100644 
tests/functional/collect-logs.yml create mode 120000 tests/functional/collocation/Vagrantfile create mode 120000 tests/functional/collocation/ceph-override.json create mode 120000 tests/functional/collocation/container/Vagrantfile create mode 120000 tests/functional/collocation/container/ceph-override.json create mode 100644 tests/functional/collocation/container/group_vars/all create mode 100644 tests/functional/collocation/container/group_vars/clients create mode 100644 tests/functional/collocation/container/group_vars/osds create mode 100644 tests/functional/collocation/container/group_vars/rgws create mode 100644 tests/functional/collocation/container/hosts create mode 100644 tests/functional/collocation/container/vagrant_variables.yml create mode 100644 tests/functional/collocation/group_vars/all create mode 100644 tests/functional/collocation/group_vars/clients create mode 100644 tests/functional/collocation/group_vars/osds create mode 100644 tests/functional/collocation/group_vars/rgws create mode 100644 tests/functional/collocation/hosts create mode 100644 tests/functional/collocation/vagrant_variables.yml create mode 100644 tests/functional/dev_setup.yml create mode 120000 tests/functional/docker2podman/Vagrantfile create mode 100644 tests/functional/docker2podman/ceph-override.json create mode 100644 tests/functional/docker2podman/group_vars/all create mode 100644 tests/functional/docker2podman/group_vars/clients create mode 100644 tests/functional/docker2podman/group_vars/iscsigws create mode 100644 tests/functional/docker2podman/group_vars/mons create mode 100644 tests/functional/docker2podman/group_vars/osds create mode 100644 tests/functional/docker2podman/group_vars/rgws create mode 100644 tests/functional/docker2podman/hosts create mode 100644 tests/functional/docker2podman/vagrant_variables.yml create mode 120000 tests/functional/external_clients/Vagrantfile create mode 120000 tests/functional/external_clients/ceph-override.json create mode 120000 
tests/functional/external_clients/container/Vagrantfile create mode 120000 tests/functional/external_clients/container/ceph-override.json create mode 100644 tests/functional/external_clients/container/inventory/external_clients-hosts create mode 100644 tests/functional/external_clients/container/inventory/group_vars/all create mode 100644 tests/functional/external_clients/container/inventory/group_vars/clients create mode 100644 tests/functional/external_clients/container/inventory/hosts create mode 100644 tests/functional/external_clients/container/vagrant_variables.yml create mode 100644 tests/functional/external_clients/inventory/external_clients-hosts create mode 100644 tests/functional/external_clients/inventory/group_vars/all create mode 100644 tests/functional/external_clients/inventory/group_vars/clients create mode 100644 tests/functional/external_clients/inventory/hosts create mode 100644 tests/functional/external_clients/vagrant_variables.yml create mode 100644 tests/functional/external_clients_admin_key.yml create mode 120000 tests/functional/infra_lv_create/Vagrantfile create mode 100644 tests/functional/infra_lv_create/group_vars/all create mode 100644 tests/functional/infra_lv_create/hosts create mode 100644 tests/functional/infra_lv_create/vagrant_variables.yml create mode 120000 tests/functional/lvm-auto-discovery/Vagrantfile create mode 120000 tests/functional/lvm-auto-discovery/ceph-override.json create mode 120000 tests/functional/lvm-auto-discovery/container/Vagrantfile create mode 120000 tests/functional/lvm-auto-discovery/container/ceph-override.json create mode 100644 tests/functional/lvm-auto-discovery/container/group_vars/all create mode 100644 tests/functional/lvm-auto-discovery/container/hosts create mode 100644 tests/functional/lvm-auto-discovery/container/vagrant_variables.yml create mode 100644 tests/functional/lvm-auto-discovery/group_vars/all create mode 100644 tests/functional/lvm-auto-discovery/hosts create mode 100644 
tests/functional/lvm-auto-discovery/vagrant_variables.yml create mode 120000 tests/functional/lvm-batch/Vagrantfile create mode 120000 tests/functional/lvm-batch/ceph-override.json create mode 120000 tests/functional/lvm-batch/container/Vagrantfile create mode 120000 tests/functional/lvm-batch/container/ceph-override.json create mode 100644 tests/functional/lvm-batch/container/group_vars/all create mode 100644 tests/functional/lvm-batch/container/hosts create mode 100644 tests/functional/lvm-batch/container/vagrant_variables.yml create mode 100644 tests/functional/lvm-batch/group_vars/all create mode 100644 tests/functional/lvm-batch/hosts create mode 100644 tests/functional/lvm-batch/vagrant_variables.yml create mode 120000 tests/functional/lvm-osds/Vagrantfile create mode 120000 tests/functional/lvm-osds/ceph-override.json create mode 120000 tests/functional/lvm-osds/container/Vagrantfile create mode 120000 tests/functional/lvm-osds/container/ceph-override.json create mode 100644 tests/functional/lvm-osds/container/group_vars/all create mode 100644 tests/functional/lvm-osds/container/hosts create mode 100644 tests/functional/lvm-osds/container/vagrant_variables.yml create mode 100644 tests/functional/lvm-osds/group_vars/all create mode 100644 tests/functional/lvm-osds/hosts create mode 100644 tests/functional/lvm-osds/vagrant_variables.yml create mode 100644 tests/functional/lvm_setup.yml create mode 120000 tests/functional/migrate_ceph_disk_to_ceph_volume/Vagrantfile create mode 100644 tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all create mode 100644 tests/functional/migrate_ceph_disk_to_ceph_volume/hosts create mode 100644 tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml create mode 120000 tests/functional/podman/Vagrantfile create mode 120000 tests/functional/podman/ceph-override.json create mode 100644 tests/functional/podman/group_vars/all create mode 100644 tests/functional/podman/group_vars/clients create mode 
100644 tests/functional/podman/group_vars/iscsigws create mode 100644 tests/functional/podman/group_vars/mons create mode 100644 tests/functional/podman/group_vars/osds create mode 100644 tests/functional/podman/group_vars/rgws create mode 100644 tests/functional/podman/hosts create mode 100644 tests/functional/podman/vagrant_variables.yml create mode 100644 tests/functional/rbd_map_devices.yml create mode 100644 tests/functional/rbdmirror.yml create mode 120000 tests/functional/rbdmirror/Vagrantfile create mode 120000 tests/functional/rbdmirror/container/Vagrantfile create mode 100644 tests/functional/rbdmirror/container/group_vars/all create mode 100644 tests/functional/rbdmirror/container/hosts create mode 120000 tests/functional/rbdmirror/container/secondary/Vagrantfile create mode 100644 tests/functional/rbdmirror/container/secondary/group_vars/all create mode 100644 tests/functional/rbdmirror/container/secondary/hosts create mode 100644 tests/functional/rbdmirror/container/secondary/vagrant_variables.yml create mode 100644 tests/functional/rbdmirror/container/vagrant_variables.yml create mode 100644 tests/functional/rbdmirror/group_vars/all create mode 100644 tests/functional/rbdmirror/hosts create mode 120000 tests/functional/rbdmirror/secondary/Vagrantfile create mode 100644 tests/functional/rbdmirror/secondary/group_vars/all create mode 100644 tests/functional/rbdmirror/secondary/hosts create mode 100644 tests/functional/rbdmirror/secondary/vagrant_variables.yml create mode 100644 tests/functional/rbdmirror/vagrant_variables.yml create mode 100644 tests/functional/reboot.yml create mode 100644 tests/functional/setup.yml create mode 120000 tests/functional/shrink_mds/Vagrantfile create mode 120000 tests/functional/shrink_mds/ceph-override.json create mode 120000 tests/functional/shrink_mds/container/Vagrantfile create mode 120000 tests/functional/shrink_mds/container/ceph-override.json create mode 100644 tests/functional/shrink_mds/container/group_vars/all 
create mode 100644 tests/functional/shrink_mds/container/group_vars/mons create mode 100644 tests/functional/shrink_mds/container/group_vars/osds create mode 100644 tests/functional/shrink_mds/container/hosts create mode 100644 tests/functional/shrink_mds/container/vagrant_variables.yml create mode 100644 tests/functional/shrink_mds/group_vars/all create mode 100644 tests/functional/shrink_mds/group_vars/mons create mode 100644 tests/functional/shrink_mds/group_vars/osds create mode 100644 tests/functional/shrink_mds/hosts create mode 100644 tests/functional/shrink_mds/vagrant_variables.yml create mode 120000 tests/functional/shrink_mgr/Vagrantfile create mode 120000 tests/functional/shrink_mgr/ceph-override.json create mode 120000 tests/functional/shrink_mgr/container/Vagrantfile create mode 120000 tests/functional/shrink_mgr/container/ceph-override.json create mode 100644 tests/functional/shrink_mgr/container/group_vars/all create mode 100644 tests/functional/shrink_mgr/container/group_vars/mons create mode 100644 tests/functional/shrink_mgr/container/group_vars/osds create mode 100644 tests/functional/shrink_mgr/container/hosts create mode 100644 tests/functional/shrink_mgr/container/vagrant_variables.yml create mode 100644 tests/functional/shrink_mgr/group_vars/all create mode 100644 tests/functional/shrink_mgr/group_vars/mons create mode 100644 tests/functional/shrink_mgr/group_vars/osds create mode 100644 tests/functional/shrink_mgr/hosts create mode 100644 tests/functional/shrink_mgr/vagrant_variables.yml create mode 120000 tests/functional/shrink_mon/Vagrantfile create mode 120000 tests/functional/shrink_mon/ceph-override.json create mode 120000 tests/functional/shrink_mon/container/Vagrantfile create mode 120000 tests/functional/shrink_mon/container/ceph-override.json create mode 100644 tests/functional/shrink_mon/container/group_vars/all create mode 100644 tests/functional/shrink_mon/container/group_vars/mons create mode 100644 
tests/functional/shrink_mon/container/group_vars/osds create mode 100644 tests/functional/shrink_mon/container/hosts create mode 100644 tests/functional/shrink_mon/container/vagrant_variables.yml create mode 100644 tests/functional/shrink_mon/group_vars/all create mode 100644 tests/functional/shrink_mon/group_vars/mons create mode 100644 tests/functional/shrink_mon/group_vars/osds create mode 100644 tests/functional/shrink_mon/hosts create mode 100644 tests/functional/shrink_mon/hosts-switch-to-containers create mode 100644 tests/functional/shrink_mon/vagrant_variables.yml create mode 120000 tests/functional/shrink_osd/Vagrantfile create mode 120000 tests/functional/shrink_osd/ceph-override.json create mode 120000 tests/functional/shrink_osd/container/Vagrantfile create mode 120000 tests/functional/shrink_osd/container/ceph-override.json create mode 100644 tests/functional/shrink_osd/container/group_vars/all create mode 100644 tests/functional/shrink_osd/container/hosts create mode 100644 tests/functional/shrink_osd/container/vagrant_variables.yml create mode 100644 tests/functional/shrink_osd/group_vars/all create mode 100644 tests/functional/shrink_osd/group_vars/osds create mode 100644 tests/functional/shrink_osd/hosts create mode 100644 tests/functional/shrink_osd/vagrant_variables.yml create mode 120000 tests/functional/shrink_rbdmirror/Vagrantfile create mode 120000 tests/functional/shrink_rbdmirror/ceph-override.json create mode 120000 tests/functional/shrink_rbdmirror/container/Vagrantfile create mode 120000 tests/functional/shrink_rbdmirror/container/ceph-override.json create mode 100644 tests/functional/shrink_rbdmirror/container/group_vars/all create mode 100644 tests/functional/shrink_rbdmirror/container/group_vars/mons create mode 100644 tests/functional/shrink_rbdmirror/container/group_vars/osds create mode 100644 tests/functional/shrink_rbdmirror/container/hosts create mode 100644 tests/functional/shrink_rbdmirror/container/vagrant_variables.yml 
create mode 100644 tests/functional/shrink_rbdmirror/group_vars/all create mode 100644 tests/functional/shrink_rbdmirror/group_vars/mons create mode 100644 tests/functional/shrink_rbdmirror/group_vars/osds create mode 100644 tests/functional/shrink_rbdmirror/hosts create mode 100644 tests/functional/shrink_rbdmirror/vagrant_variables.yml create mode 120000 tests/functional/shrink_rgw/Vagrantfile create mode 120000 tests/functional/shrink_rgw/ceph-override.json create mode 120000 tests/functional/shrink_rgw/container/Vagrantfile create mode 120000 tests/functional/shrink_rgw/container/ceph-override.json create mode 100644 tests/functional/shrink_rgw/container/group_vars/all create mode 100644 tests/functional/shrink_rgw/container/group_vars/mons create mode 100644 tests/functional/shrink_rgw/container/group_vars/osds create mode 100644 tests/functional/shrink_rgw/container/group_vars/rgws create mode 100644 tests/functional/shrink_rgw/container/hosts create mode 100644 tests/functional/shrink_rgw/container/vagrant_variables.yml create mode 100644 tests/functional/shrink_rgw/group_vars/all create mode 100644 tests/functional/shrink_rgw/group_vars/mons create mode 100644 tests/functional/shrink_rgw/group_vars/osds create mode 100644 tests/functional/shrink_rgw/group_vars/rgws create mode 100644 tests/functional/shrink_rgw/hosts create mode 100644 tests/functional/shrink_rgw/vagrant_variables.yml create mode 120000 tests/functional/subset_update/Vagrantfile create mode 100644 tests/functional/subset_update/ceph-override.json create mode 120000 tests/functional/subset_update/container/Vagrantfile create mode 120000 tests/functional/subset_update/container/ceph-override.json create mode 100644 tests/functional/subset_update/container/group_vars/all create mode 100644 tests/functional/subset_update/container/group_vars/clients create mode 100644 tests/functional/subset_update/container/group_vars/iscsigws create mode 100644 
tests/functional/subset_update/container/group_vars/mons create mode 100644 tests/functional/subset_update/container/group_vars/osds create mode 100644 tests/functional/subset_update/container/group_vars/rgws create mode 100644 tests/functional/subset_update/container/hosts create mode 100644 tests/functional/subset_update/container/vagrant_variables.yml create mode 100644 tests/functional/subset_update/group_vars/all create mode 100644 tests/functional/subset_update/group_vars/clients create mode 100644 tests/functional/subset_update/group_vars/iscsigws create mode 100644 tests/functional/subset_update/group_vars/mons create mode 100644 tests/functional/subset_update/group_vars/nfss create mode 100644 tests/functional/subset_update/group_vars/osds create mode 100644 tests/functional/subset_update/group_vars/rgws create mode 100644 tests/functional/subset_update/hosts create mode 100644 tests/functional/subset_update/vagrant_variables.yml create mode 100644 tests/functional/tests/__init__.py create mode 100644 tests/functional/tests/grafana/test_grafana.py create mode 100644 tests/functional/tests/mds/__init__.py create mode 100644 tests/functional/tests/mds/test_mds.py create mode 100644 tests/functional/tests/mgr/__init__.py create mode 100644 tests/functional/tests/mgr/test_mgr.py create mode 100644 tests/functional/tests/mon/__init__.py create mode 100644 tests/functional/tests/mon/test_mons.py create mode 100644 tests/functional/tests/nfs/test_nfs_ganesha.py create mode 100644 tests/functional/tests/node-exporter/test_node_exporter.py create mode 100644 tests/functional/tests/osd/__init__.py create mode 100644 tests/functional/tests/osd/test_osds.py create mode 100644 tests/functional/tests/rbd-mirror/__init__.py create mode 100644 tests/functional/tests/rbd-mirror/test_rbd_mirror.py create mode 100644 tests/functional/tests/rgw/__init__.py create mode 100644 tests/functional/tests/rgw/test_rgw.py create mode 100644 tests/functional/tests/test_install.py 
create mode 100644 tests/inventories/single-machine.yml create mode 100644 tests/library/ca_test_common.py create mode 100644 tests/library/test_ceph_crush.py create mode 100644 tests/library/test_ceph_crush_rule.py create mode 100644 tests/library/test_ceph_dashboard_user.py create mode 100644 tests/library/test_ceph_ec_profile.py create mode 100644 tests/library/test_ceph_fs.py create mode 100644 tests/library/test_ceph_key.py create mode 100644 tests/library/test_ceph_mgr_module.py create mode 100644 tests/library/test_ceph_osd.py create mode 100644 tests/library/test_ceph_osd_flag.py create mode 100644 tests/library/test_ceph_pool.py create mode 100644 tests/library/test_ceph_volume.py create mode 100644 tests/library/test_ceph_volume_simple_activate.py create mode 100644 tests/library/test_ceph_volume_simple_scan.py create mode 100644 tests/library/test_cephadm_adopt.py create mode 100644 tests/library/test_cephadm_bootstrap.py create mode 100644 tests/library/test_radosgw_caps.py create mode 100644 tests/library/test_radosgw_realm.py create mode 100644 tests/library/test_radosgw_user.py create mode 100644 tests/library/test_radosgw_zone.py create mode 100644 tests/library/test_radosgw_zonegroup.py create mode 100644 tests/module_utils/test_ca_common.py create mode 100644 tests/plugins/filter/test_ipaddrs_in_ranges.py create mode 100644 tests/pytest.ini create mode 100644 tests/requirements.txt create mode 100644 tests/scripts/generate_ssh_config.sh create mode 100644 tests/scripts/vagrant_up.sh create mode 100755 tests/scripts/workflows/defaults.sh create mode 100755 tests/scripts/workflows/signed-off.sh create mode 100644 tox-cephadm.ini create mode 100644 tox-docker2podman.ini create mode 100644 tox-external_clients.ini create mode 100644 tox-podman.ini create mode 100644 tox-rbdmirror.ini create mode 100644 tox-shrink_osd.ini create mode 100644 tox-subset_update.ini create mode 100644 tox-update.ini create mode 100644 tox.ini create mode 100644 
vagrant.yaml create mode 100644 vagrant_variables.yml.sample diff --git a/.deepsource.toml b/.deepsource.toml new file mode 100644 index 0000000..2bc248c --- /dev/null +++ b/.deepsource.toml @@ -0,0 +1,18 @@ +version = 1 + +test_patterns = ["tests/**"] + +exclude_patterns = [ + "roles/**", + "profiles/**", + "infrastructure-playbooks/**", + "group_vars/**", + "contrib/**" +] + +[[analyzers]] +name = "python" +enabled = true + + [analyzers.meta] + runtime_version = "3.x.x" \ No newline at end of file diff --git a/.fuse_hidden0000003200000001 b/.fuse_hidden0000003200000001 new file mode 100644 index 0000000..e69de29 diff --git a/.fuse_hidden0000010600000002 b/.fuse_hidden0000010600000002 new file mode 100644 index 0000000..e69de29 diff --git a/.fuse_hidden0000011600000001 b/.fuse_hidden0000011600000001 new file mode 100644 index 0000000..e69de29 diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 0000000..424a053 --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,63 @@ +pull_request_rules: +# Backports + - actions: + backport: + branches: + - stable-3.0 + conditions: + - label=backport-stable-3.0 + name: backport stable-3.0 + - actions: + backport: + branches: + - stable-3.1 + conditions: + - label=backport-stable-3.1 + name: backport stable-3.1 + - actions: + backport: + branches: + - stable-3.2 + conditions: + - label=backport-stable-3.2 + name: backport stable-3.2 + - actions: + backport: + branches: + - stable-4.0 + conditions: + - label=backport-stable-4.0 + name: backport stable-4.0 + - actions: + backport: + branches: + - stable-5.0 + conditions: + - label=backport-stable-5.0 + name: backport stable-5.0 + - actions: + backport: + branches: + - stable-6.0 + conditions: + - label=backport-stable-6.0 + name: backport stable-6.0 + - actions: + backport: + branches: + - stable-7.0 + conditions: + - label=backport-stable-7.0 + name: backport stable-7.0 + - actions: + backport: + branches: + - stable-8.0 + conditions: + - label=backport-stable-8.0 + name: 
backport stable-8.0 +commands_restrictions: + backport: + conditions: + - base=main + - number<0 diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..f7db4bb --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,10 @@ +version: 2 + +build: + os: "ubuntu-22.04" + tools: + python: "3.9" + +sphinx: + # Path to your Sphinx configuration file. + configuration: docs/source/conf.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..05a78c7 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,101 @@ +# Contributing to ceph-ansible + +1. Follow the [commit guidelines](#commit-guidelines) + +## Commit guidelines + +- All commits should have a subject and a body +- The commit subject should briefly describe what the commit changes +- The commit body should describe the problem addressed and the chosen solution + - What was the problem and solution? Why that solution? Were there alternative ideas? +- Wrap commit subjects and bodies to 80 characters +- Sign-off your commits +- Add a best-effort scope designation to commit subjects. This could be a directory name, file name, + or the name of a logical grouping of code. Examples: + - library: add a placeholder module for the validate action plugin + - site.yml: combine validate play with fact gathering play +- Commits linked with an issue should trace them with : + - Fixes: #2653 + +[Suggested reading.](https://chris.beams.io/posts/git-commit/) + +## Pull requests + +### Jenkins CI + +We use Jenkins to run several tests on each pull request. + +If you don't want to run a build for a particular pull request, because all you are changing is the +README for example, add the text `[skip ci]` to the PR title. 
+ +### Merging strategy + +Merging PR is controlled by [mergify](https://mergify.io/) by the following rules: + +- at least one approuval from a maintainer +- a SUCCESS from the CI pipeline "ceph-ansible PR Pipeline" + +If you work is not ready for review/merge, please request the DNM label via a comment or the title of your PR. +This will prevent the engine merging your pull request. + +### Backports (maintainers only) + +If you wish to see your work from 'main' being backported to a stable branch you can ping a maintainer +so he will set the backport label on your PR. Once the PR from main is merged, a backport PR will be created by mergify, +if there is a cherry-pick conflict you must resolv it by pulling the branch. + +**NEVER** push directly into a stable branch, **unless** the code from main has diverged so much that the files don't exist in the stable branch. +If that happens, inform the maintainers of the reasons why you pushed directly into a stable branch, if the reason is invalid, maintainers will immediatly close your pull request. + +## Good to know + +### Sample files + +The sample files we provide in `group_vars/` are versionned, +they are a copy of what their respective `./roles//defaults/main.yml` contain. + +It means if you are pushing a patch modifying one of these files: + +- `./roles/ceph-mds/defaults/main.yml` +- `./roles/ceph-mgr/defaults/main.yml` +- `./roles/ceph-fetch-keys/defaults/main.yml` +- `./roles/ceph-rbd-mirror/defaults/main.yml` +- `./roles/ceph-defaults/defaults/main.yml` +- `./roles/ceph-osd/defaults/main.yml` +- `./roles/ceph-nfs/defaults/main.yml` +- `./roles/ceph-client/defaults/main.yml` +- `./roles/ceph-common/defaults/main.yml` +- `./roles/ceph-mon/defaults/main.yml` +- `./roles/ceph-rgw/defaults/main.yml` +- `./roles/ceph-container-common/defaults/main.yml` +- `./roles/ceph-common-coreos/defaults/main.yml` + +You will have to get the corresponding sample file updated, there is a script which do it for you. 
+You must run `./generate_group_vars_sample.sh` before you commit your changes so you are guaranteed to have consistent content for these files. + +### Keep your branch up-to-date + +Sometimes, a pull request can be subject to long discussion, reviews and comments, meantime, `main` +moves forward so let's try to keep your branch rebased on main regularly to avoid huge conflict merge. +A rebased branch is more likely to be merged easily & shorter. + +### Organize your commits + +Do not split your commits unecessary, we are used to see pull request with useless additional commits like +"I'm addressing reviewer's comments". So, please, squash and/or amend them as much as possible. + +Similarly, split them when needed, if you are modifying several parts in ceph-ansible or pushing a large +patch you may have to split yours commit properly so it's better to understand your work. +Some recommandations: + +- one fix = one commit, +- do not mix multiple topics in a single commit, +- if you PR contains a large number of commits that are each other totally unrelated, it should probably even be split in several PRs. + +If you've broken your work up into a set of sequential changes and each commit pass the tests on their own then that's fine. +If you've got commits fixing typos or other problems introduced by previous commits in the same PR, then those should be squashed before merging. 
+ +If you are new to Git, these links might help: + +- [https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History) +- [http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..93d81cf --- /dev/null +++ b/Makefile @@ -0,0 +1,113 @@ +# Makefile for constructing RPMs. +# Try "make" (for SRPMS) or "make rpm" + +NAME = ceph-ansible + +# Set the RPM package NVR from "git describe". +# Examples: +# +# A "git describe" value of "v2.2.0beta1" would create an NVR +# "ceph-ansible-2.2.0-0.beta1.1.el8" +# +# A "git describe" value of "v2.2.0rc1" would create an NVR +# "ceph-ansible-2.2.0-0.rc1.1.el8" +# +# A "git describe" value of "v2.2.0rc1-1-gc465f85" would create an NVR +# "ceph-ansible-2.2.0-0.rc1.1.gc465f85.el8" +# +# A "git describe" value of "v2.2.0" creates an NVR +# "ceph-ansible-2.2.0-1.el8" + +DIST ?= "el8" +MOCK_CONFIG ?= "centos-stream+epel-8-x86_64" +TAG := $(shell git describe --tags --abbrev=0 --match 'v*') +VERSION := $(shell echo $(TAG) | sed 's/^v//') +COMMIT := $(shell git rev-parse HEAD) +SHORTCOMMIT := $(shell echo $(COMMIT) | cut -c1-7) +RELEASE := $(shell git describe --tags --match 'v*' \ + | sed 's/^v//' \ + | sed 's/^[^-]*-//' \ + | sed 's/-/./') +ifeq ($(VERSION),$(RELEASE)) + RELEASE = 1 +endif +ifneq (,$(findstring alpha,$(VERSION))) + ALPHA := $(shell echo $(VERSION) | sed 's/.*alpha/alpha/') + RELEASE := 0.$(ALPHA).$(RELEASE) + VERSION := $(subst $(ALPHA),,$(VERSION)) +endif +ifneq (,$(findstring beta,$(VERSION))) + BETA := $(shell echo $(VERSION) | sed 's/.*beta/beta/') + RELEASE 
:= 0.$(BETA).$(RELEASE) + VERSION := $(subst $(BETA),,$(VERSION)) +endif +ifneq (,$(findstring rc,$(VERSION))) + RC := $(shell echo $(VERSION) | sed 's/.*rc/rc/') + RELEASE := 0.$(RC).$(RELEASE) + VERSION := $(subst $(RC),,$(VERSION)) +endif + +ifneq (,$(shell echo $(VERSION) | grep [a-zA-Z])) + # If we still have alpha characters in our Git tag string, we don't know + # how to translate that into a sane RPM version/release. Bail out. + $(error cannot translate Git tag version $(VERSION) to an RPM NVR) +endif + +NVR := $(NAME)-$(VERSION)-$(RELEASE).$(DIST) + +all: srpm + +# Testing only +echo: + echo COMMIT $(COMMIT) + echo VERSION $(VERSION) + echo RELEASE $(RELEASE) + echo NVR $(NVR) + +clean: + rm -rf dist/ + rm -rf ceph-ansible-$(VERSION)-$(SHORTCOMMIT).tar.gz + rm -rf $(NVR).src.rpm + +dist: + git archive --format=tar.gz --prefix=ceph-ansible-$(VERSION)/ HEAD > ceph-ansible-$(VERSION)-$(SHORTCOMMIT).tar.gz + +spec: + sed ceph-ansible.spec.in \ + -e 's/@COMMIT@/$(COMMIT)/' \ + -e 's/@VERSION@/$(VERSION)/' \ + -e 's/@RELEASE@/$(RELEASE)/' \ + > ceph-ansible.spec + +srpm: dist spec + rpmbuild -bs ceph-ansible.spec \ + --define "_topdir ." \ + --define "_sourcedir ." \ + --define "_srcrpmdir ." \ + --define "dist .$(DIST)" + +rpm: dist srpm + mock -r $(MOCK_CONFIG) rebuild $(NVR).src.rpm \ + --resultdir=. \ + --define "dist .$(DIST)" + +tag: + $(eval BRANCH := $(shell git rev-parse --abbrev-ref HEAD)) + $(eval LASTNUM := $(shell echo $(TAG) \ + | sed -E "s/.*[^0-9]([0-9]+)$$/\1/")) + $(eval NEXTNUM=$(shell echo $$(($(LASTNUM)+1)))) + $(eval NEXTTAG=$(shell echo $(TAG) | sed "s/$(LASTNUM)$$/$(NEXTNUM)/")) + if [[ "$(TAG)" == "$$(git describe --tags --match 'v*')" ]]; then \ + echo "$(SHORTCOMMIT) on $(BRANCH) is already tagged as $(TAG)"; \ + exit 1; \ + fi + if [[ "$(BRANCH)" != "master" && "$(BRANCH)" != "main" ]] && \ + !
[[ "$(BRANCH)" =~ ^stable- ]]; then \ + echo Cannot tag $(BRANCH); \ + exit 1; \ + fi + @echo Tagging Git branch $(BRANCH) + git tag $(NEXTTAG) + @echo run \'git push origin $(NEXTTAG)\' to push to GitHub. + +.PHONY: dist rpm srpm tag diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..b9f2348 --- /dev/null +++ b/README.rst @@ -0,0 +1,10 @@ +Ceph Ansible +============== + + The project is still maintained for the time being but it is encouraged to migrate to `cephadm <https://docs.ceph.com/en/latest/cephadm/>`_. + +Ansible playbooks for Ceph, the distributed object, block, and file storage platform. + +Please refer to our hosted documentation here: https://docs.ceph.com/projects/ceph-ansible/en/latest/ +You can view documentation for our ``stable-*`` branches by substituting ``main`` in the link +above for the name of the branch. For example: https://docs.ceph.com/projects/ceph-ansible/en/stable-8.0/ diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 0000000..8a57019 --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,605 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +require 'yaml' +require 'resolv' +VAGRANTFILE_API_VERSION = '2' + +if File.file?(File.join(File.dirname(__FILE__), 'vagrant_variables.yml')) then + vagrant_variables_file = 'vagrant_variables.yml' +else + vagrant_variables_file = 'vagrant_variables.yml.sample' +end + +config_file=File.expand_path(File.join(File.dirname(__FILE__), vagrant_variables_file)) + +settings=YAML.load_file(config_file) + +LABEL_PREFIX = settings['label_prefix'] ?
settings['label_prefix'] + "-" : "" +NMONS = settings['mon_vms'] +NOSDS = settings['osd_vms'] +NMDSS = settings['mds_vms'] +NRGWS = settings['rgw_vms'] +NNFSS = settings['nfs_vms'] +NRBD_MIRRORS = settings['rbd_mirror_vms'] +CLIENTS = settings['client_vms'] +MGRS = settings['mgr_vms'] +PUBLIC_SUBNET = settings['public_subnet'] +CLUSTER_SUBNET = settings['cluster_subnet'] +BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['vagrant_box'] +CLIENT_BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['client_vagrant_box'] || BOX +BOX_URL = ENV['CEPH_ANSIBLE_VAGRANT_BOX_URL'] || settings['vagrant_box_url'] +SYNC_DIR = settings['vagrant_sync_dir'] +MEMORY = settings['memory'] +ETH = settings['eth'] +DOCKER = settings['docker'] +USER = settings['ssh_username'] +DEBUG = settings['debug'] + +ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode') +DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false) + +"#{PUBLIC_SUBNET}" =~ Resolv::IPv6::Regex ? IPV6 = true : IPV6 = false + +$last_ip_pub_digit = 9 +$last_ip_cluster_digit = 9 + +ansible_provision = proc do |ansible| + if DOCKER then + ansible.playbook = 'site-container.yml' + if settings['skip_tags'] + ansible.skip_tags = settings['skip_tags'] + end + else + ansible.playbook = 'site.yml' + end + + # Note: Can't do ranges like mon[0-2] in groups because + # these aren't supported by Vagrant, see + # https://github.com/mitchellh/vagrant/issues/3539 + ansible.groups = { + 'mons' => (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" }, + 'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" }, + 'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" }, + 'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" }, + 'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" }, + 'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" }, + 'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" }, + 'mgrs' => (0..MGRS - 1).map { |j| 
"#{LABEL_PREFIX}mgr#{j}" }, + } + + if IPV6 then + ansible.extra_vars = { + cluster_network: "#{CLUSTER_SUBNET}/64", + journal_size: 100, + public_network: "#{PUBLIC_SUBNET}/64", + } + else + ansible.extra_vars = { + cluster_network: "#{CLUSTER_SUBNET}.0/24", + journal_size: 100, + public_network: "#{PUBLIC_SUBNET}.0/24", + } + end + + # In a production deployment, these should be secret + if DOCKER then + ansible.extra_vars = ansible.extra_vars.merge({ + containerized_deployment: 'true', + ceph_mon_docker_subnet: ansible.extra_vars[:public_network], + devices: settings['disks'], + radosgw_interface: ETH, + generate_fsid: 'true', + }) + else + ansible.extra_vars = ansible.extra_vars.merge({ + devices: settings['disks'], + radosgw_interface: ETH, + os_tuning_params: settings['os_tuning_params'], + }) + end + + if BOX == 'linode' then + ansible.sudo = true + # Use radosgw_address_block instead of radosgw_interface: + ansible.extra_vars.delete(:radosgw_interface) + ansible.extra_vars = ansible.extra_vars.merge({ + cluster_network: "#{CLUSTER_SUBNET}.0/16", + devices: ['/dev/sdc'], # hardcode leftover disk + monitor_address_block: "#{PUBLIC_SUBNET}.0/16", + radosgw_address_block: "#{PUBLIC_SUBNET}.0/16", + public_network: "#{PUBLIC_SUBNET}.0/16", + }) + end + + if DEBUG then + ansible.verbose = '-vvvv' + end + ansible.limit = 'all' +end + +def create_vmdk(name, size) + dir = Pathname.new(__FILE__).expand_path.dirname + path = File.join(dir, '.vagrant', name + '.vmdk') + `vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \ + 2>&1 > /dev/null` unless File.exist?(path) +end + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = BOX + config.vm.box_url = BOX_URL + config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048 + config.ssh.private_key_path = settings['ssh_private_key_path'] + config.ssh.username = USER + + # When using libvirt, avoid errors like: + # "host doesn't support requested feature: 
CPUID.01H:EDX.ds [bit 21]" + config.vm.provider :libvirt do |lv| + lv.cpu_mode = 'host-passthrough' + lv.disk_driver :cache => 'unsafe' + lv.graphics_type = 'none' + lv.cpus = 2 + end + + # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox + if DISABLE_SYNCED_FOLDER + config.vm.provider :virtualbox do |v,override| + override.vm.synced_folder '.', SYNC_DIR, disabled: true + end + config.vm.provider :libvirt do |v,override| + override.vm.synced_folder '.', SYNC_DIR, disabled: true + end + end + + if BOX == 'openstack' + # OpenStack VMs + config.vm.provider :openstack do |os| + config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true + config.ssh.pty = true + os.openstack_auth_url = settings['os_openstack_auth_url'] + os.username = settings['os_username'] + os.password = settings['os_password'] + os.tenant_name = settings['os_tenant_name'] + os.region = settings['os_region'] + os.flavor = settings['os_flavor'] + os.image = settings['os_image'] + os.keypair_name = settings['os_keypair_name'] + os.security_groups = ['default'] + + if settings['os_networks'] then + os.networks = settings['os_networks'] + end + + if settings['os_floating_ip_pool'] then + os.floating_ip_pool = settings['os_floating_ip_pool'] + end + + config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell" + end + elsif BOX == 'linode' + config.vm.provider :linode do |provider, override| + provider.token = ENV['LINODE_API_KEY'] + provider.distribution = settings['cloud_distribution'] # 'Ubuntu 16.04 LTS' + provider.datacenter = settings['cloud_datacenter'] + provider.plan = MEMORY.to_s + provider.private_networking = true + # root install generally takes <1GB + provider.xvda_size = 4*1024 + # add some swap as the Linode distros require it + provider.swap_size = 128 + end + end + + (0..NMONS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon| + mon.vm.hostname = "#{LABEL_PREFIX}mon#{i}" + if ASSIGN_STATIC_IP && !IPV6 + 
mon.vm.network :private_network, + :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}" + end + + # Virtualbox + mon.vm.provider :virtualbox do |vb,override| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + mon.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + mon.vm.provider :libvirt do |lv,override| + lv.memory = MEMORY + lv.random_hostname = true + if IPV6 then + override.vm.network :private_network, + :libvirt__ipv6_address => "#{PUBLIC_SUBNET}", + :libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-public-network", + :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}", + :netmask => "64" + end + end + + # Parallels + mon.vm.provider "parallels" do |prl| + prl.name = "ceph-mon#{i}" + prl.memory = "#{MEMORY}" + end + + mon.vm.provider :linode do |provider| + provider.label = mon.vm.hostname + end + end + end + + (0..MGRS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}mgr#{i}" do |mgr| + mgr.vm.hostname = "#{LABEL_PREFIX}mgr#{i}" + if ASSIGN_STATIC_IP && !IPV6 + mgr.vm.network :private_network, + :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}" + end + # Virtualbox + mgr.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + mgr.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + mgr.vm.provider :libvirt do |lv,override| + lv.memory = MEMORY + lv.random_hostname = true + if IPV6 then + override.vm.network :private_network, + :libvirt__ipv6_address => "#{PUBLIC_SUBNET}", + :libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-public-network", + :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}", + :netmask => "64" + end + end + + # Parallels + mgr.vm.provider "parallels" do |prl| + prl.name = "ceph-mgr#{i}" + prl.memory = 
"#{MEMORY}" + end + + mgr.vm.provider :linode do |provider| + provider.label = mgr.vm.hostname + end + end + end + + (0..CLIENTS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}client#{i}" do |client| + client.vm.box = CLIENT_BOX + client.vm.hostname = "#{LABEL_PREFIX}client#{i}" + if ASSIGN_STATIC_IP && !IPV6 + client.vm.network :private_network, + :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}" + end + # Virtualbox + client.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + client.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + client.vm.provider :libvirt do |lv,override| + lv.memory = MEMORY + lv.random_hostname = true + if IPV6 then + override.vm.network :private_network, + :libvirt__ipv6_address => "#{PUBLIC_SUBNET}", + :libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-public-network", + :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}", + :netmask => "64" + end + end + + # Parallels + client.vm.provider "parallels" do |prl| + prl.name = "ceph-client#{i}" + prl.memory = "#{MEMORY}" + end + + client.vm.provider :linode do |provider| + provider.label = client.vm.hostname + end + end + end + + (0..NRGWS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw| + rgw.vm.hostname = "#{LABEL_PREFIX}rgw#{i}" + if ASSIGN_STATIC_IP && !IPV6 + rgw.vm.network :private_network, + :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}" + end + + # Virtualbox + rgw.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + rgw.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + rgw.vm.provider :libvirt do |lv,override| + lv.memory = MEMORY + lv.random_hostname = true + if IPV6 then + override.vm.network :private_network, + :libvirt__ipv6_address => "#{PUBLIC_SUBNET}", + 
:libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-public-network", + :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}", + :netmask => "64" + end + end + + # Parallels + rgw.vm.provider "parallels" do |prl| + prl.name = "ceph-rgw#{i}" + prl.memory = "#{MEMORY}" + end + + rgw.vm.provider :linode do |provider| + provider.label = rgw.vm.hostname + end + end + end + + (0..NNFSS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}nfs#{i}" do |nfs| + nfs.vm.hostname = "#{LABEL_PREFIX}nfs#{i}" + if ASSIGN_STATIC_IP && !IPV6 + nfs.vm.network :private_network, + :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}" + end + + # Virtualbox + nfs.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + nfs.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + nfs.vm.provider :libvirt do |lv,override| + lv.memory = MEMORY + lv.random_hostname = true + if IPV6 then + override.vm.network :private_network, + :libvirt__ipv6_address => "#{PUBLIC_SUBNET}", + :libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-public-network", + :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}", + :netmask => "64" + end + end + + # Parallels + nfs.vm.provider "parallels" do |prl| + prl.name = "ceph-nfs#{i}" + prl.memory = "#{MEMORY}" + end + + nfs.vm.provider :linode do |provider| + provider.label = nfs.vm.hostname + end + end + end + + (0..NMDSS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds| + mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}" + if ASSIGN_STATIC_IP && !IPV6 + mds.vm.network :private_network, + :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}" + end + # Virtualbox + mds.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + mds.vm.provider 
:vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + mds.vm.provider :libvirt do |lv,override| + lv.memory = MEMORY + lv.random_hostname = true + if IPV6 then + override.vm.network :private_network, + :libvirt__ipv6_address => "#{PUBLIC_SUBNET}", + :libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-public-network", + :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}", + :netmask => "64" + end + end + # Parallels + mds.vm.provider "parallels" do |prl| + prl.name = "ceph-mds#{i}" + prl.memory = "#{MEMORY}" + end + + mds.vm.provider :linode do |provider| + provider.label = mds.vm.hostname + end + end + end + + (0..NRBD_MIRRORS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}rbd-mirror#{i}" do |rbd_mirror| + rbd_mirror.vm.hostname = "#{LABEL_PREFIX}rbd-mirror#{i}" + if ASSIGN_STATIC_IP && !IPV6 + rbd_mirror.vm.network :private_network, + :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}" + end + # Virtualbox + rbd_mirror.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + rbd_mirror.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + rbd_mirror.vm.provider :libvirt do |lv,override| + lv.memory = MEMORY + lv.random_hostname = true + if IPV6 then + override.vm.network :private_network, + :libvirt__ipv6_address => "#{PUBLIC_SUBNET}", + :libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-public-network", + :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}", + :netmask => "64" + end + end + # Parallels + rbd_mirror.vm.provider "parallels" do |prl| + prl.name = "ceph-rbd-mirror#{i}" + prl.memory = "#{MEMORY}" + end + + rbd_mirror.vm.provider :linode do |provider| + provider.label = rbd_mirror.vm.hostname + end + end + end + + (0..NOSDS - 1).each do |i| + config.vm.define 
"#{LABEL_PREFIX}osd#{i}" do |osd| + osd.vm.hostname = "#{LABEL_PREFIX}osd#{i}" + if ASSIGN_STATIC_IP && !IPV6 + osd.vm.network :private_network, + :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}" + osd.vm.network :private_network, + :ip => "#{CLUSTER_SUBNET}.#{$last_ip_cluster_digit+=1}" + end + # Virtualbox + osd.vm.provider :virtualbox do |vb| + # Create our own controller for consistency and to remove VM dependency + unless File.exist?("disk-#{i}-0.vdi") + # Adding OSD Controller; + # once the first disk is there assuming we don't need to do this + vb.customize ['storagectl', :id, + '--name', 'OSD Controller', + '--add', 'scsi'] + end + + (0..2).each do |d| + unless File.exist?("disk-#{i}-#{d}.vdi") + vb.customize ['createhd', + '--filename', "disk-#{i}-#{d}", + '--size', '11000'] + end + vb.customize ['storageattach', :id, + '--storagectl', 'OSD Controller', + '--port', 3 + d, + '--device', 0, + '--type', 'hdd', + '--medium', "disk-#{i}-#{d}.vdi"] + end + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + osd.vm.provider :vmware_fusion do |v| + (0..1).each do |d| + v.vmx["scsi0:#{d + 1}.present"] = 'TRUE' + v.vmx["scsi0:#{d + 1}.fileName"] = + create_vmdk("disk-#{i}-#{d}", '11000MB') + end + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + driverletters = ('a'..'z').to_a + osd.vm.provider :libvirt do |lv,override| + # always make /dev/sd{a/b/c} so that CI can ensure that + # virtualbox and libvirt will have the same devices to use for OSDs + (0..2).each do |d| + lv.storage :file, :device => "hd#{driverletters[d]}", :size => '50G', :bus => "ide" + end + lv.memory = MEMORY + lv.random_hostname = true + if IPV6 then + override.vm.network :private_network, + :libvirt__ipv6_address => "#{PUBLIC_SUBNET}", + :libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-public-network", + :netmask => "64" + override.vm.network :private_network, + 
:libvirt__ipv6_address => "#{CLUSTER_SUBNET}", + :libvirt__ipv6_prefix => "64", + :libvirt__dhcp_enabled => false, + :libvirt__forward_mode => "veryisolated", + :libvirt__network_name => "ipv6-cluster-network", + :netmask => "64" + end + end + + # Parallels + osd.vm.provider "parallels" do |prl| + prl.name = "ceph-osd#{i}" + prl.memory = "#{MEMORY}" + (0..1).each do |d| + prl.customize ["set", :id, + "--device-add", + "hdd", + "--iface", + "sata"] + end + end + + osd.vm.provider :linode do |provider| + provider.label = osd.vm.hostname + end + + # Run the provisioner after the last machine comes up + osd.vm.provision 'ansible', &ansible_provision if i == (NOSDS - 1) + end + end +end diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 0000000..cfa9769 --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,43 @@ +# Comments inside this file must be set BEFORE the option. +# NOT after the option, otherwise the comment will be interpreted as a value to that option. + +[defaults] +ansible_managed = Please do not change this file directly since it is managed by Ansible and will be overwritten +library = ./library +module_utils = ./module_utils +action_plugins = plugins/actions +callback_plugins = plugins/callback +filter_plugins = plugins/filter +roles_path = ./roles +# Be sure the user running Ansible has permissions on the logfile +log_path = $HOME/ansible/ansible.log + +forks = 20 +host_key_checking = False +gathering = smart +fact_caching = jsonfile +fact_caching_connection = $HOME/ansible/facts +fact_caching_timeout = 7200 +nocows = 1 +callback_allowlist = profile_tasks +#stdout_callback = yaml +callback_result_format = yaml +force_valid_group_names = ignore +inject_facts_as_vars = False + +# Disable them in the context of https://review.openstack.org/#/c/469644 +retry_files_enabled = False + +# This is the default SSH timeout to use on connection attempts +# CI slaves are slow so by setting a higher value we can avoid the following error: +# Timeout (12s) waiting 
for privilege escalation prompt: +timeout = 60 + +[ssh_connection] +# see: https://github.com/ansible/ansible/issues/11536 +control_path = %(directory)s/%%h-%%r-%%p +ssh_args = -o ControlMaster=auto -o ControlPersist=600s +pipelining = True + +# Option to retry failed ssh executions if the failure is encountered in ssh itself +retries = 10 diff --git a/ceph-ansible.spec.in b/ceph-ansible.spec.in new file mode 100644 index 0000000..42771cb --- /dev/null +++ b/ceph-ansible.spec.in @@ -0,0 +1,62 @@ +%global commit @COMMIT@ +%global shortcommit %(c=%{commit}; echo ${c:0:7}) + +Name: ceph-ansible +Version: @VERSION@ +Release: @RELEASE@%{?dist} +Summary: Ansible playbooks for Ceph +# Some files have been copied from Ansible (GPLv3+). For example: +# plugins/actions/config_template.py +# roles/ceph-common/plugins/actions/config_template.py +License: ASL 2.0 and GPLv3+ +URL: https://github.com/ceph/ceph-ansible +Source0: %{name}-%{version}-%{shortcommit}.tar.gz +Obsoletes: ceph-iscsi-ansible <= 1.5 + +BuildArch: noarch + +BuildRequires: ansible-core >= 2.14 +Requires: ansible-core >= 2.14 + +%if 0%{?rhel} == 7 +BuildRequires: python2-devel +Requires: python2-netaddr +%else +BuildRequires: python3-devel +Requires: python3-netaddr +%endif + +%description +Ansible playbooks for Ceph + +%prep +%autosetup -p1 + +%build + +%install +mkdir -p %{buildroot}%{_datarootdir}/ceph-ansible + +for f in ansible.cfg *.yml *.sample group_vars roles library module_utils plugins infrastructure-playbooks; do + cp -a $f %{buildroot}%{_datarootdir}/ceph-ansible +done + +pushd %{buildroot}%{_datarootdir}/ceph-ansible + # These untested playbooks are too unstable for users. + rm -r infrastructure-playbooks/untested-by-ci + %if ! 0%{?fedora} && ! 
0%{?centos} + # remove ability to install ceph community version + rm roles/ceph-common/tasks/installs/redhat_{community,dev}_repository.yml + %endif +popd + +%check +# Borrowed from upstream's .travis.yml: +ansible-playbook -i dummy-ansible-hosts test.yml --syntax-check + +%files +%doc README.rst +%license LICENSE +%{_datarootdir}/ceph-ansible + +%changelog diff --git a/contrib/backport_to_stable_branch.sh b/contrib/backport_to_stable_branch.sh new file mode 100755 index 0000000..028c08c --- /dev/null +++ b/contrib/backport_to_stable_branch.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash +set -e +shopt -s extglob # enable extended pattern matching features + + +############# +# VARIABLES # +############# + +stable_branch=$1 +commit=$2 +bkp_branch_name=$3 +bkp_branch_name_prefix=bkp +bkp_branch=$bkp_branch_name-$bkp_branch_name_prefix-$stable_branch + + +############# +# FUNCTIONS # +############# + +verify_commit () { + for com in ${commit//,/ }; do + if [[ $(git cat-file -t "$com" 2>/dev/null) != commit ]]; then + echo "$com does not exist in your tree" + echo "Run 'git fetch origin main && git pull origin main'" + exit 1 + fi + done +} + +git_status () { + if [[ $(git status --porcelain | wc -l) -gt 0 ]]; then + echo "It looks like you have not committed changes:" + echo "" + git status --short + echo "" + echo "" + echo "Press ENTER to continue or Ctrl+c to break." 
+ read -r + fi +} + +checkout () { + git checkout --no-track -b "$bkp_branch" origin/"$stable_branch" +} + +cherry_pick () { + local x + for com in ${commit//,/ }; do + x="$x $com" + done + # Trim the first white space and use an array + # Reference: https://github.com/koalaman/shellcheck/wiki/SC2086#exceptions + x=(${x##*( )}) + git cherry-pick -x -s "${x[@]}" +} + +push () { + git push origin "$bkp_branch" +} + +create_pr () { + hub pull-request -h ceph/ceph-ansible:"$bkp_branch" -b "$stable_branch" -F - +} + +cleanup () { + echo "Moving back to previous branch" + git checkout - + git branch -D "$bkp_branch" +} + +test_args () { + if [ $# -lt 3 ]; then + echo "Please run the script like this: ./contrib/backport_to_stable_branch.sh STABLE_BRANCH_NAME COMMIT_SHA1 BACKPORT_BRANCH_NAME" + echo "We accept multiple commits as soon as they are commas-separated." + echo "e.g: ./contrib/backport_to_stable_branch.sh stable-2.2 6892670d317698771be7e96ce9032bc27d3fd1e5,8756c553cc8c213fc4996fc5202c7b687eb645a3 my-work" + exit 1 + fi +} + + +######## +# MAIN # +######## +test_args "$@" +git_status +verify_commit +checkout +cherry_pick +push +create_pr < /dev/null; then + git remote add "$1" git@github.com:/ceph/ansible-"$1".git + fi +} + +function pull_origin { + git pull origin main +} + +function reset_hard_origin { + # let's bring everything back to normal + git checkout "$LOCAL_BRANCH" + git fetch origin --prune + git fetch --tags + git reset --hard origin/main +} + +function check_git_status { + if [[ $(git status --porcelain | wc -l) -gt 0 ]]; then + echo "It looks like the following changes haven't been committed yet" + echo "" + git status --short + echo "" + echo "" + echo "Do you really want to continue?" 
+ echo "Press ENTER to continue or CTRL C to break" + read -r + fi +} + +function compare_tags { + # compare local tags (from https://github.com/ceph/ceph-ansible/) with distant tags (from https://github.com/ceph/ansible-ceph-$ROLE) + local tag_local + local tag_remote + for tag_local in $(git tag | grep -oE '^v[2-9].[0-9]*.[0-9]*$' | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n); do + tags_array+=("$tag_local") + done + for tag_remote in $(git ls-remote --tags "$1" | grep -oE 'v[2-9].[0-9]*.[0-9]*$' | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n); do + remote_tags_array+=("$tag_remote") + done + + for i in "${tags_array[@]}"; do + skip= + for j in "${remote_tags_array[@]}"; do + [[ "$i" == "$j" ]] && { skip=1; break; } + done + [[ -n $skip ]] || tag_to_apply+=("$i") + done +} + +# MAIN +goto_basedir +check_git_status +trap reset_hard_origin EXIT +trap reset_hard_origin ERR +pull_origin + +for ROLE in $ROLES; do + # For readability we use 2 variables with the same content + # so we always make sure we 'push' to a remote and 'filter' a role + REMOTE=$ROLE + check_existing_remote "$REMOTE" + reset_hard_origin + # First we filter branches by rewriting main with the content of roles/$ROLE + # this gives us a new commit history + for BRANCH in $(git branch --list --remotes "origin/stable-*" "origin/main" "origin/ansible-1.9" | cut -d '/' -f2); do + git checkout -B "$BRANCH" origin/"$BRANCH" + # use || true to avoid exiting in case of 'Found nothing to rewrite' + git filter-branch -f --prune-empty --subdirectory-filter roles/"$ROLE" || true + git push -f "$REMOTE" "$BRANCH" + done + reset_hard_origin + # then we filter tags starting from version 2.0 and push them + compare_tags "$ROLE" + if [[ ${#tag_to_apply[@]} == 0 ]]; then + echo "No new tag to push." 
+ continue + fi + for TAG in "${tag_to_apply[@]}"; do + # use || true to avoid exiting in case of 'Found nothing to rewrite' + git filter-branch -f --prune-empty --subdirectory-filter roles/"$ROLE" "$TAG" || true + git push -f "$REMOTE" "$TAG" + reset_hard_origin + done +done +trap - EXIT ERR +popd &> /dev/null diff --git a/contrib/rundep.sample b/contrib/rundep.sample new file mode 100644 index 0000000..380e6e5 --- /dev/null +++ b/contrib/rundep.sample @@ -0,0 +1,44 @@ +#Package lines can be commented out with '#' +# +#boost-atomic +#boost-chrono +#boost-date-time +#boost-iostreams +#boost-program +#boost-random +#boost-regex +#boost-system +#boost-thread +#bzip2-libs +#cyrus-sasl-lib +#expat +#fcgi +#fuse-libs +#glibc +#keyutils-libs +#leveldb +#libaio +#libatomic_ops +#libattr +#libblkid +#libcap +#libcom_err +#libcurl +#libgcc +#libicu +#libidn +#libnghttp2 +#libpsl +#libselinux +#libssh2 +#libstdc++ +#libunistring +#nss-softokn-freebl +#openldap +#openssl-libs +#pcre +#python-nose +#python-sphinx +#snappy +#systemd-libs +#zlib diff --git a/contrib/rundep_installer.sh b/contrib/rundep_installer.sh new file mode 100755 index 0000000..6da9162 --- /dev/null +++ b/contrib/rundep_installer.sh @@ -0,0 +1,27 @@ +#!/bin/bash -e +# +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Daniel Lin +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# + +if test -f /etc/redhat-release ; then + PACKAGE_INSTALLER=yum +elif type apt-get > /dev/null 2>&1 ; then + PACKAGE_INSTALLER=apt-get +else + echo "ERROR: Package Installer could not be determined" + exit 1 +fi + +while read p; do + if [[ $p =~ ^#.* ]] ; then + continue + fi + $PACKAGE_INSTALLER install $p -y +done < $1 diff --git a/contrib/snapshot_vms.sh b/contrib/snapshot_vms.sh new file mode 100644 index 0000000..d5d2b72 --- /dev/null +++ b/contrib/snapshot_vms.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +create_snapshots() { + local pattern=$1 + for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do + sudo virsh shutdown "${vm}" + wait_for_shutoff "${vm}" + sudo virsh snapshot-create "${vm}" + sudo virsh start "${vm}" + done +} + +delete_snapshots() { + local pattern=$1 + for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do + for snapshot in $(sudo virsh snapshot-list "${vm}" --name); do + echo "deleting snapshot ${snapshot} (vm: ${vm})" + sudo virsh snapshot-delete "${vm}" "${snapshot}" + done + done +} + +revert_snapshots() { + local pattern=$1 + for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do + echo "restoring last snapshot for ${vm}" + sudo virsh snapshot-revert "${vm}" --current + sudo virsh start "${vm}" + done +} + +wait_for_shutoff() { + local vm=$1 + local retries=60 + local delay=2 + + until test "${retries}" -eq 0 + do + echo "waiting for ${vm} to be shut off... 
#${retries}" + sleep "${delay}" + let "retries=$retries-1" + local current_state=$(sudo virsh domstate "${vm}") + test "${current_state}" == "shut off" && return + done + echo couldnt shutoff "${vm}" + exit 1 +} + +while :; do + case $1 in + -d|--delete) + delete_snapshots "$2" + exit + ;; + -i|--interactive) + INTERACTIVE=TRUE + ;; + -s|--snapshot) + create_snapshots "$2" + ;; + -r|--revert) + revert_snapshots "$2" + ;; + --) + shift + break + ;; + *) + break + esac + + shift +done diff --git a/contrib/vagrant_variables.yml.atomic b/contrib/vagrant_variables.yml.atomic new file mode 100644 index 0000000..3adff7a --- /dev/null +++ b/contrib/vagrant_variables.yml.atomic @@ -0,0 +1,30 @@ +--- +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.0 +cluster_subnet: 192.168.1 + +# MEMORY +memory: 1024 + +disks: [ '/dev/sda', '/dev/sdb' ] + +eth: 'enp0s8' +vagrant_box: centos/atomic-host +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +vagrant_sync_dir: /home/vagrant/sync + +skip_tags: 'with_pkg' diff --git a/contrib/vagrant_variables.yml.linode b/contrib/vagrant_variables.yml.linode new file mode 100644 index 0000000..e62a3bf --- /dev/null +++ b/contrib/vagrant_variables.yml.linode @@ -0,0 +1,36 @@ +--- + +vagrant_box: 'linode' +vagrant_box_url: 'https://github.com/displague/vagrant-linode/raw/master/box/linode.box' + +# Set a label prefix for the machines in this cluster. (This is useful and necessary when running multiple clusters concurrently.) +#label_prefix: 'foo' + +ssh_username: 'vagrant' +ssh_private_key_path: '~/.ssh/id_rsa' + +cloud_distribution: 'CentOS 7' +cloud_datacenter: 'newark' + +# Memory for each Linode instance, this determines price! See Linode plans. 
+memory: 2048 + +# The private network on Linode, you probably don't want to change this. +public_subnet: 192.168.0 +cluster_subnet: 192.168.0 + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 3 +mds_vms: 1 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 + +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +# vagrant_sync_dir: /home/vagrant/sync + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/contrib/vagrant_variables.yml.openstack b/contrib/vagrant_variables.yml.openstack new file mode 100644 index 0000000..420c09c --- /dev/null +++ b/contrib/vagrant_variables.yml.openstack @@ -0,0 +1,49 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 + +# SUBNET TO USE FOR THE VMS +# Use whatever private subnet your Openstack VMs are given +public_subnet: 172.17.72 +cluster_subnet: 172.17.72 + +# For Openstack VMs, the disk will depend on what you are allocated +disks: [ '/dev/vdb' ] + +# For Openstack VMs, the lan is usually eth0 +eth: 'eth0' + +# For Openstack VMs, choose the following box instead +vagrant_box: 'openstack' + +# When using Atomic Hosts (RHEL or CentOS), uncomment the line below to skip package installation +#skip_tags: 'with_pkg' + +# Set a label prefix for the machines in this cluster to differentiate +# between different concurrent clusters e.g. your OpenStack username +label_prefix: 'your-openstack-username' + +# For deploying on OpenStack VMs uncomment these vars and assign values. +# You can use env vars for the values if it makes sense. 
+#ssh_username : +#ssh_private_key_path : +#os_openstack_auth_url : +#os_username : +#os_password : +#os_tenant_name : +#os_region : +#os_flavor : +#os_image : +#os_keypair_name : +#os_networks : +#os_floating_ip_pool : diff --git a/dashboard.yml b/dashboard.yml new file mode 100644 index 0000000..e998e1e --- /dev/null +++ b/dashboard.yml @@ -0,0 +1,148 @@ +--- +- name: Deploy node_exporter + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + - "{{ monitoring_group_name|default('monitoring') }}" + gather_facts: false + become: true + pre_tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + + - name: Set ceph node exporter install 'In Progress' + run_once: true + ansible.builtin.set_stats: + data: + installer_phase_ceph_node_exporter: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tags: ['ceph_update_config'] + + - name: Import ceph-container-engine + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + tasks_from: registry + when: + - not containerized_deployment | bool + - ceph_docker_registry_auth | bool + + - name: Import ceph-node-exporter role + ansible.builtin.import_role: + name: ceph-node-exporter + + post_tasks: + - name: Set ceph node exporter install 'Complete' + run_once: true + ansible.builtin.set_stats: + data: + installer_phase_ceph_node_exporter: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: Deploy grafana and prometheus + hosts: 
"{{ monitoring_group_name | default('monitoring') }}" + gather_facts: false + become: true + pre_tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + + - name: Set ceph grafana install 'In Progress' + run_once: true + ansible.builtin.set_stats: + data: + installer_phase_ceph_grafana: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + # - ansible.builtin.import_role: + # name: ceph-facts + # tags: ['ceph_update_config'] + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: grafana + tags: ['ceph_update_config'] + + - name: Import ceph-prometheus role + ansible.builtin.import_role: + name: ceph-prometheus + + - name: Import ceph-grafana role + ansible.builtin.import_role: + name: ceph-grafana + + post_tasks: + - name: Set ceph grafana install 'Complete' + run_once: true + ansible.builtin.set_stats: + data: + installer_phase_ceph_grafana: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +# using groups[] here otherwise it can't fallback to the mon if there's no mgr group. 
+# adding an additional | default(omit) in case where no monitors are present (external ceph cluster) +- name: Deploy dashboard + hosts: "{{ groups['mgrs'] | default(groups['mons']) | default(omit) }}" + gather_facts: false + become: true + pre_tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + + - name: Set ceph dashboard install 'In Progress' + run_once: true + ansible.builtin.set_stats: + data: + installer_phase_ceph_dashboard: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + # - name: Import ceph-facts role + # ansible.builtin.import_role: + # name: ceph-facts + # tags: ['ceph_update_config'] + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: grafana + tags: ['ceph_update_config'] + + - name: Import ceph-dashboard role + ansible.builtin.import_role: + name: ceph-dashboard + + post_tasks: + - name: Set ceph dashboard install 'Complete' + run_once: true + ansible.builtin.set_stats: + data: + installer_phase_ceph_dashboard: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 0000000..378eac2 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1 @@ +build diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..4d0989c --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXPROJ = ceph-ansible +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docs/source/_static/.empty b/docs/source/_static/.empty new file mode 100644 index 0000000..e69de29 diff --git a/docs/source/_templates/.empty b/docs/source/_templates/.empty new file mode 100644 index 0000000..e69de29 diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..b5d9c69 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +# +# ceph-ansible documentation build configuration file, created by +# sphinx-quickstart on Wed Apr 5 11:55:38 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The root toctree document. +root_doc = 'glossary' + +# General information about the project. 
+project = u'ceph-ansible' +copyright = u'2017-2018, Ceph team and individual contributors' +author = u'Ceph team and individual contributors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = u'' +# The full version, including alpha/beta/rc tags. +release = u'' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'ceph-ansibledoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (root_doc, 'ceph-ansible.tex', u'ceph-ansible Documentation', + u'Ceph team and individual contributors', 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (root_doc, 'ceph-ansible', u'ceph-ansible Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (root_doc, 'ceph-ansible', u'ceph-ansible Documentation', + author, 'ceph-ansible', 'One line description of project.', + 'Miscellaneous'), +] + +master_doc = 'index' \ No newline at end of file diff --git a/docs/source/day-2/osds.rst b/docs/source/day-2/osds.rst new file mode 100644 index 0000000..fcc3ba8 --- /dev/null +++ b/docs/source/day-2/osds.rst @@ -0,0 +1,51 @@ +Adding/Removing OSD(s) after a cluster is deployed is a common operation that should be straight-forward to achieve. + + +Adding osd(s) +------------- + +Adding new OSD(s) on an existing host or adding a new OSD node can be achieved by running the main playbook with the ``--limit`` ansible option. 
+You basically need to update your host_vars/group_vars with the new hardware and/or the inventory host file with the new osd nodes being added. + +The command used would be like following: + +``ansible-playbook -vv -i site-container.yml --limit `` + +example: + +.. code-block:: shell + + $ cat hosts + [mons] + mon-node-1 + mon-node-2 + mon-node-3 + + [mgrs] + mon-node-1 + mon-node-2 + mon-node-3 + + [osds] + osd-node-1 + osd-node-2 + osd-node-3 + osd-node-99 + + $ ansible-playbook -vv -i hosts site-container.yml --limit osd-node-99 + + +Shrinking osd(s) +---------------- + +Shrinking OSDs can be done by using the shrink-osd.yml playbook provided in ``infrastructure-playbooks`` directory. + +The variable ``osd_to_kill`` is a comma separated list of OSD IDs which must be passed to the playbook (passing it as an extra var is the easiest way). + +The playbook will shrink all osds passed in ``osd_to_kill`` serially. + +example: + +.. code-block:: shell + + $ ansible-playbook -vv -i hosts infrastructure-playbooks/shrink-osd.yml -e osd_to_kill=1,2,3 diff --git a/docs/source/day-2/purge.rst b/docs/source/day-2/purge.rst new file mode 100644 index 0000000..e471733 --- /dev/null +++ b/docs/source/day-2/purge.rst @@ -0,0 +1,15 @@ +Purging the cluster +------------------- + +ceph-ansible provides two playbooks in ``infrastructure-playbooks`` for purging a Ceph cluster: ``purge-cluster.yml`` and ``purge-container-cluster.yml``. + +The names are pretty self-explanatory, ``purge-cluster.yml`` is intended to purge a non-containerized cluster whereas ``purge-container-cluster.yml`` is to purge a containerized cluster. + +example: + +.. code-block:: shell + + $ ansible-playbook -vv -i hosts infrastructure-playbooks/purge-container-cluster.yml + +.. note:: + These playbooks aren't intended to be run with the ``--limit`` option. 
\ No newline at end of file diff --git a/docs/source/day-2/upgrade.rst b/docs/source/day-2/upgrade.rst new file mode 100644 index 0000000..1ec658c --- /dev/null +++ b/docs/source/day-2/upgrade.rst @@ -0,0 +1,17 @@ +Upgrading the ceph cluster +-------------------------- + +ceph-ansible provides a playbook in ``infrastructure-playbooks`` for upgrading a Ceph cluster: ``rolling_update.yml``. + +This playbook could be used for both minor upgrades (X.Y to X.Z) or major upgrades (X to Y). + +Before running a major upgrade you need to update the ceph-ansible version first. + +example: + +.. code-block:: shell + + $ ansible-playbook -vv -i hosts infrastructure-playbooks/rolling_update.yml + +.. note:: + This playbook isn't intended to be run with the ``--limit`` ansible option. diff --git a/docs/source/dev/index.rst b/docs/source/dev/index.rst new file mode 100644 index 0000000..1559f77 --- /dev/null +++ b/docs/source/dev/index.rst @@ -0,0 +1,102 @@ +Contribution Guidelines +======================= + +The repository centralises all the Ansible roles. The roles are all part of the Ansible Galaxy. + +We love contribution and we love giving visibility to our contributors, this is why all the **commits must be signed-off**. + +Mailing list +------------ + +Please register the mailing list at http://lists.ceph.com/listinfo.cgi/ceph-ansible-ceph.com. + +IRC +--- + +Feel free to join us in the channel ``#ceph-ansible`` of the OFTC servers (https://www.oftc.net). + +GitHub +------ + +The main GitHub account for the project is at https://github.com/ceph/ceph-ansible/. + +Submit a patch +-------------- + +To start contributing just do: + +.. code-block:: console + + $ git checkout -b my-working-branch + $ # do your changes # + $ git add -p + +If your change impacts a variable file in a role such as ``roles/ceph-common/defaults/main.yml``, you need to generate a ``group_vars`` file: + +.. 
code-block:: console + + $ ./generate_group_vars_sample.sh + +You are finally ready to push your changes on GitHub: + +.. code-block:: console + + $ git commit -s + $ git push origin my-working-branch + +Worked on a change and you don't want to resend a commit for a syntax fix? + +.. code-block:: console + + $ # do your syntax change # + $ git commit --amend + $ git push -f origin my-working-branch + +Pull Request Testing +-------------------- + +Pull request testing is handled by Jenkins. All test must pass before your pull request will be merged. + +All of tests that are running are listed in the GitHub UI and will list their current status. + +If a test fails and you'd like to rerun it, comment on your pull request in the following format: + +.. code-block:: none + + jenkins test $scenario_name + +For example: + +.. code-block:: none + + jenkins test centos-non_container-all_daemons + +Backporting changes +------------------- + +If a change should be backported to a ``stable-*`` Git branch: + +- Mark your pull request with the GitHub label "Backport" so we don't lose track of it. +- Fetch the latest updates into your clone: ``git fetch`` +- Determine the latest available stable branch: + ``git branch -r --list "origin/stable-[0-9].[0-9]" | sort -r | sed 1q`` +- Create a new local branch for your pull request, based on the stable branch: + ``git checkout --no-track -b my-backported-change origin/stable-5.0`` +- Cherry-pick your change: ``git cherry-pick -x (your-sha1)`` +- Create a new pull request against the ``stable-5.0`` branch. +- Ensure that your pull request's title has the prefix "backport:", so it's clear + to reviewers what this is about. +- Add a comment in your backport pull request linking to the original (main) pull request. + +All changes to the stable branches should land in main first, so we avoid +regressions. + +Once this is done, one of the project maintainers will tag the tip of the +stable branch with your change. For example: + +.. 
code-block:: console + + $ git checkout stable-5.0 + $ git pull --ff-only + $ git tag v5.0.12 + $ git push origin v5.0.12 diff --git a/docs/source/glossary.rst b/docs/source/glossary.rst new file mode 100644 index 0000000..08c2a55 --- /dev/null +++ b/docs/source/glossary.rst @@ -0,0 +1,9 @@ +Glossary +======== + +.. toctree:: + :maxdepth: 3 + :caption: Contents: + + index + testing/glossary diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..8c88d5b --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,339 @@ +============ +ceph-ansible +============ + +Ansible playbooks for Ceph, the distributed filesystem. + + +Installation +============ + +GitHub +------ + +You can install directly from the source on GitHub by following these steps: + +- Clone the repository: + + .. code-block:: console + + $ git clone https://github.com/ceph/ceph-ansible.git + +- Next, you must decide which branch of ``ceph-ansible`` you wish to use. There + are stable branches to choose from or you could use the main branch: + + .. code-block:: console + + $ git checkout $branch + +- Next, use pip and the provided requirements.txt to install Ansible and other + needed Python libraries: + + .. code-block:: console + + $ pip install -r requirements.txt + +.. _ansible-on-rhel-family: + +Ansible on RHEL and CentOS +-------------------------- + +You can acquire Ansible on RHEL and CentOS by installing from `Ansible channel `_. + +On RHEL: + +.. code-block:: console + + $ subscription-manager repos --enable=rhel-7-server-ansible-2-rpms + +(CentOS does not use subscription-manager and already has "Extras" enabled by default.) + +.. code-block:: console + + $ sudo yum install ansible + +Ansible on Ubuntu +----------------- + +You can acquire Ansible on Ubuntu by using the `Ansible PPA `_. + +.. 
code-block:: console + + $ sudo add-apt-repository ppa:ansible/ansible + $ sudo apt update + $ sudo apt install ansible + +Ansible collections +------------------- + +In order to install third-party collections that are required for ceph-ansible, +please run: + +.. code-block:: console + + $ ansible-galaxy install -r requirements.yml + + +Releases +======== + +The following branches should be used depending on your requirements. The ``stable-*`` +branches have been QE tested and sometimes receive backport fixes throughout their lifecycle. +The ``main`` branch should be considered experimental and used with caution. + +- ``stable-3.0`` Supports Ceph versions ``jewel`` and ``luminous``. This branch requires Ansible version ``2.4``. + +- ``stable-3.1`` Supports Ceph versions ``luminous`` and ``mimic``. This branch requires Ansible version ``2.4``. + +- ``stable-3.2`` Supports Ceph versions ``luminous`` and ``mimic``. This branch requires Ansible version ``2.6``. + +- ``stable-4.0`` Supports Ceph version ``nautilus``. This branch requires Ansible version ``2.9``. + +- ``stable-5.0`` Supports Ceph version ``octopus``. This branch requires Ansible version ``2.9``. + +- ``stable-6.0`` Supports Ceph version ``pacific``. This branch requires Ansible version ``2.10``. + +- ``stable-7.0`` Supports Ceph version ``quincy``. This branch requires Ansible version ``2.15``. + +- ``main`` Supports the main (devel) branch of Ceph. This branch requires Ansible version ``2.15`` or ``2.16``. + +.. NOTE:: ``stable-3.0`` and ``stable-3.1`` branches of ceph-ansible are deprecated and no longer maintained. + +Configuration and Usage +======================= + +This project assumes you have a basic knowledge of how Ansible works and have already prepared your hosts for +configuration by Ansible. + +After you've cloned the ``ceph-ansible`` repository, selected your branch and installed Ansible then you'll need to create +your inventory file, playbook and configuration for your Ceph cluster. 
+ +Inventory +--------- + +The Ansible inventory file defines the hosts in your cluster and what roles each host plays in your Ceph cluster. The default +location for an inventory file is ``/etc/ansible/hosts`` but this file can be placed anywhere and used with the ``-i`` flag of +``ansible-playbook``. + +An example inventory file would look like: + +.. code-block:: ini + + [mons] + mon1 + mon2 + mon3 + + [osds] + osd1 + osd2 + osd3 + +.. note:: + + For more information on Ansible inventories please refer to the Ansible documentation: http://docs.ansible.com/ansible/latest/intro_inventory.html + +Playbook +-------- + +You must have a playbook to pass to the ``ansible-playbook`` command when deploying your cluster. There is a sample playbook at the root of the ``ceph-ansible`` +project called ``site.yml.sample``. This playbook should work fine for most usages, but it does include by default every daemon group which might not be +appropriate for your cluster setup. Perform the following steps to prepare your playbook: + +- Rename the sample playbook: ``mv site.yml.sample site.yml`` + +- Modify the playbook as necessary for the requirements of your cluster + +.. note:: + + It's important the playbook you use is placed at the root of the ``ceph-ansible`` project. This is how Ansible will be able to find the roles that + ``ceph-ansible`` provides. + +Configuration Validation +------------------------ + +The ``ceph-ansible`` project provides config validation through the ``ceph-validate`` role. If you are using one of the provided playbooks this role will +be run early in the deployment as to ensure you've given ``ceph-ansible`` the correct config. This check is only making sure that you've provided the +proper config settings for your cluster, not that the values in them will produce a healthy cluster. For example, if you give an incorrect address for +``monitor_address`` then the mon will still fail to join the cluster. 
+ +An example of a validation failure might look like: + +.. code-block:: console + + TASK [ceph-validate : validate provided configuration] ************************* + task path: /Users/andrewschoen/dev/ceph-ansible/roles/ceph-validate/tasks/main.yml:3 + Wednesday 02 May 2018 13:48:16 -0500 (0:00:06.984) 0:00:18.803 ********* + [ERROR]: [mon0] Validation failed for variable: osd_objectstore + + [ERROR]: [mon0] Given value for osd_objectstore: foo + + [ERROR]: [mon0] Reason: osd_objectstore must be either 'bluestore' or 'filestore' + + fatal: [mon0]: FAILED! => { + "changed": false + } + +Supported Validation +^^^^^^^^^^^^^^^^^^^^ + +The ``ceph-validate`` role currently supports validation of the proper config for the following +osd scenarios: + +- ``collocated`` +- ``non-collocated`` +- ``lvm`` + +The following install options are also validated by the ``ceph-validate`` role: + +- ``ceph_origin`` set to ``distro`` +- ``ceph_origin`` set to ``repository`` +- ``ceph_origin`` set to ``local`` +- ``ceph_repository`` set to ``dev`` +- ``ceph_repository`` set to ``community`` + + +Installation methods +-------------------- + +Ceph can be installed through several methods. + +.. toctree:: + :maxdepth: 1 + + installation/methods + +Configuration +------------- + +The configuration for your Ceph cluster will be set by the use of ansible variables that ``ceph-ansible`` provides. All of these options and their default +values are defined in the ``group_vars/`` directory at the root of the ``ceph-ansible`` project. Ansible will use configuration in a ``group_vars/`` directory +that is relative to your inventory file or your playbook. Inside of the ``group_vars/`` directory there are many sample Ansible configuration files that relate +to each of the Ceph daemon groups by their filename. For example, the ``osds.yml.sample`` contains all the default configuration for the OSD daemons. 
The ``all.yml.sample`` +file is a special ``group_vars`` file that applies to all hosts in your cluster. + +.. note:: + + For more information on setting group or host specific configuration refer to the Ansible documentation: http://docs.ansible.com/ansible/latest/intro_inventory.html#splitting-out-host-and-group-specific-data + +At the most basic level you must tell ``ceph-ansible`` what version of Ceph you wish to install, the method of installation, your clusters network settings and +how you want your OSDs configured. To begin your configuration rename each file in ``group_vars/`` you wish to use so that it does not include the ``.sample`` +at the end of the filename, uncomment the options you wish to change and provide your own value. + +An example configuration that deploys the upstream ``octopus`` version of Ceph with lvm batch method would look like this in ``group_vars/all.yml``: + +.. code-block:: yaml + + ceph_origin: repository + ceph_repository: community + public_network: "192.168.3.0/24" + cluster_network: "192.168.4.0/24" + devices: + - '/dev/sda' + - '/dev/sdb' + +The following config options are required to be changed on all installations but there could be other required options depending on your OSD scenario +selection or other aspects of your cluster. + +- ``ceph_origin`` +- ``public_network`` + + +When deploying RGW instance(s) you are required to set the ``radosgw_interface`` or ``radosgw_address`` config option. + +``ceph.conf`` Configuration File +--------------------------------- + +The supported method for defining your ``ceph.conf`` is to use the ``ceph_conf_overrides`` variable. This allows you to specify configuration options using +an INI format. This variable can be used to override sections already defined in ``ceph.conf`` (see: ``roles/ceph-config/templates/ceph.conf.j2``) or to provide +new configuration options. 
+ +The following sections in ``ceph.conf`` are supported: + +* ``[global]`` +* ``[mon]`` +* ``[osd]`` +* ``[mds]`` +* ``[client.rgw.{instance_name}]`` + +An example: + +.. code-block:: yaml + + ceph_conf_overrides: + global: + foo: 1234 + bar: 5678 + osd: + osd_mkfs_type: ext4 + +.. note:: + + We will no longer accept pull requests that modify the ``ceph.conf`` template unless it helps the deployment. For simple configuration tweaks + please use the ``ceph_conf_overrides`` variable. + +Full documentation for configuring each of the Ceph daemon types are in the following sections. + +OSD Configuration +----------------- + +OSD configuration was used to be set by selecting an OSD scenario and providing the configuration needed for +that scenario. As of nautilus in stable-4.0, the only scenarios available is ``lvm``. + +.. toctree:: + :maxdepth: 1 + + osds/scenarios + +Day-2 Operations +---------------- + +ceph-ansible provides a set of playbook in ``infrastructure-playbooks`` directory in order to perform some basic day-2 operations. + +.. toctree:: + :maxdepth: 1 + + day-2/osds + day-2/purge + day-2/upgrade + +RBD Mirroring +------------- + +Ceph-ansible provides the role ``ceph-rbd-mirror`` that can setup an RBD mirror replication. + +.. toctree:: + :maxdepth: 1 + + rbdmirror/index + +Contribution +============ + +See the following section for guidelines on how to contribute to ``ceph-ansible``. + +.. toctree:: + :maxdepth: 1 + + dev/index + +Testing +======= + +Documentation for writing functional testing scenarios for ``ceph-ansible``. 
+ +* :doc:`Testing with ceph-ansible ` +* :doc:`Glossary ` + +Demos +===== + +Vagrant Demo +------------ + +Deployment from scratch on vagrant machines: https://youtu.be/E8-96NamLDo + +Bare metal demo +--------------- + +Deployment from scratch on bare metal machines: https://youtu.be/dv_PEp9qAqg diff --git a/docs/source/installation/containerized.rst b/docs/source/installation/containerized.rst new file mode 100644 index 0000000..e71bd98 --- /dev/null +++ b/docs/source/installation/containerized.rst @@ -0,0 +1,64 @@ +Containerized deployment +======================== + +Ceph-ansible supports docker and podman only in order to deploy Ceph in a containerized context. + +Configuration and Usage +----------------------- + +To deploy ceph in containers, you will need to set the ``containerized_deployment`` variable to ``true`` and use the site-container.yml.sample playbook. + +.. code-block:: yaml + + containerized_deployment: true + +The ``ceph_origin`` and ``ceph_repository`` variables aren't needed anymore in containerized deployment and are ignored. + +.. code-block:: console + + $ ansible-playbook site-container.yml.sample + +.. note:: + + The infrastructure playbooks are working for both non containerized and containerized deployment. + +Custom container image +---------------------- + +You can configure your own container register, image and tag by using the ``ceph_docker_registry``, ``ceph_docker_image`` and ``ceph_docker_image_tag`` variables. + +.. code-block:: yaml + + ceph_docker_registry: quay.io + ceph_docker_image: ceph/ceph + ceph_docker_image_tag: v19 + +.. note:: + + ``ceph_docker_image`` should have both image namespace and image name concatenated and separated by a slash character. + + ``ceph_docker_image_tag`` should be set to a fixed tag, not to any "latest" tags unless you know what you are doing. 
Using a "latest" tag + might make the playbook restart all the daemons deployed in your cluster since these tags are intended to be updated periodically. + +Container registry authentication +--------------------------------- + +When using a container registry with authentication then you need to set the ``ceph_docker_registry_auth`` variable to ``true`` and provide the credentials via the +``ceph_docker_registry_username`` and ``ceph_docker_registry_password`` variables + +.. code-block:: yaml + + ceph_docker_registry_auth: true + ceph_docker_registry_username: foo + ceph_docker_registry_password: bar + +Container registry behind a proxy +--------------------------------- + +When using a container registry reachable via a http(s) proxy then you need to set the ``ceph_docker_http_proxy`` and/or ``ceph_docker_https_proxy`` variables. If you need +to exclude some host for the proxy configuration to can use the ``ceph_docker_no_proxy`` variable. + +.. code-block:: yaml + + ceph_docker_http_proxy: http://192.168.42.100:8080 + ceph_docker_https_proxy: https://192.168.42.100:8080 \ No newline at end of file diff --git a/docs/source/installation/methods.rst b/docs/source/installation/methods.rst new file mode 100644 index 0000000..2c30943 --- /dev/null +++ b/docs/source/installation/methods.rst @@ -0,0 +1,12 @@ +Installation methods +==================== + +ceph-ansible can deploy Ceph either in a non-containerized context (via packages) or in a containerized context using ceph-container images. + +.. toctree:: + :maxdepth: 1 + + non-containerized + containerized + +The difference here is that you don't have the rbd command on the host when using the containerized deployment so everything related to ceph needs to be executed within a container. So in the case there is software like e.g. 
Open Nebula which requires that the rbd command is accessible directly on the host (non-containerized) then you have to install the rbd command by yourself on those servers outside of containers (or make sure that this software somehow runs within containers as well and that it can access rbd). diff --git a/docs/source/installation/non-containerized.rst b/docs/source/installation/non-containerized.rst new file mode 100644 index 0000000..d869214 --- /dev/null +++ b/docs/source/installation/non-containerized.rst @@ -0,0 +1,58 @@ +Non containerized deployment +============================ + +The following are all of the available options for the installing Ceph through different channels. + +We support 3 main installation methods, all managed by the ``ceph_origin`` variable: + +- ``repository``: means that you will get Ceph installed through a new repository. Later below choose between ``community`` or ``dev``. These options will be exposed through the ``ceph_repository`` variable. +- ``distro``: means that no separate repo file will be added and you will get whatever version of Ceph is included in your Linux distro. +- ``local``: means that the Ceph binaries will be copied over from the local machine (not well tested, use at your own risk) + +Origin: Repository +------------------ + +If ``ceph_origin`` is set to ``repository``, you now have the choice between a couple of repositories controlled by the ``ceph_repository`` option: + +- ``community``: fetches packages from http://download.ceph.com, the official community Ceph repositories +- ``dev``: fetches packages from shaman, a gitbuilder based package system +- ``uca``: fetches packages from Ubuntu Cloud Archive +- ``custom``: fetches packages from a specific repository + +Community repository +~~~~~~~~~~~~~~~~~~~~ + +If ``ceph_repository`` is set to ``community``, packages you will be by default installed from http://download.ceph.com, this can be changed by tweaking ``ceph_mirror``. 
+ + +UCA repository +~~~~~~~~~~~~~~ + +If ``ceph_repository`` is set to ``uca``, packages you will be by default installed from http://ubuntu-cloud.archive.canonical.com/ubuntu, this can be changed by tweaking ``ceph_stable_repo_uca``. +You can also decide which OpenStack version the Ceph packages should come from by tweaking ``ceph_stable_openstack_release_uca``. +For example, ``ceph_stable_openstack_release_uca: queens``. + +Dev repository +~~~~~~~~~~~~~~ + +If ``ceph_repository`` is set to ``dev``, packages you will be by default installed from https://shaman.ceph.com/, this can not be tweaked. +You can obviously decide which branch to install with the help of ``ceph_dev_branch`` (defaults to 'main'). +Additionally, you can specify a SHA1 with ``ceph_dev_sha1``, defaults to 'latest' (as in latest built). + +Custom repository +~~~~~~~~~~~~~~~~~ + +If ``ceph_repository`` is set to ``custom``, packages you will be by default installed from a desired repository. +This repository is specified with ``ceph_custom_repo``, e.g: ``ceph_custom_repo: https://server.domain.com/ceph-custom-repo``. + + +Origin: Distro +-------------- + +If ``ceph_origin`` is set to ``distro``, no separate repo file will be added and you will get whatever version of Ceph is included in your Linux distro. + + +Origin: Local +------------- + +If ``ceph_origin`` is set to ``local``, the ceph binaries will be copied over from the local machine (not well tested, use at your own risk) diff --git a/docs/source/osds/scenarios.rst b/docs/source/osds/scenarios.rst new file mode 100644 index 0000000..9b8d873 --- /dev/null +++ b/docs/source/osds/scenarios.rst @@ -0,0 +1,221 @@ +OSD Scenario +============ + +As of stable-4.0, the following scenarios are not supported anymore since they are associated to ``ceph-disk``: + +* `collocated` +* `non-collocated` + +Since the Ceph luminous release, it is preferred to use the :ref:`lvm scenario +` that uses the ``ceph-volume`` provisioning tool. 
Any other +scenario will cause deprecation warnings. + +``ceph-disk`` was deprecated during the ceph-ansible 3.2 cycle and has been removed entirely from Ceph itself in the Nautilus version. +At present (starting from stable-4.0), there is only one scenario, which defaults to ``lvm``, see: + +* :ref:`lvm ` + +So there is no need to configure ``osd_scenario`` anymore, it defaults to ``lvm``. + +The ``lvm`` scenario mentioned above support both containerized and non-containerized cluster. +As a reminder, deploying a containerized cluster can be done by setting ``containerized_deployment`` +to ``True``. + +If you want to skip OSD creation during a ``ceph-ansible run`` +(e.g. because you have already provisioned your OSDs but disk IDs have +changed), you can skip the ``prepare_osd`` tag i.e. by specifying +``--skip-tags prepare_osd`` on the ``ansible-playbook`` command line. + +.. _osd_scenario_lvm: + +lvm +--- + +This OSD scenario uses ``ceph-volume`` to create OSDs, primarily using LVM, and +is only available when the Ceph release is luminous or newer. +It is automatically enabled. + +Other (optional) supported settings: + +- ``dmcrypt``: Enable Ceph's encryption on OSDs using ``dmcrypt``. + Defaults to ``false`` if unset. + +- ``osds_per_device``: Provision more than 1 OSD (the default if unset) per device. + + +Simple configuration +^^^^^^^^^^^^^^^^^^^^ + +With this approach, most of the decisions on how devices are configured to +provision an OSD are made by the Ceph tooling (``ceph-volume lvm batch`` in +this case). There is almost no room to modify how the OSD is composed given an +input of devices. + +To use this configuration, the ``devices`` option must be populated with the +raw device paths that will be used to provision the OSDs. + + +.. note:: Raw devices must be "clean", without a gpt partition table, or + logical volumes present. 
+ + +For example, for a node that has ``/dev/sda`` and ``/dev/sdb`` intended for +Ceph usage, the configuration would be: + + +.. code-block:: yaml + + devices: + - /dev/sda + - /dev/sdb + +In the above case, if both devices are spinning drives, 2 OSDs would be +created, each with its own collocated journal. + +Other provisioning strategies are possible, by mixing spinning and solid state +devices, for example: + +.. code-block:: yaml + + devices: + - /dev/sda + - /dev/sdb + - /dev/nvme0n1 + +Similar to the initial example, this would end up producing 2 OSDs, but data +would be placed on the slower spinning drives (``/dev/sda``, and ``/dev/sdb``) +and journals would be placed on the faster solid state device ``/dev/nvme0n1``. +The ``ceph-volume`` tool describes this in detail in +`the "batch" subcommand section `_ + +This option can also be used with ``osd_auto_discovery``, meaning that you do not need to populate +``devices`` directly and any appropriate devices found by ansible will be used instead. + +.. code-block:: yaml + + osd_auto_discovery: true + +Other (optional) supported settings: + +- ``crush_device_class``: Sets the CRUSH device class for all OSDs created with this + method (it is not possible to have a per-OSD CRUSH device class using the *simple* + configuration approach). Values *must be* a string, like + ``crush_device_class: "ssd"`` + + +Advanced configuration +^^^^^^^^^^^^^^^^^^^^^^ + +This configuration is useful when more granular control is wanted when setting +up devices and how they should be arranged to provision an OSD. It requires an +existing setup of volume groups and logical volumes (``ceph-volume`` will **not** +create these). + +To use this configuration, the ``lvm_volumes`` option must be populated with +logical volumes and volume groups. Additionally, absolute paths to partitions +*can* be used for ``journal``, ``block.db``, and ``block.wal``. + +.. 
note:: This configuration uses ``ceph-volume lvm create`` to provision OSDs + +Supported ``lvm_volumes`` configuration settings: + +- ``data``: The logical volume name or full path to a raw device (an LV will be + created using 100% of the raw device) + +- ``data_vg``: The volume group name, **required** if ``data`` is a logical volume. + +- ``crush_device_class``: CRUSH device class name for the resulting OSD, allows + setting set the device class for each OSD, unlike the global ``crush_device_class`` + that sets them for all OSDs. + +.. note:: If you wish to set the ``crush_device_class`` for the OSDs + when using ``devices`` you must set it using the global ``crush_device_class`` + option as shown above. There is no way to define a specific CRUSH device class + per OSD when using ``devices`` like there is for ``lvm_volumes``. + + +.. warning:: Each entry must be unique, duplicate values are not allowed + + +``bluestore`` objectstore variables: + +- ``db``: The logical volume name or full path to a partition. + +- ``db_vg``: The volume group name, **required** if ``db`` is a logical volume. + +- ``wal``: The logical volume name or full path to a partition. + +- ``wal_vg``: The volume group name, **required** if ``wal`` is a logical volume. + + +.. note:: These ``bluestore`` variables are optional optimizations. Bluestore's + ``db`` and ``wal`` will only benefit from faster devices. It is possible to + create a bluestore OSD with a single raw device. + +.. warning:: Each entry must be unique, duplicate values are not allowed + + +``bluestore`` example using raw devices: + +.. code-block:: yaml + + osd_objectstore: bluestore + lvm_volumes: + - data: /dev/sda + - data: /dev/sdb + +.. note:: Volume groups and logical volumes will be created in this case, + utilizing 100% of the devices. + +``bluestore`` example with logical volumes: + +.. 
code-block:: yaml + + osd_objectstore: bluestore + lvm_volumes: + - data: data-lv1 + data_vg: data-vg1 + - data: data-lv2 + data_vg: data-vg2 + +.. note:: Volume groups and logical volumes must exist. + + +``bluestore`` example defining ``wal`` and ``db`` logical volumes: + +.. code-block:: yaml + + osd_objectstore: bluestore + lvm_volumes: + - data: data-lv1 + data_vg: data-vg1 + db: db-lv1 + db_vg: db-vg1 + wal: wal-lv1 + wal_vg: wal-vg1 + - data: data-lv2 + data_vg: data-vg2 + db: db-lv2 + db_vg: db-vg2 + wal: wal-lv2 + wal_vg: wal-vg2 + +.. note:: Volume groups and logical volumes must exist. + + +``filestore`` example with logical volumes: + +.. code-block:: yaml + + osd_objectstore: filestore + lvm_volumes: + - data: data-lv1 + data_vg: data-vg1 + journal: journal-lv1 + journal_vg: journal-vg1 + - data: data-lv2 + data_vg: data-vg2 + journal: journal-lv2 + journal_vg: journal-vg2 + +.. note:: Volume groups and logical volumes must exist. diff --git a/docs/source/rbdmirror/index.rst b/docs/source/rbdmirror/index.rst new file mode 100644 index 0000000..e0bad8c --- /dev/null +++ b/docs/source/rbdmirror/index.rst @@ -0,0 +1,60 @@ +RBD Mirroring +============= + +There's not so much to do from the primary cluster side in order to setup an RBD mirror replication. +``ceph_rbd_mirror_configure`` has to be set to ``true`` to make ceph-ansible create the mirrored pool +defined in ``ceph_rbd_mirror_pool`` and the keyring that is going to be used to add the rbd mirror peer. + +group_vars from the primary cluster: + +.. code-block:: yaml + + ceph_rbd_mirror_configure: true + ceph_rbd_mirror_pool: rbd + +Optionnally, you can tell ceph-ansible to set the name and the secret of the keyring you want to create: + +.. code-block:: yaml + + ceph_rbd_mirror_local_user: client.rbd-mirror-peer # 'client.rbd-mirror-peer' is the default value. 
+ ceph_rbd_mirror_local_user_secret: AQC+eM1iKKBXFBAAVpunJvqpkodHSYmljCFCnw== + +This secret will be needed to add the rbd mirror peer from the secondary cluster. +If you do not enforce it as shown above, you can get it from a monitor by running the following command: +``ceph auth get {{ ceph_rbd_mirror_local_user }}`` + + +.. code-block:: shell + + $ sudo ceph auth get client.rbd-mirror-peer + +Once your variables are defined, you can run the playbook (you might want to run with --limit option): + +.. code-block:: shell + + $ ansible-playbook -vv -i hosts site-container.yml --limit rbdmirror0 + + +The configuration of the rbd mirror replication strictly speaking is done on the secondary cluster. +The rbd-mirror daemon pulls the data from the primary cluster. This is where the rbd mirror peer addition has to be done. +The configuration is similar with what was done on the primary cluster, it just needs few additional variables. + +``ceph_rbd_mirror_remote_user`` : This user must match the name defined in the variable ``ceph_rbd_mirror_local_user`` from the primary cluster. +``ceph_rbd_mirror_remote_mon_hosts`` : This must a comma separated list of the monitor addresses from the primary cluster. +``ceph_rbd_mirror_remote_key`` : This must be the same value as the user (``{{ ceph_rbd_mirror_local_user }}``) keyring secret from the primary cluster. + +group_vars from the secondary cluster: + +.. code-block:: yaml + + ceph_rbd_mirror_configure: true + ceph_rbd_mirror_pool: rbd + ceph_rbd_mirror_remote_user: client.rbd-mirror-peer # This must match the value defined in {{ ceph_rbd_mirror_local_user }} on primary cluster. + ceph_rbd_mirror_remote_mon_hosts: 1.2.3.4 + ceph_rbd_mirror_remote_key: AQC+eM1iKKBXFBAAVpunJvqpkodHSYmljCFCnw== # This must match the secret of the registered keyring of the user defined in {{ ceph_rbd_mirror_local_user }} on primary cluster. 
Once your variables are defined, you can run the playbook (you might want to run with --limit option):
+ +* **Getting started:** + + * :doc:`Running a Test Scenario ` + * :ref:`dependencies` + +* **Configuration and structure:** + + * :ref:`layout` + * :ref:`test_files` + * :ref:`scenario_files` + * :ref:`scenario_wiring` + +* **Adding or modifying tests:** + + * :ref:`test_conventions` + * :ref:`testinfra` + +* **Adding or modifying a scenario:** + + * :ref:`scenario_conventions` + * :ref:`scenario_environment_configuration` + * :ref:`scenario_ansible_configuration` + +* **Custom/development repositories and packages:** + + * :ref:`tox_environment_variables` diff --git a/docs/source/testing/layout.rst b/docs/source/testing/layout.rst new file mode 100644 index 0000000..26e74cf --- /dev/null +++ b/docs/source/testing/layout.rst @@ -0,0 +1,60 @@ +.. _layout: + +Layout and conventions +---------------------- + +Test files and directories follow a few conventions, which makes it easy to +create (or expect) certain interactions between tests and scenarios. + +All tests are in the ``tests`` directory. Scenarios are defined in +``tests/functional/`` and use the following convention for directory +structure: + +.. code-block:: none + + tests/functional//// + +For example: ``tests/functional/centos/7/journal-collocation`` + +Within a test scenario there are a few files that define what that specific +scenario needs for the tests, like how many OSD nodes or MON nodes. Tls + +At the very least, a scenario will need these files: + +* ``Vagrantfile``: must be symlinked from the root directory of the project +* ``hosts``: An Ansible hosts file that defines the machines part of the + cluster +* ``group_vars/all``: if any modifications are needed for deployment, this + would override them. Additionally, further customizations can be done. For + example, for OSDs that would mean adding ``group_vars/osds`` +* ``vagrant_variables.yml``: Defines the actual environment for the test, where + machines, networks, disks, linux distro/version, can be defined. + + +.. 
_test_conventions: + +Conventions +----------- + +Python test files (unlike scenarios) rely on paths to *map* where they belong. For +example, a file that should only test monitor nodes would live in +``ceph-ansible/tests/functional/tests/mon/``. Internally, the test runner +(``py.test``) will *mark* these as tests that should run on a monitor only. +Since the configuration of a scenario already defines what node has a given +role, then it is easier for the system to only run tests that belong to +a particular node type. + +The current convention is a bit manual, with initial path support for: + +* mon +* osd +* mds +* rgw +* journal_collocation +* all/any (if none of the above are matched, then these are run on any host) + + +.. _testinfra: + +``testinfra`` +------------- diff --git a/docs/source/testing/modifying.rst b/docs/source/testing/modifying.rst new file mode 100644 index 0000000..c670a3f --- /dev/null +++ b/docs/source/testing/modifying.rst @@ -0,0 +1,4 @@ +.. _modifying: + +Modifying (or adding) tests +=========================== diff --git a/docs/source/testing/running.rst b/docs/source/testing/running.rst new file mode 100644 index 0000000..c525e74 --- /dev/null +++ b/docs/source/testing/running.rst @@ -0,0 +1,169 @@ +.. _running_tests: + +Running Tests +============= + +Although tests run continuously in CI, a lot of effort was put into making it +easy to run in any environment, as long as a couple of requirements are met. + + +.. _dependencies: + +Dependencies +------------ + +There are some Python dependencies, which are listed in a ``requirements.txt`` +file within the ``tests/`` directory. These are meant to be installed using +Python install tools (pip in this case): + +.. code-block:: console + + pip install -r tests/requirements.txt + +For virtualization, either libvirt or VirtualBox is needed (there is native +support from the harness for both). 
This makes the test harness even more +flexible as most platforms will be covered by either VirtualBox or libvirt. + + +.. _running_a_scenario: + +Running a scenario +------------------ + +Tests are driven by ``tox``, a command line tool to run a matrix of tests defined in +a configuration file (``tox.ini`` in this case at the root of the project). + +For a thorough description of a scenario see :ref:`test_scenarios`. + +To run a single scenario, make sure it is available (should be defined from +``tox.ini``) by listing them: + +.. code-block:: console + + tox -l + +In this example, we will use the ``luminous-ansible2.4-xenial_cluster`` one. The +harness defaults to ``VirtualBox`` as the backend, so if you have that +installed in your system then this command should just work: + +.. code-block:: console + + tox -e luminous-ansible2.4-xenial_cluster + +And for libvirt it would be: + +.. code-block:: console + + tox -e luminous-ansible2.4-xenial_cluster -- --provider=libvirt + +.. warning:: + + Depending on the type of scenario and resources available, running + these tests locally in a personal computer can be very resource intensive. + +.. note:: + + Most test runs take between 20 and 40 minutes depending on system + resources + +The command should bring up the machines needed for the test, provision them +with ``ceph-ansible``, run the tests, and tear the whole environment down at the +end. + + +The output would look something similar to this trimmed version: + +.. code-block:: console + + luminous-ansible2.4-xenial_cluster create: /Users/alfredo/python/upstream/ceph-ansible/.tox/luminous-ansible2.4-xenial_cluster + luminous-ansible2.4-xenial_cluster installdeps: ansible==2.4.2, -r/Users/alfredo/python/upstream/ceph-ansible/tests/requirements.txt + luminous-ansible2.4-xenial_cluster runtests: commands[0] | vagrant up --no-provision --provider=virtualbox + Bringing machine 'client0' up with 'virtualbox' provider... 
+ Bringing machine 'rgw0' up with 'virtualbox' provider... + Bringing machine 'mds0' up with 'virtualbox' provider... + Bringing machine 'mon0' up with 'virtualbox' provider... + Bringing machine 'mon1' up with 'virtualbox' provider... + Bringing machine 'mon2' up with 'virtualbox' provider... + Bringing machine 'osd0' up with 'virtualbox' provider... + ... + + +After all the nodes are up, ``ceph-ansible`` will provision them, and run the +playbook(s): + +.. code-block:: console + + ... + PLAY RECAP ********************************************************************* + client0 : ok=4 changed=0 unreachable=0 failed=0 + mds0 : ok=4 changed=0 unreachable=0 failed=0 + mon0 : ok=4 changed=0 unreachable=0 failed=0 + mon1 : ok=4 changed=0 unreachable=0 failed=0 + mon2 : ok=4 changed=0 unreachable=0 failed=0 + osd0 : ok=4 changed=0 unreachable=0 failed=0 + rgw0 : ok=4 changed=0 unreachable=0 failed=0 + ... + + +Once the whole environment is all running the tests will be sent out to the +hosts, with output similar to this: + +.. 
code-block:: console + + luminous-ansible2.4-xenial_cluster runtests: commands[4] | testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory=/Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster/hosts /Users/alfredo/python/upstream/ceph-ansible/tests/functional/tests + ============================ test session starts =========================== + platform darwin -- Python 2.7.8, pytest-3.0.7, py-1.4.33, pluggy-0.4.0 -- /Users/alfredo/python/upstream/ceph-ansible/.tox/luminous-ansible2.4-xenial_cluster/bin/python + cachedir: ../../../../.cache + rootdir: /Users/alfredo/python/upstream/ceph-ansible/tests, inifile: pytest.ini + plugins: testinfra-1.5.4, xdist-1.15.0 + [gw0] darwin Python 2.7.8 cwd: /Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster + [gw1] darwin Python 2.7.8 cwd: /Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster + [gw2] darwin Python 2.7.8 cwd: /Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster + [gw3] darwin Python 2.7.8 cwd: /Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster + [gw0] Python 2.7.8 (v2.7.8:ee879c0ffa11, Jun 29 2014, 21:07:35) -- [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] + [gw1] Python 2.7.8 (v2.7.8:ee879c0ffa11, Jun 29 2014, 21:07:35) -- [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] + [gw2] Python 2.7.8 (v2.7.8:ee879c0ffa11, Jun 29 2014, 21:07:35) -- [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] + [gw3] Python 2.7.8 (v2.7.8:ee879c0ffa11, Jun 29 2014, 21:07:35) -- [GCC 4.2.1 (Apple Inc. 
build 5666) (dot 3)] + gw0 [154] / gw1 [154] / gw2 [154] / gw3 [154] + scheduling tests via LoadScheduling + + ../../../tests/test_install.py::TestInstall::test_ceph_dir_exists[ansible:/mon0] + ../../../tests/test_install.py::TestInstall::test_ceph_dir_is_a_directory[ansible:/mon0] + ../../../tests/test_install.py::TestInstall::test_ceph_conf_is_a_file[ansible:/mon0] + ../../../tests/test_install.py::TestInstall::test_ceph_dir_is_a_directory[ansible:/mon1] + [gw2] PASSED ../../../tests/test_install.py::TestCephConf::test_ceph_config_has_mon_host_line[ansible:/mon0] + ../../../tests/test_install.py::TestInstall::test_ceph_conf_exists[ansible:/mon1] + [gw3] PASSED ../../../tests/test_install.py::TestCephConf::test_mon_host_line_has_correct_value[ansible:/mon0] + ../../../tests/test_install.py::TestInstall::test_ceph_conf_is_a_file[ansible:/mon1] + [gw1] PASSED ../../../tests/test_install.py::TestInstall::test_ceph_command_exists[ansible:/mon1] + ../../../tests/test_install.py::TestCephConf::test_mon_host_line_has_correct_value[ansible:/mon1] + ... + +Finally the whole environment gets torn down: + +.. code-block:: console + + luminous-ansible2.4-xenial_cluster runtests: commands[5] | vagrant destroy --force + ==> osd0: Forcing shutdown of VM... + ==> osd0: Destroying VM and associated drives... + ==> mon2: Forcing shutdown of VM... + ==> mon2: Destroying VM and associated drives... + ==> mon1: Forcing shutdown of VM... + ==> mon1: Destroying VM and associated drives... + ==> mon0: Forcing shutdown of VM... + ==> mon0: Destroying VM and associated drives... + ==> mds0: Forcing shutdown of VM... + ==> mds0: Destroying VM and associated drives... + ==> rgw0: Forcing shutdown of VM... + ==> rgw0: Destroying VM and associated drives... + ==> client0: Forcing shutdown of VM... + ==> client0: Destroying VM and associated drives... + + +And a brief summary of the scenario(s) that ran is displayed: + +.. 
code-block:: console + + ________________________________________________ summary _________________________________________________ + luminous-ansible2.4-xenial_cluster: commands succeeded + congratulations :) diff --git a/docs/source/testing/scenarios.rst b/docs/source/testing/scenarios.rst new file mode 100644 index 0000000..c05d918 --- /dev/null +++ b/docs/source/testing/scenarios.rst @@ -0,0 +1,211 @@ +.. _test_scenarios: + +Test Scenarios +============== + +Scenarios are distinct environments that describe a Ceph deployment and +configuration. Scenarios are isolated as well, and define what machines are +needed aside from any ``ceph-ansible`` configuration. + +.. _scenario_files: + +Scenario Files +============== + +The scenario is described in a ``vagrant_variables.yml`` file, which is +consumed by ``Vagrant`` when bringing up an environment. + +This yaml file is loaded in the ``Vagrantfile`` so that the settings can be +used to bring up the boxes and pass some configuration to ansible when running. + +.. note:: + + The basic layout of a scenario is covered in :ref:`layout`. + There are just a handful of required files, this is the most basic layout. + +There are just a handful of required files, these sections will cover the +required (most basic) ones. Alternatively, other ``ceph-ansible`` files can be +added to customize the behavior of a scenario deployment. + + +.. _vagrant_variables: + +``vagrant_variables.yml`` +------------------------- + +There are a few sections in the ``vagrant_variables.yml`` file which are easy +to follow (most of them are 1 line settings). + +* **docker**: (bool) Indicates if the scenario will deploy Docker daemons + +* **VMS**: (int) These integer values are just a count of how many machines will be + needed. Each supported type is listed, defaulting to 0: + + .. 
code-block:: yaml + + mon_vms: 0 + osd_vms: 0 + mds_vms: 0 + rgw_vms: 0 + nfs_vms: 0 + rbd_mirror_vms: 0 + client_vms: 0 + mgr_vms: 0 + + For a deployment that needs 1 MON and 1 OSD, the list would look like: + + .. code-block:: yaml + + mon_vms: 1 + osd_vms: 1 + +* **CEPH SOURCE**: (string) indicate whether a ``dev`` or ``stable`` release is + needed. A ``stable`` release will use the latest stable release of Ceph, + a ``dev`` will use ``shaman`` (http://shaman.ceph.com) + +* **SUBNETS**: These are used for configuring the network availability of each + server that will be booted as well as being used as configuration for + ``ceph-ansible`` (and eventually Ceph). The two values that are **required**: + + .. code-block:: yaml + + public_subnet: 192.168.13 + cluster_subnet: 192.168.14 + +* **MEMORY**: Memory requirements (in megabytes) for each server, e.g. + ``memory: 512`` + +* **interfaces**: some vagrant boxes (and linux distros) set specific + interfaces. For Ubuntu releases older than Xenial it was common to have + ``eth1``, for CentOS and some Xenial boxes ``enp0s8`` is used. **However** + the public Vagrant boxes normalize the interface to ``eth1`` for all boxes, + making it easier to configure them with Ansible later. + +.. warning:: + + Do *not* change the interface from ``eth1`` unless absolutely + certain that is needed for a box. Some tests that depend on that + naming will fail. + +* **disks**: The disks that will be created for each machine, for most + environments ``/dev/sd*`` style of disks will work, like: ``[ '/dev/sda', '/dev/sdb' ]`` + +* **vagrant_box**: We have published our own boxes to normalize what we test + against. These boxes are published in Atlas + (https://atlas.hashicorp.com/ceph/). 
Currently valid values are: + ``ceph/ubuntu-xenial``, and ``ceph/centos7`` + +The following aren't usually changed/enabled for tests, since they don't have +an impact, however they are documented here for general knowledge in case they +are needed: + +* **ssh_private_key_path**: The path to the ``id_rsa`` (or other private SSH + key) that should be used to connect to these boxes. + +* **vagrant_sync_dir**: what should be "synced" (made available on the new + servers) from the host. + +* **vagrant_disable_synced_folder**: (bool) when disabled, it will make + booting machines faster because no files need to be synced over. + +* **os_tuning_params**: These are passed onto ``ceph-ansible`` as part of the + variables for "system tuning". These shouldn't be changed. + + +.. _vagrant_file: + +``Vagrantfile`` +--------------- + +The ``Vagrantfile`` should not need to change, and it is symlinked back to the +``Vagrantfile`` that exists in the root of the project. It is linked in this +way so that a vagrant environment can be isolated to the given scenario. + + +.. _hosts_file: + +``hosts`` +--------- + +The ``hosts`` file should contain the hosts needed for the scenario. This might +seem a bit repetitive since machines are already defined in +:ref:`vagrant_variables` but it allows granular changes to hosts (for example +defining different public_network values between monitors) which can help catch issues in +``ceph-ansible`` configuration. For example: + +.. code-block:: ini + + [mons] + mon0 public_network=192.168.1.0/24 + mon1 public_network=192.168.2.0/24 + mon2 public_network=192.168.3.0/24 + +.. _group_vars: + +``group_vars`` +-------------- + +This directory holds any configuration change that will affect ``ceph-ansible`` +deployments in the same way as if ansible was executed from the root of the +project. 
 + +The file that will need to be defined always is ``all`` where (again) certain +values like ``public_network`` and ``cluster_network`` will need to be defined +along with any customizations that ``ceph-ansible`` supports. + + +.. _scenario_wiring: + +Scenario Wiring +--------------- + +Scenarios are just meant to provide the Ceph environment for testing, but they +do need to be defined in the ``tox.ini`` so that they are available to the test +framework. To see a list of available scenarios, the following command (run +from the root of the project) will list them, shortened for brevity: + +.. code-block:: console + + $ tox -l + ... + luminous-ansible2.4-centos7_cluster + ... + +These scenarios are made from different variables, in the above command there +are 3: + +* ``luminous``: the Ceph version to test +* ``ansible2.4``: the Ansible version to install +* ``centos7_cluster``: the name of the scenario + +The last one is important in the *wiring up* of the scenario. It is a variable +that will define in what path the scenario lives. For example, the +``changedir`` section for ``centos7_cluster`` that looks like: + +.. code-block:: ini + + centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster + +The actual tests are written for specific daemon types, for all daemon types, +and for specific use cases (e.g. journal collocation), those have their own +conventions as well which are explained in detail in :ref:`test_conventions` +and :ref:`test_files`. + +As long as a test scenario defines OSDs and MONs, the OSD tests and MON tests +will run. + + +.. _scenario_conventions: + +Conventions +----------- + +.. _scenario_environment_configuration: + +Environment configuration +------------------------- + +.. 
_scenario_ansible_configuration: + +Ansible configuration +--------------------- diff --git a/docs/source/testing/tests.rst b/docs/source/testing/tests.rst new file mode 100644 index 0000000..46cdce0 --- /dev/null +++ b/docs/source/testing/tests.rst @@ -0,0 +1,99 @@ +.. _tests: + +Tests +===== + +Actual tests are written in Python methods that accept optional fixtures. These +fixtures come with interesting attributes to help with remote assertions. + +As described in :ref:`test_conventions`, tests need to go into +``tests/functional/tests/``. These are collected and *mapped* to a distinct +node type, or *mapped* to run on all nodes. + +Simple Python asserts are used (these tests do not need to follow the Python +``unittest.TestCase`` base class) that make it easier to reason about failures +and errors. + +The test run is handled by ``py.test`` along with :ref:`testinfra` for handling +remote execution. + + +.. _test_files: + +Test Files +---------- + + + +.. _test_fixtures: + +Test Fixtures +============= + +Test fixtures are a powerful feature of ``py.test`` and most tests depend on +this for making assertions about remote nodes. To request them in a test +method, all that is needed is to require it as an argument. + +Fixtures are detected by name, so as long as the argument being used has the +same name, the fixture will be passed in (see `pytest fixtures`_ for more +in-depth examples). The code that follows shows a test method that will use the +``node`` fixture that contains useful information about a node in a ceph +cluster: + +.. code-block:: python + + def test_ceph_conf(self, node): + assert node['conf_path'] == "/etc/ceph/ceph.conf" + +The test is naive (the configuration path might not exist remotely) but +explains how simple it is to "request" a fixture. + +For remote execution, we can rely further on other fixtures (tests can have as +many fixtures as needed) like ``File``: + +.. 
code-block:: python + + def test_ceph_config_has_inital_members_line(self, node, File): + assert File(node["conf_path"]).contains("^mon initial members = .*$") + + +.. _node: + +``node`` fixture +---------------- + +The ``node`` fixture contains a few useful pieces of information about the node +where the test is being executed, this is captured once, before tests run: + +* ``address``: The IP for the ``eth1`` interface +* ``subnet``: The subnet that ``address`` belongs to +* ``vars``: all the Ansible vars set for the current run +* ``osd_ids``: a list of all the OSD IDs +* ``num_mons``: the total number of monitors for the current environment +* ``num_devices``: the number of devices for the current node +* ``num_osd_hosts``: the total number of OSD hosts +* ``total_osds``: total number of OSDs on the current node +* ``cluster_name``: the name of the Ceph cluster (which defaults to 'ceph') +* ``conf_path``: since the cluster name can change the file path for the Ceph + configuration, this gets set according to the cluster name. +* ``cluster_address``: the address used for cluster communication. All + environments are set up with 2 interfaces, 1 being used exclusively for the + cluster +* ``docker``: A boolean that identifies a Ceph Docker cluster +* ``osds``: A list of OSD IDs, unless it is a Docker cluster, where it gets the + name of the devices (e.g. ``sda1``) + + +Other Fixtures +-------------- + +There are a lot of other fixtures provided by :ref:`testinfra` as well as +``py.test``. The full list of ``testinfra`` fixtures are available in +`testinfra_fixtures`_ + +``py.test`` builtin fixtures can be listed with ``pytest -q --fixtures`` and +they are described in `pytest builtin fixtures`_ + +.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html +.. _pytest builtin fixtures: https://docs.pytest.org/en/latest/builtin.html#builtin-fixtures-function-arguments +.. 
_testinfra_fixtures: https://testinfra.readthedocs.io/en/latest/modules.html#modules diff --git a/docs/source/testing/tox.rst b/docs/source/testing/tox.rst new file mode 100644 index 0000000..0ab6166 --- /dev/null +++ b/docs/source/testing/tox.rst @@ -0,0 +1,75 @@ +.. _tox: + +``tox`` +======= + +``tox`` is an automation project we use to run our testing scenarios. It gives us +the ability to create a dynamic matrix of many testing scenarios, isolated testing environments +and provides a single entry point to run all tests in an automated and repeatable fashion. + +Documentation for tox can be found `here <https://tox.readthedocs.io/en/latest/>`_. + + +.. _tox_environment_variables: + +Environment variables +--------------------- + +When running ``tox`` we've allowed for the usage of environment variables to tweak certain settings +of the playbook run using Ansible's ``--extra-vars``. It's helpful in Jenkins jobs or for manual test +runs of ``ceph-ansible``. + +The following environment variables are available for use: + +* ``CEPH_DOCKER_REGISTRY``: (default: ``quay.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``. + +* ``CEPH_DOCKER_IMAGE``: (default: ``ceph/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``. + +* ``CEPH_DOCKER_IMAGE_TAG``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image_name``. + +* ``CEPH_DEV_BRANCH``: (default: ``main``) This would configure the ``ceph-ansible`` variable ``ceph_dev_branch`` which defines which branch we'd + like to install from shaman.ceph.com. + +* ``CEPH_DEV_SHA1``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_dev_sha1`` which defines which sha1 we'd like + to install from shaman.ceph.com. + +* ``UPDATE_CEPH_DEV_BRANCH``: (default: ``main``) This would configure the ``ceph-ansible`` variable ``ceph_dev_branch`` which defines which branch we'd + like to update to from shaman.ceph.com. 
 + +* ``UPDATE_CEPH_DEV_SHA1``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_dev_sha1`` which defines which sha1 we'd like + to update to from shaman.ceph.com. + + +.. _tox_sections: + +Sections +-------- + +The ``tox.ini`` file has a number of top level sections defined by ``[ ]`` and subsections within those. For complete documentation +on all subsections inside of a tox section please refer to the tox documentation. + +* ``tox`` : This section contains the ``envlist`` which is used to create our dynamic matrix. Refer to the `section here <https://tox.readthedocs.io/en/latest/config.html>`_ for more information on how the ``envlist`` works. + +* ``purge`` : This section contains commands that only run for scenarios that purge the cluster and redeploy. You'll see this section being reused in ``testenv`` + with the following syntax: ``{[purge]commands}`` + +* ``update`` : This section contains commands that only run for scenarios that deploy a cluster and then upgrade it to another Ceph version. + +* ``testenv`` : This is the main section of the ``tox.ini`` file and is run on every scenario. This section contains many *factors* that define conditional + settings depending on the scenarios defined in the ``envlist``. For example, the factor ``centos7_cluster`` in the ``changedir`` subsection of ``testenv`` sets + the directory that tox will change to when that factor is selected. This is an important behavior that allows us to use the same ``tox.ini`` and reuse commands while + tweaking certain sections per testing scenario. + + +.. _tox_environments: + +Modifying or Adding environments +-------------------------------- + +The tox environments are controlled by the ``envlist`` subsection of the ``[tox]`` section. Anything inside of ``{}`` is considered a *factor* and will be included +in the dynamic matrix that tox creates. Inside of ``{}`` you can include a comma separated list of the *factors*. 
Do not use a hyphen (``-``) as part +of the *factor* name as those are used by tox as the separator between different factor sets. + +For example, if you wanted to add a new test *factor* for the next Ceph release of luminous this is how you'd accomplish that. Currently, the first factor set in our ``envlist`` +is used to define the Ceph release (``{jewel,kraken}-...``). To add luminous you'd change that to look like ``{luminous,kraken}-...``. In the ``testenv`` section +there is a subsection called ``setenv`` which allows you to provide environment variables to the tox environment and we support an environment variable called ``CEPH_STABLE_RELEASE``. To ensure that all the new tests created by adding the luminous *factor* use the correct release, you'd add this in that section: ``luminous: CEPH_STABLE_RELEASE=luminous``. diff --git a/docs/tox.ini b/docs/tox.ini new file mode 100644 index 0000000..d4a4c9c --- /dev/null +++ b/docs/tox.ini @@ -0,0 +1,10 @@ +[tox] +envlist = docs +skipsdist = True + +[testenv:docs] +basepython=python +changedir=source +deps=sphinx==1.7.9 +commands= + sphinx-build -W -b html -d {envtmpdir}/doctrees . 
{envtmpdir}/html diff --git a/dummy-ansible-hosts b/dummy-ansible-hosts new file mode 100644 index 0000000..0d5a7ea --- /dev/null +++ b/dummy-ansible-hosts @@ -0,0 +1,4 @@ +# Dummy ansible host file +# Used for syntax check by Travis +# Before committing code please run: ansible-playbook --syntax-check site.yml -i dummy-ansible-hosts +localhost diff --git a/generate_group_vars_sample.sh b/generate_group_vars_sample.sh new file mode 100755 index 0000000..d631c56 --- /dev/null +++ b/generate_group_vars_sample.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +set -euo pipefail + + +############# +# VARIABLES # +############# + +basedir=$(dirname "$0") +do_not_generate="(ceph-common|ceph-container-common|ceph-fetch-keys)$" # pipe separated list of roles we don't want to generate sample file, MUST end with '$', e.g: 'foo$|bar$' + + +############# +# FUNCTIONS # +############# + +populate_header () { + for i in $output; do + cat < "$basedir"/group_vars/"$i" +--- +# Variables here are applicable to all host groups NOT roles + +# This sample file generated by $(basename "$0") + +# Dummy variable to avoid error because ansible does not recognize the +# file as a good configuration file when no variable in it. 
+dummy: + +EOF + done +} + +generate_group_vars_file () { + for i in $output; do + if [ "$(uname)" == "Darwin" ]; then + sed '/^---/d; s/^\([A-Za-z[:space:]]\)/#\1/' \ + "$defaults" >> "$basedir"/group_vars/"$i" + echo >> "$basedir"/group_vars/"$i" + elif [ "$(uname -s)" == "Linux" ]; then + sed '/^---/d; s/^\([A-Za-z[:space:]].\+\)/#\1/' \ + "$defaults" >> "$basedir"/group_vars/"$i" + echo >> "$basedir"/group_vars/"$i" + else + echo "Unsupported platform" + exit 1 + fi + done +} + +######## +# MAIN # +######## + +for role in "$basedir"/roles/ceph-*; do + rolename=$(basename "$role") + + if [[ $rolename == "ceph-defaults" ]]; then + output="all.yml.sample" + elif [[ $rolename == "ceph-fetch-keys" ]]; then + output="ceph-fetch-keys.yml.sample" + elif [[ $rolename == "ceph-rbd-mirror" ]]; then + output="rbdmirrors.yml.sample" + elif [[ $rolename == "ceph-rgw-loadbalancer" ]]; then + output="rgwloadbalancers.yml.sample" + else + output="${rolename:5}s.yml.sample" + fi + + defaults="$role"/defaults/main.yml + if [[ ! -f $defaults ]]; then + continue + fi + + if ! echo "$rolename" | grep -qE "$do_not_generate"; then + populate_header + generate_group_vars_file + fi +done diff --git a/group_vars/all.yml b/group_vars/all.yml new file mode 100644 index 0000000..0cd5081 --- /dev/null +++ b/group_vars/all.yml @@ -0,0 +1,666 @@ +--- +# Variables here are applicable to all host groups NOT roles + +# This sample file generated by generate_group_vars_sample.sh + +# Dummy variable to avoid error because ansible does not recognize the +# file as a good configuration file when no variable in it. 
+dummy: + +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +###################################### +# Releases name to number dictionary # +###################################### +ceph_release_num: + dumpling: 0.67 + emperor: 0.72 + firefly: 0.80 + giant: 0.87 + hammer: 0.94 + infernalis: 9 + jewel: 10 + kraken: 11 + luminous: 12 + mimic: 13 + nautilus: 14 + octopus: 15 + pacific: 16 + quincy: 17 + reef: 18 + squid: 19 + dev: 99 + + +# The 'cluster' variable determines the name of the cluster. +# Changing the default value to something else means that you will +# need to change all the command line calls as well, for example if +# your cluster name is 'foo': +# "ceph health" will become "ceph --cluster foo health" +# +# An easier way to handle this is to use the environment variable CEPH_ARGS +# So run: "export CEPH_ARGS="--cluster foo" +# With that you will be able to run "ceph health" normally +cluster: ceph + +# Inventory host group variables +mon_group_name: mons +osd_group_name: osds +#rgw_group_name: rgws +#mds_group_name: mdss +#nfs_group_name: nfss +#rbdmirror_group_name: rbdmirrors +#client_group_name: clients +mgr_group_name: mgrs +#rgwloadbalancer_group_name: rgwloadbalancers +#monitoring_group_name: monitoring +adopt_label_group_names: + - "{{ mon_group_name }}" + - "{{ osd_group_name }}" +# - "{{ rgw_group_name }}" +# - "{{ mds_group_name }}" +# - "{{ nfs_group_name }}" +# - "{{ rbdmirror_group_name }}" +# - "{{ client_group_name }}" + - "{{ mgr_group_name }}" +# - "{{ rgwloadbalancer_group_name }}" +# - "{{ monitoring_group_name }}" + +# If configure_firewall is true, then ansible will try to configure the +# appropriate firewalling rules so that Ceph daemons can communicate +# with each others. 
+configure_firewall: false + +# Open ports on corresponding nodes if firewall is installed on it +#ceph_mon_firewall_zone: public +#ceph_mgr_firewall_zone: public +#ceph_osd_firewall_zone: public +#ceph_rgw_firewall_zone: public +#ceph_mds_firewall_zone: public +#ceph_nfs_firewall_zone: public +#ceph_rbdmirror_firewall_zone: public +#ceph_dashboard_firewall_zone: public +#ceph_rgwloadbalancer_firewall_zone: public + +# cephadm account for remote connections +cephadm_ssh_user: root +cephadm_ssh_priv_key_path: "/home/{{ cephadm_ssh_user }}/.ssh/id_rsa" +cephadm_ssh_pub_key_path: "{{ cephadm_ssh_priv_key_path }}.pub" +cephadm_mgmt_network: "{{ public_network }}" + +############ +# PACKAGES # +############ +#debian_package_dependencies: [] + +#centos_package_dependencies: +# - epel-release +# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}" + +#redhat_package_dependencies: [] + +#suse_package_dependencies: [] + +# Whether or not to install the ceph-test package. +ceph_test: false + +# Enable the ntp service by default to avoid clock skew on ceph nodes +# Disable if an appropriate NTP client is already installed and configured +ntp_service_enabled: true + +# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd +ntp_daemon_type: chronyd + +# This variable determines if ceph packages can be updated. If False, the +# package resources will use "state=present". If True, they will use +# "state=latest". +#upgrade_ceph_packages: false + +#ceph_use_distro_backports: false # DEBIAN ONLY +#ceph_directories_mode: "0755" + +########### +# INSTALL # +########### +# ORIGIN SOURCE +# +# Choose between: +# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'dev' or 'obs' +# - 'distro' means that no separate repo file will be added +# you will get whatever version of Ceph is included in your Linux distro. 
+# 'local' means that the ceph binaries will be copied over from the local machine +ceph_origin: repository +#valid_ceph_origins: +# - repository +# - distro +# - local + + +ceph_repository: community +#valid_ceph_repository: +# - community +# - dev +# - uca +# - custom +# - obs + + +# REPOSITORY: COMMUNITY VERSION +# +# Enabled when ceph_repository == 'community' +# +ceph_mirror: https://download.ceph.com +ceph_stable_key: https://download.ceph.com/keys/release.asc +ceph_stable_release: squid +ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}" + +#nfs_ganesha_stable: true # use stable repos for nfs-ganesha +#centos_release_nfs: centos-release-nfs-ganesha4 +#nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu +#nfs_ganesha_apt_keyserver: keyserver.ubuntu.com +#nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA +#libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu + +# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions +# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ +# for more info read: https://github.com/ceph/ceph-ansible/issues/305 +# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}" + +# REPOSITORY: UBUNTU CLOUD ARCHIVE +# +# Enabled when ceph_repository == 'uca' +# +# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive +# usually has newer Ceph releases than the normal distro repository. +# +# +#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu" +#ceph_stable_openstack_release_uca: queens +#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}" + +# REPOSITORY: openSUSE OBS +# +# Enabled when ceph_repository == 'obs' +# +# This allows the install of Ceph from the openSUSE OBS repository. 
The OBS repository +# usually has newer Ceph releases than the normal distro repository. +# +# +#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/" + +# REPOSITORY: DEV +# +# Enabled when ceph_repository == 'dev' +# +#ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack +#ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built) + +#nfs_ganesha_dev: false # use development repos for nfs-ganesha + +# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman +# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous +#nfs_ganesha_flavor: "ceph_main" + + +# REPOSITORY: CUSTOM +# +# Enabled when ceph_repository == 'custom' +# +# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be +# a URL to the .repo file to be installed on the targets. For deb, +# ceph_custom_repo should be the URL to the repo base. +# +# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc +#ceph_custom_repo: https://server.domain.com/ceph-custom-repo + + +# ORIGIN: LOCAL CEPH INSTALLATION +# +# Enabled when ceph_repository == 'local' +# +# Path to DESTDIR of the ceph install +# ceph_installation_dir: "/path/to/ceph_installation/" +# Whether or not to use installer script rundep_installer.sh +# This script takes in rundep and installs the packages line by line onto the machine +# If this is set to false then it is assumed that the machine ceph is being copied onto will already have +# all runtime dependencies installed +# use_installer: false +# Root directory for ceph-ansible +# ansible_dir: "/path/to/ceph-ansible" + + +###################### +# CEPH CONFIGURATION # +###################### + +## Ceph options +# +# Each cluster requires a unique, consistent filesystem ID. By +# default, the playbook generates one for you. 
+# If you want to customize how the fsid is +# generated, you may find it useful to disable fsid generation to +# avoid cluttering up your ansible repo. If you set `generate_fsid` to +# false, you *must* generate `fsid` in another way. +# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT +#fsid: "{{ cluster_uuid.stdout }}" +generate_fsid: true + +ceph_conf_key_directory: /etc/ceph + +ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}" + +# Permissions for keyring files in /etc/ceph +ceph_keyring_permissions: '0600' + +#cephx: true + +# Cluster configuration +ceph_cluster_conf: + global: + public_network: "{{ public_network | default(omit) }}" + cluster_network: "{{ cluster_network | default(omit) }}" + osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}" + # ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}" + ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}" + osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}" + +## Client options +# +#rbd_cache: "true" +#rbd_cache_writethrough_until_flush: "true" +#rbd_concurrent_management_ops: 20 + +#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions + +# Permissions for the rbd_client_log_path and +# rbd_client_admin_socket_path. Depending on your use case for Ceph +# you may want to change these values. The default, which is used if +# any of the variables are unset or set to a false value (like `null` +# or `false`) is to automatically determine what is appropriate for +# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770 +# for infernalis releases, and root:root and 1777 for pre-infernalis +# releases. 
+# +# For other use cases, including running Ceph with OpenStack, you'll +# want to set these differently: +# +# For OpenStack on RHEL, you'll want: +# rbd_client_directory_owner: "qemu" +# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt) +# rbd_client_directory_mode: "0755" +# +# For OpenStack on Ubuntu or Debian, set: +# rbd_client_directory_owner: "libvirt-qemu" +# rbd_client_directory_group: "kvm" +# rbd_client_directory_mode: "0755" +# +# If you set rbd_client_directory_mode, you must use a string (e.g., +# 'rbd_client_directory_mode: "0755"', *not* +# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode +# must be in octal or symbolic form +#rbd_client_directory_owner: ceph +#rbd_client_directory_group: ceph +#rbd_client_directory_mode: "0755" + +#rbd_client_log_path: /var/log/ceph +#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor +#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor + +## Monitor options +# set to either ipv4 or ipv6, whichever your network is using +ip_version: ipv4 + +mon_host_v1: + enabled: true + suffix: ':6789' +mon_host_v2: + suffix: ':3300' + +#enable_ceph_volume_debug: false + +########## +# CEPHFS # +########## +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. 
+# eg: +# cephfs_data_pool: +# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" +# target_size_ratio: 0.2 +#cephfs: cephfs # name of the ceph filesystem +#cephfs_data_pool: +# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" +#cephfs_metadata_pool: +# name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}" +#cephfs_pools: +# - "{{ cephfs_data_pool }}" +# - "{{ cephfs_metadata_pool }}" + +## OSD options +# +#lvmetad_disabled: false +#is_hci: false +#hci_safety_factor: 0.2 +#non_hci_safety_factor: 0.7 +#safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}" +#osd_memory_target: 4294967296 +#journal_size: 5120 # OSD journal size in MB +#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'. +public_network: 192.168.1.0/24 +cluster_network: "{{ public_network | regex_replace(' ', '') }}" +#osd_mkfs_type: xfs +#osd_mkfs_options_xfs: -f -i size=2048 +#osd_mount_options_xfs: noatime,largeio,inode64,swalloc +osd_objectstore: bluestore + +# Any device containing these patterns in their path will be excluded. +#osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*" + +## MDS options +# +#mds_max_mds: 1 + +## Rados Gateway options +# +#radosgw_frontend_type: beast # For additional frontends see: https://docs.ceph.com/en/latest/radosgw/frontends/ + +#radosgw_frontend_port: 8080 +# The server private key, public certificate and any other CA or intermediate certificates should be in one file, in PEM format. +#radosgw_frontend_ssl_certificate: "" +#radosgw_frontend_ssl_certificate_data: "" # certificate contents to be written to path defined by radosgw_frontend_ssl_certificate +#radosgw_frontend_options: "" +#radosgw_thread_pool_size: 512 + + +# You must define either radosgw_interface, radosgw_address. 
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# Eg. If you want to specify for each radosgw node which address the radosgw will bind to you can set it in your **inventory host file** by using 'radosgw_address' variable.
+# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined.
+#radosgw_interface: interface
+#radosgw_address: x.x.x.x
+#radosgw_address_block: subnet
+#radosgw_keystone_ssl: false # activate this when using keystone PKI keys
+#radosgw_num_instances: 1
+#rgw_zone: default # This is used for rgw instance client names.
+
+
+## Testing mode
+# enable this mode _only_ when you have a single node
+# if you don't want it keep the option commented
+# common_single_host_mode: true
+
+## Handlers - restarting daemons after a config change
+# if for whatever reasons the content of your ceph configuration changes
+# ceph daemons will be restarted as well. At the moment, we cannot detect
+# which config option changed so all the daemons will be restarted. Although
+# this restart will be serialized for each node, in between a health check
+# will be performed so we make sure we don't move to the next node until
+# ceph is healthy again
+# Obviously between the checks (for monitors to be in quorum and for osd's pgs
+# to be clean) we have to wait. These retries and delays are configurable
+# for both monitors and osds.
+# +# Monitor handler checks +#handler_health_mon_check_retries: 10 +#handler_health_mon_check_delay: 20 +# +# OSD handler checks +#handler_health_osd_check_retries: 40 +#handler_health_osd_check_delay: 30 +#handler_health_osd_check: true +# +# MDS handler checks +#handler_health_mds_check_retries: 5 +#handler_health_mds_check_delay: 10 +# +# RGW handler checks +#handler_health_rgw_check_retries: 5 +#handler_health_rgw_check_delay: 10 +#handler_rgw_use_haproxy_maintenance: false + +# NFS handler checks +#handler_health_nfs_check_retries: 5 +#handler_health_nfs_check_delay: 10 + +# RBD MIRROR handler checks +#handler_health_rbd_mirror_check_retries: 5 +#handler_health_rbd_mirror_check_delay: 10 + +# MGR handler checks +#handler_health_mgr_check_retries: 5 +#handler_health_mgr_check_delay: 10 + +## health mon/osds check retries/delay: + +#health_mon_check_retries: 20 +#health_mon_check_delay: 10 +#health_osd_check_retries: 20 +#health_osd_check_delay: 10 + +############## +# RBD-MIRROR # +############## + +#ceph_rbd_mirror_pool: "rbd" + +############### +# NFS-GANESHA # +############### +# +# Access type options +# +# Enable NFS File access +# If set to true, then ganesha is set up to export the root of the +# Ceph filesystem, and ganesha's attribute and directory caching is disabled +# as much as possible since libcephfs clients also caches the same +# information. +# +# Set this to true to enable File access via NFS. Requires an MDS role. +#nfs_file_gw: false +# Set this to true to enable Object access via NFS. Requires an RGW role. +#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}" + + +################### +# CONFIG OVERRIDE # +################### + +# Ceph configuration file override. +# This allows you to specify more configuration options +# using an INI style format. +# +# When configuring RGWs, make sure you use the form [client.rgw.*] +# instead of [client.radosgw.*]. 
+# For more examples check the profiles directory of https://github.com/ceph/ceph-ansible. +# +# The following sections are supported: [global], [mon], [osd], [mds], [client] +# +# Example: +# ceph_conf_overrides: +# global: +# foo: 1234 +# bar: 5678 +# "client.rgw.{{ rgw_zone }}.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}": +# rgw_zone: zone1 +# +#ceph_conf_overrides: {} + + +############# +# OS TUNING # +############# + +#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' }}" +#os_tuning_params: +# - { name: fs.file-max, value: 26234859 } +# - { name: vm.zone_reclaim_mode, value: 0 } +# - { name: vm.swappiness, value: 10 } +# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" } + +# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES +# Set this to a byte value (e.g. 134217728) +# A value of 0 will leave the package default. +#ceph_tcmalloc_max_total_thread_cache: 134217728 + + +########## +# DOCKER # +########## +#ceph_docker_image: "ceph/ceph" +#ceph_docker_image_tag: v19 +#ceph_docker_registry: quay.io +#ceph_docker_registry_auth: false +# ceph_docker_registry_username: +# ceph_docker_registry_password: +# ceph_docker_http_proxy: +# ceph_docker_https_proxy: +#ceph_docker_no_proxy: "localhost,127.0.0.1" +## Client only docker image - defaults to {{ ceph_docker_image }} +#ceph_client_docker_image: "{{ ceph_docker_image }}" +#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}" +#ceph_client_docker_registry: "{{ ceph_docker_registry }}" +containerized_deployment: false +#container_binary: +#timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}" +#ceph_common_container_params: +# envs: +# NODE_NAME: "{{ ansible_facts['hostname'] }}" +# CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" 
+# TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES: "{{ ceph_tcmalloc_max_total_thread_cache }}" +# args: +# - --setuser=ceph +# - --setgroup=ceph +# - --default-log-to-file=false +# - --default-log-to-stderr=true +# - --default-log-stderr-prefix="debug " +# volumes: +# - /var/lib/ceph/crash:/var/lib/ceph/crash:z +# - /var/run/ceph:/var/run/ceph:z +# - /var/log/ceph:/var/log/ceph:z +# - /etc/ceph:/etc/ceph:z +# - /etc/localtime:/etc/localtime:ro + +# this is only here for usage with the rolling_update.yml playbook +# do not ever change this here +#rolling_update: false + +##################### +# Docker pull retry # +##################### +#docker_pull_retry: 3 +#docker_pull_timeout: "300s" + + +############# +# DASHBOARD # +############# +dashboard_enabled: false +# Choose http or https +# For https, you should set dashboard.crt/key and grafana.crt/key +# If you define the dashboard_crt and dashboard_key variables, but leave them as '', +# then we will autogenerate a cert and keyfile +#dashboard_protocol: https +#dashboard_port: 8443 +# set this variable to the network you want the dashboard to listen on. 
(Default to public_network) +#dashboard_network: "{{ public_network }}" +#dashboard_admin_user: admin +#dashboard_admin_user_ro: false +# This variable must be set with a strong custom password when dashboard_enabled is True +# dashboard_admin_password: p@ssw0rd +# We only need this for SSL (https) connections +#dashboard_crt: '' +#dashboard_key: '' +#dashboard_certificate_cn: ceph-dashboard +#dashboard_tls_external: false +#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}" +#dashboard_rgw_api_user_id: ceph-dashboard +#dashboard_rgw_api_admin_resource: '' +#dashboard_rgw_api_no_ssl_verify: false +#dashboard_frontend_vip: '' +#dashboard_disabled_features: [] +#prometheus_frontend_vip: '' +#alertmanager_frontend_vip: '' +#node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0" +#node_exporter_port: 9100 +#grafana_admin_user: admin +# This variable must be set with a strong custom password when dashboard_enabled is True +# grafana_admin_password: admin +# We only need this for SSL (https) connections +#grafana_crt: '' +#grafana_key: '' +# When using https, please fill with a hostname for which grafana_crt is valid. 
+#grafana_server_fqdn: '' +#grafana_container_image: "docker.io/grafana/grafana:6.7.4" +#grafana_container_cpu_period: 100000 +#grafana_container_cpu_cores: 2 +# container_memory is in GB +#grafana_container_memory: 4 +#grafana_uid: 472 +#grafana_datasource: Dashboard +#grafana_dashboards_path: "/etc/grafana/dashboards/ceph-dashboard" +#grafana_dashboard_version: main +#grafana_dashboard_files: +# - ceph-cluster.json +# - cephfs-overview.json +# - host-details.json +# - hosts-overview.json +# - osd-device-details.json +# - osds-overview.json +# - pool-detail.json +# - pool-overview.json +# - radosgw-detail.json +# - radosgw-overview.json +# - radosgw-sync-overview.json +# - rbd-details.json +# - rbd-overview.json +#grafana_plugins: +# - vonage-status-panel +# - grafana-piechart-panel +#grafana_allow_embedding: true +#grafana_port: 3000 +#grafana_network: "{{ public_network }}" +#grafana_conf_overrides: {} +#prometheus_container_image: "docker.io/prom/prometheus:v2.7.2" +#prometheus_container_cpu_period: 100000 +#prometheus_container_cpu_cores: 2 +# container_memory is in GB +#prometheus_container_memory: 4 +#prometheus_data_dir: /var/lib/prometheus +#prometheus_conf_dir: /etc/prometheus +#prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image +#prometheus_port: 9092 +#prometheus_conf_overrides: {} +# Uncomment out this variable if you need to customize the retention period for prometheus storage. +# set it to '30d' if you want to retain 30 days of data. 
+# prometheus_storage_tsdb_retention_time: 15d +#alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2" +#alertmanager_container_cpu_period: 100000 +#alertmanager_container_cpu_cores: 2 +# container_memory is in GB +#alertmanager_container_memory: 4 +#alertmanager_data_dir: /var/lib/alertmanager +#alertmanager_conf_dir: /etc/alertmanager +#alertmanager_port: 9093 +#alertmanager_cluster_port: 9094 +#alertmanager_conf_overrides: {} +#alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}" + +#no_log_on_ceph_key_tasks: true + +############### +# DEPRECATION # +############### + + +###################################################### +# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER # +# *DO NOT* MODIFY THEM # +###################################################### + +#container_exec_cmd: +#docker: false +#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}" diff --git a/group_vars/all.yml copy.sample b/group_vars/all.yml copy.sample new file mode 100644 index 0000000..7580464 --- /dev/null +++ b/group_vars/all.yml copy.sample @@ -0,0 +1,667 @@ +--- +# Variables here are applicable to all host groups NOT roles + +# This sample file generated by generate_group_vars_sample.sh + +# Dummy variable to avoid error because ansible does not recognize the +# file as a good configuration file when no variable in it. 
+dummy: + +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +###################################### +# Releases name to number dictionary # +###################################### +#ceph_release_num: +# dumpling: 0.67 +# emperor: 0.72 +# firefly: 0.80 +# giant: 0.87 +# hammer: 0.94 +# infernalis: 9 +# jewel: 10 +# kraken: 11 +# luminous: 12 +# mimic: 13 +# nautilus: 14 +# octopus: 15 +# pacific: 16 +# quincy: 17 +# reef: 18 +# squid: 19 +# dev: 99 + + +# The 'cluster' variable determines the name of the cluster. +# Changing the default value to something else means that you will +# need to change all the command line calls as well, for example if +# your cluster name is 'foo': +# "ceph health" will become "ceph --cluster foo health" +# +# An easier way to handle this is to use the environment variable CEPH_ARGS +# So run: "export CEPH_ARGS="--cluster foo" +# With that you will be able to run "ceph health" normally +#cluster: ceph + +# Inventory host group variables +#mon_group_name: mons +#osd_group_name: osds +#rgw_group_name: rgws +#mds_group_name: mdss +#nfs_group_name: nfss +#rbdmirror_group_name: rbdmirrors +#client_group_name: clients +#mgr_group_name: mgrs +#rgwloadbalancer_group_name: rgwloadbalancers +#monitoring_group_name: monitoring +#adopt_label_group_names: +# - "{{ mon_group_name }}" +# - "{{ osd_group_name }}" +# - "{{ rgw_group_name }}" +# - "{{ mds_group_name }}" +# - "{{ nfs_group_name }}" +# - "{{ rbdmirror_group_name }}" +# - "{{ client_group_name }}" +# - "{{ mgr_group_name }}" +# - "{{ rgwloadbalancer_group_name }}" +# - "{{ monitoring_group_name }}" + +# If configure_firewall is true, then ansible will try to configure the +# appropriate firewalling rules so that Ceph daemons can communicate +# with each others. 
+#configure_firewall: true + +# Open ports on corresponding nodes if firewall is installed on it +#ceph_mon_firewall_zone: public +#ceph_mgr_firewall_zone: public +#ceph_osd_firewall_zone: public +#ceph_rgw_firewall_zone: public +#ceph_mds_firewall_zone: public +#ceph_nfs_firewall_zone: public +#ceph_rbdmirror_firewall_zone: public +#ceph_dashboard_firewall_zone: public +#ceph_rgwloadbalancer_firewall_zone: public + +# cephadm account for remote connections +#cephadm_ssh_user: root +#cephadm_ssh_priv_key_path: "/home/{{ cephadm_ssh_user }}/.ssh/id_rsa" +#cephadm_ssh_pub_key_path: "{{ cephadm_ssh_priv_key_path }}.pub" +#cephadm_mgmt_network: "{{ public_network }}" + +############ +# PACKAGES # +############ +#debian_package_dependencies: [] + +#centos_package_dependencies: +# - epel-release +# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}" + +#redhat_package_dependencies: [] + +#suse_package_dependencies: [] + +# Whether or not to install the ceph-test package. +#ceph_test: false + +# Enable the ntp service by default to avoid clock skew on ceph nodes +# Disable if an appropriate NTP client is already installed and configured +#ntp_service_enabled: true + +# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd +#ntp_daemon_type: chronyd + +# This variable determines if ceph packages can be updated. If False, the +# package resources will use "state=present". If True, they will use +# "state=latest". +#upgrade_ceph_packages: false + +#ceph_use_distro_backports: false # DEBIAN ONLY +#ceph_directories_mode: "0755" + +########### +# INSTALL # +########### +# ORIGIN SOURCE +# +# Choose between: +# - 'repository' means that you will get ceph installed through a new repository. 
Later below choose between 'community', 'dev' or 'obs' +# - 'distro' means that no separate repo file will be added +# you will get whatever version of Ceph is included in your Linux distro. +# 'local' means that the ceph binaries will be copied over from the local machine +#ceph_origin: dummy +#valid_ceph_origins: +# - repository +# - distro +# - local + + +#ceph_repository: dummy +#valid_ceph_repository: +# - community +# - dev +# - uca +# - custom +# - obs + + +# REPOSITORY: COMMUNITY VERSION +# +# Enabled when ceph_repository == 'community' +# +#ceph_mirror: https://download.ceph.com +#ceph_stable_key: https://download.ceph.com/keys/release.asc +#ceph_stable_release: squid +#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}" + +#nfs_ganesha_stable: true # use stable repos for nfs-ganesha +#centos_release_nfs: centos-release-nfs-ganesha4 +#nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu +#nfs_ganesha_apt_keyserver: keyserver.ubuntu.com +#nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA +#libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu + +# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions +# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ +# for more info read: https://github.com/ceph/ceph-ansible/issues/305 +# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}" + +# REPOSITORY: UBUNTU CLOUD ARCHIVE +# +# Enabled when ceph_repository == 'uca' +# +# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive +# usually has newer Ceph releases than the normal distro repository. 
+# +# +#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu" +#ceph_stable_openstack_release_uca: queens +#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}" + +# REPOSITORY: openSUSE OBS +# +# Enabled when ceph_repository == 'obs' +# +# This allows the install of Ceph from the openSUSE OBS repository. The OBS repository +# usually has newer Ceph releases than the normal distro repository. +# +# +#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/" + +# REPOSITORY: DEV +# +# Enabled when ceph_repository == 'dev' +# +#ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack +#ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built) + +#nfs_ganesha_dev: false # use development repos for nfs-ganesha + +# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman +# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous +#nfs_ganesha_flavor: "ceph_main" + + +# REPOSITORY: CUSTOM +# +# Enabled when ceph_repository == 'custom' +# +# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be +# a URL to the .repo file to be installed on the targets. For deb, +# ceph_custom_repo should be the URL to the repo base. 
+# +# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc +#ceph_custom_repo: https://server.domain.com/ceph-custom-repo + + +# ORIGIN: LOCAL CEPH INSTALLATION +# +# Enabled when ceph_repository == 'local' +# +# Path to DESTDIR of the ceph install +# ceph_installation_dir: "/path/to/ceph_installation/" +# Whether or not to use installer script rundep_installer.sh +# This script takes in rundep and installs the packages line by line onto the machine +# If this is set to false then it is assumed that the machine ceph is being copied onto will already have +# all runtime dependencies installed +# use_installer: false +# Root directory for ceph-ansible +# ansible_dir: "/path/to/ceph-ansible" + + +###################### +# CEPH CONFIGURATION # +###################### + +## Ceph options +# +# Each cluster requires a unique, consistent filesystem ID. By +# default, the playbook generates one for you. +# If you want to customize how the fsid is +# generated, you may find it useful to disable fsid generation to +# avoid cluttering up your ansible repo. If you set `generate_fsid` to +# false, you *must* generate `fsid` in another way. 
+# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT +#fsid: "{{ cluster_uuid.stdout }}" +#generate_fsid: true + +#ceph_conf_key_directory: /etc/ceph + +#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}" + +# Permissions for keyring files in /etc/ceph +#ceph_keyring_permissions: '0600' + +#cephx: true + +# Cluster configuration +#ceph_cluster_conf: +# global: +# public_network: "{{ public_network | default(omit) }}" +# cluster_network: "{{ cluster_network | default(omit) }}" +# osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}" +# ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}" +# ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}" +# osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}" + +## Client options +# +#rbd_cache: "true" +#rbd_cache_writethrough_until_flush: "true" +#rbd_concurrent_management_ops: 20 + +#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions + +# Permissions for the rbd_client_log_path and +# rbd_client_admin_socket_path. Depending on your use case for Ceph +# you may want to change these values. The default, which is used if +# any of the variables are unset or set to a false value (like `null` +# or `false`) is to automatically determine what is appropriate for +# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770 +# for infernalis releases, and root:root and 1777 for pre-infernalis +# releases. 
+# +# For other use cases, including running Ceph with OpenStack, you'll +# want to set these differently: +# +# For OpenStack on RHEL, you'll want: +# rbd_client_directory_owner: "qemu" +# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt) +# rbd_client_directory_mode: "0755" +# +# For OpenStack on Ubuntu or Debian, set: +# rbd_client_directory_owner: "libvirt-qemu" +# rbd_client_directory_group: "kvm" +# rbd_client_directory_mode: "0755" +# +# If you set rbd_client_directory_mode, you must use a string (e.g., +# 'rbd_client_directory_mode: "0755"', *not* +# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode +# must be in octal or symbolic form +#rbd_client_directory_owner: ceph +#rbd_client_directory_group: ceph +#rbd_client_directory_mode: "0755" + +#rbd_client_log_path: /var/log/ceph +#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor +#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor + +## Monitor options +# set to either ipv4 or ipv6, whichever your network is using +#ip_version: ipv4 + +#mon_host_v1: +# enabled: true +# suffix: ':6789' +#mon_host_v2: +# suffix: ':3300' + +#enable_ceph_volume_debug: false + +########## +# CEPHFS # +########## +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. 
+# eg: +# cephfs_data_pool: +# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" +# target_size_ratio: 0.2 +#cephfs: cephfs # name of the ceph filesystem +#cephfs_data_pool: +# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" +#cephfs_metadata_pool: +# name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}" +#cephfs_pools: +# - "{{ cephfs_data_pool }}" +# - "{{ cephfs_metadata_pool }}" + +## OSD options +# +#lvmetad_disabled: false +#is_hci: false +#hci_safety_factor: 0.2 +#non_hci_safety_factor: 0.7 +#safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}" +#osd_memory_target: 4294967296 +#journal_size: 5120 # OSD journal size in MB +#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'. +#public_network: 0.0.0.0/0 +#cluster_network: "{{ public_network | regex_replace(' ', '') }}" +#osd_mkfs_type: xfs +#osd_mkfs_options_xfs: -f -i size=2048 +#osd_mount_options_xfs: noatime,largeio,inode64,swalloc +#osd_objectstore: bluestore + +# Any device containing these patterns in their path will be excluded. +#osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*" + +## MDS options +# +#mds_max_mds: 1 + +## Rados Gateway options +# +#radosgw_frontend_type: beast # For additional frontends see: https://docs.ceph.com/en/latest/radosgw/frontends/ + +#radosgw_frontend_port: 8080 +# The server private key, public certificate and any other CA or intermediate certificates should be in one file, in PEM format. +#radosgw_frontend_ssl_certificate: "" +#radosgw_frontend_ssl_certificate_data: "" # certificate contents to be written to path defined by radosgw_frontend_ssl_certificate +#radosgw_frontend_options: "" +#radosgw_thread_pool_size: 512 + + +# You must define either radosgw_interface, radosgw_address. 
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# Eg. If you want to specify for each radosgw node which address the radosgw will bind to you can set it in your **inventory host file** by using 'radosgw_address' variable.
+# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined.
+#radosgw_interface: interface
+#radosgw_address: x.x.x.x
+#radosgw_address_block: subnet
+#radosgw_keystone_ssl: false # activate this when using keystone PKI keys
+#radosgw_num_instances: 1
+#rgw_zone: default # This is used for rgw instance client names.
+
+
+## Testing mode
+# enable this mode _only_ when you have a single node
+# if you don't want it keep the option commented
+# common_single_host_mode: true
+
+## Handlers - restarting daemons after a config change
+# if for whatever reasons the content of your ceph configuration changes
+# ceph daemons will be restarted as well. At the moment, we cannot detect
+# which config option changed so all the daemons will be restarted. Although
+# this restart will be serialized for each node, in between a health check
+# will be performed so we make sure we don't move to the next node until
+# ceph is healthy again
+# Obviously between the checks (for monitors to be in quorum and for osd's pgs
+# to be clean) we have to wait. These retries and delays are configurable
+# for both monitors and osds.
+# +# Monitor handler checks +#handler_health_mon_check_retries: 10 +#handler_health_mon_check_delay: 20 +# +# OSD handler checks +#handler_health_osd_check_retries: 40 +#handler_health_osd_check_delay: 30 +#handler_health_osd_check: true +# +# MDS handler checks +#handler_health_mds_check_retries: 5 +#handler_health_mds_check_delay: 10 +# +# RGW handler checks +#handler_health_rgw_check_retries: 5 +#handler_health_rgw_check_delay: 10 +#handler_rgw_use_haproxy_maintenance: false + +# NFS handler checks +#handler_health_nfs_check_retries: 5 +#handler_health_nfs_check_delay: 10 + +# RBD MIRROR handler checks +#handler_health_rbd_mirror_check_retries: 5 +#handler_health_rbd_mirror_check_delay: 10 + +# MGR handler checks +#handler_health_mgr_check_retries: 5 +#handler_health_mgr_check_delay: 10 + +## health mon/osds check retries/delay: + +#health_mon_check_retries: 20 +#health_mon_check_delay: 10 +#health_osd_check_retries: 20 +#health_osd_check_delay: 10 + +############## +# RBD-MIRROR # +############## + +#ceph_rbd_mirror_pool: "rbd" + +############### +# NFS-GANESHA # +############### +# +# Access type options +# +# Enable NFS File access +# If set to true, then ganesha is set up to export the root of the +# Ceph filesystem, and ganesha's attribute and directory caching is disabled +# as much as possible since libcephfs clients also caches the same +# information. +# +# Set this to true to enable File access via NFS. Requires an MDS role. +#nfs_file_gw: false +# Set this to true to enable Object access via NFS. Requires an RGW role. +#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}" + + +################### +# CONFIG OVERRIDE # +################### + +# Ceph configuration file override. +# This allows you to specify more configuration options +# using an INI style format. +# +# When configuring RGWs, make sure you use the form [client.rgw.*] +# instead of [client.radosgw.*]. 
+# For more examples check the profiles directory of https://github.com/ceph/ceph-ansible. +# +# The following sections are supported: [global], [mon], [osd], [mds], [client] +# +# Example: +# ceph_conf_overrides: +# global: +# foo: 1234 +# bar: 5678 +# "client.rgw.{{ rgw_zone }}.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}": +# rgw_zone: zone1 +# +#ceph_conf_overrides: {} + + +############# +# OS TUNING # +############# + +#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' }}" +#os_tuning_params: +# - { name: fs.file-max, value: 26234859 } +# - { name: vm.zone_reclaim_mode, value: 0 } +# - { name: vm.swappiness, value: 10 } +# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" } + +# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES +# Set this to a byte value (e.g. 134217728) +# A value of 0 will leave the package default. +#ceph_tcmalloc_max_total_thread_cache: 134217728 + + +########## +# DOCKER # +########## +#ceph_docker_image: "ceph/ceph" +#ceph_docker_image_tag: v19 +#ceph_docker_registry: quay.io +#ceph_docker_registry_auth: false +# ceph_docker_registry_username: +# ceph_docker_registry_password: +# ceph_docker_http_proxy: +# ceph_docker_https_proxy: +#ceph_docker_no_proxy: "localhost,127.0.0.1" +## Client only docker image - defaults to {{ ceph_docker_image }} +#ceph_client_docker_image: "{{ ceph_docker_image }}" +#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}" +#ceph_client_docker_registry: "{{ ceph_docker_registry }}" +#containerized_deployment: false +#container_binary: +#timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}" +#ceph_common_container_params: +# envs: +# NODE_NAME: "{{ ansible_facts['hostname'] }}" +# CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" 
+# TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES: "{{ ceph_tcmalloc_max_total_thread_cache }}" +# args: +# - --setuser=ceph +# - --setgroup=ceph +# - --default-log-to-file=false +# - --default-log-to-stderr=true +# - --default-log-stderr-prefix="debug " +# volumes: +# - /var/lib/ceph/crash:/var/lib/ceph/crash:z +# - /var/run/ceph:/var/run/ceph:z +# - /var/log/ceph:/var/log/ceph:z +# - /etc/ceph:/etc/ceph:z +# - /etc/localtime:/etc/localtime:ro + +# this is only here for usage with the rolling_update.yml playbook +# do not ever change this here +#rolling_update: false + +##################### +# Docker pull retry # +##################### +#docker_pull_retry: 3 +#docker_pull_timeout: "300s" + + +############# +# DASHBOARD # +############# +#dashboard_enabled: true +# Choose http or https +# For https, you should set dashboard.crt/key and grafana.crt/key +# If you define the dashboard_crt and dashboard_key variables, but leave them as '', +# then we will autogenerate a cert and keyfile +#dashboard_protocol: https +#dashboard_port: 8443 +# set this variable to the network you want the dashboard to listen on. 
(Default to public_network) +#dashboard_network: "{{ public_network }}" +#dashboard_admin_user: admin +#dashboard_admin_user_ro: false +# This variable must be set with a strong custom password when dashboard_enabled is True +# dashboard_admin_password: p@ssw0rd +# We only need this for SSL (https) connections +#dashboard_crt: '' +#dashboard_key: '' +#dashboard_certificate_cn: ceph-dashboard +#dashboard_tls_external: false +#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}" +#dashboard_rgw_api_user_id: ceph-dashboard +#dashboard_rgw_api_admin_resource: '' +#dashboard_rgw_api_no_ssl_verify: false +#dashboard_frontend_vip: '' +#dashboard_disabled_features: [] +#prometheus_frontend_vip: '' +#alertmanager_frontend_vip: '' +#node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0" +#node_exporter_port: 9100 +#grafana_admin_user: admin +# This variable must be set with a strong custom password when dashboard_enabled is True +# grafana_admin_password: admin +# We only need this for SSL (https) connections +#grafana_crt: '' +#grafana_key: '' +# When using https, please fill with a hostname for which grafana_crt is valid. 
+#grafana_server_fqdn: '' +#grafana_container_image: "docker.io/grafana/grafana:6.7.4" +#grafana_container_cpu_period: 100000 +#grafana_container_cpu_cores: 2 +# container_memory is in GB +#grafana_container_memory: 4 +#grafana_uid: 472 +#grafana_datasource: Dashboard +#grafana_dashboards_path: "/etc/grafana/dashboards/ceph-dashboard" +#grafana_dashboard_version: main +#grafana_dashboard_files: +# - ceph-cluster.json +# - cephfs-overview.json +# - host-details.json +# - hosts-overview.json +# - osd-device-details.json +# - osds-overview.json +# - pool-detail.json +# - pool-overview.json +# - radosgw-detail.json +# - radosgw-overview.json +# - radosgw-sync-overview.json +# - rbd-details.json +# - rbd-overview.json +#grafana_plugins: +# - vonage-status-panel +# - grafana-piechart-panel +#grafana_allow_embedding: true +#grafana_port: 3000 +#grafana_network: "{{ public_network }}" +#grafana_conf_overrides: {} +#prometheus_container_image: "docker.io/prom/prometheus:v2.7.2" +#prometheus_container_cpu_period: 100000 +#prometheus_container_cpu_cores: 2 +# container_memory is in GB +#prometheus_container_memory: 4 +#prometheus_data_dir: /var/lib/prometheus +#prometheus_conf_dir: /etc/prometheus +#prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image +#prometheus_port: 9092 +#prometheus_conf_overrides: {} +# Uncomment out this variable if you need to customize the retention period for prometheus storage. +# set it to '30d' if you want to retain 30 days of data. 
+# prometheus_storage_tsdb_retention_time: 15d +#alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2" +#alertmanager_container_cpu_period: 100000 +#alertmanager_container_cpu_cores: 2 +# container_memory is in GB +#alertmanager_container_memory: 4 +#alertmanager_data_dir: /var/lib/alertmanager +#alertmanager_conf_dir: /etc/alertmanager +#alertmanager_port: 9093 +#alertmanager_cluster_port: 9094 +#alertmanager_conf_overrides: {} +#alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}" + +#no_log_on_ceph_key_tasks: true + +############### +# DEPRECATION # +############### + + +###################################################### +# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER # +# *DO NOT* MODIFY THEM # +###################################################### + +#container_exec_cmd: +#docker: false +#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}" + diff --git a/group_vars/clients.yml.sample b/group_vars/clients.yml.sample new file mode 100644 index 0000000..f358a40 --- /dev/null +++ b/group_vars/clients.yml.sample @@ -0,0 +1,50 @@ +--- +# Variables here are applicable to all host groups NOT roles + +# This sample file generated by generate_group_vars_sample.sh + +# Dummy variable to avoid error because ansible does not recognize the +# file as a good configuration file when no variable in it. +dummy: + +########### +# GENERAL # +########### + +# Even though Client nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on Client nodes. Setting 'copy_admin_key' to 'true' +# will copy the admin key to the /etc/ceph/ directory +#copy_admin_key: false + +#user_config: false +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. 
+# eg: +# test: +# name: "test" +# application: "rbd" +# target_size_ratio: 0.2 +#test: +# name: "test" +# application: "rbd" +#test2: +# name: "test2" +# application: "rbd" +#pools: +# - "{{ test }}" +# - "{{ test2 }}" + +# Generate a keyring using ceph-authtool CLI or python. +# Eg: +# $ ceph-authtool --gen-print-key +# or +# $ python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('`. diff --git a/infrastructure-playbooks/add-mon.yml b/infrastructure-playbooks/add-mon.yml new file mode 100644 index 0000000..78709a6 --- /dev/null +++ b/infrastructure-playbooks/add-mon.yml @@ -0,0 +1,128 @@ +--- +# This playbook is used to add a new MON to +# an existing cluster. It can run from any machine. Even if the fetch +# directory is not present it will be created. +# +# Ensure that all monitors are present in the mons +# group in your inventory so that the ceph configuration file +# is created correctly for the new OSD(s). +- name: Pre-requisites operations for adding new monitor(s) + hosts: mons + gather_facts: false + vars: + delegate_facts_host: true + become: true + pre_tasks: + - name: Import raw_install_python tasks + ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml" + + - name: Gather facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Gather and delegate facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups[mon_group_name] }}" + run_once: true + when: delegate_facts_host | bool + tasks: + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-validate role + ansible.builtin.import_role: + name: 
ceph-validate + + - name: Import ceph-infra role + ansible.builtin.import_role: + name: ceph-infra + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + when: containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + +- name: Deploy Ceph monitors + hosts: mons + gather_facts: false + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-mon role + ansible.builtin.import_role: + name: ceph-mon + + - name: Import ceph-crash role + ansible.builtin.import_role: + name: ceph-crash + when: containerized_deployment | bool + + - name: Import ceph-exporter role + ansible.builtin.import_role: + name: ceph-exporter + when: containerized_deployment | bool + +- name: Update config file on OSD nodes + hosts: osds + gather_facts: true + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config diff --git a/infrastructure-playbooks/backup-and-restore-ceph-files.yml b/infrastructure-playbooks/backup-and-restore-ceph-files.yml new file mode 100644 
index 0000000..f17d9b3 --- /dev/null +++ b/infrastructure-playbooks/backup-and-restore-ceph-files.yml @@ -0,0 +1,109 @@ +--- +# Copyright Red Hat +# SPDX-License-Identifier: Apache-2.0 +# +# This playbook can help in order to backup some Ceph files and restore them later. +# +# Usage: +# +# ansible-playbook -i backup-and-restore-ceph-files.yml -e backup_dir= -e mode= -e target_node= +# +# Required run-time variables +# ------------------ +# backup_dir : a path where files will be read|write. +# mode : tell the playbook either to backup or restore files. +# target_node : the name of the node being processed, it must match the name set in the inventory. +# +# Examples +# -------- +# ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=backup -e target_node=mon01 +# ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=restore -e target_node=mon01 + +- name: Backup and restore Ceph files + hosts: localhost + become: true + gather_facts: true + tasks: + - name: Exit playbook, if user did not set the source node + ansible.builtin.fail: + msg: > + "You must pass the node name: -e target_node=. + The name must match what is set in your inventory." 
+ when: + - target_node is not defined + or target_node not in groups.get('all', []) + + - name: Exit playbook, if user did not set the backup directory + ansible.builtin.fail: + msg: > + "you must pass the backup directory path: -e backup_dir=" + when: backup_dir is not defined + + - name: Exit playbook, if user did not set the playbook mode (backup|restore) + ansible.builtin.fail: + msg: > + "you must pass the mode: -e mode=" + when: + - mode is not defined + or mode not in ['backup', 'restore'] + + - name: Gather facts on source node + ansible.builtin.setup: + delegate_to: "{{ target_node }}" + delegate_facts: true + + - name: Backup mode + when: mode == 'backup' + block: + - name: Create a temp directory + ansible.builtin.tempfile: + state: directory + suffix: ansible-archive-ceph + register: tmp_dir + delegate_to: "{{ target_node }}" + + - name: Archive files + community.general.archive: + path: "{{ item }}" + dest: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar" + format: tar + mode: "0644" + delegate_to: "{{ target_node }}" + loop: + - /etc/ceph + - /var/lib/ceph + + - name: Create backup directory + become: false + ansible.builtin.file: + path: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}" + state: directory + mode: "0755" + + - name: Backup files + ansible.builtin.fetch: + src: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar" + dest: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar" + flat: true + loop: + - /etc/ceph + - /var/lib/ceph + delegate_to: "{{ target_node }}" + + - name: Remove temp directory + ansible.builtin.file: + path: "{{ tmp_dir.path }}" + state: absent + delegate_to: "{{ target_node }}" + + - name: Restore mode + when: mode == 'restore' + block: + - name: Unarchive files + ansible.builtin.unarchive: + src: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') 
}}.tar" + dest: "{{ item | dirname }}" + loop: + - /etc/ceph + - /var/lib/ceph + delegate_to: "{{ target_node }}" diff --git a/infrastructure-playbooks/ceph-keys.yml b/infrastructure-playbooks/ceph-keys.yml new file mode 100644 index 0000000..d75ebd3 --- /dev/null +++ b/infrastructure-playbooks/ceph-keys.yml @@ -0,0 +1,74 @@ +--- +# This playbook is used to manage CephX Keys +# You will find examples below on how the module can be used on daily operations +# +# It currently runs on localhost + +- name: CephX key management examples + hosts: localhost + gather_facts: false + vars: + cluster: ceph + container_exec_cmd: "docker exec ceph-nano" + keys_to_info: + - client.admin + - mds.0 + keys_to_delete: + - client.leseb + - client.leseb1 + - client.pythonnnn + keys_to_create: + - { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" }, mode: "0600" } + - { name: client.existpassss, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" } + - { name: client.path, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" } + + tasks: + - name: Create ceph key(s) module + ceph_key: + name: "{{ item.name }}" + caps: "{{ item.caps }}" + cluster: "{{ cluster }}" + secret: "{{ item.key | default('') }}" + containerized: "{{ container_exec_cmd | default(False) }}" + with_items: "{{ keys_to_create }}" + + - name: Update ceph key(s) + ceph_key: + name: "{{ item.name }}" + state: update + caps: "{{ item.caps }}" + cluster: "{{ cluster }}" + containerized: "{{ container_exec_cmd | default(False) }}" + with_items: "{{ keys_to_create }}" + + - name: Delete ceph key(s) + ceph_key: + name: "{{ item }}" + state: absent + cluster: "{{ cluster }}" + containerized: "{{ container_exec_cmd | default(False) }}" + with_items: "{{ keys_to_delete }}" + + - name: Info ceph key(s) + ceph_key_info: + name: "{{ item }}" + state: info + cluster: "{{ cluster }}" + containerized: "{{ container_exec_cmd }}" + register: key_info + ignore_errors: true + with_items: "{{ keys_to_info }}" + + - 
name: List ceph key(s) + ceph_key_info: + state: list + cluster: "{{ cluster }}" + containerized: "{{ container_exec_cmd | default(False) }}" + register: list_keys + ignore_errors: true + + - name: Fetch_initial_keys # noqa: ignore-errors + ceph_key: + state: fetch_initial_keys + cluster: "{{ cluster }}" + ignore_errors: true diff --git a/infrastructure-playbooks/cephadm-adopt.yml b/infrastructure-playbooks/cephadm-adopt.yml new file mode 100644 index 0000000..25cc200 --- /dev/null +++ b/infrastructure-playbooks/cephadm-adopt.yml @@ -0,0 +1,1552 @@ +--- +# +# This playbook does a cephadm adopt for all the Ceph services +# + +- name: Confirm whether user really meant to adopt the cluster by cephadm + hosts: localhost + connection: local + become: false + gather_facts: false + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to adopt the cluster by cephadm ? + default: 'no' + private: false + tasks: + - name: Exit playbook, if user did not mean to adopt the cluster by cephadm + ansible.builtin.fail: + msg: > + Exiting cephadm-adopt playbook, cluster was NOT adopted. 
+ To adopt the cluster, either say 'yes' on the prompt or + use `-e ireallymeanit=yes` on the command line when + invoking the playbook + when: ireallymeanit != 'yes' + + - name: Import_role ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + +- name: Gather facts and prepare system for cephadm + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + - "{{ monitoring_group_name|default('monitoring') }}" + become: true + any_errors_fatal: true + gather_facts: false + vars: + delegate_facts_host: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Gather facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) + + - name: Gather and delegate facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}" + run_once: true + when: delegate_facts_host | bool + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Set_fact ceph_cmd + ansible.builtin.set_fact: + ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:ro -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }}" + + - name: Check pools have an application enabled + ansible.builtin.command: "{{ ceph_cmd }} health detail 
--format json" + register: health_detail + run_once: true + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Check for POOL_APP_NOT_ENABLED warning + ansible.builtin.fail: + msg: "Make sure all your pool have an application enabled." + run_once: true + delegate_to: localhost + when: + - (health_detail.stdout | default('{}', True) | from_json)['status'] == "HEALTH_WARN" + - "'POOL_APP_NOT_ENABLED' in (health_detail.stdout | default('{}', True) | from_json)['checks']" + + - name: Get the ceph version + ansible.builtin.command: "{{ container_binary + ' run --rm --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --version" + changed_when: false + register: ceph_version_out + + - name: Set_fact ceph_version + ansible.builtin.set_fact: + ceph_version: "{{ ceph_version_out.stdout.split(' ')[2] }}" + + - name: Fail on pre octopus ceph releases + ansible.builtin.fail: + msg: > + Your Ceph version {{ ceph_version }} is not supported for this operation. + Please upgrade your cluster with the rolling_update.yml playbook first. 
+ when: ceph_version is version('15.2', '<') + + - name: Check if it is atomic host + ansible.builtin.stat: + path: /run/ostree-booted + register: stat_ostree + + - name: Set_fact is_atomic + ansible.builtin.set_fact: + is_atomic: "{{ stat_ostree.stat.exists }}" + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + tasks_from: registry.yml + when: + - not containerized_deployment | bool + - ceph_docker_registry_auth | bool + + - name: Pulling Ceph container image + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + changed_when: false + register: docker_image + until: docker_image.rc == 0 + retries: "{{ docker_pull_retry }}" + delay: 10 + when: + - not containerized_deployment | bool + - inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) or + inventory_hostname in groups.get(nfs_group_name, []) + + - name: Configure repository for installing cephadm + when: containerized_deployment | bool + tags: with_pkg + block: + - name: Set_fact ceph_origin + ansible.builtin.set_fact: + ceph_origin: repository + when: ceph_origin == 'dummy' + + - name: Set_fact ceph_repository + ansible.builtin.set_fact: + ceph_repository: community + when: ceph_repository == 'dummy' + + - name: Validate repository variables + ansible.builtin.import_role: + name: ceph-validate + tasks_from: check_repository.yml + + - name: Configure repository + ansible.builtin.import_role: + name: ceph-common + 
tasks_from: "configure_repository.yml" + + - name: Install cephadm requirements + tags: with_pkg + ansible.builtin.package: + name: ['python3', 'lvm2'] + register: result + until: result is succeeded + + - name: Install cephadm + tags: with_pkg + ansible.builtin.package: + name: cephadm + register: result + until: result is succeeded + + - name: Install cephadm mgr module + tags: with_pkg + ansible.builtin.package: + name: ceph-mgr-cephadm + register: result + until: result is succeeded + when: + - not containerized_deployment | bool + - mgr_group_name in group_names + + - name: Get current fsid + ansible.builtin.command: "{{ ceph_cmd }} fsid" + register: current_fsid + run_once: true + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Get a minimal ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config generate-minimal-conf" + register: minimal_config + run_once: true + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Set_fact fsid + ansible.builtin.set_fact: + fsid: "{{ current_fsid.stdout }}" + run_once: true + + - name: Enable cephadm mgr module + ceph_mgr_module: + name: cephadm + cluster: "{{ cluster }}" + state: enable + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + run_once: true + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Set cephadm as orchestrator backend + ansible.builtin.command: "{{ ceph_cmd }} orch set backend cephadm" + changed_when: false + run_once: true + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Check if there is an existing ssh keypair + ansible.builtin.stat: + path: "{{ item }}" + loop: + - "{{ cephadm_ssh_priv_key_path }}" + - "{{ cephadm_ssh_pub_key_path }}" + register: ssh_keys + changed_when: false + run_once: true + delegate_to: '{{ groups[mon_group_name][0] 
}}' + + - name: Set fact + ansible.builtin.set_fact: + stat_ssh_key_pair: "{{ ssh_keys.results | map(attribute='stat.exists') | list }}" + + - name: Fail if either ssh public or private key is missing + ansible.builtin.fail: + msg: "One part of the ssh keypair of user {{ cephadm_ssh_user }} is missing" + when: + - false in stat_ssh_key_pair + - true in stat_ssh_key_pair + + - name: Generate cephadm ssh key if there is none + ansible.builtin.command: "{{ ceph_cmd }} cephadm generate-key" + when: not true in stat_ssh_key_pair + changed_when: false + run_once: true + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Use existing user keypair for remote connections + when: not false in stat_ssh_key_pair + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + ansible.builtin.command: > + {{ container_binary + ' run --rm --net=host --security-opt label=disable + -v /etc/ceph:/etc/ceph:z + -v /var/lib/ceph:/var/lib/ceph:ro + -v /var/run/ceph:/var/run/ceph:z + -v ' + item.1 + ':/etc/ceph/cephadm.' 
+ item.0 + ':ro --entrypoint=ceph '+ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} + --cluster {{ cluster }} cephadm set-{{ item.0 }}-key -i /etc/ceph/cephadm.{{ item.0 }} + changed_when: false + with_together: + - ['pub', 'priv'] + - ['{{ cephadm_ssh_pub_key_path }}', '{{ cephadm_ssh_priv_key_path }}'] + + - name: Get the cephadm ssh pub key + ansible.builtin.command: "{{ ceph_cmd }} cephadm get-pub-key" + changed_when: false + run_once: true + register: cephadm_pubpkey + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Allow cephadm key + ansible.posix.authorized_key: + user: "{{ cephadm_ssh_user }}" + key: '{{ cephadm_pubpkey.stdout }}' + + - name: Set cephadm ssh user to {{ cephadm_ssh_user }} + ansible.builtin.command: "{{ ceph_cmd }} cephadm set-user {{ cephadm_ssh_user }}" + changed_when: false + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Set cephadm ssh config + ansible.builtin.command: "{{ ceph_cmd }} cephadm set-ssh-config -i {{ cephadm_ssh_config_path }}" + changed_when: false + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + when: cephadm_ssh_config_path is defined + + - name: Run cephadm prepare-host + ansible.builtin.command: cephadm prepare-host + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Set default container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + changed_when: false + run_once: true + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Set container image base in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}" + changed_when: false + run_once: 
true + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Set dashboard container image in ceph mgr configuration + when: dashboard_enabled | bool + run_once: true + block: + - name: Set alertmanager container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Set grafana container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Set node-exporter container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Set prometheus container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + + - name: Enable the osd memory autotune for hci environment + ansible.builtin.command: "{{ ceph_cmd }} config set osd osd_memory_target_autotune true" + changed_when: false + run_once: true + delegate_to: '{{ groups[mon_group_name][0] }}' + when: is_hci | bool + + - name: Set autotune_memory_target_ratio + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/autotune_memory_target_ratio {{ '0.2' if is_hci | bool else '0.7' }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Manage nodes with cephadm - ipv4 + ansible.builtin.command: "{{ ceph_cmd }} orch 
host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | first }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + when: cephadm_mgmt_network.split(',')[0] is ansible.utils.ipv4 + + - name: Manage nodes with cephadm - ipv6 + ansible.builtin.command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + when: cephadm_mgmt_network.split(',')[0] is ansible.utils.ipv6 + + - name: Add ceph label for core component + ansible.builtin.command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['nodename'] }} ceph" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + when: inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) + + - name: Get the client.admin keyring + ceph_key_info: + name: client.admin + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + run_once: true + delegate_to: '{{ groups[mon_group_name][0] }}' + register: client_admin_keyring + + - name: Copy the client.admin keyring + ansible.builtin.copy: + dest: "/etc/ceph/{{ cluster }}.client.admin.keyring" + content: "{{ 
client_admin_keyring.stdout + '\n' }}" + owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + run_once: true + delegate_to: "{{ item }}" + with_items: + - "{{ groups.get(osd_group_name, []) }}" + - "{{ groups.get(mds_group_name, []) }}" + - "{{ groups.get(rgw_group_name, []) }}" + - "{{ groups.get(mgr_group_name, []) }}" + - "{{ groups.get(rbdmirror_group_name, []) }}" + + - name: Assimilate ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config assimilate-conf -i /etc/ceph/{{ cluster }}.conf" + changed_when: false + when: inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) + + - name: Set_fact cephadm_cmd + ansible.builtin.set_fact: + cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}" + + - name: Set container registry info + ansible.builtin.command: "{{ ceph_cmd }} cephadm registry-login {{ ceph_docker_registry }} {{ ceph_docker_registry_username }} {{ ceph_docker_registry_password }}" + changed_when: false + no_log: true + run_once: true + delegate_to: '{{ groups[mon_group_name][0] }}' + when: ceph_docker_registry_auth | bool + + - name: Remove logrotate configuration + ansible.builtin.file: + path: /etc/logrotate.d/ceph + state: absent + when: inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, 
[]) + +- name: Store existing rbd mirror peers in monitor config store + hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" + become: true + any_errors_fatal: true + gather_facts: true + tasks: + - name: Store existing rbd mirror peers in monitor config store + when: + - ceph_rbd_mirror_configure | default(True) | bool + - ceph_rbd_mirror_remote_user is defined + - ceph_rbd_mirror_remote_cluster is defined + block: + - name: Import ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-validate + ansible.builtin.import_role: + name: ceph-validate + tasks_from: check_rbdmirror.yml + + - name: Import container_binary + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Set_fact rbd_cmd + ansible.builtin.set_fact: + rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }} -n client.rbd-mirror.{{ ansible_facts['hostname'] }} -k /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring" + + - name: Set_fact admin_rbd_cmd + ansible.builtin.set_fact: + admin_rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }}" + - name: Get mirror pool info + ansible.builtin.command: "{{ rbd_cmd }} mirror pool info {{ ceph_rbd_mirror_pool }} --format json" + register: mirror_pool_info + changed_when: false + + - name: Set_fact mirror_peer_found + ansible.builtin.set_fact: + mirror_peer_uuid: "{{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | 
selectattr('site_name', 'match', '^' + ceph_rbd_mirror_remote_cluster + '$') | map(attribute='uuid') | list) }}" + + - name: Remove current rbd mirror peer, add new peer into mon config store + when: mirror_peer_uuid | length > 0 + block: + - name: Get remote user keyring + ansible.builtin.slurp: + src: "/etc/ceph/{{ ceph_rbd_mirror_remote_cluster }}.{{ ceph_rbd_mirror_remote_user }}.keyring" + register: remote_user_keyring + + - name: Get quorum_status + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + register: quorum_status + run_once: true + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Set_fact mon_ip_list + ansible.builtin.set_fact: + mon_ip_list: "{{ mon_ip_list | default([]) | union([item['addr'].split(':')[0]]) }}" + loop: "{{ (quorum_status.stdout | default('{}') | from_json)['monmap']['mons'] }}" + run_once: true + + - name: Remove current mirror peer + ansible.builtin.command: "{{ admin_rbd_cmd }} mirror pool peer remove {{ ceph_rbd_mirror_pool }} {{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^' + ceph_rbd_mirror_remote_cluster + '$') | map(attribute='uuid') | list)[0] }}" + delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" + changed_when: false + + - name: Get remote user keyring secret + ansible.builtin.set_fact: + remote_user_keyring_secret: "{{ item.split('=', 1)[1] | trim }}" + with_items: "{{ (remote_user_keyring.content | b64decode).split('\n') }}" + when: "'key = ' in item" + + - name: Create a temporary file + ansible.builtin.tempfile: + path: /etc/ceph + state: file + suffix: _ceph-ansible + register: tmp_file + delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" + + - name: Write secret to temporary 
file + ansible.builtin.copy: + dest: "{{ tmp_file.path }}" + content: "{{ remote_user_keyring_secret }}" + mode: preserve + delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" + + - name: Re-add mirror peer + ansible.builtin.command: "{{ admin_rbd_cmd }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ','.join(mon_ip_list) }} --remote-key-file {{ tmp_file.path }}" + delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" + changed_when: false + + - name: Rm temporary file + ansible.builtin.file: + path: "{{ tmp_file.path }}" + state: absent + delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" + + +- name: Adopt ceph mon daemons + hosts: "{{ mon_group_name|default('mons') }}" + serial: 1 + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Adopt mon daemon + cephadm_adopt: + name: "mon.{{ ansible_facts['hostname'] }}" + cluster: "{{ cluster }}" + image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + docker: "{{ true if container_binary == 'docker' else false }}" + pull: false + firewalld: "{{ true if configure_firewall | bool else false }}" + + - name: Reset failed ceph-mon systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module + changed_when: false + failed_when: false + when: containerized_deployment | bool + + - name: Remove ceph-mon systemd files + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/ceph-mon@.service + - /etc/systemd/system/ceph-mon@.service.d + - /etc/systemd/system/ceph-mon.target + + - name: Waiting for the monitor to join the quorum... 
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json" + changed_when: false + register: ceph_health_raw + until: > + ansible_facts['hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"] + retries: "{{ health_mon_check_retries }}" + delay: "{{ health_mon_check_delay }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + +- name: Adopt ceph mgr daemons + hosts: "{{ groups['mgrs'] | default(groups['mons']) | default(omit) }}" + serial: 1 + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Adopt mgr daemon + cephadm_adopt: + name: "mgr.{{ ansible_facts['hostname'] }}" + cluster: "{{ cluster }}" + image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + docker: "{{ true if container_binary == 'docker' else false }}" + pull: false + firewalld: "{{ true if configure_firewall | bool else false }}" + + - name: Reset failed ceph-mgr systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module + changed_when: false + failed_when: false + when: containerized_deployment | bool + + - name: Remove ceph-mgr systemd files + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/ceph-mgr@.service + - /etc/systemd/system/ceph-mgr@.service.d + - /etc/systemd/system/ceph-mgr.target + +- name: Set osd flags + hosts: "{{ osd_group_name|default('osds') }}" + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Get pool list + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" + register: 
pool_list + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + check_mode: false + + - name: Get balancer module status + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" + register: balancer_status_adopt + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + check_mode: false + + - name: Set_fact pools_pgautoscaler_mode + ansible.builtin.set_fact: + pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}" + run_once: true + with_items: "{{ pool_list.stdout | default('{}') | from_json }}" + + - name: Disable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: (balancer_status_adopt.stdout | from_json)['active'] | bool + + - name: Disable pg autoscale on pools + ceph_pool: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + pg_autoscale_mode: false + with_items: "{{ pools_pgautoscaler_mode }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + when: + - pools_pgautoscaler_mode is defined + - item.mode == 'on' + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Set osd flags + ceph_osd_flag: + cluster: "{{ cluster }}" + name: "{{ item }}" + state: present + with_items: + - noout + - nodeep-scrub + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + +- name: Adopt ceph osd daemons + hosts: "{{ 
osd_group_name|default('osds') }}" + serial: 1 + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + when: containerized_deployment | bool + + - name: Get osd list + ceph_volume: + cluster: "{{ cluster }}" + action: list + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: osd_list + + - name: Set osd fsid for containerized deployment + ansible.builtin.lineinfile: + path: '/var/lib/ceph/osd/{{ cluster }}-{{ item.key }}/fsid' + line: "{{ (item.value | selectattr('type', 'equalto', 'block') | map(attribute='tags') | first)['ceph.osd_fsid'] }}" + owner: '{{ ceph_uid }}' + group: '{{ ceph_uid }}' + create: true + mode: "0644" + with_dict: '{{ osd_list.stdout | from_json }}' + when: containerized_deployment | bool + + - name: Set osd type for containerized deployment + ansible.builtin.lineinfile: + path: '/var/lib/ceph/osd/{{ cluster }}-{{ item }}/type' + line: 'bluestore' + owner: '{{ ceph_uid }}' + group: '{{ ceph_uid }}' + create: true + mode: "0644" + loop: '{{ (osd_list.stdout | from_json).keys() | list }}' + when: containerized_deployment | bool + + - name: Adopt osd daemon + cephadm_adopt: + name: "osd.{{ item }}" + cluster: "{{ cluster }}" + image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + docker: "{{ true if container_binary == 'docker' else false }}" + pull: false + firewalld: "{{ true if configure_firewall | bool else false }}" + loop: '{{ (osd_list.stdout | from_json).keys() | list }}' + + - name: Remove ceph-osd systemd and ceph-osd-run.sh files + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - 
/etc/systemd/system/ceph-osd@.service + - /etc/systemd/system/ceph-osd@.service.d + - /etc/systemd/system/ceph-osd.target + - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh" + + - name: Remove osd directory + ansible.builtin.file: + path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" + state: absent + loop: '{{ (osd_list.stdout | from_json).keys() | list }}' + + - name: Remove any legacy directories in /var/lib/ceph/mon (workaround) + ansible.builtin.file: + path: "/var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}" + state: absent + + - name: Waiting for clean pgs... + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph pg stat --format json" + changed_when: false + register: ceph_health_post + until: > + (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0) + and + (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | selectattr('name', 'search', '^active\\+clean') | map(attribute='num') | list | sum) == (ceph_health_post.stdout | from_json).pg_summary.num_pgs) + delegate_to: "{{ groups[mon_group_name][0] }}" + retries: "{{ health_osd_check_retries }}" + delay: "{{ health_osd_check_delay }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + +- name: Unset osd flags + hosts: "{{ osd_group_name|default('osds') }}" + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Re-enable pg autoscale on pools + ceph_pool: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + pg_autoscale_mode: true + with_items: "{{ pools_pgautoscaler_mode }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + when: + - pools_pgautoscaler_mode is defined + - item.mode == 'on' + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry 
+ '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Unset osd flags + ceph_osd_flag: + cluster: "{{ cluster }}" + name: "{{ item }}" + state: absent + with_items: + - noout + - nodeep-scrub + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Re-enable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: (balancer_status_adopt.stdout | from_json)['active'] | bool + +- name: Redeploy mds daemons + hosts: "{{ mds_group_name|default('mdss') }}" + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Update the placement of metadata hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mds {{ cephfs }} --placement='{{ groups.get(mds_group_name, []) | length }} label:{{ mds_group_name }}'" + run_once: true + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + +- name: Stop and remove legacy ceph mds daemons + hosts: "{{ mds_group_name|default('mdss') }}" + serial: 1 + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Stop and disable ceph-mds systemd service + ansible.builtin.service: + name: "ceph-mds@{{ ansible_facts['hostname'] }}" + 
state: stopped + enabled: false + failed_when: false + + - name: Stop and disable ceph-mds systemd target + ansible.builtin.service: + name: ceph-mds.target + state: stopped + enabled: false + failed_when: false + + - name: Reset failed ceph-mds systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module + changed_when: false + failed_when: false + when: containerized_deployment | bool + + - name: Remove ceph-mds systemd files + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/ceph-mds@.service + - /etc/systemd/system/ceph-mds@.service.d + - /etc/systemd/system/ceph-mds.target + + - name: Remove legacy ceph mds data + ansible.builtin.file: + path: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}" + state: absent + +- name: Redeploy rgw daemons + hosts: "{{ rgw_group_name | default('rgws') }}" + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: set_radosgw_address.yml + + - name: Import rgw ssl certificate into kv store + when: radosgw_frontend_ssl_certificate | length > 0 + block: + - name: Slurp rgw ssl certificate + ansible.builtin.slurp: + src: "{{ radosgw_frontend_ssl_certificate }}" + register: rgw_ssl_cert + + - name: Store ssl certificate in kv store + ansible.builtin.command: > + {{ container_binary }} run --rm -i -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} + config-key set rgw/cert/rgw.{{ ansible_facts['hostname'] }} -i - + args: + stdin: "{{ rgw_ssl_cert.content | b64decode }}" + stdin_add_newline: false + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ 
ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Set_fact rgw_subnet + ansible.builtin.set_fact: + rgw_subnet: "--networks {{ radosgw_address_block }}" + when: + - radosgw_address_block is defined + - radosgw_address_block != 'subnet' + + - name: Update the placement of radosgw hosts + ceph_orch_apply: + fsid: "{{ fsid }}" + cluster: "{{ cluster }}" + spec: | + service_type: rgw + service_id: {{ ansible_facts['hostname'] }} + placement: + count_per_host: {{ radosgw_num_instances }} + hosts: + - {{ ansible_facts['nodename'] }} + {% if rgw_subnet is defined %} + networks: {{ radosgw_address_block.split(',') | list if ',' in radosgw_address_block else radosgw_address_block | string }} + {% endif %} + spec: + rgw_frontend_port: {{ radosgw_frontend_port }} + {% if radosgw_frontend_ssl_certificate | length > 0 %} + {{ "ssl: true" }} + {% endif %} + extra_container_args: + - -v + - /etc/pki/ca-trust:/etc/pki/ca-trust:ro + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + +- name: Stop and remove legacy ceph rgw daemons + hosts: "{{ rgw_group_name|default('rgws') }}" + serial: 1 + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: set_radosgw_address.yml + + - name: Stop and disable ceph-radosgw systemd service + ansible.builtin.service: + name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" + state: stopped + enabled: false + failed_when: false + loop: '{{ rgw_instances }}' + + - name: Stop and disable ceph-radosgw systemd target + ansible.builtin.service: + name: ceph-radosgw.target + state: stopped + enabled: false + failed_when: false + + - name: Reset failed ceph-radosgw 
systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa command-instead-of-module + changed_when: false + failed_when: false + loop: '{{ rgw_instances }}' + when: containerized_deployment | bool + + - name: Remove ceph-radosgw systemd files + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/ceph-radosgw@.service + - /etc/systemd/system/ceph-radosgw@.service.d + - /etc/systemd/system/ceph-radosgw.target + + - name: Remove legacy ceph radosgw data + ansible.builtin.file: + path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" + state: absent + loop: '{{ rgw_instances }}' + + - name: Remove legacy ceph radosgw directory + ansible.builtin.file: + path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}" + state: absent + +- name: Stop and remove legacy ceph nfs daemons + hosts: "{{ nfs_group_name|default('nfss') }}" + tags: 'ceph_nfs_adopt' + serial: 1 + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-nfs role + ansible.builtin.import_role: + name: ceph-nfs + tasks_from: create_rgw_nfs_user.yml + + - name: Enable ceph mgr nfs module + ceph_mgr_module: + name: "nfs" + cluster: "{{ cluster }}" + state: enable + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Stop and disable ceph-nfs systemd service + ansible.builtin.service: + name: "ceph-nfs@{{ ansible_facts['hostname'] }}" + state: stopped + enabled: false + failed_when: false + + - name: Reset failed ceph-nfs 
systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module + changed_when: false + failed_when: false + when: containerized_deployment | bool + + - name: Remove ceph-nfs systemd unit files + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/ceph-nfs@.service + - /etc/systemd/system/ceph-nfs@.service.d + + - name: Remove legacy ceph radosgw directory + ansible.builtin.file: + path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}" + state: absent + + - name: Create nfs ganesha cluster + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs cluster create {{ ansible_facts['hostname'] }} {{ ansible_facts['hostname'] }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Create cephfs export + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create cephfs {{ cephfs }} {{ ansible_facts['hostname'] }} {{ ceph_nfs_ceph_pseudo_path }} --squash {{ ceph_nfs_ceph_squash }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + when: nfs_file_gw | bool + + - name: Create rgw export + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create rgw --cluster-id {{ ansible_facts['hostname'] }} --pseudo-path {{ ceph_nfs_rgw_pseudo_path }} --user-id {{ ceph_nfs_rgw_user }} --squash {{ ceph_nfs_rgw_squash }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ 
ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + when: nfs_obj_gw | bool + +- name: Redeploy rbd-mirror daemons + hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Update the placement of rbd-mirror hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply rbd-mirror --placement='{{ groups.get(rbdmirror_group_name, []) | length }} label:{{ rbdmirror_group_name }}'" + run_once: true + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + +- name: Stop and remove legacy rbd-mirror daemons + hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" + serial: 1 + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Stop and disable rbd-mirror systemd service + ansible.builtin.service: + name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" + state: stopped + enabled: false + failed_when: false + + - name: Stop and disable rbd-mirror systemd target + ansible.builtin.service: + name: ceph-rbd-mirror.target + state: stopped + enabled: false + failed_when: false + + - name: Reset failed rbd-mirror systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module + changed_when: false + failed_when: false + when: containerized_deployment | bool + + - name: Remove rbd-mirror systemd files + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/ceph-rbd-mirror@.service + - 
/etc/systemd/system/ceph-rbd-mirror@.service.d + - /etc/systemd/system/ceph-rbd-mirror.target + + +- name: Redeploy ceph-crash daemons + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Stop and disable ceph-crash systemd service + ansible.builtin.service: + name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" + state: stopped + enabled: false + failed_when: false + + - name: Remove ceph-crash systemd unit file + ansible.builtin.file: + path: /etc/systemd/system/ceph-crash@.service + state: absent + + - name: Update the placement of ceph-crash hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply crash --placement='label:ceph'" + run_once: true + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + +- name: Redeploy ceph-exporter daemons + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Stop and disable ceph-exporter systemd service + ansible.builtin.service: + name: "{{ 'ceph-exporter@' + 
ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}" + state: stopped + enabled: false + failed_when: false + + - name: Remove ceph-exporter systemd unit file + ansible.builtin.file: + path: /etc/systemd/system/ceph-exporter@.service + state: absent + + - name: Update the placement of ceph-exporter hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply ceph-exporter --placement='label:ceph'" + run_once: true + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + +- name: Redeploy alertmanager/grafana/prometheus daemons + hosts: "{{ monitoring_group_name|default('monitoring') }}" + serial: 1 + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Check whether a ceph config file is present + ansible.builtin.stat: + path: "/etc/ceph/{{ cluster }}.conf" + register: ceph_config + + - name: Ensure /etc/ceph is present + ansible.builtin.file: + path: /etc/ceph + state: directory + owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_directories_mode }}" + + - name: Write a ceph.conf with minimal config + ansible.builtin.copy: + dest: "/etc/ceph/{{ cluster }}.conf" + content: "{{ minimal_config.stdout }}" + owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + when: not ceph_config.stat.exists | bool + + - name: With dashboard enabled + when: dashboard_enabled | bool + block: + - name: Ensure alertmanager/prometheus data directories are 
present + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ prometheus_user_id }}" + group: "{{ prometheus_user_id }}" + mode: "0755" + with_items: + - "{{ alertmanager_data_dir }}" + - "{{ prometheus_data_dir }}" + + # (workaround) cephadm adopt alertmanager only stops prometheus-alertmanager systemd service + - name: Stop and disable alertmanager systemd unit + ansible.builtin.service: + name: alertmanager + state: stopped + enabled: false + failed_when: false + + # (workaround) cephadm adopt alertmanager only uses /etc/prometheus/alertmanager.yml + - name: Create alertmanager config symlink + ansible.builtin.file: + path: /etc/prometheus/alertmanager.yml + src: '{{ alertmanager_conf_dir }}/alertmanager.yml' + state: link + + # (workaround) cephadm adopt alertmanager only uses /var/lib/prometheus/alertmanager/ + - name: Create alertmanager data symlink + ansible.builtin.file: + path: '{{ prometheus_data_dir }}/alertmanager' + src: '{{ alertmanager_data_dir }}' + state: link + + - name: Adopt alertmanager daemon + cephadm_adopt: + name: "alertmanager.{{ ansible_facts['hostname'] }}" + cluster: "{{ cluster }}" + image: "{{ alertmanager_container_image }}" + docker: "{{ true if container_binary == 'docker' else false }}" + pull: false + firewalld: "{{ true if configure_firewall | bool else false }}" + + - name: Remove alertmanager systemd unit file + ansible.builtin.file: + path: /etc/systemd/system/alertmanager.service + state: absent + + - name: Remove the legacy alertmanager data + ansible.builtin.file: + path: '{{ alertmanager_data_dir }}' + state: absent + + - name: Stop and disable prometheus systemd unit + ansible.builtin.service: + name: prometheus + state: stopped + enabled: false + failed_when: false + + - name: Remove alertmanager data symlink + ansible.builtin.file: + path: '{{ prometheus_data_dir }}/alertmanager' + state: absent + + # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/ + - name: Tmp copy 
the prometheus data + ansible.builtin.copy: + src: '{{ prometheus_data_dir }}/' + dest: /var/lib/prom_metrics + owner: 65534 + group: 65534 + mode: preserve + remote_src: true + + # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/ + - name: Restore the prometheus data + ansible.builtin.copy: + src: /var/lib/prom_metrics/ + dest: /var/lib/prometheus/metrics + owner: 65534 + group: 65534 + mode: preserve + remote_src: true + + - name: Remove the tmp prometheus data copy + ansible.builtin.file: + path: /var/lib/prom_metrics + state: absent + + - name: Adopt prometheus daemon + cephadm_adopt: + name: "prometheus.{{ ansible_facts['hostname'] }}" + cluster: "{{ cluster }}" + image: "{{ prometheus_container_image }}" + docker: "{{ true if container_binary == 'docker' else false }}" + pull: false + firewalld: "{{ true if configure_firewall | bool else false }}" + + - name: Remove prometheus systemd unit file + ansible.builtin.file: + path: /etc/systemd/system/prometheus.service + state: absent + + - name: Remove the legacy prometheus data + ansible.builtin.file: + path: '{{ prometheus_data_dir }}' + state: absent + + # (workaround) cephadm adopt grafana only stops grafana systemd service + - name: Stop and disable grafana systemd unit + ansible.builtin.service: + name: grafana-server + state: stopped + enabled: false + failed_when: false + + - name: Adopt grafana daemon + cephadm_adopt: + name: "grafana.{{ ansible_facts['hostname'] }}" + cluster: "{{ cluster }}" + image: "{{ grafana_container_image }}" + docker: "{{ true if container_binary == 'docker' else false }}" + pull: false + firewalld: "{{ true if configure_firewall | bool else false }}" + + - name: Remove grafana systemd unit file + ansible.builtin.file: + path: /etc/systemd/system/grafana-server.service + state: absent + + - name: Remove the legacy grafana data + ansible.builtin.file: + path: /var/lib/grafana + state: absent + +- name: Redeploy node-exporter daemons + hosts: + - "{{ 
mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + - "{{ monitoring_group_name|default('monitoring') }}" + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: With dashboard enabled + when: dashboard_enabled | bool + block: + - name: Stop and disable node-exporter systemd service + ansible.builtin.service: + name: node_exporter + state: stopped + enabled: false + failed_when: false + + - name: Remove node_exporter systemd unit file + ansible.builtin.file: + path: /etc/systemd/system/node_exporter.service + state: absent + + - name: Update the placement of node-exporter hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply node-exporter --placement='*'" + run_once: true + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + +- name: Adjust placement daemons + hosts: "{{ mon_group_name|default('mons') }}[0]" + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Update the placement of monitor hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mon --placement='{{ groups.get(mon_group_name, []) | length }} label:{{ mon_group_name }}'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: 
Set_fact mgr_placement + ansible.builtin.set_fact: + mgr_placement_count: "{{ groups.get(mgr_group_name, []) | length if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name, []) | length }}" + + - name: Set_fact mgr_placement_label + ansible.builtin.set_fact: + mgr_placement_label: "{{ mgr_group_name if groups.get(mgr_group_name, []) | length > 0 else mon_group_name }}" + + - name: Update the placement of manager hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mgr --placement='{{ mgr_placement_count }} label:{{ mgr_placement_label }}'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: With dashboard enabled + when: dashboard_enabled | bool and groups.get(monitoring_group_name, []) | length > 0 + block: + - name: Update the placement of alertmanager hosts + ceph_orch_apply: + fsid: "{{ fsid }}" + cluster: "{{ cluster }}" + spec: | + service_type: alertmanager + service_id: "{{ ansible_facts['hostname'] }}" + placement: + label: "{{ monitoring_group_name }}" + count: "{{ groups.get(monitoring_group_name, []) | length }}" + {% if grafana_server_addr is defined %} + networks: {{ grafana_server_addr.split(',') | list if ',' in grafana_server_addr else grafana_server_addr | string }} + {% endif %} + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Update the placement of grafana hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply grafana --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ 
ceph_docker_image_tag }}'
+
+        - name: Update the placement of prometheus hosts
+          ceph_orch_apply:
+            fsid: "{{ fsid }}"
+            cluster: "{{ cluster }}"
+            spec: |
+              service_type: prometheus
+              service_id: "{{ ansible_facts['hostname'] }}"
+              placement:
+                label: {{ monitoring_group_name }}
+                count: {{ groups.get(monitoring_group_name, []) | length }}
+              {% if grafana_server_addr is defined %}
+              networks: {{ grafana_server_addr.split(',') | list if ',' in grafana_server_addr else grafana_server_addr | string }}
+              {% endif %}
+              {% if prometheus_port is defined and prometheus_port != 9095 %}
+              spec:
+                port: {{ prometheus_port }}
+              {% endif %}
+          changed_when: false
+          environment:
+            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: Show ceph orchestrator status
+  hosts: "{{ mon_group_name|default('mons') }}[0]"
+  become: true
+  gather_facts: false
+  any_errors_fatal: true
+  tasks:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
+        name: ceph-defaults
+
+    - name: Show ceph orchestrator services
+      ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ls --refresh"
+      changed_when: false
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: Show ceph orchestrator daemons
+      ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ps --refresh"
+      changed_when: false
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: Inform users about cephadm
+      ansible.builtin.debug:
+        msg: |
+          This Ceph cluster is now managed by cephadm. Any new changes to the
+          cluster need to be achieved by using the cephadm CLI and you don't
+          need to use ceph-ansible playbooks anymore.
diff --git a/infrastructure-playbooks/cephadm.yml b/infrastructure-playbooks/cephadm.yml new file mode 100644 index 0000000..b08e7f2 --- /dev/null +++ b/infrastructure-playbooks/cephadm.yml @@ -0,0 +1,383 @@ +--- +- name: Gather facts and prepare system for cephadm + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + - "{{ monitoring_group_name|default('monitoring') }}" + become: true + gather_facts: false + vars: + delegate_facts_host: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Validate if monitor group doesn't exist or empty + ansible.builtin.fail: + msg: "you must add a [mons] group and add at least one node." + run_once: true + when: groups[mon_group_name] is undefined or groups[mon_group_name] | length == 0 + + - name: Validate if manager group doesn't exist or empty + ansible.builtin.fail: + msg: "you must add a [mgrs] group and add at least one node." + run_once: true + when: groups[mgr_group_name] is undefined or groups[mgr_group_name] | length == 0 + + - name: Validate dashboard configuration + when: dashboard_enabled | bool + run_once: true + block: + - name: Fail if [monitoring] group doesn't exist or empty + ansible.builtin.fail: + msg: "you must add a [monitoring] group and add at least one node." + when: groups[monitoring_group_name] is undefined or groups[monitoring_group_name] | length == 0 + + - name: Fail when dashboard_admin_password is not set + ansible.builtin.fail: + msg: "you must set dashboard_admin_password." 
+ when: dashboard_admin_password is undefined + + - name: Validate container registry credentials + ansible.builtin.fail: + msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set' + when: + - ceph_docker_registry_auth | bool + - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or + (ceph_docker_registry_username | length == 0 or ceph_docker_registry_password | length == 0) + + - name: Gather facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + when: not delegate_facts_host | bool + + - name: Gather and delegate facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] }}" + run_once: true + when: delegate_facts_host | bool + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Check if it is atomic host + ansible.builtin.stat: + path: /run/ostree-booted + register: stat_ostree + + - name: Set_fact is_atomic + ansible.builtin.set_fact: + is_atomic: "{{ stat_ostree.stat.exists }}" + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + tasks_from: registry.yml + when: ceph_docker_registry_auth | bool + + - name: Configure repository for installing cephadm + vars: + ceph_origin: repository + ceph_repository: community + block: + - name: Validate repository variables + ansible.builtin.import_role: + name: ceph-validate + tasks_from: check_repository.yml + + - name: Configure repository + ansible.builtin.import_role: + name: ceph-common + tasks_from: "configure_repository.yml" + + - name: Install cephadm requirements + ansible.builtin.package: + name: ['python3', 'lvm2'] + register: result + until: result is 
succeeded + + - name: Install cephadm + ansible.builtin.package: + name: cephadm + register: result + until: result is succeeded + + - name: Set_fact cephadm_cmd + ansible.builtin.set_fact: + cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}" + +- name: Bootstrap the cluster + hosts: "{{ mon_group_name|default('mons') }}[0]" + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: set_monitor_address.yml + + - name: Create /etc/ceph directory + ansible.builtin.file: + path: /etc/ceph + state: directory + mode: "0755" + + - name: Bootstrap the new cluster + cephadm_bootstrap: + mon_ip: "{{ _monitor_addresses[inventory_hostname] }}" + image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + docker: "{{ true if container_binary == 'docker' else false }}" + pull: false + dashboard: "{{ dashboard_enabled }}" + dashboard_user: "{{ dashboard_admin_user if dashboard_enabled | bool else omit }}" + dashboard_password: "{{ dashboard_admin_password if dashboard_enabled | bool else omit }}" + monitoring: false + firewalld: "{{ configure_firewall }}" + ssh_user: "{{ cephadm_ssh_user | default('root') }}" + ssh_config: "{{ cephadm_ssh_config | default(omit) }}" + + - name: Set default container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Set container image base in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ 
ceph_docker_registry }}/{{ ceph_docker_image }}" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Set dashboard container image in ceph mgr configuration + when: dashboard_enabled | bool + block: + - name: Set alertmanager container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Set grafana container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Set node-exporter container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Set prometheus container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + +- name: Add the other nodes + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + 
- "{{ mgr_group_name|default('mgrs') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + - "{{ monitoring_group_name|default('monitoring') }}" + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Get the cephadm ssh pub key + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key" + changed_when: false + run_once: true + register: cephadm_pubpkey + delegate_to: '{{ groups[mon_group_name][0] }}' + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Allow cephadm key + ansible.posix.authorized_key: + user: "{{ cephadm_ssh_user | default('root') }}" + key: '{{ cephadm_pubpkey.stdout }}' + + - name: Run cephadm prepare-host + ansible.builtin.command: cephadm prepare-host + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Manage nodes with cephadm - ipv4 + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + when: ip_version == 'ipv4' + + - name: Manage nodes with cephadm - ipv6 + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if 
mon_group_name | default('mons') in group_names else '' }}" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + when: ip_version == 'ipv6' + + - name: Add ceph label for core component + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph" + changed_when: false + delegate_to: '{{ groups[mon_group_name][0] }}' + when: inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + +- name: Adjust service placement + hosts: "{{ mon_group_name|default('mons') }}[0]" + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Update the placement of monitor hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Waiting for the monitor to join the quorum... 
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json" + changed_when: false + register: ceph_health_raw + until: (ceph_health_raw.stdout | from_json)["quorum_names"] | length == groups.get(mon_group_name, []) | length + retries: "{{ health_mon_check_retries }}" + delay: "{{ health_mon_check_delay }}" + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Update the placement of manager hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Update the placement of crash hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Update the placement of ceph-exporter hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply ceph-exporter --placement='label:ceph'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + +- name: Adjust monitoring service placement + hosts: "{{ monitoring_group_name|default('monitoring') }}" + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + + - name: With dashboard enabled + when: dashboard_enabled | bool + delegate_to: '{{ groups[mon_group_name][0] }}' + run_once: true + block: + - name: Enable the prometheus module + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus" + changed_when: false + 
environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Update the placement of alertmanager hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Update the placement of grafana hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Update the placement of prometheus hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Update the placement of node-exporter hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + +- name: Print information + hosts: "{{ mon_group_name|default('mons') }}[0]" + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + + - name: Show ceph orchestrator services + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh" + changed_when: false + environment: + CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + + - name: Show ceph orchestrator daemons + 
ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh"
+      changed_when: false
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: Inform users about cephadm
+      ansible.builtin.debug:
+        msg: |
+          This Ceph cluster is now ready to receive more configuration like
+          adding OSD, MDS daemons, create pools or keyring.
+          You can do this by using the cephadm CLI and you don't need to use
+          ceph-ansible playbooks anymore.
diff --git a/infrastructure-playbooks/docker-to-podman.yml b/infrastructure-playbooks/docker-to-podman.yml
new file mode 100644
index 0000000..784a244
--- /dev/null
+++ b/infrastructure-playbooks/docker-to-podman.yml
@@ -0,0 +1,236 @@
+---
+# This playbook is intended to be used as part of the el7 to el8 OS upgrade.
+# It modifies the systemd unit files so containers are launched with podman
+# instead of docker after the OS reboot once it is upgraded.
+# It is *not* intended to restart services, since we don't want multiple
+# service restarts.
+ +- name: Pre-requisite and facts gathering + hosts: + - mons + - osds + - mdss + - rgws + - nfss + - rbdmirrors + - clients + - mgrs + - monitoring + + gather_facts: false + become: true + any_errors_fatal: true + + vars: + delegate_facts_host: true + + pre_tasks: + - name: Import raw_install_python tasks + ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml" + + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + # pre-tasks for following import - + - name: Gather facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) + + - name: Gather and delegate facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] | difference(groups.get(client_group_name | default('clients'), [])) }}" + run_once: true + when: delegate_facts_host | bool + +- name: Migrate to podman + hosts: + - "{{ mon_group_name | default('mons') }}" + - "{{ osd_group_name | default('osds') }}" + - "{{ mds_group_name | default('mdss') }}" + - "{{ rgw_group_name | default('rgws') }}" + - "{{ nfs_group_name | default('nfss') }}" + - "{{ mgr_group_name | default('mgrs') }}" + - "{{ rbdmirror_group_name | default('rbdmirrors') }}" + - "{{ monitoring_group_name | default('monitoring') }}" + gather_facts: false + become: true + tasks: + - name: Set_fact docker2podman and container_binary + ansible.builtin.set_fact: + docker2podman: true + container_binary: podman + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Install podman + ansible.builtin.package: + name: podman + state: 
present + register: result + until: result is succeeded + tags: with_pkg + when: not is_atomic | bool + + - name: Check podman presence # noqa command-instead-of-shell + ansible.builtin.shell: command -v podman + register: podman_presence + changed_when: false + failed_when: false + + - name: Pulling images from docker daemon + when: podman_presence.rc == 0 + block: + - name: Pulling Ceph container image from docker daemon + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + changed_when: false + register: pull_image + until: pull_image.rc == 0 + retries: "{{ docker_pull_retry }}" + delay: 10 + when: inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) or + inventory_hostname in groups.get(nfs_group_name, []) + + - name: Pulling alertmanager/grafana/prometheus images from docker daemon + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}" + changed_when: false + register: pull_image + until: pull_image.rc == 0 + retries: "{{ docker_pull_retry }}" + delay: 10 + loop: + - "{{ alertmanager_container_image }}" + - "{{ grafana_container_image }}" + - "{{ prometheus_container_image }}" + when: + - dashboard_enabled | bool + - inventory_hostname in groups.get(monitoring_group_name, []) + + - name: Pulling node_exporter image from docker daemon + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}" + changed_when: false + register: pull_image + until: pull_image.rc == 0 + retries: "{{ docker_pull_retry }}" + delay: 10 + when: 
dashboard_enabled | bool + + - name: Import ceph-mon role + ansible.builtin.import_role: + name: ceph-mon + tasks_from: systemd.yml + when: inventory_hostname in groups.get(mon_group_name, []) + + - name: Import ceph-mds role + ansible.builtin.import_role: + name: ceph-mds + tasks_from: systemd.yml + when: inventory_hostname in groups.get(mds_group_name, []) + + - name: Import ceph-mgr role + ansible.builtin.import_role: + name: ceph-mgr + tasks_from: systemd.yml + when: inventory_hostname in groups.get(mgr_group_name, []) + + - name: Import ceph-nfs role + ansible.builtin.import_role: + name: ceph-nfs + tasks_from: systemd.yml + when: inventory_hostname in groups.get(nfs_group_name, []) + + - name: Import ceph-osd role + ansible.builtin.import_role: + name: ceph-osd + tasks_from: systemd.yml + when: inventory_hostname in groups.get(osd_group_name, []) + + - name: Import ceph-rbd-mirror role + ansible.builtin.import_role: + name: ceph-rbd-mirror + tasks_from: systemd.yml + when: inventory_hostname in groups.get(rbdmirror_group_name, []) + + - name: Import ceph-rgw role + ansible.builtin.import_role: + name: ceph-rgw + tasks_from: systemd.yml + when: inventory_hostname in groups.get(rgw_group_name, []) + + - name: Import ceph-crash role + ansible.builtin.import_role: + name: ceph-crash + tasks_from: systemd.yml + when: inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) + + - name: Import ceph-exporter role + ansible.builtin.import_role: + name: ceph-exporter + tasks_from: systemd.yml + when: inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + 
inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) + + - name: Dashboard configuration + when: dashboard_enabled | bool + block: + - name: Import ceph-node-exporter role + ansible.builtin.import_role: + name: ceph-node-exporter + tasks_from: systemd.yml + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: grafana.yml + when: inventory_hostname in groups.get(monitoring_group_name, []) + + - name: Import ceph-grafana role + ansible.builtin.import_role: + name: ceph-grafana + tasks_from: systemd.yml + when: inventory_hostname in groups.get(monitoring_group_name, []) + + - name: Import ceph-prometheus role + ansible.builtin.import_role: + name: ceph-prometheus + tasks_from: systemd.yml + when: inventory_hostname in groups.get(monitoring_group_name, []) + + - name: Reload systemd daemon + ansible.builtin.systemd: + daemon_reload: true diff --git a/infrastructure-playbooks/gather-ceph-logs.yml b/infrastructure-playbooks/gather-ceph-logs.yml new file mode 100644 index 0000000..ede64e5 --- /dev/null +++ b/infrastructure-playbooks/gather-ceph-logs.yml @@ -0,0 +1,39 @@ +--- +- name: Gather ceph logs + hosts: + - mons + - osds + - mdss + - rgws + - nfss + - rbdmirrors + - clients + - mgrs + + gather_facts: false + become: true + + tasks: + - name: Create a temp directory + ansible.builtin.tempfile: + state: directory + prefix: ceph_ansible + run_once: true + register: localtempfile + become: false + delegate_to: localhost + + - name: Set_fact lookup_ceph_config - lookup keys, conf and logs + ansible.builtin.find: + paths: + - /etc/ceph + - /var/log/ceph + register: ceph_collect + + - name: Collect ceph logs, config and keys on the machine running ansible + ansible.builtin.fetch: + src: "{{ item.path }}" + dest: "{{ localtempfile.path }}" + fail_on_missing: false + flat: false + with_items: "{{ 
ceph_collect.files }}" diff --git a/infrastructure-playbooks/lv-create.yml b/infrastructure-playbooks/lv-create.yml new file mode 100644 index 0000000..f504bc4 --- /dev/null +++ b/infrastructure-playbooks/lv-create.yml @@ -0,0 +1,100 @@ +--- +- name: Creates logical volumes for the bucket index or fs journals on a single device. + become: true + hosts: osds + + vars: + logfile: | + Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml" + ----------------------------------------------------------------------------------------------------------- + {% for lv in nvme_device_lvs %} + - data: {{ lv.lv_name }} + data_vg: {{ nvme_vg_name }} + journal: {{ lv.journal_name }} + journal_vg: {{ nvme_vg_name }} + {% endfor %} + {% for hdd in hdd_devices %} + - data: {{ hdd_lv_prefix }}-{{ hdd.split('/')[-1] }} + data_vg: {{ hdd_vg_prefix }}-{{ hdd.split('/')[-1] }} + journal: {{ hdd_journal_prefix }}-{{ hdd.split('/')[-1] }} + journal_vg: {{ nvme_vg_name }} + {% endfor %} + + tasks: + + - name: Include vars of lv_vars.yaml + ansible.builtin.include_vars: + file: lv_vars.yaml # noqa missing-import + failed_when: false + + # ensure nvme_device is set + - name: Fail if nvme_device is not defined + ansible.builtin.fail: + msg: "nvme_device has not been set by the user" + when: nvme_device is undefined or nvme_device == 'dummy' + + # need to check if lvm2 is installed + - name: Install lvm2 + ansible.builtin.package: + name: lvm2 + state: present + register: result + until: result is succeeded + + # Make entire nvme device a VG + - name: Add nvme device as lvm pv + community.general.lvg: + force: true + pvs: "{{ nvme_device }}" + pesize: 4 + state: present + vg: "{{ nvme_vg_name }}" + + - name: Create lvs for fs journals for the bucket index on the nvme device + community.general.lvol: + lv: "{{ item.journal_name }}" + vg: "{{ nvme_vg_name }}" + size: "{{ journal_size }}" + pvs: "{{ nvme_device }}" + with_items: "{{ nvme_device_lvs }}" + + - name: Create lvs for fs 
journals for hdd devices + community.general.lvol: + lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}" + vg: "{{ nvme_vg_name }}" + size: "{{ journal_size }}" + with_items: "{{ hdd_devices }}" + + - name: Create the lv for data portion of the bucket index on the nvme device + community.general.lvol: + lv: "{{ item.lv_name }}" + vg: "{{ nvme_vg_name }}" + size: "{{ item.size }}" + pvs: "{{ nvme_device }}" + with_items: "{{ nvme_device_lvs }}" + + # Make sure all hdd devices have a unique volume group + - name: Create vgs for all hdd devices + community.general.lvg: + force: true + pvs: "{{ item }}" + pesize: 4 + state: present + vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" + with_items: "{{ hdd_devices }}" + + - name: Create lvs for the data portion on hdd devices + community.general.lvol: + lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}" + vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" + size: "{{ hdd_lv_size }}" + pvs: "{{ item }}" + with_items: "{{ hdd_devices }}" + + - name: Write output for osds.yml + become: false + ansible.builtin.copy: + content: "{{ logfile }}" + dest: "{{ logfile_path }}" + mode: preserve + delegate_to: localhost diff --git a/infrastructure-playbooks/lv-teardown.yml b/infrastructure-playbooks/lv-teardown.yml new file mode 100644 index 0000000..b344f4f --- /dev/null +++ b/infrastructure-playbooks/lv-teardown.yml @@ -0,0 +1,109 @@ +--- +- name: Tear down existing osd filesystems then logical volumes, volume groups, and physical volumes + become: true + hosts: osds + + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to tear down the logical volumes? + default: 'no' + private: false + + tasks: + - name: Exit playbook, if user did not mean to tear down logical volumes + ansible.builtin.fail: + msg: > + "Exiting lv-teardown playbook, logical volumes were NOT torn down. 
+ To tear down the logical volumes, either say 'yes' on the prompt or + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + - name: Include vars of lv_vars.yaml + ansible.builtin.include_vars: + file: lv_vars.yaml # noqa missing-import + failed_when: false + + # need to check if lvm2 is installed + - name: Install lvm2 + ansible.builtin.package: + name: lvm2 + state: present + register: result + until: result is succeeded + +# BEGIN TEARDOWN + - name: Find any existing osd filesystems + ansible.builtin.shell: | + set -o pipefail; + grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}' + register: old_osd_filesystems + changed_when: false + + - name: Tear down any existing osd filesystem + ansible.posix.mount: + path: "{{ item }}" + state: unmounted + with_items: "{{ old_osd_filesystems.stdout_lines }}" + + - name: Kill all lvm commands that may have been hung + ansible.builtin.command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n" + failed_when: false + changed_when: false + + ## Logcal Vols + - name: Tear down existing lv for bucket index + community.general.lvol: + lv: "{{ item.lv_name }}" + vg: "{{ nvme_vg_name }}" + state: absent + force: true + with_items: "{{ nvme_device_lvs }}" + + - name: Tear down any existing hdd data lvs + community.general.lvol: + lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}" + vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" + state: absent + force: true + with_items: "{{ hdd_devices }}" + + - name: Tear down any existing lv of journal for bucket index + community.general.lvol: + lv: "{{ item.journal_name }}" + vg: "{{ nvme_vg_name }}" + state: absent + force: true + with_items: "{{ nvme_device_lvs }}" + + - name: Tear down any existing lvs of hdd journals + community.general.lvol: + lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}" + vg: "{{ nvme_vg_name }}" + state: absent + force: true + with_items: "{{ hdd_devices }}" + + ## Volume Groups 
+ - name: Remove vg on nvme device + community.general.lvg: + vg: "{{ nvme_vg_name }}" + state: absent + force: true + + - name: Remove vg for each hdd device + community.general.lvg: + vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" + state: absent + force: true + with_items: "{{ hdd_devices }}" + + ## Physical Vols + - name: Tear down pv for nvme device + ansible.builtin.command: "pvremove --force --yes {{ nvme_device }}" + changed_when: false + + - name: Tear down pv for each hdd device + ansible.builtin.command: "pvremove --force --yes {{ item }}" + changed_when: false + with_items: "{{ hdd_devices }}" diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml new file mode 100644 index 0000000..d1a4114 --- /dev/null +++ b/infrastructure-playbooks/purge-cluster.yml @@ -0,0 +1,1175 @@ +--- +# This playbook purges Ceph +# It removes: packages, configuration files and ALL THE DATA +# +# Use it like this: +# ansible-playbook purge-cluster.yml +# Prompts for confirmation to purge, defaults to no and +# doesn't purge the cluster. yes purges the cluster. +# +# ansible-playbook -e ireallymeanit=yes|no purge-cluster.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. + +- name: Confirm whether user really meant to purge the cluster + hosts: localhost + gather_facts: false + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to purge the cluster? + default: 'no' + private: false + tasks: + - name: Exit playbook, if user did not mean to purge cluster + ansible.builtin.fail: + msg: > + "Exiting purge-cluster playbook, cluster was NOT purged. 
+ To purge the cluster, either say 'yes' on the prompt or + use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + +- name: Gather facts on all hosts + hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - nfss + - clients + - mgrs + - monitoring + become: true + tasks: + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" + + +- name: Check there's no ceph kernel threads present + hosts: clients + become: true + gather_facts: false + any_errors_fatal: true + tasks: + - name: Import ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + + - name: Nfs related tasks + when: groups[nfs_group_name] | default([]) | length > 0 + block: + - name: Get nfs nodes ansible facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups[nfs_group_name] }}" + run_once: true + + - name: Get all nfs-ganesha mount points + ansible.builtin.command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts + register: nfs_ganesha_mount_points + failed_when: false + changed_when: false + with_items: "{{ groups[nfs_group_name] }}" + + - name: Ensure nfs-ganesha mountpoint(s) are unmounted + ansible.posix.mount: + path: "{{ item.split(' ')[1] }}" + state: unmounted + with_items: + - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}" + when: item | length > 0 + + - name: Ensure cephfs mountpoint(s) are unmounted + ansible.builtin.command: umount -a -t ceph + changed_when: false + + - name: Find mapped rbd ids + ansible.builtin.find: + paths: /sys/bus/rbd/devices + file_type: any + register: rbd_mapped_ids + + - name: Use sysfs to unmap rbd devices + ansible.builtin.shell: "echo {{ item.path | basename }} >
/sys/bus/rbd/remove_single_major" + changed_when: false + with_items: "{{ rbd_mapped_ids.files }}" + + - name: Unload ceph kernel modules + community.general.modprobe: + name: "{{ item }}" + state: absent + with_items: + - rbd + - ceph + - libceph + + +- name: Purge ceph nfs cluster + hosts: nfss + gather_facts: false # Already gathered previously + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Stop ceph nfss with systemd + ansible.builtin.service: + name: "{{ 'ceph-nfs@' + ansible_facts['hostname'] if containerized_deployment | bool else 'nfs-ganesha' }}" + state: stopped + failed_when: false + + - name: Remove ceph nfs directories for "{{ ansible_facts['hostname'] }}" + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /etc/ganesha + - /var/lib/nfs/ganesha + - /var/run/ganesha + - /etc/systemd/system/ceph-nfs@.service + + +- name: Purge node-exporter + hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - nfss + - clients + - mgrs + - monitoring + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Dashboard related tasks + when: dashboard_enabled | bool + block: + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Disable node_exporter service + ansible.builtin.service: + name: node_exporter + state: stopped + enabled: false + failed_when: false + + - name: Remove node_exporter service file + ansible.builtin.file: + name: /etc/systemd/system/node_exporter.service + state: absent + + - name: Remove node-exporter image + ansible.builtin.command: "{{ container_binary }} rmi {{ node_exporter_container_image }}" + failed_when: false + changed_when: false + tags: + - remove_img + + +- name: Purge ceph monitoring + hosts: monitoring + become: true + vars: + grafana_services: + - grafana-server + - prometheus + - 
alertmanager + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Dashboard related tasks + when: dashboard_enabled | bool + block: + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Stop services + ansible.builtin.service: + name: "{{ item }}" + state: stopped + enabled: false + with_items: "{{ grafana_services }}" + failed_when: false + + - name: Remove service files + ansible.builtin.file: + name: "/etc/systemd/system/{{ item }}.service" + state: absent + with_items: "{{ grafana_services }}" + failed_when: false + + - name: Remove ceph dashboard container images + ansible.builtin.command: "{{ container_binary }} rmi {{ item }}" + with_items: + - "{{ prometheus_container_image }}" + - "{{ grafana_container_image }}" + - "{{ alertmanager_container_image }}" + failed_when: false + changed_when: false + tags: + - remove_img + + - name: Remove data + ansible.builtin.file: + name: "{{ item }}" + state: absent + with_items: + - /etc/grafana/dashboards + - /etc/grafana/grafana.ini + - /etc/grafana/provisioning + - /var/lib/grafana + - /etc/alertmanager + - /var/lib/alertmanager + - /var/lib/prometheus + - /etc/prometheus + failed_when: false + + +- name: Purge ceph mds cluster + hosts: mdss + gather_facts: false # Already gathered previously + become: true + tasks: + - name: Stop ceph mdss with systemd + ansible.builtin.service: + name: ceph-mds@{{ ansible_facts['hostname'] }} + state: stopped + enabled: false + failed_when: false + + - name: Remove ceph mds service + ansible.builtin.file: + path: /etc/systemd/system/ceph-mds{{ item }} + state: absent + loop: + - '@.service' + - '.target' + + +- name: Purge ceph mgr cluster + hosts: mgrs + gather_facts: false # Already gathered previously + become: true + tasks: + - name: Stop ceph mgrs with systemd + ansible.builtin.service: + name: ceph-mgr@{{ ansible_facts['hostname'] }} + state: stopped 
+ enabled: false + failed_when: false + when: ansible_facts['service_mgr'] == 'systemd' + + - name: Remove ceph mgr service + ansible.builtin.file: + path: /etc/systemd/system/ceph-mgr{{ item }} + state: absent + loop: + - '@.service' + - '.target' + +- name: Purge rgwloadbalancer cluster + hosts: rgwloadbalancers + gather_facts: false # Already gathered previously + become: true + tasks: + - name: Stop rgwloadbalancer services + ansible.builtin.service: + name: ['keepalived', 'haproxy'] + state: stopped + enabled: false + failed_when: false + + +- name: Purge ceph rgw cluster + hosts: rgws + gather_facts: false # Already gathered previously + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: set_radosgw_address + + - name: Stop ceph rgws with systemd + ansible.builtin.service: + name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" + state: stopped + enabled: false + failed_when: false + with_items: "{{ rgw_instances }}" + + - name: Remove ceph rgw service + ansible.builtin.file: + path: /etc/systemd/system/ceph-radosgw{{ item }} + state: absent + loop: + - '@.service' + - '.target' + + +- name: Purge ceph rbd-mirror cluster + hosts: rbdmirrors + gather_facts: false # Already gathered previously + become: true + tasks: + - name: Stop ceph rbd mirror with systemd + ansible.builtin.service: + name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" + state: stopped + enabled: false + failed_when: false + + - name: Remove ceph rbd-mirror service + ansible.builtin.file: + path: /etc/systemd/system/ceph-rbd-mirror{{ item }} + state: absent + loop: + - '@.service' + - '.target' + + +- name: Purge ceph osd cluster + vars: + reboot_osd_node: false + hosts: osds + gather_facts: false # Already gathered previously + become: true + handlers: + - name: Restart 
machine # noqa: ignore-errors + ansible.builtin.shell: sleep 2 && shutdown -r now "Ansible updates triggered" + async: 1 + poll: 0 + ignore_errors: true + changed_when: false + + - name: Wait for server to boot + become: false + ansible.builtin.wait_for: + port: 22 + host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}" + state: started + delay: 10 + timeout: 500 + delegate_to: localhost + + - name: Remove data + ansible.builtin.shell: rm -rf /var/lib/ceph/* # noqa no-free-form + changed_when: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Default lvm_volumes if not defined + ansible.builtin.set_fact: + lvm_volumes: [] + when: lvm_volumes is not defined + + - name: Get osd numbers + ansible.builtin.shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa risky-shell-pipe + register: osd_ids + changed_when: false + + - name: Stop ceph-osd + ansible.builtin.service: + name: ceph-osd@{{ item }} + state: stopped + enabled: false + with_items: "{{ osd_ids.stdout_lines }}" + + - name: Remove ceph udev rules + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /usr/lib/udev/rules.d/95-ceph-osd.rules + - /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules + when: not containerized_deployment | bool + + # NOTE(leseb): hope someone will find a more elegant way one day... + - name: See if encrypted partitions are present + ansible.builtin.shell: blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." 
| grep -o PARTUUID.* | cut -d '"' -f 2 # noqa risky-shell-pipe + register: encrypted_ceph_partuuid + changed_when: false + + - name: Get osd data and lockbox mount points + ansible.builtin.shell: (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }' # noqa risky-shell-pipe + register: mounted_osd + changed_when: false + + - name: Drop all cache + ansible.builtin.shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches" + changed_when: false + + - name: See if ceph-volume is installed # noqa command-instead-of-shell + ansible.builtin.shell: command -v ceph-volume + changed_when: false + failed_when: false + register: ceph_volume_present + when: not containerized_deployment | bool + + - name: Zap and destroy osds by osd ids + ceph_volume: + osd_id: "{{ item | int }}" + action: "zap" + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ osd_ids.stdout_lines }}" + when: + - osd_auto_discovery | default(False) | bool + - (containerized_deployment | bool or ceph_volume_present.rc == 0) + + - name: Umount osd data partition + ansible.posix.mount: + path: "{{ item }}" + state: unmounted + with_items: "{{ mounted_osd.stdout_lines }}" + + - name: Remove osd mountpoint tree + ansible.builtin.file: + path: /var/lib/ceph/osd/ + state: absent + register: remove_osd_mountpoints + ignore_errors: true + + - name: Is reboot needed + ansible.builtin.command: echo requesting reboot + delegate_to: localhost + become: false + notify: + - Restart machine + - Wait for server to boot + - Remove data + changed_when: false + when: + - reboot_osd_node | bool + - remove_osd_mountpoints.failed is defined + + - name: Wipe table on dm-crypt devices + ansible.builtin.command: dmsetup wipe_table --force "{{ item }}" + with_items: "{{ 
encrypted_ceph_partuuid.stdout_lines }}" + changed_when: false + when: encrypted_ceph_partuuid.stdout_lines | length > 0 + + - name: Delete dm-crypt devices if any + ansible.builtin.command: dmsetup remove --retry --force {{ item }} + with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}" + changed_when: false + when: encrypted_ceph_partuuid.stdout_lines | length > 0 + + - name: Get payload_offset + ansible.builtin.shell: cryptsetup luksDump /dev/disk/by-partuuid/{{ item }} | awk '/Payload offset:/ { print $3 }' # noqa risky-shell-pipe + register: payload_offset + with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}" + changed_when: false + when: encrypted_ceph_partuuid.stdout_lines | length > 0 + + - name: Get physical sector size + ansible.builtin.command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }} + changed_when: false + with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}" + when: encrypted_ceph_partuuid.stdout_lines | length > 0 + register: phys_sector_size + + - name: Wipe dmcrypt device + ansible.builtin.command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct + changed_when: false + with_together: + - "{{ encrypted_ceph_partuuid.stdout_lines }}" + - "{{ payload_offset.results }}" + - "{{ phys_sector_size.results }}" + + - name: Get ceph data partitions + ansible.builtin.shell: | + blkid -o device -t PARTLABEL="ceph data" + changed_when: false + failed_when: false + register: ceph_data_partition_to_erase_path + + - name: Get ceph lockbox partitions + ansible.builtin.shell: | + blkid -o device -t PARTLABEL="ceph lockbox" + changed_when: false + failed_when: false + register: ceph_lockbox_partition_to_erase_path + + - name: See if ceph-volume is installed # noqa: command-instead-of-shell + ansible.builtin.shell: command -v ceph-volume + changed_when: false + failed_when: false + register: ceph_volume_present + when: not containerized_deployment | bool + + - name: Zap and 
destroy osds created by ceph-volume with lvm_volumes + ceph_volume: + data: "{{ item.data }}" + data_vg: "{{ item.data_vg | default(omit) }}" + journal: "{{ item.journal | default(omit) }}" + journal_vg: "{{ item.journal_vg | default(omit) }}" + db: "{{ item.db | default(omit) }}" + db_vg: "{{ item.db_vg | default(omit) }}" + wal: "{{ item.wal | default(omit) }}" + wal_vg: "{{ item.wal_vg | default(omit) }}" + action: "zap" + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ lvm_volumes | default([]) }}" + when: + - containerized_deployment | bool + or ceph_volume_present.rc == 0 + + - name: Zap and destroy osds created by ceph-volume with devices + ceph_volume: + data: "{{ item }}" + action: "zap" + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: + - "{{ devices | default([]) }}" + - "{{ dedicated_devices | default([]) }}" + - "{{ bluestore_wal_devices | default([]) }}" + when: + - containerized_deployment | bool + or ceph_volume_present.rc == 0 + + - name: Get ceph block partitions + ansible.builtin.shell: | + blkid -o device -t PARTLABEL="ceph block" + changed_when: false + failed_when: false + register: ceph_block_partition_to_erase_path + + - name: Get ceph journal partitions + ansible.builtin.shell: | + blkid -o device -t PARTLABEL="ceph journal" + changed_when: false + failed_when: false + register: ceph_journal_partition_to_erase_path + + - name: Get ceph db partitions + ansible.builtin.shell: | + blkid -o device -t PARTLABEL="ceph block.db" + changed_when: false + failed_when: false + register: 
ceph_db_partition_to_erase_path + + - name: Get ceph wal partitions + ansible.builtin.shell: | + blkid -o device -t PARTLABEL="ceph block.wal" + changed_when: false + failed_when: false + register: ceph_wal_partition_to_erase_path + + - name: Set_fact combined_devices_list + ansible.builtin.set_fact: + combined_devices_list: "{{ ceph_data_partition_to_erase_path.stdout_lines + + ceph_lockbox_partition_to_erase_path.stdout_lines + + ceph_block_partition_to_erase_path.stdout_lines + + ceph_journal_partition_to_erase_path.stdout_lines + + ceph_db_partition_to_erase_path.stdout_lines + + ceph_wal_partition_to_erase_path.stdout_lines }}" + + - name: Resolve parent device + ansible.builtin.command: lsblk --nodeps -no pkname "{{ item }}" + register: tmp_resolved_parent_device + changed_when: false + with_items: "{{ combined_devices_list }}" + + - name: Set_fact resolved_parent_device + ansible.builtin.set_fact: + resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}" + + - name: Wipe partitions + ansible.builtin.shell: | + wipefs --all "{{ item }}" + dd if=/dev/zero of="{{ item }}" bs=1 count=4096 + changed_when: false + with_items: "{{ combined_devices_list }}" + + - name: Check parent device partition + community.general.parted: + device: "/dev/{{ item }}" + loop: "{{ resolved_parent_device }}" + register: parted_info + + - name: Fail if there is a boot partition on the device + ansible.builtin.fail: + msg: "{{ item.item }} has a boot partition" + loop: "{{ parted_info.results }}" + when: "'boot' in (item.partitions | map(attribute='flags') | list | flatten)" + + - name: Zap ceph journal/block db/block wal partitions # noqa risky-shell-pipe + ansible.builtin.shell: | + sgdisk -Z --clear --mbrtogpt -g -- /dev/"{{ item }}" + dd if=/dev/zero of=/dev/"{{ item }}" bs=1M count=200 + parted -s /dev/"{{ item }}" mklabel gpt + partprobe /dev/"{{ item }}" + udevadm settle --timeout=600 + with_items: "{{ resolved_parent_device 
}}" + changed_when: false + + - name: Remove ceph osd service + ansible.builtin.file: + path: /etc/systemd/system/ceph-osd{{ item }} + state: absent + loop: + - '@.service' + - '.target' + +- name: Purge ceph mon cluster + hosts: mons + gather_facts: false # already gathered previously + become: true + tasks: + - name: Stop ceph mons with systemd + ansible.builtin.service: + name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}" + state: stopped + enabled: false + failed_when: false + with_items: + - mon + - mgr + + - name: Remove monitor store and bootstrap keys + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /var/lib/ceph/mon + - /var/lib/ceph/bootstrap-mds + - /var/lib/ceph/bootstrap-osd + - /var/lib/ceph/bootstrap-rgw + - /var/lib/ceph/bootstrap-rbd + - /var/lib/ceph/bootstrap-mgr + - /var/lib/ceph/tmp + + - name: Remove ceph mon and mgr service + ansible.builtin.file: + path: "/etc/systemd/system/ceph-{{ item.0 }}{{ item.1 }}" + state: absent + loop: "{{ ['mon', 'mgr'] | product(['@.service', '.target']) | list }}" + + +- name: Purge ceph-crash daemons + hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - mgrs + gather_facts: false + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Stop ceph-crash service + ansible.builtin.service: + name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" + state: stopped + enabled: false + failed_when: false + + - name: Systemctl reset-failed ceph-crash # noqa command-instead-of-module + ansible.builtin.command: "systemctl reset-failed {{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" + changed_when: false + failed_when: false + + - name: Remove service file + ansible.builtin.file: + name: "/etc/systemd/system/ceph-crash{{ '@' if containerized_deployment | bool else '' }}.service" + state: 
absent + failed_when: false + + - name: Remove /var/lib/ceph/crash + ansible.builtin.file: + path: /var/lib/ceph/crash + state: absent + + +- name: Purge ceph-exporter daemons + hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - mgrs + gather_facts: false + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Stop ceph-exporter service + ansible.builtin.service: + name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}" + state: stopped + enabled: false + failed_when: false + + - name: Systemctl reset-failed ceph-exporter # noqa command-instead-of-module + ansible.builtin.command: "systemctl reset-failed {{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}" + changed_when: false + failed_when: false + + - name: Remove service file + ansible.builtin.file: + name: "/etc/systemd/system/ceph-exporter{{ '@' if containerized_deployment | bool else '' }}.service" + state: absent + failed_when: false + + +- name: Check container hosts + hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - nfss + - mgrs + become: true + tasks: + - name: Containerized_deployment only + when: containerized_deployment | bool + block: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Remove stopped/exited containers + ansible.builtin.command: > + {{ container_binary }} container prune -f + changed_when: false + + - name: Show container list on all the nodes (should be empty) + ansible.builtin.command: > + {{ container_binary }} ps --filter='name=ceph' -a -q + register: containers_list + changed_when: false + + - name: Show container images on all the nodes (should be empty if tags was passed remove_img) + 
ansible.builtin.command: > + {{ container_binary }} images + register: images_list + changed_when: false + + - name: Fail if container are still present + ansible.builtin.fail: + msg: "It looks like container are still present." + when: containers_list.stdout_lines | length > 0 + + +- name: Final cleanup - check any running ceph, purge ceph packages, purge config and remove data + vars: + # When set to true both groups of packages are purged. + # This can cause problem with qemu-kvm + purge_all_packages: true + ceph_packages: + - ceph + - ceph-base + - ceph-common + - ceph-fuse + - ceph-mds + - ceph-mgr + - ceph-mgr-modules-core + - ceph-mon + - ceph-osd + - ceph-release + - ceph-radosgw + - ceph-grafana-dashboards + - rbd-mirror + ceph_remaining_packages: + - libcephfs2 + - librados2 + - libradosstriper1 + - librbd1 + - librgw2 + - python3-ceph-argparse + - python3-ceph-common + - python3-cephfs + - python3-rados + - python3-rbd + - python3-rgw + extra_packages: + - keepalived + - haproxy + hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - nfss + - clients + - mgrs + - monitoring + gather_facts: false # Already gathered previously + become: true + handlers: + - name: Get osd data and lockbox mount points + ansible.builtin.shell: "set -o pipefail && (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'" + register: mounted_osd + changed_when: false + listen: "Remove data" + + - name: Umount osd data partition + ansible.posix.mount: + path: "{{ item }}" + state: unmounted + with_items: "{{ mounted_osd.stdout_lines }}" + listen: "Remove data" + + - name: Remove data + ansible.builtin.shell: rm -rf /var/lib/ceph/* # noqa no-free-form + changed_when: false + listen: "Remove data" + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Non containerized related tasks + when: not containerized_deployment | bool + block: + - name: Purge ceph packages with yum + ansible.builtin.yum: + name: 
"{{ ceph_packages }}" + state: absent + when: ansible_facts['pkg_mgr'] == 'yum' + + - name: Purge ceph packages with dnf + ansible.builtin.dnf: + name: "{{ ceph_packages }}" + state: absent + when: ansible_facts['pkg_mgr'] == 'dnf' + + - name: Purge ceph packages with apt + ansible.builtin.apt: + name: "{{ ceph_packages }}" + state: absent + purge: true + when: ansible_facts['pkg_mgr'] == 'apt' + + - name: Purge remaining ceph packages with yum + ansible.builtin.yum: + name: "{{ ceph_remaining_packages }}" + state: absent + when: + - ansible_facts['pkg_mgr'] == 'yum' + - purge_all_packages | bool + + - name: Purge remaining ceph packages with dnf + ansible.builtin.dnf: + name: "{{ ceph_remaining_packages }}" + state: absent + when: + - ansible_facts['pkg_mgr'] == 'dnf' + - purge_all_packages | bool + + - name: Purge remaining ceph packages with apt + ansible.builtin.apt: + name: "{{ ceph_remaining_packages }}" + state: absent + when: + - ansible_facts['pkg_mgr'] == 'apt' + - purge_all_packages | bool + + - name: Purge extra packages with yum + ansible.builtin.yum: + name: "{{ extra_packages }}" + state: absent + when: + - ansible_facts['pkg_mgr'] == 'yum' + - purge_all_packages | bool + + - name: Purge extra packages with dnf + ansible.builtin.dnf: + name: "{{ extra_packages }}" + state: absent + when: + - ansible_facts['pkg_mgr'] == 'dnf' + - purge_all_packages | bool + + - name: Purge extra packages with apt + ansible.builtin.apt: + name: "{{ extra_packages }}" + state: absent + when: + - ansible_facts['pkg_mgr'] == 'apt' + - purge_all_packages | bool + + - name: Remove config and any ceph socket left + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /etc/ceph + - /etc/keepalived + - /etc/haproxy + - /run/ceph + + - name: Remove logs + ansible.builtin.file: + path: /var/log/ceph + state: absent + + - name: Request data removal + ansible.builtin.command: echo requesting data removal # noqa no-changed-when + become: false + delegate_to: 
localhost + notify: Remove data + + - name: Purge dnf cache + ansible.builtin.command: dnf clean all + changed_when: false + when: ansible_facts['pkg_mgr'] == 'dnf' + + - name: Clean apt + ansible.builtin.command: apt-get clean # noqa command-instead-of-module + changed_when: false + when: ansible_facts['pkg_mgr'] == 'apt' + + - name: Purge ceph repo file in /etc/yum.repos.d + ansible.builtin.file: + path: '/etc/yum.repos.d/{{ item }}.repo' + state: absent + with_items: + - ceph-dev + - ceph_stable + when: ansible_facts['os_family'] == 'RedHat' + + - name: Check for anything running ceph + ansible.builtin.command: "ps -u ceph -U ceph" + register: check_for_running_ceph + changed_when: false + failed_when: check_for_running_ceph.rc == 0 + + - name: Find ceph systemd unit files to remove + ansible.builtin.find: + paths: "/etc/systemd/system" + pattern: "ceph*" + recurse: true + file_type: any + register: systemd_files + + - name: Remove ceph systemd unit files + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + with_items: "{{ systemd_files.files }}" + when: ansible_facts['service_mgr'] == 'systemd' + + - name: Containerized related tasks + when: containerized_deployment | bool + block: + - name: Check if it is Atomic host + ansible.builtin.stat: + path: /run/ostree-booted + register: stat_ostree + + - name: Set fact for using Atomic host + ansible.builtin.set_fact: + is_atomic: "{{ stat_ostree.stat.exists }}" + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Remove ceph container image + ansible.builtin.command: "{{ container_binary }} rmi {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + changed_when: false + when: + - inventory_hostname not in groups.get(client_group_name, []) + or inventory_hostname == groups.get(client_group_name, []) | first + tags: + - remove_img + + - name: Stop docker service # noqa: ignore-errors + 
ansible.builtin.service: + name: docker + state: stopped + enabled: false + when: + - not is_atomic + - container_binary == 'docker' + ignore_errors: true + tags: + - remove_docker + + - name: Remove docker on debian/ubuntu + ansible.builtin.apt: + name: ['docker-ce', 'docker-engine', 'docker.io', 'python-docker', 'python3-docker'] + state: absent + update_cache: true + autoremove: true + when: ansible_facts['os_family'] == 'Debian' + tags: + - remove_docker + + - name: Red hat based systems tasks + when: + ansible_facts['os_family'] == 'RedHat' and + not is_atomic + tags: + - remove_docker + block: + - name: Yum related tasks on red hat + when: ansible_facts['pkg_mgr'] == "yum" + block: + - name: Remove packages on redhat + ansible.builtin.yum: + name: ['epel-release', 'docker', 'python-docker-py'] + state: absent + + - name: Remove package dependencies on redhat + ansible.builtin.command: yum -y autoremove # noqa: command-instead-of-module + changed_when: false + + - name: Remove package dependencies on redhat again + ansible.builtin.command: yum -y autoremove # noqa: command-instead-of-module + changed_when: false + + - name: Dnf related tasks on red hat + when: ansible_facts['pkg_mgr'] == "dnf" + block: + - name: Remove docker on redhat + ansible.builtin.dnf: + name: ['docker', 'python3-docker'] + state: absent + + - name: Remove package dependencies on redhat + ansible.builtin.command: dnf -y autoremove + changed_when: false + + - name: Remove package dependencies on redhat again + ansible.builtin.command: dnf -y autoremove + changed_when: false + + - name: Find any service-cid file left + ansible.builtin.find: + paths: /run + patterns: + - "ceph-*.service-cid" + - "node_exporter.service-cid" + - "prometheus.service-cid" + - "grafana-server.service-cid" + - "alertmanager.service-cid" + register: service_cid_files + + - name: Rm any service-cid file + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + with_items: "{{ service_cid_files.files }}" + 
+ +- name: Purge ceph directories + hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - nfss + - mgrs + - clients + gather_facts: false # Already gathered previously + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Purge ceph directories - containerized deployments + when: containerized_deployment | bool + block: + - name: Purge ceph directories and ceph socket + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /etc/ceph + - /var/log/ceph + - /run/ceph + - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh" + + - name: Remove ceph data + ansible.builtin.shell: rm -rf /var/lib/ceph/* # noqa: no-free-form + changed_when: false + + - name: Remove /var/lib/ceph + ansible.builtin.file: + path: /var/lib/ceph + state: absent + + # (todo): remove this when we are able to manage docker + # service on atomic host. + - name: Remove docker data + ansible.builtin.shell: rm -rf /var/lib/docker/* # noqa: no-free-form + changed_when: false + when: not is_atomic | bool + tags: + - remove_docker + + +- name: Purge fetch directory + hosts: localhost + gather_facts: false + tasks: + - name: Set fetch_directory value if not set + ansible.builtin.set_fact: + fetch_directory: "fetch/" + when: fetch_directory is not defined + + - name: Purge fetch directory for localhost + ansible.builtin.file: + path: "{{ fetch_directory | default('fetch/') }}" + state: absent diff --git a/infrastructure-playbooks/purge-container-cluster.yml b/infrastructure-playbooks/purge-container-cluster.yml new file mode 120000 index 0000000..b588c7b --- /dev/null +++ b/infrastructure-playbooks/purge-container-cluster.yml @@ -0,0 +1 @@ +purge-cluster.yml \ No newline at end of file diff --git a/infrastructure-playbooks/purge-dashboard.yml b/infrastructure-playbooks/purge-dashboard.yml new file mode 100644 index 0000000..7c9ae39 --- /dev/null +++ 
b/infrastructure-playbooks/purge-dashboard.yml @@ -0,0 +1,222 @@ +--- +# This playbook purges the Ceph MGR Dashboard and Monitoring +# (alertmanager/prometheus/grafana/node-exporter) stack. +# It removes: packages, configuration files and ALL THE DATA +# +# Use it like this: +# ansible-playbook purge-dashboard.yml +# Prompts for confirmation to purge, defaults to no and +# doesn't purge anything. yes purges the dashboard and +# monitoring stack. +# +# ansible-playbook -e ireallymeanit=yes|no purge-dashboard.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. + +- name: Confirm whether user really meant to purge the dashboard + hosts: localhost + gather_facts: false + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to purge the dashboard? + default: 'no' + private: false + tasks: + - name: Exit playbook, if user did not mean to purge dashboard + ansible.builtin.fail: + msg: > + "Exiting purge-dashboard playbook, dashboard was NOT purged. 
+ To purge the dashboard, either say 'yes' on the prompt or + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + - name: Import_role ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + +- name: Gather facts on all hosts + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + - "{{ client_group_name|default('clients') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ monitoring_group_name | default('monitoring') }}" + become: true + tasks: + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" + +- name: Purge node exporter + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + - "{{ client_group_name|default('clients') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ monitoring_group_name | default('monitoring') }}" + gather_facts: false + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Disable node_exporter service + ansible.builtin.service: + name: node_exporter + state: stopped + enabled: false + failed_when: false + + - name: Remove node_exporter service files + ansible.builtin.file: + name: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/node_exporter.service + - /run/node_exporter.service-cid + + - name: Remove node-exporter 
image + ansible.builtin.command: "{{ container_binary }} rmi {{ node_exporter_container_image }}" + changed_when: false + failed_when: false + +- name: Purge ceph monitoring + hosts: "{{ monitoring_group_name | default('monitoring') }}" + gather_facts: false + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Stop services + ansible.builtin.service: + name: "{{ item }}" + state: stopped + enabled: false + failed_when: false + loop: + - alertmanager + - prometheus + - grafana-server + + - name: Remove systemd service files + ansible.builtin.file: + name: "{{ item }}" + state: absent + loop: + - /etc/systemd/system/alertmanager.service + - /etc/systemd/system/prometheus.service + - /etc/systemd/system/grafana-server.service + - /run/alertmanager.service-cid + - /run/prometheus.service-cid + - /run/grafana-server.service-cid + + - name: Remove ceph dashboard container images + ansible.builtin.command: "{{ container_binary }} rmi {{ item }}" + loop: + - "{{ alertmanager_container_image }}" + - "{{ prometheus_container_image }}" + - "{{ grafana_container_image }}" + changed_when: false + failed_when: false + + - name: Remove ceph-grafana-dashboards package on RedHat or SUSE + ansible.builtin.package: + name: ceph-grafana-dashboards + state: absent + when: + - not containerized_deployment | bool + - ansible_facts['os_family'] in ['RedHat', 'Suse'] + + - name: Remove data + ansible.builtin.file: + name: "{{ item }}" + state: absent + loop: + - "{{ alertmanager_conf_dir }}" + - "{{ prometheus_conf_dir }}" + - /etc/grafana + - "{{ alertmanager_data_dir }}" + - "{{ prometheus_data_dir }}" + - /var/lib/grafana + +- name: Purge ceph dashboard + hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) | default(omit) }}" + gather_facts: false + become: true + environment: + 
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Remove the dashboard admin user + ceph_dashboard_user: + name: "{{ dashboard_admin_user }}" + cluster: "{{ cluster }}" + state: absent + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Remove radosgw system user + radosgw_user: + name: "{{ dashboard_rgw_api_user_id }}" + cluster: "{{ cluster }}" + state: absent + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + when: groups.get(rgw_group_name, []) | length > 0 + + - name: Disable mgr dashboard and prometheus modules + ceph_mgr_module: + name: "{{ item }}" + cluster: "{{ cluster }}" + state: disable + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + loop: + - dashboard + - prometheus + + - name: Remove TLS certificate and key files + ansible.builtin.file: + name: "/etc/ceph/ceph-dashboard.{{ item }}" + state: absent + loop: + - crt + - key + when: dashboard_protocol == "https" + + - name: Remove ceph-mgr-dashboard package + ansible.builtin.package: + name: ceph-mgr-dashboard + state: absent + when: not containerized_deployment | bool diff --git a/infrastructure-playbooks/rgw-add-users-buckets.yml b/infrastructure-playbooks/rgw-add-users-buckets.yml new file mode 100644 index 0000000..da2927a --- /dev/null +++ b/infrastructure-playbooks/rgw-add-users-buckets.yml @@ -0,0 +1,65 @@ +# This example playbook is used to add rgw users and buckets +# +# This example is run on your local machine +# +# Ensure that your local machine can connect to rgw of your cluster +# +# You will need to update the following vars +# +# rgw_host +# port +# 
admin_access_key +# admin_secret_key +# +# Additionally modify the users list and buckets list to create the +# users and buckets you want +# +- name: Add rgw users and buckets + connection: local + hosts: localhost + gather_facts: false + tasks: + - name: Add rgw users and buckets + ceph_add_users_buckets: + rgw_host: '172.20.0.2' + port: 8000 + admin_access_key: '8W56BITCSX27CD555Z5B' + admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20' + users: + - username: 'test1' + fullname: 'tester' + email: 'dan1@email.com' + maxbucket: 666 + suspend: false + autogenkey: false + accesskey: 'B3AR4Q33L59YV56A9A2F' + secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76' + userquota: true + usermaxsize: '1000' + usermaxobjects: 3 + bucketquota: true + bucketmaxsize: '1000' + bucketmaxobjects: 3 + - username: 'test2' + fullname: 'tester' + buckets: + - bucket: 'bucket1' + user: 'test2' + - bucket: 'bucket2' + user: 'test1' + - bucket: 'bucket3' + user: 'test1' + - bucket: 'bucket4' + user: 'test1' + - bucket: 'bucket5' + user: 'test1' + - bucket: 'bucket6' + user: 'test2' + - bucket: 'bucket7' + user: 'test2' + - bucket: 'bucket8' + user: 'test2' + - bucket: 'bucket9' + user: 'test2' + - bucket: 'bucket10' + user: 'test2' diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml new file mode 100644 index 0000000..839609a --- /dev/null +++ b/infrastructure-playbooks/rolling_update.yml @@ -0,0 +1,1359 @@ +--- +# This playbook does a rolling update for all the Ceph services +# +# The value of 'serial:' adjusts the number of servers to be updated simultaneously. +# We recommend a value of 1, which means hosts of a group (e.g: monitor) will be +# upgraded one by one. It is really crucial for the update process to happen +# in a serialized fashion. DO NOT CHANGE THIS VALUE. 
+# +# +# If you run a Ceph community version, you have to change the variable: ceph_stable_release to the new release + +- name: Confirm whether user really meant to upgrade the cluster + hosts: localhost + tags: always + become: false + gather_facts: false + vars: + mgr_group_name: mgrs + + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to upgrade the cluster? + default: 'no' + private: false + + tasks: + - name: Exit playbook, if user did not mean to upgrade cluster + ansible.builtin.fail: + msg: > + "Exiting rolling_update.yml playbook, cluster was NOT upgraded. + To upgrade the cluster, either say 'yes' on the prompt or + use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + - name: Import_role ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + + +- name: Gather facts and check the init system + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + - "{{ client_group_name|default('clients') }}" + - "{{ monitoring_group_name|default('monitoring') }}" + tags: always + any_errors_fatal: true + become: true + gather_facts: false + vars: + delegate_facts_host: true + tasks: + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Gather facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) + + - name: Gather and delegate facts + ansible.builtin.setup: + 
gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}" + run_once: true + when: delegate_facts_host | bool + + - name: Set_fact rolling_update + ansible.builtin.set_fact: + rolling_update: true + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-infra role + ansible.builtin.import_role: + name: ceph-infra + tags: ceph_infra + + - name: Import ceph-validate role + ansible.builtin.import_role: + name: ceph-validate + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + when: + - (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first) + - (containerized_deployment | bool) or (dashboard_enabled | bool) + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + tasks_from: registry + when: + - (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first) + - (containerized_deployment | bool) or (dashboard_enabled | bool) + - ceph_docker_registry_auth | bool + + - name: Check ceph release in container image + when: + - groups.get(mon_group_name, []) | length > 0 + - containerized_deployment | bool + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + block: + - name: Get the ceph release being deployed + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} --version" + register: ceph_version + changed_when: false + + - name: Check ceph release being deployed + ansible.builtin.fail: + msg: "This version of ceph-ansible is intended for upgrading to Ceph Squid only." 
+ when: "'squid' not in ceph_version.stdout.split()" + + +- name: Upgrade ceph mon cluster + tags: mons + vars: + health_mon_check_retries: 5 + health_mon_check_delay: 15 + upgrade_ceph_packages: true + hosts: "{{ mon_group_name|default('mons') }}" + serial: 1 + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Upgrade ceph mon cluster + block: + - name: Remove ceph aliases + ansible.builtin.file: + path: /etc/profile.d/ceph-aliases.sh + state: absent + when: containerized_deployment | bool + + - name: Set mon_host_count + ansible.builtin.set_fact: + mon_host_count: "{{ groups[mon_group_name] | length }}" + + - name: Fail when less than three monitors + ansible.builtin.fail: + msg: "Upgrade of cluster with less than three monitors is not supported." + when: mon_host_count | int < 3 + + - name: Select a running monitor + ansible.builtin.set_fact: + mon_host: "{{ groups[mon_group_name] | difference([inventory_hostname]) | last }}" + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Check Ceph monitors quorum status + when: inventory_hostname == groups[mon_group_name] | first + block: + - name: Get ceph cluster status + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health -f json" + register: check_cluster_health + delegate_to: "{{ mon_host }}" + changed_when: false + + - name: Display health status before failing + when: (check_cluster_health.stdout | from_json).status == 'HEALTH_ERR' + block: + - name: Display ceph health detail + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health detail" + delegate_to: "{{ mon_host }}" + changed_when: false + + - name: Fail if cluster isn't in an acceptable state + ansible.builtin.fail: + msg: "cluster is not in an acceptable state!" 
+ + - name: Get the ceph quorum status + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json" + register: check_quorum_status + delegate_to: "{{ mon_host }}" + changed_when: false + + - name: Fail if the cluster quorum isn't in an acceptable state + ansible.builtin.fail: + msg: "cluster quorum is not in an acceptable state!" + when: (check_quorum_status.stdout | from_json).quorum | length != groups[mon_group_name] | length + + - name: Ensure /var/lib/ceph/bootstrap-rbd-mirror is present + ansible.builtin.file: + path: /var/lib/ceph/bootstrap-rbd-mirror + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: '755' + state: directory + delegate_to: "{{ item }}" + with_items: "{{ groups[mon_group_name] }}" + when: + - cephx | bool + - inventory_hostname == groups[mon_group_name][0] + + - name: Create potentially missing keys (rbd and rbd-mirror) + ceph_key: + name: "client.{{ item.0 }}" + dest: "/var/lib/ceph/{{ item.0 }}/" + caps: + mon: "allow profile {{ item.0 }}" + cluster: "{{ cluster }}" + delegate_to: "{{ item.1 }}" + with_nested: + - ['bootstrap-rbd', 'bootstrap-rbd-mirror'] + - "{{ groups[mon_group_name] }}" # so the key goes on all the nodes + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + when: + - cephx | bool + - inventory_hostname == groups[mon_group_name][0] + + # NOTE: we mask the service so the RPM can't restart it + # after the package gets upgraded + - name: Stop ceph mon + ansible.builtin.systemd: + name: ceph-mon@{{ item }} + state: stopped + enabled: false + masked: true + with_items: + - "{{ ansible_facts['hostname'] }}" + - "{{ ansible_facts['fqdn'] }}" + + # only mask the service for mgr because it must be upgraded + # after ALL 
monitors, even when collocated + - name: Mask the mgr service + ansible.builtin.systemd: + name: ceph-mgr@{{ ansible_facts['hostname'] }} + masked: true + when: inventory_hostname in groups[mgr_group_name] | default([]) + or groups[mgr_group_name] | default([]) | length == 0 + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-mon role + ansible.builtin.import_role: + name: ceph-mon + + - name: Start ceph mgr + ansible.builtin.systemd: + name: ceph-mgr@{{ ansible_facts['hostname'] }} + state: started + enabled: true + masked: false + when: inventory_hostname in groups[mgr_group_name] | default([]) + or groups[mgr_group_name] | default([]) | length == 0 + + - name: Import_role ceph-facts + ansible.builtin.import_role: + name: ceph-facts + tasks_from: set_monitor_address.yml + delegate_to: "{{ groups[mon_group_name][0] }}" + delegate_facts: true + + - name: Non container | waiting for the monitor to join the quorum... 
+ ansible.builtin.command: ceph --cluster "{{ cluster }}" -m "{{ _monitor_addresses[groups['mons'][0]] }}" quorum_status --format json + register: ceph_health_raw + until: + - ceph_health_raw.rc == 0 + - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or + hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"]) + retries: "{{ health_mon_check_retries }}" + delay: "{{ health_mon_check_delay }}" + changed_when: false + when: not containerized_deployment | bool + + - name: Container | waiting for the containerized monitor to join the quorum... + ansible.builtin.command: > + {{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ _monitor_addresses[groups['mons'][0]] }}" quorum_status --format json + register: ceph_health_raw + until: + - ceph_health_raw.rc == 0 + - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or + hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"]) + retries: "{{ health_mon_check_retries }}" + delay: "{{ health_mon_check_delay }}" + changed_when: false + when: containerized_deployment | bool + rescue: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Unmask the mon service + ansible.builtin.systemd: + name: ceph-mon@{{ ansible_facts['hostname'] }} + enabled: true + masked: false + + - name: Unmask the mgr service + ansible.builtin.systemd: + name: ceph-mgr@{{ ansible_facts['hostname'] }} + masked: false + when: inventory_hostname in groups[mgr_group_name] | default([]) + or groups[mgr_group_name] | default([]) | length == 0 + + - name: Stop the playbook execution + ansible.builtin.fail: + msg: "There was an error during monitor upgrade. 
Please, check the previous task results." + +- name: Reset mon_host + hosts: "{{ mon_group_name|default('mons') }}" + tags: always + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Reset mon_host fact + ansible.builtin.set_fact: + mon_host: "{{ groups[mon_group_name][0] }}" + + +- name: Upgrade ceph mgr nodes when implicitly collocated on monitors + vars: + health_mon_check_retries: 5 + health_mon_check_delay: 15 + upgrade_ceph_packages: true + hosts: "{{ mon_group_name|default('mons') }}" + tags: mgrs + serial: 1 + become: true + gather_facts: false + tasks: + - name: Upgrade mgrs when no mgr group explicitly defined in inventory + when: groups.get(mgr_group_name, []) | length == 0 + block: + - name: Stop ceph mgr + ansible.builtin.systemd: + name: ceph-mgr@{{ ansible_facts['hostname'] }} + state: stopped + masked: true + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-mgr role + ansible.builtin.import_role: + name: ceph-mgr + +- name: Upgrade ceph mgr nodes + vars: + upgrade_ceph_packages: true + ceph_release: "{{ ceph_stable_release }}" + hosts: "{{ mgr_group_name|default('mgrs') }}" + tags: mgrs + serial: 1 + become: true + gather_facts: false + tasks: + # The following task has a failed_when: false + # to handle the scenario where no mgr existed before the upgrade + # or 
if we run a Ceph cluster before Luminous + - name: Stop ceph mgr + ansible.builtin.systemd: + name: ceph-mgr@{{ ansible_facts['hostname'] }} + state: stopped + enabled: false + masked: false + failed_when: false + + - name: Mask ceph mgr systemd unit + ansible.builtin.systemd: + name: ceph-mgr@{{ ansible_facts['hostname'] }} + masked: true + failed_when: false + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-mgr role + ansible.builtin.import_role: + name: ceph-mgr + + +- name: Set osd flags + hosts: "{{ osd_group_name | default('osds') }}" + tags: osds + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Set osd flags, disable autoscaler and balancer + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + block: + - name: Get balancer module status + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" + register: balancer_status_update + run_once: true + changed_when: false + check_mode: false + + - name: Disable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" + changed_when: false + when: (balancer_status_update.stdout | from_json)['active'] | bool + + - name: Set osd flags + 
ceph_osd_flag: + name: "{{ item }}" + cluster: "{{ cluster }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: + - noout + - nodeep-scrub + - noautoscale + +- name: Upgrade ceph osds cluster + vars: + health_osd_check_retries: 600 + health_osd_check_delay: 2 + upgrade_ceph_packages: true + hosts: osds + tags: osds + serial: 1 + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Get osd numbers - non container + ansible.builtin.shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa: risky-shell-pipe + register: osd_ids + changed_when: false + + - name: Set num_osds + ansible.builtin.set_fact: + num_osds: "{{ osd_ids.stdout_lines | default([]) | length }}" + + - name: Set_fact container_exec_cmd_osd + ansible.builtin.set_fact: + container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" + when: containerized_deployment | bool + + - name: Stop ceph osd + ansible.builtin.systemd: + name: ceph-osd@{{ item }} + state: stopped + enabled: false + masked: true + with_items: "{{ osd_ids.stdout_lines }}" + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-osd role + 
ansible.builtin.import_role: + name: ceph-osd + + - name: Scan ceph-disk osds with ceph-volume if deploying nautilus + ceph_volume_simple_scan: + cluster: "{{ cluster }}" + force: true + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + when: not containerized_deployment | bool + + - name: Activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus + ceph_volume_simple_activate: + cluster: "{{ cluster }}" + osd_all: true + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + when: not containerized_deployment | bool + + - name: Waiting for clean pgs... + ansible.builtin.command: "{{ container_exec_cmd_update_osd | default('') }} ceph --cluster {{ cluster }} pg stat --format json" + register: ceph_health_post + until: > + (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0) + and + (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | selectattr('name', 'search', '^active\\+clean') | map(attribute='num') | list | sum) == (ceph_health_post.stdout | from_json).pg_summary.num_pgs) + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + retries: "{{ health_osd_check_retries }}" + delay: "{{ health_osd_check_delay }}" + + +- name: Complete osd upgrade + hosts: "{{ osd_group_name | default('osds') }}" + tags: osds + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Unset osd flags, re-enable pg autoscaler and balancer + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + block: + - name: Unset osd flags + ceph_osd_flag: + name: "{{ item }}" + cluster: "{{ cluster }}" + state: absent + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None 
}}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: + - noout + - nodeep-scrub + - noautoscale + + - name: Re-enable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" + changed_when: false + when: (balancer_status_update.stdout | from_json)['active'] | bool + +- name: Upgrade ceph mdss cluster, deactivate all rank > 0 + hosts: "{{ mon_group_name | default('mons') }}[0]" + tags: mdss + become: true + gather_facts: false + tasks: + - name: Deactivate all mds rank > 0 + when: groups.get(mds_group_name, []) | length > 0 + block: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Deactivate all mds rank > 0 if any + when: groups.get(mds_group_name, []) | length > 1 + block: + - name: Set max_mds 1 on ceph fs + ceph_fs: + name: "{{ cephfs }}" + cluster: "{{ cluster }}" + data: "{{ cephfs_data_pool.name }}" + metadata: "{{ cephfs_metadata_pool.name }}" + max_mds: 1 + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Wait until only rank 0 is up + ceph_fs: + name: "{{ cephfs }}" + cluster: "{{ cluster }}" + state: info + register: wait_rank_zero + retries: 720 + delay: 5 + until: (wait_rank_zero.stdout | from_json).mdsmap.in | length == 1 and (wait_rank_zero.stdout | from_json).mdsmap.in[0] == 0 + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Get name of remaining active mds + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json" + changed_when: false + register: 
_mds_active_name + + - name: Set_fact mds_active_name + ansible.builtin.set_fact: + mds_active_name: "{{ (_mds_active_name.stdout | from_json)['filesystems'][0]['mdsmap']['info'][item.key]['name'] }}" + with_dict: "{{ (_mds_active_name.stdout | default('{}') | from_json).filesystems[0]['mdsmap']['info'] | default({}) }}" + + - name: Set_fact mds_active_host + ansible.builtin.set_fact: + mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}" + with_items: "{{ groups[mds_group_name] }}" + when: hostvars[item]['ansible_facts']['hostname'] == mds_active_name + + - name: Create standby_mdss group + ansible.builtin.add_host: + name: "{{ item }}" + groups: standby_mdss + ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}" + ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}" + with_items: "{{ groups[mds_group_name] | difference(mds_active_host) }}" + + - name: Stop standby ceph mds + ansible.builtin.systemd: + name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}" + state: stopped + enabled: false + delegate_to: "{{ item }}" + with_items: "{{ groups['standby_mdss'] }}" + when: groups['standby_mdss'] | default([]) | length > 0 + + # dedicated task for masking systemd unit + # somehow, having a single task doesn't work in containerized context + - name: Mask systemd units for standby ceph mds + ansible.builtin.systemd: + name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}" + masked: true + delegate_to: "{{ item }}" + with_items: "{{ groups['standby_mdss'] }}" + when: groups['standby_mdss'] | default([]) | length > 0 + + - name: Wait until all standbys mds are stopped + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json" + changed_when: false + register: wait_standbys_down + retries: 300 + delay: 5 + until: (wait_standbys_down.stdout | from_json).standbys | length == 0 + + - name: Create active_mdss group + ansible.builtin.add_host: + name: "{{ 
mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0] }}" + groups: active_mdss + ansible_host: "{{ hostvars[mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0]]['ansible_host'] | default(omit) }}" + ansible_port: "{{ hostvars[mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0]]['ansible_port'] | default(omit) }}" + + +- name: Upgrade active mds + vars: + upgrade_ceph_packages: true + hosts: active_mdss + tags: mdss + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Prevent restart from the packaging + ansible.builtin.systemd: + name: ceph-mds@{{ ansible_facts['hostname'] }} + enabled: false + masked: true + when: not containerized_deployment | bool + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-mds role + ansible.builtin.import_role: + name: ceph-mds + + - name: Restart ceph mds + ansible.builtin.systemd: + name: ceph-mds@{{ ansible_facts['hostname'] }} + state: restarted + enabled: true + masked: false + when: not containerized_deployment | bool + + - name: Restart active mds + ansible.builtin.command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}" + changed_when: false + when: containerized_deployment | bool + +- name: Upgrade standbys ceph mdss cluster + vars: + upgrade_ceph_packages: true + hosts: standby_mdss + tags: mdss + become: true + 
gather_facts: false + + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Prevent restarts from the packaging + ansible.builtin.systemd: + name: ceph-mds@{{ ansible_facts['hostname'] }} + enabled: false + masked: true + when: not containerized_deployment | bool + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-mds role + ansible.builtin.import_role: + name: ceph-mds + + - name: Set max_mds + ceph_fs: + name: "{{ cephfs }}" + cluster: "{{ cluster }}" + max_mds: "{{ mds_max_mds }}" + data: "{{ cephfs_data_pool.name }}" + metadata: "{{ cephfs_metadata_pool.name }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + when: inventory_hostname == groups['standby_mdss'] | last + + +- name: Upgrade ceph rgws cluster + vars: + upgrade_ceph_packages: true + hosts: "{{ rgw_group_name|default('rgws') }}" + tags: rgws + serial: 1 + become: true + gather_facts: false + tasks: + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Stop ceph rgw when upgrading from stable-3.2 # noqa: ignore-errors + ansible.builtin.systemd: + name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ 
ansible_facts['hostname'] }} + state: stopped + enabled: false + masked: true + ignore_errors: true + + - name: Stop ceph rgw + ansible.builtin.systemd: + name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} + state: stopped + enabled: false + masked: true + with_items: "{{ rgw_instances }}" + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-rgw role + ansible.builtin.import_role: + name: ceph-rgw + + +- name: Upgrade ceph rbd mirror node + vars: + upgrade_ceph_packages: true + hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" + tags: rbdmirrors + serial: 1 + become: true + gather_facts: false + tasks: + - name: Check for ceph rbd mirror services + ansible.builtin.command: systemctl show --no-pager --property=Id --state=enabled ceph-rbd-mirror@* # noqa command-instead-of-module + changed_when: false + register: rbdmirror_services + + - name: Stop ceph rbd mirror + ansible.builtin.service: + name: "{{ item.split('=')[1] }}" + state: stopped + enabled: false + masked: true + loop: "{{ rbdmirror_services.stdout_lines }}" + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: 
ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-rbd-mirror role + ansible.builtin.import_role: + name: ceph-rbd-mirror + + +- name: Upgrade ceph nfs node + vars: + upgrade_ceph_packages: true + hosts: "{{ nfs_group_name|default('nfss') }}" + tags: nfss + serial: 1 + become: true + gather_facts: false + tasks: + # failed_when: false is here so that if we upgrade + # from a version of ceph that does not have nfs-ganesha + # then this task will not fail + - name: Stop ceph nfs + ansible.builtin.systemd: + name: nfs-ganesha + state: stopped + enabled: false + masked: true + failed_when: false + when: not containerized_deployment | bool + + - name: Systemd stop nfs container + ansible.builtin.systemd: + name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} + state: stopped + enabled: false + masked: true + failed_when: false + when: + - ceph_nfs_enable_service | bool + - containerized_deployment | bool + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: containerized_deployment | bool + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + + - name: Import ceph-nfs role + ansible.builtin.import_role: + name: ceph-nfs + +- name: Upgrade ceph client node + vars: + upgrade_ceph_packages: true + hosts: "{{ client_group_name|default('clients') }}" + tags: clients + serial: "{{ client_update_batch | default(20) }}" + become: true + 
gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + when: containerized_deployment | bool + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + - name: Import ceph-common role + ansible.builtin.import_role: + name: ceph-common + when: not containerized_deployment | bool + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + when: + - (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first) + - containerized_deployment | bool + + +- name: Upgrade ceph-crash daemons + hosts: + - "{{ mon_group_name | default('mons') }}" + - "{{ osd_group_name | default('osds') }}" + - "{{ mds_group_name | default('mdss') }}" + - "{{ rgw_group_name | default('rgws') }}" + - "{{ rbdmirror_group_name | default('rbdmirrors') }}" + - "{{ mgr_group_name | default('mgrs') }}" + tags: + - post_upgrade + - crash + gather_facts: false + become: true + tasks: + - name: Stop the ceph-crash service + ansible.builtin.systemd: + name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" + state: stopped + + # it needs to be done in a separate task otherwise the stop just before doesn't work. 
+ - name: Mask and disable the ceph-crash service + ansible.builtin.systemd: + name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" + enabled: false + masked: true + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + - name: Import ceph-crash role + ansible.builtin.import_role: + name: ceph-crash + +- name: Upgrade ceph-exporter daemons + hosts: + - "{{ mon_group_name | default('mons') }}" + - "{{ osd_group_name | default('osds') }}" + - "{{ mds_group_name | default('mdss') }}" + - "{{ rgw_group_name | default('rgws') }}" + - "{{ rbdmirror_group_name | default('rbdmirrors') }}" + - "{{ mgr_group_name | default('mgrs') }}" + tags: + - post_upgrade + - ceph-exporter + gather_facts: false + become: true + tasks: + - name: Exit ceph-exporter upgrade if non containerized deployment + ansible.builtin.meta: end_play + when: not containerized_deployment | bool + + - name: Stop the ceph-exporter service + ansible.builtin.systemd: + name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}" + state: stopped + + # it needs to be done in a separate task otherwise the stop just before doesn't work. 
+ - name: Mask and disable the ceph-exporter service + ansible.builtin.systemd: + name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}" + enabled: false + masked: true + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + - name: Import ceph-exporter role + ansible.builtin.import_role: + name: ceph-exporter + +- name: Complete upgrade + hosts: "{{ mon_group_name | default('mons') }}" + tags: post_upgrade + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Container | disallow pre-squid OSDs and enable all new squid-only functionality + ansible.builtin.command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release squid" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + when: + - containerized_deployment | bool + - groups.get(mon_group_name, []) | length > 0 + + - name: Non container | disallow pre-squid OSDs and enable all new squid-only functionality + ansible.builtin.command: "ceph --cluster {{ cluster }} osd require-osd-release squid" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + when: + - not containerized_deployment | bool + - groups.get(mon_group_name, []) | length > 0 + +- name: Upgrade node-exporter + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ 
rgw_group_name|default('rgws') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + - "{{ monitoring_group_name|default('monitoring') }}" + tags: monitoring + gather_facts: false + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: With dashboard configuration + when: dashboard_enabled | bool + block: + - name: Stop node-exporter + ansible.builtin.service: + name: node_exporter + state: stopped + failed_when: false + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + tasks_from: registry + when: + - not containerized_deployment | bool + - ceph_docker_registry_auth | bool + - name: Import ceph-node-exporter role + ansible.builtin.import_role: + name: ceph-node-exporter + +- name: Upgrade monitoring node + hosts: "{{ monitoring_group_name|default('monitoring') }}" + tags: monitoring + gather_facts: false + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: With dashboard configuration + when: dashboard_enabled | bool + block: + - name: Stop monitoring services + ansible.builtin.service: + name: '{{ item }}' + state: stopped + failed_when: false + with_items: + - alertmanager + - prometheus + - grafana-server + + # - name: Import ceph-facts role + # ansible.builtin.import_role: + # name: ceph-facts + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: grafana + - name: Import ceph-prometheus role + ansible.builtin.import_role: + name: ceph-prometheus + - name: Import ceph-grafana role + ansible.builtin.import_role: + name: ceph-grafana + +- name: 
Upgrade ceph dashboard + hosts: "{{ groups[mgr_group_name|default('mgrs')] | default(groups[mon_group_name|default('mons')]) | default(omit) }}" + tags: monitoring + gather_facts: false + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: With dashboard configuration + when: dashboard_enabled | bool + block: + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: grafana + + - name: Import ceph-dashboard role + ansible.builtin.import_role: + name: ceph-dashboard + +- name: Switch any existing crush buckets to straw2 + hosts: "{{ mon_group_name | default('mons') }}[0]" + tags: post_upgrade + become: true + any_errors_fatal: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Set_fact ceph_cmd + ansible.builtin.set_fact: + ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}" + + - name: Backup the crushmap + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd getcrushmap -o /etc/ceph/{{ cluster }}-crushmap" + changed_when: false + + - name: Migrate crush buckets to straw2 + block: + - name: Switch crush buckets to straw2 + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd crush set-all-straw-buckets-to-straw2" + changed_when: false + rescue: + - name: Restore the crushmap + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd setcrushmap -i /etc/ceph/{{ cluster }}-crushmap" + changed_when: false + + - name: Inform that the switch to straw2 buckets 
failed + ansible.builtin.fail: + msg: > + "An attempt to switch to straw2 bucket was made but failed. + Check the cluster status." + + - name: Remove crushmap backup + ansible.builtin.file: + path: /etc/ceph/{{ cluster }}-crushmap + state: absent + +- name: Show ceph status + hosts: "{{ mon_group_name|default('mons') }}" + tags: always + become: true + gather_facts: false + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Set_fact container_exec_cmd_status + ansible.builtin.set_fact: + container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" + when: containerized_deployment | bool + + - name: Show ceph status + ansible.builtin.command: "{{ container_exec_cmd_status | default('') }} ceph --cluster {{ cluster }} -s" + changed_when: false + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Show all daemons version + ansible.builtin.command: "{{ container_exec_cmd_status | default('') }} ceph --cluster {{ cluster }} versions" + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false diff --git a/infrastructure-playbooks/shrink-mds.yml b/infrastructure-playbooks/shrink-mds.yml new file mode 100644 index 0000000..6f9eb46 --- /dev/null +++ b/infrastructure-playbooks/shrink-mds.yml @@ -0,0 +1,177 @@ +--- +# This playbook removes the Ceph MDS from your cluster. +# +# Use it like this: +# ansible-playbook shrink-mds.yml -e mds_to_kill=ceph-mds01 +# Prompts for confirmation to shrink, defaults to no and +# doesn't shrink the cluster. yes shrinks the cluster. +# +# ansible-playbook -e ireallymeanit=yes|no shrink-mds.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. 
+- name: Gather facts and check the init system + hosts: + - "{{ mon_group_name | default('mons') }}" + - "{{ mds_group_name | default('mdss') }}" + become: true + tasks: + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: gather facts on all Ceph hosts for following reference + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + +- name: Perform checks, remove mds and print cluster health + hosts: mons[0] + become: true + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to shrink the cluster? + default: 'no' + private: false + pre_tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Exit playbook, if no mds was given + when: mds_to_kill is not defined + ansible.builtin.fail: + msg: > + mds_to_kill must be declared. + Exiting shrink-cluster playbook, no MDS was removed. On the command + line when invoking the playbook, you can use + "-e mds_to_kill=ceph-mds1" argument. You can only remove a single + MDS each time the playbook runs." + + - name: Exit playbook, if the mds is not part of the inventory + when: mds_to_kill not in groups[mds_group_name] + ansible.builtin.fail: + msg: "It seems that the host given is not part of your inventory, + please make sure it is." + + - name: Exit playbook, if user did not mean to shrink cluster + when: ireallymeanit != 'yes' + ansible.builtin.fail: + msg: "Exiting shrink-mds playbook, no mds was removed. 
+ To shrink the cluster, either say 'yes' on the prompt or + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + + - name: Set_fact container_exec_cmd for mon0 + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" + when: containerized_deployment | bool + + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" + changed_when: false + register: ceph_health + until: ceph_health is succeeded + retries: 5 + delay: 2 + + - name: Set_fact mds_to_kill_hostname + ansible.builtin.set_fact: + mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}" + + tasks: + # get rid of this as soon as "systemctl stop ceph-msd@$HOSTNAME" also + # removes the MDS from the FS map. + - name: Exit mds when containerized deployment + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit" + changed_when: false + when: containerized_deployment | bool + + - name: Get ceph status + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json" + register: ceph_status + changed_when: false + + - name: Set_fact current_max_mds + ansible.builtin.set_fact: + current_max_mds: "{{ (ceph_status.stdout | from_json)['fsmap']['max'] }}" + + - name: Fail if removing that mds node wouldn't satisfy max_mds anymore + ansible.builtin.fail: + msg: "Can't remove more mds as it won't satisfy current max_mds setting" + when: + - ((((ceph_status.stdout | from_json)['fsmap']['up'] | int) + ((ceph_status.stdout | from_json)['fsmap']['up:standby'] | int)) - 1) < current_max_mds | int + - (ceph_status.stdout | from_json)['fsmap']['up'] | int > 1 + + - name: Stop mds service and verify it + block: + - name: Stop mds service + ansible.builtin.service: + name: ceph-mds@{{ 
mds_to_kill_hostname }}
+            state: stopped
+            enabled: false
+          delegate_to: "{{ mds_to_kill }}"
+          failed_when: false
+
+        - name: Ensure that the mds is stopped
+          ansible.builtin.command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa command-instead-of-module
+          register: mds_to_kill_status
+          failed_when: mds_to_kill_status.rc == 0
+          delegate_to: "{{ mds_to_kill }}"
+          retries: 5
+          delay: 2
+          changed_when: false
+
+    - name: Fail if the mds is reported as active or standby
+      block:
+        - name: Get new ceph status
+          ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+          register: ceph_status
+          changed_when: false
+
+        - name: Get active mds nodes list
+          ansible.builtin.set_fact:
+            active_mdss: "{{ active_mdss | default([]) + [item.name] }}"
+          with_items: "{{ (ceph_status.stdout | from_json)['fsmap']['by_rank'] }}"
+
+        - name: Get ceph fs dump status
+          ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
+          register: ceph_fs_status
+          changed_when: false
+
+        - name: Create a list of standby mdss
+          ansible.builtin.set_fact:
+            # FIX: the expression must be wrapped in "{{ ... }}". Without the
+            # Jinja2 delimiters standby_mdss is set to the literal expression
+            # string, so the "still up and running" safety check below could
+            # never match the killed mds.
+            standby_mdss: "{{ (ceph_fs_status.stdout | from_json)['standbys'] | map(attribute='name') | list }}"
+
+        - name: Fail if mds just killed is being reported as active or standby
+          ansible.builtin.fail:
+            msg: "mds node {{ mds_to_kill }} still up and running."
+ when: + - (mds_to_kill in active_mdss | default([])) or + (mds_to_kill in standby_mdss | default([])) + + - name: Delete the filesystem when killing last mds + ceph_fs: + name: "{{ cephfs }}" + cluster: "{{ cluster }}" + state: absent + when: + - (ceph_status.stdout | from_json)['fsmap']['up'] | int == 0 + - (ceph_status.stdout | from_json)['fsmap']['up:standby'] | int == 0 + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Purge mds store + ansible.builtin.file: + path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_to_kill_hostname }} + state: absent + delegate_to: "{{ mds_to_kill }}" + + post_tasks: + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + changed_when: false diff --git a/infrastructure-playbooks/shrink-mgr.yml b/infrastructure-playbooks/shrink-mgr.yml new file mode 100644 index 0000000..af105af --- /dev/null +++ b/infrastructure-playbooks/shrink-mgr.yml @@ -0,0 +1,138 @@ +--- +# This playbook shrinks the Ceph manager from your cluster +# +# Use it like this: +# ansible-playbook shrink-mgr.yml -e mgr_to_kill=ceph-mgr1 +# Prompts for confirmation to shrink, defaults to no and +# doesn't shrink the cluster and yes shrinks the cluster. +# +# ansible-playbook -e ireallymeanit=yes|no shrink-mgr.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. 
+ + +- name: Gather facts and check the init system + hosts: + - "{{ mon_group_name | default('mons') }}" + - "{{ mgr_group_name | default('mgrs') }}" + become: true + tasks: + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: gather facts on all Ceph hosts for following reference + +- name: Confirm if user really meant to remove manager from the ceph cluster + hosts: mons[0] + become: true + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to shrink the cluster? + default: 'no' + private: false + pre_tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Set_fact container_exec_cmd + when: containerized_deployment | bool + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" + + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" + register: ceph_health + changed_when: false + until: ceph_health is succeeded + retries: 5 + delay: 2 + + - name: Get total number of mgrs in cluster + block: + - name: Save mgr dump output + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" + register: mgr_dump + changed_when: false + + - name: Get active and standbys mgr list + ansible.builtin.set_fact: + active_mgr: "{{ [mgr_dump.stdout | from_json] | map(attribute='active_name') | list }}" + standbys_mgr: "{{ (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list }}" + + - name: Exit playbook, if there's no standby manager + ansible.builtin.fail: + msg: "You are about to shrink the only manager present in the cluster." 
+ when: standbys_mgr | length | int < 1 + + - name: Exit playbook, if no manager was given + ansible.builtin.fail: + msg: "mgr_to_kill must be declared + Exiting shrink-cluster playbook, no manager was removed. + On the command line when invoking the playbook, you can use + -e mgr_to_kill=ceph-mgr01 argument. You can only remove a single + manager each time the playbook runs." + when: mgr_to_kill is not defined + + - name: Exit playbook, if user did not mean to shrink cluster + ansible.builtin.fail: + msg: "Exiting shrink-mgr playbook, no manager was removed. + To shrink the cluster, either say 'yes' on the prompt or + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + - name: Set_fact mgr_to_kill_hostname + ansible.builtin.set_fact: + mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}" + + - name: Exit playbook, if the selected manager is not present in the cluster + ansible.builtin.fail: + msg: "It seems that the host given is not present in the cluster." 
+ when: + - mgr_to_kill_hostname not in active_mgr + - mgr_to_kill_hostname not in standbys_mgr + + tasks: + - name: Stop manager services and verify it + block: + - name: Stop manager service + ansible.builtin.service: + name: ceph-mgr@{{ mgr_to_kill_hostname }} + state: stopped + enabled: false + delegate_to: "{{ mgr_to_kill }}" + failed_when: false + + - name: Ensure that the mgr is stopped + ansible.builtin.command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa command-instead-of-module + register: mgr_to_kill_status + failed_when: mgr_to_kill_status.rc == 0 + delegate_to: "{{ mgr_to_kill }}" + changed_when: false + retries: 5 + delay: 2 + + - name: Fail if the mgr is reported in ceph mgr dump + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" + register: mgr_dump + changed_when: false + failed_when: mgr_to_kill_hostname in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list) + until: mgr_to_kill_hostname not in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list) + retries: 12 + delay: 10 + + - name: Purge manager store + ansible.builtin.file: + path: /var/lib/ceph/mgr/{{ cluster }}-{{ mgr_to_kill_hostname }} + state: absent + delegate_to: "{{ mgr_to_kill }}" + + post_tasks: + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + changed_when: false diff --git a/infrastructure-playbooks/shrink-mon.yml b/infrastructure-playbooks/shrink-mon.yml new file mode 100644 index 0000000..6693815 --- /dev/null +++ b/infrastructure-playbooks/shrink-mon.yml @@ -0,0 +1,151 @@ +--- +# This playbook shrinks the Ceph monitors from your cluster +# It can remove a Ceph of monitor from the cluster and ALL ITS DATA +# +# Use it like this: +# 
ansible-playbook shrink-mon.yml -e mon_to_kill=ceph-mon01 +# Prompts for confirmation to shrink, defaults to no and +# doesn't shrink the cluster. yes shrinks the cluster. +# +# ansible-playbook -e ireallymeanit=yes|no shrink-mon.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. + + +- name: Gather facts and check the init system + + hosts: "{{ mon_group_name|default('mons') }}" + + become: true + + tasks: + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" + +- name: Confirm whether user really meant to remove monitor from the ceph cluster + hosts: mons[0] + become: true + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to shrink the cluster? + default: 'no' + private: false + vars: + mon_group_name: mons + + pre_tasks: + - name: Exit playbook, if only one monitor is present in cluster + ansible.builtin.fail: + msg: "You are about to shrink the only monitor present in the cluster. + If you really want to do that, please use the purge-cluster playbook." + when: groups[mon_group_name] | length | int == 1 + + - name: Exit playbook, if no monitor was given + ansible.builtin.fail: + msg: "mon_to_kill must be declared + Exiting shrink-cluster playbook, no monitor was removed. + On the command line when invoking the playbook, you can use + -e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs." + when: mon_to_kill is not defined + + - name: Exit playbook, if the monitor is not part of the inventory + ansible.builtin.fail: + msg: "It seems that the host given is not part of your inventory, please make sure it is." + when: mon_to_kill not in groups[mon_group_name] + + - name: Exit playbook, if user did not mean to shrink cluster + ansible.builtin.fail: + msg: "Exiting shrink-mon playbook, no monitor was removed. 
+ To shrink the cluster, either say 'yes' on the prompt or + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + tasks: + - name: Pick a monitor different than the one we want to remove + ansible.builtin.set_fact: + mon_host: "{{ item }}" + with_items: "{{ groups[mon_group_name] }}" + when: item != mon_to_kill + + - name: Set container_exec_cmd fact + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}" + when: containerized_deployment | bool + + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health" + register: ceph_health + changed_when: false + until: ceph_health.stdout.find("HEALTH") > -1 + delegate_to: "{{ mon_host }}" + retries: 5 + delay: 2 + + - name: Set_fact mon_to_kill_hostname + ansible.builtin.set_fact: + mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}" + + - name: Stop monitor service(s) + ansible.builtin.service: + name: ceph-mon@{{ mon_to_kill_hostname }} + state: stopped + enabled: false + delegate_to: "{{ mon_to_kill }}" + failed_when: false + + - name: Purge monitor store + ansible.builtin.file: + path: /var/lib/ceph/mon/{{ cluster }}-{{ mon_to_kill_hostname }} + state: absent + delegate_to: "{{ mon_to_kill }}" + + - name: Remove monitor from the quorum + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}" + changed_when: false + failed_when: false + delegate_to: "{{ mon_host }}" + + post_tasks: + - name: Verify the monitor is out of the cluster + ansible.builtin.command: "{{ container_exec_cmd }} 
ceph --cluster {{ cluster }} quorum_status -f json" + delegate_to: "{{ mon_host }}" + changed_when: false + failed_when: false + register: result + until: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names'] + retries: 2 + delay: 10 + + - name: Please remove the monitor from your ceph configuration file + ansible.builtin.debug: + msg: "The monitor has been successfully removed from the cluster. + Please remove the monitor entry from the rest of your ceph configuration files, cluster wide." + run_once: true + when: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names'] + + - name: Fail if monitor is still part of the cluster + ansible.builtin.fail: + msg: "Monitor appears to still be part of the cluster, please check what happened." + run_once: true + when: mon_to_kill_hostname in (result.stdout | from_json)['quorum_names'] + + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s" + delegate_to: "{{ mon_host }}" + changed_when: false + + - name: Show ceph mon status + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat" + delegate_to: "{{ mon_host }}" + changed_when: false diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml new file mode 100644 index 0000000..27feb4d --- /dev/null +++ b/infrastructure-playbooks/shrink-osd.yml @@ -0,0 +1,379 @@ +--- +# This playbook shrinks Ceph OSDs that have been created with ceph-volume. +# It can remove any number of OSD(s) from the cluster and ALL THEIR DATA +# +# Use it like this: +# ansible-playbook shrink-osd.yml -e osd_to_kill=0,2,6 +# Prompts for confirmation to shrink, defaults to no and +# doesn't shrink the cluster. yes shrinks the cluster. +# +# ansible-playbook -e ireallymeanit=yes|no shrink-osd.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. 
+
+- name: Gather facts and check the init system
+  hosts:
+    - mons
+    - osds
+
+  become: true
+  tasks:
+    - name: Gather facts on all Ceph hosts for following reference
+      ansible.builtin.debug:
+        msg: "gather facts on all Ceph hosts for following reference"
+
+- name: Confirm whether user really meant to remove osd(s) from the cluster
+  hosts: mons[0]
+  become: true
+  vars_prompt:
+    - name: ireallymeanit  # noqa: name[casing]
+      prompt: Are you sure you want to shrink the cluster?
+      default: 'no'
+      private: false
+  vars:
+    mon_group_name: mons
+    osd_group_name: osds
+
+  pre_tasks:
+    - name: Exit playbook, if user did not mean to shrink cluster
+      ansible.builtin.fail:
+        msg: "Exiting shrink-osd playbook, no osd(s) was/were removed.
+          To shrink the cluster, either say 'yes' on the prompt
+          or use `-e ireallymeanit=yes` on the command line when
+          invoking the playbook"
+      when: ireallymeanit != 'yes'
+
+    - name: Exit playbook, if no osd(s) was/were given
+      ansible.builtin.fail:
+        msg: "osd_to_kill must be declared.
+          Exiting shrink-osd playbook, no OSD(s) was/were removed.
+          On the command line when invoking the playbook, you can use
+          -e osd_to_kill=0,1,2,3 argument."
+ when: osd_to_kill is not defined + + - name: Check the osd ids passed have the correct format + ansible.builtin.fail: + msg: "The id {{ item }} has wrong format, please pass the number only" + with_items: "{{ osd_to_kill.split(',') }}" + when: not item is regex("^\d+$") + + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + post_tasks: + - name: Set_fact container_exec_cmd build docker exec command (containerized) + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" + when: containerized_deployment | bool + + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health" + register: ceph_health + changed_when: false + until: ceph_health.stdout.find("HEALTH") > -1 + retries: 5 + delay: 2 + + - name: Find the host(s) where the osd(s) is/are running on + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}" + changed_when: false + with_items: "{{ osd_to_kill.split(',') }}" + register: find_osd_hosts + + - name: Set_fact osd_hosts + ansible.builtin.set_fact: + osd_hosts: "{{ osd_hosts | default([]) + [[(item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item]] }}" + with_items: "{{ find_osd_hosts.results }}" + + - name: Set_fact _osd_hosts + ansible.builtin.set_fact: + _osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2, item.3 ] ] }}" + with_nested: + - "{{ groups.get(osd_group_name) }}" + - "{{ osd_hosts }}" + when: hostvars[item.0]['ansible_facts']['hostname'] == item.1 + + - name: Set_fact host_list + ansible.builtin.set_fact: + host_list: "{{ host_list | default([]) | union([item.0]) }}" + loop: "{{ _osd_hosts }}" + + - name: Get ceph-volume lvm list 
data + ceph_volume: + cluster: "{{ cluster }}" + action: list + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _lvm_list_data + delegate_to: "{{ item }}" + loop: "{{ host_list }}" + + - name: Set_fact _lvm_list + ansible.builtin.set_fact: + _lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}" + with_items: "{{ _lvm_list_data.results }}" + + - name: Refresh /etc/ceph/osd files non containerized_deployment + ceph_volume_simple_scan: + cluster: "{{ cluster }}" + force: true + delegate_to: "{{ item }}" + loop: "{{ host_list }}" + when: not containerized_deployment | bool + + - name: Get osd unit status + ansible.builtin.systemd: + name: ceph-osd@{{ item.2 }} + register: osd_status + delegate_to: "{{ item.0 }}" + loop: "{{ _osd_hosts }}" + when: + - containerized_deployment | bool + + - name: Refresh /etc/ceph/osd files containerized_deployment + ansible.builtin.command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" + changed_when: false + delegate_to: "{{ item.0 }}" + loop: "{{ _osd_hosts }}" + when: + - containerized_deployment | bool + - item.2 not in _lvm_list.keys() + - osd_status.results[0].status.ActiveState == 'active' + + - name: Refresh /etc/ceph/osd files containerized_deployment when OSD container is down + when: + - containerized_deployment | bool + - osd_status.results[0].status.ActiveState != 'active' + block: + - name: Create tmp osd folder + ansible.builtin.file: + path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }} + state: directory + mode: '0755' + delegate_to: "{{ item.0 }}" + when: item.2 not in _lvm_list.keys() + loop: "{{ _osd_hosts }}" + + - name: Activate OSD + ansible.builtin.command: | + {{ container_binary }} run -ti --pids-limit=-1 --rm --net=host 
--privileged=true --pid=host --ipc=host --cpus=1 + -v /dev:/dev -v /etc/localtime:/etc/localtime:ro + -v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared + -v /etc/ceph:/etc/ceph:z -v /var/run/ceph:/var/run/ceph:z + -v /var/run/udev/:/var/run/udev/ -v /var/log/ceph:/var/log/ceph:z + -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0 -e CLUSTER=ceph -e DEBUG=verbose + -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 -v /run/lvm/:/run/lvm/ + -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE -e CONTAINER_IMAGE={{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} + -e OSD_ID={{ item.2 }} + --entrypoint=ceph-volume + {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} + simple activate {{ item.2 }} {{ item.1 }} --no-systemd + changed_when: false + delegate_to: "{{ item.0 }}" + when: item.2 not in _lvm_list.keys() + loop: "{{ _osd_hosts }}" + + - name: Simple scan + ansible.builtin.command: | + {{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1 + -v /dev:/dev -v /etc/localtime:/etc/localtime:ro + -v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared + -v /etc/ceph:/etc/ceph:z -v /var/run/ceph:/var/run/ceph:z + -v /var/run/udev/:/var/run/udev/ -v /var/log/ceph:/var/log/ceph:z + -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0 -e CLUSTER=ceph -e DEBUG=verbose + -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728 -v /run/lvm/:/run/lvm/ + -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE -e CONTAINER_IMAGE={{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} + -e OSD_ID={{ item.2 }} + --entrypoint=ceph-volume + {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} + simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }} + changed_when: false + delegate_to: "{{ item.0 }}" + when: item.2 not in _lvm_list.keys() + loop: "{{ _osd_hosts }}" + + - name: Umount OSD temp folder + ansible.posix.mount: + path: 
/var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }} + state: unmounted + delegate_to: "{{ item.0 }}" + when: item.2 not in _lvm_list.keys() + loop: "{{ _osd_hosts }}" + + - name: Remove OSD temp folder + ansible.builtin.file: + path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }} + state: absent + delegate_to: "{{ item.0 }}" + when: item.2 not in _lvm_list.keys() + loop: "{{ _osd_hosts }}" + + - name: Find /etc/ceph/osd files + ansible.builtin.find: + paths: /etc/ceph/osd + pattern: "{{ item.2 }}-*" + register: ceph_osd_data + delegate_to: "{{ item.0 }}" + loop: "{{ _osd_hosts }}" + when: item.2 not in _lvm_list.keys() + + - name: Slurp ceph osd files content + ansible.builtin.slurp: + src: "{{ item['files'][0]['path'] }}" + delegate_to: "{{ item.item.0 }}" + register: ceph_osd_files_content + loop: "{{ ceph_osd_data.results }}" + when: + - item.skipped is undefined + - item.matched > 0 + + - name: Set_fact ceph_osd_files_json + ansible.builtin.set_fact: + ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({ item.item.item.2: item.content | b64decode | from_json}) }}" + with_items: "{{ ceph_osd_files_content.results }}" + when: item.skipped is undefined + + - name: Mark osd(s) out of the cluster + ceph_osd: + ids: "{{ osd_to_kill.split(',') }}" + cluster: "{{ cluster }}" + state: out + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + run_once: true + + - name: Stop osd(s) service + ansible.builtin.service: + name: ceph-osd@{{ item.2 }} + state: stopped + enabled: false + loop: "{{ _osd_hosts }}" + delegate_to: "{{ item.0 }}" + + - name: Umount osd lockbox + ansible.posix.mount: + path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}" + state: absent + loop: "{{ _osd_hosts }}" + delegate_to: "{{ item.0 }}" + when: + - not containerized_deployment | bool + - item.2 
not in _lvm_list.keys() + - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool + - ceph_osd_data_json[item.2]['data']['uuid'] is defined + + - name: Umount osd data + ansible.posix.mount: + path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" + state: absent + loop: "{{ _osd_hosts }}" + delegate_to: "{{ item.0 }}" + when: not containerized_deployment | bool + + - name: Get parent device for data partition + ansible.builtin.command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}" + register: parent_device_data_part + loop: "{{ _osd_hosts }}" + delegate_to: "{{ item.0 }}" + changed_when: false + when: + - item.2 not in _lvm_list.keys() + - ceph_osd_data_json[item.2]['data']['path'] is defined + + - name: Add pkname information in ceph_osd_data_json + ansible.builtin.set_fact: + ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout}}, recursive=True) }}" + loop: "{{ parent_device_data_part.results }}" + when: item.skipped is undefined + + - name: Close dmcrypt close on devices if needed + ansible.builtin.command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}" + with_nested: + - "{{ _osd_hosts }}" + - ['block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt'] + delegate_to: "{{ item.0 }}" + failed_when: false + register: result + until: result is succeeded + changed_when: false + when: + - item.2 not in _lvm_list.keys() + - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool + - ceph_osd_data_json[item.2][item.3] is defined + + - name: Use ceph-volume lvm zap to destroy all partitions + ceph_volume: + cluster: "{{ cluster }}" + action: zap + destroy: true + data: "{{ ceph_osd_data_json[item.2]['pkname_data'] if item.3 == 'data' else ceph_osd_data_json[item.2][item.3]['path'] }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + 
ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_nested: + - "{{ _osd_hosts }}" + - ['block', 'block.db', 'block.wal', 'journal', 'data'] + delegate_to: "{{ item.0 }}" + failed_when: false + register: result + when: + - item.2 not in _lvm_list.keys() + - ceph_osd_data_json[item.2][item.3] is defined + + - name: Zap osd devices + ceph_volume: + action: "zap" + osd_fsid: "{{ item.1 }}" + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ item.0 }}" + loop: "{{ _osd_hosts }}" + when: item.2 in _lvm_list.keys() + + - name: Ensure osds are marked down + ceph_osd: + ids: "{{ osd_to_kill.split(',') }}" + cluster: "{{ cluster }}" + state: down + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Purge osd(s) from the cluster + ceph_osd: + ids: "{{ item }}" + cluster: "{{ cluster }}" + state: purge + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + run_once: true + with_items: "{{ osd_to_kill.split(',') }}" + + - name: Remove osd data dir + ansible.builtin.file: + path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" + state: absent + loop: "{{ _osd_hosts }}" + delegate_to: "{{ item.0 }}" + + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s" + changed_when: false + + - name: Show ceph osd tree + 
ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree" + changed_when: false diff --git a/infrastructure-playbooks/shrink-rbdmirror.yml b/infrastructure-playbooks/shrink-rbdmirror.yml new file mode 100644 index 0000000..9f6f0ce --- /dev/null +++ b/infrastructure-playbooks/shrink-rbdmirror.yml @@ -0,0 +1,128 @@ +--- +# This playbook removes the Ceph RBD mirror from your cluster on the given +# node. +# +# Use it like this: +# ansible-playbook shrink-rbdmirror.yml -e rbdmirror_to_kill=ceph-rbdmirror01 +# Prompts for confirmation to shrink, defaults to no and +# doesn't shrink the cluster. yes shrinks the cluster. +# +# ansible-playbook -e ireallymeanit=yes|no shrink-rbdmirror.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. + +- name: Gather facts and check the init system + hosts: + - mons + - rbdmirrors + become: true + tasks: + - name: Gather facts on MONs and RBD mirrors + ansible.builtin.debug: + msg: gather facts on MONs and RBD mirrors + +- name: Confirm whether user really meant to remove rbd mirror from the ceph + cluster + hosts: mons[0] + become: true + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to shrink the cluster? + default: 'no' + private: false + pre_tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary + + - name: Exit playbook, if no rbdmirror was given + ansible.builtin.fail: + msg: "rbdmirror_to_kill must be declared + Exiting shrink-cluster playbook, no RBD mirror was removed. + On the command line when invoking the playbook, you can use + -e rbdmirror_to_kill=rbd-mirror01 argument. You can only remove a + single rbd mirror each time the playbook runs." 
+      when: rbdmirror_to_kill is not defined
+
+    - name: Exit playbook, if the rbdmirror is not part of the inventory
+      ansible.builtin.fail:
+        msg: >
+          It seems that the host given is not part of your inventory,
+          please make sure it is.
+      when: rbdmirror_to_kill not in groups[rbdmirror_group_name]
+
+    - name: Exit playbook, if user did not mean to shrink cluster
+      ansible.builtin.fail:
+        msg: "Exiting shrink-rbdmirror playbook, no rbd-mirror was removed.
+          To shrink the cluster, either say 'yes' on the prompt
+          or use `-e ireallymeanit=yes` on the command line when
+          invoking the playbook"
+      when: ireallymeanit != 'yes'
+
+    - name: Set_fact container_exec_cmd for mon0
+      when: containerized_deployment | bool
+      ansible.builtin.set_fact:
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+
+    - name: Exit playbook, if can not connect to the cluster
+      ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
+      register: ceph_health
+      changed_when: false
+      until: ceph_health is succeeded
+      retries: 5
+      delay: 2
+
+    - name: Set_fact rbdmirror_to_kill_hostname
+      ansible.builtin.set_fact:
+        rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}"
+
+    - name: Set_fact rbdmirror_gids
+      ansible.builtin.set_fact:
+        rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [item] }}"
+      with_items: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list }}"
+      when: item != 'summary'
+
+    - name: Set_fact rbdmirror_to_kill_gid
+      ansible.builtin.set_fact:
+        rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['gid'] }}"
+      with_items: "{{ rbdmirror_gids }}"
+      when: (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname
+
+  tasks:
+    - name: Stop rbdmirror service
+      ansible.builtin.service:
+        name:
ceph-rbd-mirror@rbd-mirror.{{ rbdmirror_to_kill_hostname }} + state: stopped + enabled: false + delegate_to: "{{ rbdmirror_to_kill }}" + failed_when: false + + - name: Purge related directories + ansible.builtin.file: + path: /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}-{{ rbdmirror_to_kill_hostname }} + state: absent + delegate_to: "{{ rbdmirror_to_kill }}" + + post_tasks: + - name: Get servicemap details + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json" + register: ceph_health + failed_when: + - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list" + - rbdmirror_to_kill_gid in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list + until: + - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list" + - rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list + changed_when: false + when: rbdmirror_to_kill_gid is defined + retries: 12 + delay: 10 + + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + changed_when: false diff --git a/infrastructure-playbooks/shrink-rgw.yml b/infrastructure-playbooks/shrink-rgw.yml new file mode 100644 index 0000000..9ce1058 --- /dev/null +++ b/infrastructure-playbooks/shrink-rgw.yml @@ -0,0 +1,141 @@ +--- +# This playbook shrinks the Ceph RGW from your cluster +# +# Use it like this: +# ansible-playbook shrink-rgw.yml -e rgw_to_kill=ceph-rgw01 +# Prompts for confirmation to shrink, defaults to no and +# doesn't shrink the cluster. yes shrinks the cluster. +# +# ansible-playbook -e ireallymeanit=yes|no shrink-rgw.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. 
+
+
+- name: Confirm whether user really meant to remove rgw from the ceph cluster
+  hosts: localhost
+  become: false
+  gather_facts: false
+  vars_prompt:
+    - name: ireallymeanit  # noqa: name[casing]
+      prompt: Are you sure you want to shrink the cluster?
+      default: 'no'
+      private: false
+  tasks:
+    - name: Exit playbook, if no rgw was given
+      when: rgw_to_kill is not defined or rgw_to_kill | length == 0
+      ansible.builtin.fail:
+        msg: >
+          rgw_to_kill must be declared.
+          Exiting shrink-cluster playbook, no RGW was removed. On the command
+          line when invoking the playbook, you can use
+          "-e rgw_to_kill=ceph.rgw0 argument". You can only remove a single
+          RGW each time the playbook runs.
+
+    - name: Exit playbook, if user did not mean to shrink cluster
+      when: ireallymeanit != 'yes'
+      ansible.builtin.fail:
+        msg: >
+          Exiting shrink-rgw playbook, no RGW was removed. To shrink the
+          cluster, either say 'yes' on the prompt or use
+          '-e ireallymeanit=yes' on the command line when invoking the playbook
+
+- name: Gather facts and mons and rgws
+  hosts:
+    - "{{ mon_group_name | default('mons') }}[0]"
+    - "{{ rgw_group_name | default('rgws') }}"
+  become: true
+  gather_facts: false
+  tasks:
+    - name: Gather facts
+      ansible.builtin.setup:
+        gather_subset:
+          - 'all'
+          - '!facter'
+          - '!ohai'
+
+- name: Shrink rgw service
+  hosts: mons[0]
+  become: true
+  gather_facts: false
+  pre_tasks:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
+        name: ceph-defaults
+
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
+        name: ceph-facts
+        tasks_from: container_binary
+
+    - name: Set_fact container_exec_cmd for mon0
+      ansible.builtin.set_fact:
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+      when: containerized_deployment | bool
+
+    - name: Exit playbook, if can not connect to the cluster
+      ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+      register: ceph_health
+      changed_when: false
+      until: ceph_health is succeeded
+      retries: 5
+      delay: 2
+
+    - name: Get rgw instances
+      ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
+      register: rgw_instances
+      changed_when: false
+
+
+    - name: Exit playbook, if the rgw_to_kill doesn't exist
+      when: rgw_to_kill not in (rgw_instances.stdout | from_json).services.rgw.daemons.keys() | list
+      ansible.builtin.fail:
+        msg: >
+          It seems that the rgw instance given is not part of the ceph cluster. Please
+          make sure it is.
+          The rgw instance format is $(hostname).rgw$(instance number).
+  tasks:
+    - name: Get rgw host running the rgw instance to kill
+      ansible.builtin.set_fact:
+        rgw_host: '{{ item }}'
+      with_items: '{{ groups[rgw_group_name] }}'
+      when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0]
+
+    - name: Stop rgw service
+      ansible.builtin.service:
+        name: ceph-radosgw@rgw.{{ rgw_to_kill }}
+        state: stopped
+        enabled: false
+      delegate_to: "{{ rgw_host }}"
+      failed_when: false
+
+    - name: Ensure that the rgw is stopped
+      ansible.builtin.command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}"  # noqa command-instead-of-module
+      register: rgw_to_kill_status
+      failed_when: rgw_to_kill_status.rc == 0
+      changed_when: false
+      delegate_to: "{{ rgw_host }}"
+      retries: 5
+      delay: 2
+
+    - name: Exit if rgw_to_kill is reported in ceph status
+      ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
+      register: ceph_status
+      changed_when: false
+      failed_when:
+        - (ceph_status.stdout | from_json).services.rgw is defined
+        - rgw_to_kill in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
+      until:
+        - (ceph_status.stdout | from_json).services.rgw is defined
+        - rgw_to_kill not in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
+      retries: 3
+      delay: 3
+
+    - name: Purge directories
related to rgw + ansible.builtin.file: + path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_to_kill }} + state: absent + delegate_to: "{{ rgw_host }}" + post_tasks: + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + changed_when: false diff --git a/infrastructure-playbooks/storage-inventory.yml b/infrastructure-playbooks/storage-inventory.yml new file mode 100644 index 0000000..988d307 --- /dev/null +++ b/infrastructure-playbooks/storage-inventory.yml @@ -0,0 +1,30 @@ +--- +# This playbook queries each OSD using `ceph-volume inventory` to report the +# entire storage device inventory of a cluster. +# +# Usage: +# ansible-playbook storage-inventory.yml + +- name: Gather facts and check the init system + hosts: osds + become: true + tasks: + - name: Gather facts on all Ceph hosts + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" + +- name: Query each host for storage device inventory + hosts: osds + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: List storage inventory + ceph_volume: + action: "inventory" + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml new file mode 100644 index 0000000..5c9ce49 --- /dev/null +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -0,0 +1,808 @@ +--- +# This playbook switches from non-containerized to containerized Ceph daemons + +- name: Confirm whether user really meant to switch from 
non-containerized to containerized ceph daemons + + hosts: localhost + + gather_facts: false + any_errors_fatal: true + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to switch from non-containerized to containerized ceph daemons? + default: 'no' + private: false + + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Fail when less than three monitors + ansible.builtin.fail: + msg: "This playbook requires at least three monitors." + when: groups[mon_group_name] | length | int < 3 + + - name: Exit playbook, if user did not mean to switch from non-containerized to containerized daemons? + ansible.builtin.fail: + msg: > + "Exiting switch-from-non-containerized-to-containerized-ceph-daemons.yml playbook, + cluster did not switch from non-containerized to containerized ceph daemons. + To switch from non-containerized to containerized ceph daemons, either say 'yes' on the prompt or + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + +- name: Gather facts + + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + - "{{ rbdmirror_group_name|default('rbdmirrors') }}" + - "{{ nfs_group_name|default('nfss') }}" + + become: true + + vars: + delegate_facts_host: true + + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Gather and delegate facts + ansible.builtin.setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups['all'] | difference(groups.get(client_group_name, [])) }}" + run_once: true + when: delegate_facts_host | bool + tags: always + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: 
ceph-facts + + - name: Import ceph-validate role + ansible.builtin.import_role: + name: ceph-validate + +- name: Switching from non-containerized to containerized ceph mon + vars: + containerized_deployment: true + switch_to_containers: true + mon_group_name: mons + hosts: "{{ mon_group_name|default('mons') }}" + serial: 1 + become: true + pre_tasks: + - name: Select a running monitor + ansible.builtin.set_fact: + mon_host: "{{ item }}" + with_items: "{{ groups[mon_group_name] }}" + when: item != inventory_hostname + + - name: Stop non-containerized ceph mon + ansible.builtin.service: + name: "ceph-mon@{{ ansible_facts['hostname'] }}" + state: stopped + enabled: false + + - name: Remove old systemd unit files + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /usr/lib/systemd/system/ceph-mon@.service + - /usr/lib/systemd/system/ceph-mon.target + - /lib/systemd/system/ceph-mon@.service + - /lib/systemd/system/ceph-mon.target + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + # NOTE: changed from file module to raw find command for performance reasons + # The file module has to run checks on current ownership of all directories and files. 
This is unnecessary + # as in this case we know we want all owned by ceph user + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/mon /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" + changed_when: false + + - name: Check for existing old leveldb file extension (ldb) + ansible.builtin.shell: stat /var/lib/ceph/mon/*/store.db/*.ldb + changed_when: false + failed_when: false + register: ldb_files + + - name: Rename leveldb extension from ldb to sst + ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/mon/*/store.db/*.ldb + changed_when: false + failed_when: false + when: ldb_files.rc == 0 + + - name: Copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common + ansible.builtin.command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring + args: + creates: /etc/ceph/{{ cluster }}.mon.keyring + changed_when: false + failed_when: false + + tasks: + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + + - name: Import ceph-mon role + ansible.builtin.import_role: + name: ceph-mon + + post_tasks: + - name: Waiting for the monitor to join the quorum... 
+ ansible.builtin.command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json" + register: ceph_health_raw + until: ansible_facts['hostname'] in (ceph_health_raw.stdout | trim | from_json)["quorum_names"] + changed_when: false + retries: "{{ health_mon_check_retries }}" + delay: "{{ health_mon_check_delay }}" + +- name: Switching from non-containerized to containerized ceph mgr + + hosts: "{{ mgr_group_name|default('mgrs') }}" + + vars: + containerized_deployment: true + mgr_group_name: mgrs + + serial: 1 + become: true + pre_tasks: + # failed_when: false is here because if we're + # working with a jewel cluster then ceph mgr + # will not exist + - name: Stop non-containerized ceph mgr(s) + ansible.builtin.service: + name: "ceph-mgr@{{ ansible_facts['hostname'] }}" + state: stopped + enabled: false + failed_when: false + + - name: Remove old systemd unit files + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /usr/lib/systemd/system/ceph-mgr@.service + - /usr/lib/systemd/system/ceph-mgr.target + - /lib/systemd/system/ceph-mgr@.service + - /lib/systemd/system/ceph-mgr.target + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + # NOTE: changed from file module to raw find command for performance reasons + # The file module has to run checks on current ownership of all directories and files. 
This is unnecessary + # as in this case we know we want all owned by ceph user + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/mgr /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" + changed_when: false + + tasks: + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + + - name: Import ceph-mgr role + ansible.builtin.import_role: + name: ceph-mgr + + +- name: Set osd flags + hosts: "{{ mon_group_name | default('mons') }}[0]" + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Get pool list + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" + register: pool_list + changed_when: false + check_mode: false + + - name: Get balancer module status + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" + register: balancer_status_switch + changed_when: false + check_mode: false + + - name: Set_fact pools_pgautoscaler_mode + ansible.builtin.set_fact: + pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}" + with_items: "{{ pool_list.stdout | default('{}') | from_json }}" + + - name: Disable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" + changed_when: false + when: (balancer_status_switch.stdout | from_json)['active'] | bool + + - name: Disable pg autoscale on pools + ceph_pool: + name: "{{ item.name }}" + cluster: 
"{{ cluster }}" + pg_autoscale_mode: false + with_items: "{{ pools_pgautoscaler_mode }}" + when: + - pools_pgautoscaler_mode is defined + - item.mode == 'on' + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Set osd flags + ceph_osd_flag: + name: "{{ item }}" + cluster: "{{ cluster }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: + - noout + - nodeep-scrub + + +- name: Switching from non-containerized to containerized ceph osd + + vars: + containerized_deployment: true + osd_group_name: osds + switch_to_containers: true + + hosts: "{{ osd_group_name|default('osds') }}" + + serial: 1 + become: true + pre_tasks: + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Collect running osds + ansible.builtin.shell: | + set -o pipefail; + systemctl list-units | grep -E "loaded * active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-volume' + register: running_osds + changed_when: false + failed_when: false + + # systemd module does not support --runtime option + - name: Disable ceph-osd@.service runtime-enabled + ansible.builtin.command: "systemctl disable --runtime {{ item }}" # noqa command-instead-of-module + changed_when: false + failed_when: false + with_items: "{{ running_osds.stdout_lines | default([]) }}" + when: item.startswith('ceph-osd@') + + - name: Stop/disable/mask non-containerized ceph osd(s) (if any) + ansible.builtin.systemd: + name: "{{ item }}" + state: stopped + enabled: false + with_items: "{{ running_osds.stdout_lines | default([]) }}" + when: running_osds != [] + + - name: Disable ceph.target + ansible.builtin.systemd: + name: ceph.target + 
enabled: false + + - name: Remove old ceph-osd systemd units + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /usr/lib/systemd/system/ceph-osd.target + - /usr/lib/systemd/system/ceph-osd@.service + - /usr/lib/systemd/system/ceph-volume@.service + - /lib/systemd/system/ceph-osd.target + - /lib/systemd/system/ceph-osd@.service + - /lib/systemd/system/ceph-volume@.service + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + # NOTE: changed from file module to raw find command for performance reasons + # The file module has to run checks on current ownership of all directories and files. This is unnecessary + # as in this case we know we want all owned by ceph user + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/osd /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" + changed_when: false + + - name: Check for existing old leveldb file extension (ldb) + ansible.builtin.shell: stat /var/lib/ceph/osd/*/current/omap/*.ldb + changed_when: false + failed_when: false + register: ldb_files + + - name: Rename leveldb extension from ldb to sst + ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb + changed_when: false + failed_when: false + when: ldb_files.rc == 0 + + - name: Check if containerized osds are already running + ansible.builtin.command: > + {{ container_binary }} ps -q --filter='name=ceph-osd' + changed_when: false + failed_when: false + register: osd_running + + - name: Get osd directories + ansible.builtin.command: > + find /var/lib/ceph/osd {% if dmcrypt | bool %}/var/lib/ceph/osd-lockbox{% endif %} -maxdepth 1 -mindepth 1 -type d + register: osd_dirs + changed_when: false + failed_when: false + + - name: Unmount all the osd directories + ansible.builtin.command: > + umount {{ item }} + changed_when: false + failed_when: false + with_items: "{{ 
osd_dirs.stdout_lines }}" + when: osd_running.rc != 0 or osd_running.stdout_lines | length == 0 + + tasks: + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + + - name: Import ceph-osd role + ansible.builtin.import_role: + name: ceph-osd + + post_tasks: + - name: Container - waiting for clean pgs... + ansible.builtin.command: > + {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json + register: ceph_health_post + until: > + (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0) + and + (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | selectattr('name', 'search', '^active\\+clean') | map(attribute='num') | list | sum) == (ceph_health_post.stdout | from_json).pg_summary.num_pgs) + delegate_to: "{{ groups[mon_group_name][0] }}" + retries: "{{ health_osd_check_retries }}" + delay: "{{ health_osd_check_delay }}" + changed_when: false + + +- name: Unset osd flags + hosts: "{{ mon_group_name | default('mons') }}[0]" + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Re-enable pg autoscale on pools + ceph_pool: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + pg_autoscale_mode: true + with_items: "{{ pools_pgautoscaler_mode }}" + when: + - pools_pgautoscaler_mode is defined + - item.mode == 'on' + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + 
CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Unset osd flags + ceph_osd_flag: + name: "{{ item }}" + cluster: "{{ cluster }}" + state: absent + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: + - noout + - nodeep-scrub + + - name: Re-enable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" + changed_when: false + when: (balancer_status_switch.stdout | from_json)['active'] | bool + + +- name: Switching from non-containerized to containerized ceph mds + + hosts: "{{ mds_group_name|default('mdss') }}" + + vars: + containerized_deployment: true + mds_group_name: mdss + + serial: 1 + become: true + pre_tasks: + + - name: Stop non-containerized ceph mds(s) + ansible.builtin.service: + name: "ceph-mds@{{ ansible_facts['hostname'] }}" + state: stopped + enabled: false + + - name: Remove old systemd unit files + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /usr/lib/systemd/system/ceph-mds@.service + - /usr/lib/systemd/system/ceph-mds.target + - /lib/systemd/system/ceph-mds@.service + - /lib/systemd/system/ceph-mds.target + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + # NOTE: changed from file module to raw find command for performance reasons + # The file module has to run checks on current ownership of all directories and files. 
This is unnecessary + # as in this case we know we want all owned by ceph user + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/mds /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + changed_when: false + + tasks: + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + + - name: Import ceph-mds role + ansible.builtin.import_role: + name: ceph-mds + + +- name: Switching from non-containerized to containerized ceph rgw + + hosts: "{{ rgw_group_name|default('rgws') }}" + + vars: + containerized_deployment: true + rgw_group_name: rgws + + serial: 1 + become: true + pre_tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + - name: Import ceph-config role + ansible.builtin.import_role: + name: ceph-config + tasks_from: rgw_systemd_environment_file.yml + + # NOTE: changed from file module to raw find command for performance reasons + # The file module has to run checks on current ownership of all directories and files. 
This is unnecessary + # as in this case we know we want all owned by ceph user + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/radosgw /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + changed_when: false + + tasks: + - name: Stop non-containerized ceph rgw(s) + ansible.builtin.service: + name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" + state: stopped + enabled: false + with_items: "{{ rgw_instances }}" + + - name: Remove old systemd unit files + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /usr/lib/systemd/system/ceph-radosgw@.service + - /usr/lib/systemd/system/ceph-radosgw.target + - /lib/systemd/system/ceph-radosgw@.service + - /lib/systemd/system/ceph-radosgw.target + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + + - name: Import ceph-rgw role + ansible.builtin.import_role: + name: ceph-rgw + + +- name: Switching from non-containerized to containerized ceph rbd-mirror + + hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" + + vars: + containerized_deployment: true + rbdmirror_group_name: rbdmirrors + + serial: 1 + become: true + pre_tasks: + - name: Check for ceph rbd mirror services + ansible.builtin.command: systemctl show --no-pager --property=Id ceph-rbd-mirror@* # noqa: command-instead-of-module + changed_when: false + register: rbdmirror_services + + - name: Stop non-containerized ceph rbd mirror(s) # noqa: ignore-errors + ansible.builtin.service: + name: "{{ item.split('=')[1] }}" + state: stopped + enabled: false + ignore_errors: true + loop: "{{ rbdmirror_services.stdout_lines }}" + + - name: 
Remove old systemd unit files + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /usr/lib/systemd/system/ceph-rbd-mirror@.service + - /usr/lib/systemd/system/ceph-rbd-mirror.target + - /lib/systemd/system/ceph-rbd-mirror@.service + - /lib/systemd/system/ceph-rbd-mirror.target + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + + # NOTE: changed from file module to raw find command for performance reasons + # The file module has to run checks on current ownership of all directories and files. This is unnecessary + # as in this case we know we want all owned by ceph user + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + changed_when: false + + tasks: + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + + - name: Import ceph-rbd-mirror role + ansible.builtin.import_role: + name: ceph-rbd-mirror + + +- name: Switching from non-containerized to containerized ceph nfs + + hosts: "{{ nfs_group_name|default('nfss') }}" + + vars: + containerized_deployment: true + nfs_group_name: nfss + + serial: 1 + become: true + pre_tasks: + + # failed_when: false is here because if we're + # working with a jewel cluster then ceph nfs + # will not exist + - name: Stop non-containerized ceph nfs(s) + ansible.builtin.service: + name: nfs-ganesha + state: stopped + enabled: false + failed_when: false + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + 
ansible.builtin.import_role: + name: ceph-facts + + # NOTE: changed from file module to raw find command for performance reasons + # The file module has to run checks on current ownership of all directories and files. This is unnecessary + # as in this case we know we want all owned by ceph user + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + changed_when: false + + tasks: + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-container-engine role + ansible.builtin.import_role: + name: ceph-container-engine + + - name: Import ceph-container-common role + ansible.builtin.import_role: + name: ceph-container-common + + - name: Import ceph-nfs role + ansible.builtin.import_role: + name: ceph-nfs + +- name: Switching from non-containerized to containerized ceph-crash + + hosts: + - "{{ mon_group_name | default('mons') }}" + - "{{ osd_group_name | default('osds') }}" + - "{{ mds_group_name | default('mdss') }}" + - "{{ rgw_group_name | default('rgws') }}" + - "{{ rbdmirror_group_name | default('rbdmirrors') }}" + - "{{ mgr_group_name | default('mgrs') }}" + + vars: + containerized_deployment: true + become: true + tasks: + - name: Stop non-containerized ceph-crash + ansible.builtin.service: + name: ceph-crash + state: stopped + enabled: false + + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-crash role + ansible.builtin.import_role: + name: ceph-crash + +- name: Switching from non-containerized to containerized ceph-exporter + + hosts: + - "{{ mon_group_name | default('mons') 
}}" + - "{{ osd_group_name | default('osds') }}" + - "{{ mds_group_name | default('mdss') }}" + - "{{ rgw_group_name | default('rgws') }}" + - "{{ rbdmirror_group_name | default('rbdmirrors') }}" + - "{{ mgr_group_name | default('mgrs') }}" + + vars: + containerized_deployment: true + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Import ceph-handler role + ansible.builtin.import_role: + name: ceph-handler + + - name: Import ceph-exporter role + ansible.builtin.import_role: + name: ceph-exporter + +- name: Final task + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ mgr_group_name|default('mgrs') }}" + - "{{ osd_group_name|default('osds') }}" + - "{{ mds_group_name|default('mdss') }}" + - "{{ rgw_group_name|default('rgws') }}" + vars: + containerized_deployment: true + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + # NOTE: changed from file module to raw find command for performance reasons + # The file module has to run checks on current ownership of all directories and files. This is unnecessary + # as in this case we know we want all owned by ceph user + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + changed_when: false diff --git a/infrastructure-playbooks/take-over-existing-cluster.yml b/infrastructure-playbooks/take-over-existing-cluster.yml new file mode 100644 index 0000000..228e86a --- /dev/null +++ b/infrastructure-playbooks/take-over-existing-cluster.yml @@ -0,0 +1,73 @@ +--- +# NOTE (leseb): +# The playbook aims to takeover a cluster that was not configured with +# ceph-ansible. 
+# +# The procedure is as follows: +# +# 1. Install Ansible and add your monitors and osds hosts in it. For more detailed information you can read the [Ceph Ansible Wiki](https://github.com/ceph/ceph-ansible/wiki) +# 2. Set `generate_fsid: false` in `group_vars` +# 3. Get your current cluster fsid with `ceph fsid` and set `fsid` accordingly in `group_vars` +# 4. Run the playbook called: `take-over-existing-cluster.yml` like this `ansible-playbook take-over-existing-cluster.yml`. +# 5. Eventually run Ceph Ansible to validate everything by doing: `ansible-playbook site.yml`. + +- name: Fetch keys + hosts: mons + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import ceph-fetch-keys role + ansible.builtin.import_role: + name: ceph-fetch-keys + +- name: Take over existing cluster + hosts: + - mons + - osds + - mdss + - rgws + - nfss + - rbdmirrors + - clients + - mgrs + become: true + tasks: + - name: Import ceph-defaults role + ansible.builtin.import_role: + name: ceph-defaults + + post_tasks: + - name: Get the name of the existing ceph cluster + ansible.builtin.shell: | + set -o pipefail; + basename $(grep --exclude '*.bak' -R fsid /etc/ceph/ | egrep -o '^[^.]*' | head -n 1) + changed_when: false + register: cluster_name + + - name: Run stat module on Ceph configuration file + ansible.builtin.stat: + path: "/etc/ceph/{{ cluster_name.stdout }}.conf" + register: ceph_conf_stat + + # Creates a backup of original ceph conf file in 'cluster_name-YYYYMMDDTHHMMSS.conf.bak' format + - name: Make a backup of original Ceph configuration file + ansible.builtin.copy: + src: "/etc/ceph/{{ cluster_name.stdout }}.conf" + dest: "/etc/ceph/{{ cluster_name.stdout }}-{{ ansible_date_time.iso8601_basic_short }}.conf.bak" + remote_src: true + owner: "{{ ceph_conf_stat.stat.pw_name }}" + group: "{{ ceph_conf_stat.stat.gr_name }}" + mode: "{{ ceph_conf_stat.stat.mode }}" + + - name: Generate ceph configuration file 
+ openstack.config_template.config_template: + src: "roles/ceph-config/templates/ceph.conf.j2" + dest: "/etc/ceph/{{ cluster_name.stdout }}.conf" + owner: "{{ ceph_conf_stat.stat.pw_name }}" + group: "{{ ceph_conf_stat.stat.gr_name }}" + mode: "{{ ceph_conf_stat.stat.mode }}" + config_overrides: "{{ ceph_conf_overrides }}" + config_type: ini diff --git a/infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml b/infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml new file mode 100644 index 0000000..d7bf4e3 --- /dev/null +++ b/infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml @@ -0,0 +1,39 @@ +--- +# This playbook was made to automate Ceph servers maintenance +# Typical use case: hardware change +# By running this playbook you will set the 'noout' flag on your +# cluster, which means that OSD **can't** be marked as out +# of the CRUSH map, but they will be marked as down. +# Basically we tell the cluster to don't move any data since +# the operation won't last for too long. + +- hosts: + gather_facts: false + + tasks: + + - name: Set the noout flag + ansible.builtin.command: ceph osd set noout + delegate_to: + + - name: Turn off the server + ansible.builtin.command: poweroff + + - name: Wait for the server to go down + local_action: + module: wait_for + host: + port: 22 + state: stopped + + - name: Wait for the server to come up + local_action: + module: wait_for + host: + port: 22 + delay: 10 + timeout: 3600 + + - name: Unset the noout flag + ansible.builtin.command: ceph osd unset noout + delegate_to: diff --git a/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml b/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml new file mode 100644 index 0000000..e43e8eb --- /dev/null +++ b/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml @@ -0,0 +1,552 @@ +--- +# This playbook was meant to upgrade a node from Ubuntu to RHEL. +# We are performing a set of actions prior to reboot the node. 
+# The node reboots via PXE and gets its new operating system. +# This playbook only works for monitors and OSDs. +# Note that some of the checks are ugly: +# ie: the when migration_completed.stat.exists +# can be improved with includes, however I wanted to keep a single file... +# + +- hosts: mons + serial: 1 + sudo: true + + vars: + backup_dir: /tmp/ + + tasks: + + - name: Check if the node has be migrated already + ansible.builtin.stat: > + path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed + register: migration_completed + failed_when: false + + - name: Check for failed run + ansible.builtin.stat: > + path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar + register: mon_archive_leftover + + - fail: msg="Looks like an archive is already there, please remove it!" + when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True + + - name: Compress the store as much as possible + ansible.builtin.command: ceph tell mon.{{ ansible_facts['hostname'] }} compact + when: migration_completed.stat.exists == False + + - name: Check if sysvinit + ansible.builtin.stat: > + path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit + register: monsysvinit + changed_when: false + + - name: Check if upstart + ansible.builtin.stat: > + path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart + register: monupstart + changed_when: false + + - name: Check if init does what it is supposed to do (Sysvinit) + ansible.builtin.shell: > + ps faux|grep -sq [c]eph-mon && service ceph status mon >> /dev/null + register: ceph_status_sysvinit + changed_when: false + + # can't complete the condition since the previous taks never ran... + - fail: msg="Something is terribly wrong here, sysvinit is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!" 
+ when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True + + - name: Check if init does what it is supposed to do (upstart) + ansible.builtin.shell: > + ps faux|grep -sq [c]eph-mon && status ceph-mon-all >> /dev/null + register: ceph_status_upstart + changed_when: false + + - fail: msg="Something is terribly wrong here, upstart is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!" + when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True + + - name: Restart the Monitor after compaction (Upstart) + service: > + name=ceph-mon + state=restarted + args=id={{ ansible_facts['hostname'] }} + when: monupstart.stat.exists == True and migration_completed.stat.exists == False + + - name: Restart the Monitor after compaction (Sysvinit) + service: > + name=ceph + state=restarted + args=mon + when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False + + - name: Wait for the monitor to be up again + local_action: + module: wait_for + host: "{{ ansible_ssh_host | default(inventory_hostname) }}" + port: 6789 + timeout: 10 + when: migration_completed.stat.exists == False + + - name: Stop the monitor (Upstart) + service: > + name=ceph-mon + state=stopped + args=id={{ ansible_facts['hostname'] }} + when: monupstart.stat.exists == True and migration_completed.stat.exists == False + + - name: Stop the monitor (Sysvinit) + service: > + name=ceph + state=stopped + args=mon + when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False + + - name: Wait for the monitor to be down + local_action: + module: wait_for + host: "{{ ansible_ssh_host | default(inventory_hostname) }}" + port: 6789 + timeout: 10 + state: stopped + when: migration_completed.stat.exists == False + + - name: Create a backup directory + file: > + path={{ backup_dir }}/monitors-backups + state=directory + owner=root 
+ group=root + mode=0644 + delegate_to: "{{ item }}" + with_items: "{{ groups.backup[0] }}" + when: migration_completed.stat.exists == False + + # NOTE (leseb): should we convert upstart to sysvinit here already? + - name: Archive monitor stores + ansible.builtin.shell: > + tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar + chdir=/var/lib/ceph/ + creates={{ ansible_facts['hostname'] }}.tar + when: migration_completed.stat.exists == False + + - name: Scp the Monitor store + fetch: > + src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar + dest={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar + flat=yes + when: migration_completed.stat.exists == False + + - name: Reboot the server + ansible.builtin.command: reboot + when: migration_completed.stat.exists == False + + - name: Wait for the server to come up + local_action: + module: wait_for + port: 22 + delay: 10 + timeout: 3600 + when: migration_completed.stat.exists == False + + - name: Wait a bit more to be sure that the server is ready + pause: seconds=20 + when: migration_completed.stat.exists == False + + - name: Check if sysvinit + ansible.builtin.stat: > + path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit + register: monsysvinit + changed_when: false + + - name: Check if upstart + ansible.builtin.stat: > + path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart + register: monupstart + changed_when: false + + - name: Make sure the monitor is stopped (Upstart) + service: > + name=ceph-mon + state=stopped + args=id={{ ansible_facts['hostname'] }} + when: monupstart.stat.exists == True and migration_completed.stat.exists == False + + - name: Make sure the monitor is stopped (Sysvinit) + service: > + name=ceph + state=stopped + args=mon + when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False + + # NOTE (leseb): 'creates' was added in Ansible 1.6 + - name: Copy and unarchive the monitor store + 
unarchive: > + src={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar + dest=/var/lib/ceph/ + copy=yes + mode=0600 + creates=etc/ceph/ceph.conf + when: migration_completed.stat.exists == False + + - name: Copy keys and configs + ansible.builtin.shell: > + cp etc/ceph/* /etc/ceph/ + chdir=/var/lib/ceph/ + when: migration_completed.stat.exists == False + + - name: Configure RHEL7 for sysvinit + ansible.builtin.shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \; + when: migration_completed.stat.exists == False + + # NOTE (leseb): at this point the upstart and sysvinit checks are not necessary + # so we directly call sysvinit + - name: Start the monitor + service: > + name=ceph + state=started + args=mon + when: migration_completed.stat.exists == False + + - name: Wait for the Monitor to be up again + local_action: + module: wait_for + host: "{{ ansible_ssh_host | default(inventory_hostname) }}" + port: 6789 + timeout: 10 + when: migration_completed.stat.exists == False + + - name: Waiting for the monitor to join the quorum... 
+ ansible.builtin.shell: > + ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }} + register: result + until: result.rc == 0 + retries: 5 + delay: 10 + delegate_to: "{{ item }}" + with_items: "{{ groups.backup[0] }}" + when: migration_completed.stat.exists == False + + - name: Done moving to the next monitor + file: > + path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed + state=touch + owner=root + group=root + mode=0600 + when: migration_completed.stat.exists == False + +- hosts: osds + serial: 1 + sudo: true + + vars: + backup_dir: /tmp/ + + tasks: + - name: Check if the node has be migrated already + ansible.builtin.stat: > + path=/var/lib/ceph/migration_completed + register: migration_completed + failed_when: false + + - name: Check for failed run + ansible.builtin.stat: > + path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar + register: osd_archive_leftover + + - fail: msg="Looks like an archive is already there, please remove it!" + when: migration_completed.stat.exists == False and osd_archive_leftover.stat.exists == True + + - name: Check if init does what it is supposed to do (Sysvinit) + ansible.builtin.shell: > + ps faux|grep -sq [c]eph-osd && service ceph status osd >> /dev/null + register: ceph_status_sysvinit + changed_when: false + + # can't complete the condition since the previous taks never ran... + - fail: msg="Something is terribly wrong here, sysvinit is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!" 
+ when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True + + - name: Check if init does what it is supposed to do (upstart) + ansible.builtin.shell: > + ps faux|grep -sq [c]eph-osd && initctl list|egrep -sq "ceph-osd \(ceph/.\) start/running, process [0-9][0-9][0-9][0-9]" + register: ceph_status_upstart + changed_when: false + + - fail: msg="Something is terribly wrong here, upstart is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!" + when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True + + - name: Set the noout flag + ansible.builtin.command: ceph osd set noout + delegate_to: "{{ item }}" + with_items: "{{ groups[mon_group_name][0] }}" + when: migration_completed.stat.exists == False + + - name: Check if sysvinit + ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/sysvinit + register: osdsysvinit + failed_when: false + changed_when: false + + - name: Check if upstart + ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/upstart + register: osdupstart + failed_when: false + changed_when: false + + - name: Archive ceph configs + ansible.builtin.shell: > + tar -cpvzf - --one-file-system . 
/etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar + chdir=/var/lib/ceph/ + creates={{ ansible_facts['hostname'] }}.tar + when: migration_completed.stat.exists == False + + - name: Create backup directory + file: > + path={{ backup_dir }}/osds-backups + state=directory + owner=root + group=root + mode=0644 + delegate_to: "{{ item }}" + with_items: "{{ groups.backup[0] }}" + when: migration_completed.stat.exists == False + + - name: Scp OSDs dirs and configs + fetch: > + src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar + dest={{ backup_dir }}/osds-backups/ + flat=yes + when: migration_completed.stat.exists == False + + - name: Collect OSD ports + ansible.builtin.shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq + register: osd_ports + when: migration_completed.stat.exists == False + + - name: Gracefully stop the OSDs (Upstart) + service: > + name=ceph-osd-all + state=stopped + when: osdupstart.rc == 0 and migration_completed.stat.exists == False + + - name: Gracefully stop the OSDs (Sysvinit) + service: > + name=ceph + state=stopped + args=mon + when: osdsysvinit.rc == 0 and migration_completed.stat.exists == False + + - name: Wait for the OSDs to be down + local_action: + module: wait_for + host: "{{ ansible_ssh_host | default(inventory_hostname) }}" + port: {{ item }} + timeout: 10 + state: stopped + with_items: "{{ osd_ports.stdout_lines }}" + when: migration_completed.stat.exists == False + + - name: Configure RHEL with sysvinit + ansible.builtin.shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \; + when: migration_completed.stat.exists == False + + - name: Reboot the server + ansible.builtin.command: reboot + when: migration_completed.stat.exists == False + + - name: Wait for the server to come up + local_action: + module: wait_for + port: 22 + delay: 10 + timeout: 3600 + when: 
migration_completed.stat.exists == False + + - name: Wait a bit to be sure that the server is ready for scp + pause: seconds=20 + when: migration_completed.stat.exists == False + + # NOTE (leseb): 'creates' was added in Ansible 1.6 + - name: Copy and unarchive the OSD configs + unarchive: > + src={{ backup_dir }}/osds-backups/{{ ansible_facts['hostname'] }}.tar + dest=/var/lib/ceph/ + copy=yes + mode=0600 + creates=etc/ceph/ceph.conf + when: migration_completed.stat.exists == False + + - name: Copy keys and configs + ansible.builtin.shell: > + cp etc/ceph/* /etc/ceph/ + chdir=/var/lib/ceph/ + when: migration_completed.stat.exists == False + + # NOTE (leseb): at this point the upstart and sysvinit checks are not necessary + # so we directly call sysvinit + - name: Start all the OSDs + service: > + name=ceph-osd-all + state=started + args=osd + when: migration_completed.stat.exists == False + + # NOTE (leseb): this is tricky unless this is set into the ceph.conf + # listened ports can be predicted, thus they will change after each restart +# - name: Wait for the OSDs to be up again +# local_action: > +# wait_for +# host={{ ansible_ssh_host | default(inventory_hostname) }} +# port={{ item }} +# timeout=30 +# with_items: +# - "{{ osd_ports.stdout_lines }}" + + - name: Waiting for clean PGs... 
+ ansible.builtin.shell: > + test "[""$(ceph -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(ceph -s -f json | python -c 'import sys, json; print([ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"])')" + register: result + until: result.rc == 0 + retries: 10 + delay: 10 + delegate_to: "{{ item }}" + with_items: "{{ groups.backup[0] }}" + when: migration_completed.stat.exists == False + + - name: Done moving to the next OSD + file: > + path=/var/lib/ceph/migration_completed + state=touch + owner=root + group=root + mode=0600 + when: migration_completed.stat.exists == False + + - name: Unset the noout flag + ansible.builtin.command: ceph osd unset noout + delegate_to: "{{ item }}" + with_items: "{{ groups[mon_group_name][0] }}" + when: migration_completed.stat.exists == False + +- hosts: rgws + serial: 1 + sudo: true + + vars: + backup_dir: /tmp/ + + tasks: + - name: Check if the node has be migrated already + ansible.builtin.stat: > + path=/var/lib/ceph/radosgw/migration_completed + register: migration_completed + failed_when: false + + - name: Check for failed run + ansible.builtin.stat: > + path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar + register: rgw_archive_leftover + + - fail: msg="Looks like an archive is already there, please remove it!" + when: migration_completed.stat.exists == False and rgw_archive_leftover.stat.exists == True + + - name: Archive rados gateway configs + ansible.builtin.shell: > + tar -cpvzf - --one-file-system . 
/etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar + chdir=/var/lib/ceph/ + creates={{ ansible_facts['hostname'] }}.tar + when: migration_completed.stat.exists == False + + - name: Create backup directory + file: > + path={{ backup_dir }}/rgws-backups + state=directory + owner=root + group=root + mode=0644 + delegate_to: "{{ item }}" + with_items: "{{ groups.backup[0] }}" + when: migration_completed.stat.exists == False + + - name: Scp RGWs dirs and configs + fetch: > + src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar + dest={{ backup_dir }}/rgws-backups/ + flat=yes + when: migration_completed.stat.exists == False + + - name: Gracefully stop the rados gateway + service: > + name={{ item }} + state=stopped + with_items: radosgw + when: migration_completed.stat.exists == False + + - name: Wait for radosgw to be down + local_action: + module: wait_for + host: "{{ ansible_ssh_host | default(inventory_hostname) }}" + path: /tmp/radosgw.sock + state: absent + timeout: 30 + when: migration_completed.stat.exists == False + + - name: Reboot the server + ansible.builtin.command: reboot + when: migration_completed.stat.exists == False + + - name: Wait for the server to come up + local_action: + module: wait_for + port: 22 + delay: 10 + timeout: 3600 + when: migration_completed.stat.exists == False + + - name: Wait a bit to be sure that the server is ready for scp + pause: seconds=20 + when: migration_completed.stat.exists == False + + # NOTE (leseb): 'creates' was added in Ansible 1.6 + - name: Copy and unarchive the OSD configs + unarchive: > + src={{ backup_dir }}/rgws-backups/{{ ansible_facts['hostname'] }}.tar + dest=/var/lib/ceph/ + copy=yes + mode=0600 + creates=etc/ceph/ceph.conf + when: migration_completed.stat.exists == False + + - name: Copy keys and configs + ansible.builtin.shell: > + {{ item }} + chdir=/var/lib/ceph/ + with_items: cp etc/ceph/* /etc/ceph/ + when: migration_completed.stat.exists == False + + - name: Start rados gateway + service: > + 
name={{ item }} + state=started + with_items: radosgw + when: migration_completed.stat.exists == False + + - name: Wait for radosgw to be up again + local_action: + module: wait_for + host: "{{ ansible_ssh_host | default(inventory_hostname) }}" + path: /tmp/radosgw.sock + state: present + timeout: 30 + when: migration_completed.stat.exists == False + + - name: Done moving to the next rados gateway + file: > + path=/var/lib/ceph/radosgw/migration_completed + state=touch + owner=root + group=root + mode=0600 + when: migration_completed.stat.exists == False diff --git a/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml b/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml new file mode 100644 index 0000000..3892991 --- /dev/null +++ b/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml @@ -0,0 +1,97 @@ +--- +# This playbook will make custom partition layout for your osd hosts. +# You should define `devices` variable for every host. +# +# For example, in host_vars/hostname1 +# +# devices: +# - device_name: sdb +# partitions: +# - index: 1 +# size: 10G +# type: data +# - index: 2 +# size: 5G +# type: journal +# - device_name: sdc +# partitions: +# - index: 1 +# size: 10G +# type: data +# - index: 2 +# size: 5G +# type: journal +# +- vars: + osd_group_name: osds + journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106 + data_typecode: 4fbd7e29-9d25-41b8-afd0-062c0ceff05d + devices: [] + hosts: "{{ osd_group_name }}" + + tasks: + + - name: Load a variable file for devices partition + include_vars: "{{ item }}" + with_first_found: + - files: + - "host_vars/{{ ansible_facts['hostname'] }}.yml" + - "host_vars/default.yml" + skip: true + + - name: Exit playbook, if devices not defined + ansible.builtin.fail: + msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml" + when: devices is not defined + + - name: Install sgdisk(gdisk) + ansible.builtin.package: + name: gdisk + state: present + 
register: result + until: result is succeeded + + - name: Erase all previous partitions(dangerous!!!) + ansible.builtin.shell: sgdisk --zap-all -- /dev/{{item.device_name}} + with_items: "{{ devices }}" + + - name: Make osd partitions + ansible.builtin.shell: > + sgdisk --new={{item.1.index}}:0:+{{item.1.size}} "--change-name={{item.1.index}}:ceph {{item.1.type}}" + "--typecode={{item.1.index}}:{% if item.1.type=='data' %}{{data_typecode}}{% else %}{{journal_typecode}}{% endif %}" + --mbrtogpt -- /dev/{{item.0.device_name}} + with_subelements: + - "{{ devices }}" + - partitions + + - set_fact: + owner: 167 + group: 167 + when: ansible_facts['os_family'] == "RedHat" + + - set_fact: + owner: 64045 + group: 64045 + when: ansible_facts['os_family'] == "Debian" + + - name: Change partitions ownership + ansible.builtin.file: + path: "/dev/{{item.0.device_name}}{{item.1.index}}" + owner: "{{ owner | default('root')}}" + group: "{{ group | default('disk')}}" + with_subelements: + - "{{ devices }}" + - partitions + when: + item.0.device_name | match('/dev/([hsv]d[a-z]{1,2}){1,2}$') + + - name: Change partitions ownership + ansible.builtin.file: + path: "/dev/{{item.0.device_name}}p{{item.1.index}}" + owner: "{{ owner | default('root')}}" + group: "{{ group | default('disk')}}" + with_subelements: + - "{{ devices }}" + - partitions + when: item.0.device_name | match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') +... diff --git a/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml b/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml new file mode 100644 index 0000000..a43a0e6 --- /dev/null +++ b/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml @@ -0,0 +1,105 @@ +--- +# This playbook use to migrate activity osd(s) journal to SSD. +# +# You should define `osds_journal_devices` variable for host which osd(s) journal migrate to. 
+# +# For example in host_vars/hostname1.yml +# +# osds_journal_devices: +# - device_name: /dev/sdd +# partitions: +# - index: 1 +# size: 10G +# osd_id: 0 +# - index: 2 +# size: 10G +# osd_id: 1 +# - device_name: /dev/sdf +# partitions: +# - index: 1 +# size: 10G +# osd_id: 2 +# +# @param device_name: The full device path of new ssd. +# @param partitions: The custom partition layout of ssd. +# @param index: The index of this partition. +# @param size: The size of this partition. +# @param osd_id: Which osds's journal this partition for. +# +# ansible-playbook migrate-journal-to-ssd.yml +# The playbook will migrate osd(s) journal to ssd device which you define in host_vars. + +- vars: + osd_group_name: osds + journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106 + osds_journal_devices: [] + hosts: "{{ osd_group_name }}" + serial: 1 + tasks: + + - name: Get osd(s) if directory stat + ansible.builtin.stat: + path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" + register: osds_dir_stat + with_subelements: + - "{{ osds_journal_devices }}" + - partitions + + - name: Exit playbook osd(s) is not on this host + ansible.builtin.fail: + msg: exit playbook osd(s) is not on this host + with_items: + osds_dir_stat.results + when: osds_dir_stat is defined and item.stat.exists == false + + - name: Install sgdisk(gdisk) + ansible.builtin.package: + name: gdisk + state: present + register: result + until: result is succeeded + when: osds_journal_devices is defined + + - name: Generate uuid for osds journal + ansible.builtin.command: uuidgen + register: osds + with_subelements: + - "{{ osds_journal_devices }}" + - partitions + + - name: Make osd partitions on ssd + ansible.builtin.shell: > + sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal" + --typecode={{ item.item[1].index }}:{{ journal_typecode }} + --partition-guid={{ item.item[1].index }}:{{ item.stdout }} + --mbrtogpt -- {{ 
item.item[0].device_name }} + with_items: "{{ osds.results }}" + + - name: Stop osd(s) service + ansible.builtin.service: + name: "ceph-osd@{{ item.item[1].osd_id }}" + state: stopped + with_items: "{{ osds.results }}" + + - name: Flush osd(s) journal + ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }} + with_items: "{{ osds.results }}" + when: osds_journal_devices is defined + + - name: Update osd(s) journal soft link + ansible.builtin.command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal + with_items: "{{ osds.results }}" + + - name: Update osd(s) journal uuid + ansible.builtin.command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid + with_items: "{{ osds.results }}" + + - name: Initialize osd(s) new journal + ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }} + with_items: "{{ osds.results }}" + + - name: Start osd(s) service + ansible.builtin.service: + name: "ceph-osd@{{ item.item[1].osd_id }}" + state: started + with_items: "{{ osds.results }}" diff --git a/infrastructure-playbooks/untested-by-ci/purge-multisite.yml b/infrastructure-playbooks/untested-by-ci/purge-multisite.yml new file mode 100644 index 0000000..d4840a6 --- /dev/null +++ b/infrastructure-playbooks/untested-by-ci/purge-multisite.yml @@ -0,0 +1,11 @@ +--- +# Nukes a multisite config +- hosts: rgws + become: true + tasks: + - include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml + + handlers: + # Ansible 2.1.0 bug will ignore included handlers without this + - name: Import_tasks roles/ceph-rgw/handlers/main.yml + import_tasks: roles/ceph-rgw/handlers/main.yml diff --git a/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml b/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml new file mode 100644 index 
0000000..9178a7d --- /dev/null +++ b/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml @@ -0,0 +1,115 @@ +--- +# This playbook use to recover Ceph OSDs after ssd journal failure. +# You will also realise that it’s really simple to bring your +# OSDs back to life after replacing your faulty SSD with a new one. +# +# You should define `dev_ssds` variable for host which changes ssds after +# failure. +# +# For example in host_vars/hostname1.yml +# +# dev_ssds: +# - device_name: /dev/sdd +# partitions: +# - index: 1 +# size: 10G +# osd_id: 0 +# - index: 2 +# size: 10G +# osd_id: 1 +# - device_name: /dev/sdf +# partitions: +# - index: 1 +# size: 10G +# osd_id: 2 +# +# @param device_name: The full device path of new ssd +# @param partitions: The custom partition layout of new ssd +# @param index: The index of this partition +# @param size: The size of this partition +# @param osd_id: Which osds's journal this partition for. +# +# ansible-playbook recover-osds-after-ssd-journal-failure.yml +# Prompts for select which host to recover, defaults to null, +# doesn't select host the recover ssd. Input the hostname +# which to recover osds after ssd journal failure +# +# ansible-playbook -e target_host=hostname \ +# recover-osds-after-ssd-journal-failure.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. 
+ +- hosts: localhost + gather_facts: false + vars_prompt: + - name: target_host # noqa: name[casing] + prompt: please enter the target hostname which to recover osds after ssd journal failure + private: false + tasks: + - add_host: + name: "{{ target_host }}" + groups: dynamically_created_hosts + +- hosts: dynamically_created_hosts + vars: + journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106 + dev_ssds: [] + + tasks: + - fail: msg="please define dev_ssds variable" + when: dev_ssds|length <= 0 + + - name: Get osd(s) if directory stat + ansible.builtin.stat: + path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" + register: osds_dir_stat + with_subelements: + - "{{ dev_ssds }}" + - partitions + + - name: Exit playbook osd(s) is not on this host + ansible.builtin.fail: + msg: exit playbook osds is not no this host + with_items: + osds_dir_stat.results + when: + - osds_dir_stat is defined | bool + - item.stat.exists == false + + - name: Install sgdisk(gdisk) + ansible.builtin.package: + name: gdisk + state: present + register: result + until: result is succeeded + + - name: Get osd(s) journal uuid + ansible.builtin.command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" + register: osds_uuid + with_subelements: + - "{{ dev_ssds }}" + - partitions + + - name: Make partitions on new ssd + ansible.builtin.shell: > + sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal" + --typecode={{ item.item[1].index }}:{{ journal_typecode }} + --partition-guid={{ item.item[1].index }}:{{ item.stdout }} + --mbrtogpt -- {{ item.item[0].device_name }} + with_items: "{{ osds_uuid.results }}" + + - name: Stop osd(s) service + ansible.builtin.service: + name: "ceph-osd@{{ item.item[1].osd_id }}" + state: stopped + with_items: "{{ osds_uuid.results }}" + + - name: Reinitialize osd(s) journal in new ssd + ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal 
--cluster {{ cluster }} + with_items: "{{ osds_uuid.results }}" + + - name: Start osd(s) service + ansible.builtin.service: + name: "ceph-osd@{{ item.item[1].osd_id }}" + state: started + with_items: "{{ osds_uuid.results }}" diff --git a/infrastructure-playbooks/untested-by-ci/replace-osd.yml b/infrastructure-playbooks/untested-by-ci/replace-osd.yml new file mode 100644 index 0000000..adeba39 --- /dev/null +++ b/infrastructure-playbooks/untested-by-ci/replace-osd.yml @@ -0,0 +1,190 @@ +--- +# This playbook replaces Ceph OSDs. +# It can replace any number of OSD(s) from the cluster and ALL THEIR DATA +# +# When disks fail, or if an admnistrator wants to reprovision OSDs with a new backend, +# for instance, for switching from FileStore to BlueStore, OSDs need to be replaced. +# Unlike Removing the OSD, replaced OSD’s id and CRUSH map entry need to be keep intact after the OSD is destroyed for replacement. +# +# Use it like this: +# ansible-playbook replace-osd.yml -e osd_to_replace=0,2,6 +# Prompts for confirmation to replace, defaults to no and +# doesn't replace the osd(s). yes replaces the osd(s). +# +# ansible-playbook -e ireallymeanit=yes|no replace-osd.yml +# Overrides the prompt using -e option. Can be used in +# automation scripts to avoid interactive prompt. + +- name: Gather facts and check the init system + + hosts: + - "{{ mon_group_name|default('mons') }}" + - "{{ osd_group_name|default('osds') }}" + + become: true + tasks: + - ansible.builtin.debug: msg="gather facts on all Ceph hosts for following reference" + +- name: Confirm whether user really meant to replace osd(s) + hosts: localhost + become: true + vars_prompt: + - name: ireallymeanit # noqa: name[casing] + prompt: Are you sure you want to replace the osd(s)? 
+ default: 'no' + private: false + vars: + mon_group_name: mons + osd_group_name: osds + pre_tasks: + - name: Exit playbook, if user did not mean to replace the osd(s) + ansible.builtin.fail: + msg: "Exiting replace-osd playbook, no osd(s) was/were replaced.. + To replace the osd(s), either say 'yes' on the prompt or + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' + + - name: Exit playbook, if no osd(s) was/were given + ansible.builtin.fail: + msg: "osd_to_replace must be declared + Exiting replace-osd playbook, no OSD(s) was/were replaced. + On the command line when invoking the playbook, you can use + -e osd_to_replace=0,1,2,3 argument." + when: osd_to_replace is not defined + + tasks: + - ansible.builtin.import_role: + name: ceph-defaults + + post_tasks: + - name: Set_fact container_exec_cmd build docker exec command (containerized) + ansible.builtin.set_fact: + container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" + when: containerized_deployment | bool + + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" + register: ceph_health + until: ceph_health.stdout.find("HEALTH") > -1 + delegate_to: "{{ groups[mon_group_name][0] }}" + retries: 5 + delay: 2 + + - name: Find the host(s) where the osd(s) is/are running on + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}" + with_items: "{{ osd_to_replace.split(',') }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + register: find_osd_hosts + + - name: Set_fact osd_hosts + ansible.builtin.set_fact: + osd_hosts: "{{ osd_hosts | default([]) + [ (item.stdout | from_json).crush_location.host ] }}" + with_items: "{{ find_osd_hosts.results }}" + + - name: Check if ceph admin key exists on the osd nodes + 
ansible.builtin.stat: + path: "/etc/ceph/{{ cluster }}.client.admin.keyring" + register: ceph_admin_key + with_items: "{{ osd_hosts }}" + delegate_to: "{{ item }}" + failed_when: false + when: not containerized_deployment | bool + + - name: Fail when admin key is not present + ansible.builtin.fail: + msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done." + with_items: "{{ ceph_admin_key.results }}" + when: + - not containerized_deployment | bool + - item.stat.exists == false + + # NOTE(leseb): using '>' is the only way I could have the command working + - name: Find osd device based on the id + ansible.builtin.shell: > + docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk + {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} + list | awk -v pattern=osd.{{ item.1 }} '$0 ~ pattern {print $1}' + with_together: + - "{{ osd_hosts }}" + - "{{ osd_to_replace.split(',') }}" + register: osd_to_replace_disks + delegate_to: "{{ item.0 }}" + when: containerized_deployment | bool + + - name: Zapping osd(s) - container + ansible.builtin.shell: > + docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk + {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} + zap {{ item.1 }} + run_once: true + with_together: + - "{{ osd_hosts }}" + - "{{ osd_to_replace_disks.results }}" + delegate_to: "{{ item.0 }}" + when: containerized_deployment | bool + + - name: Zapping osd(s) - non container + ansible.builtin.command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }} + run_once: true + with_together: + - "{{ osd_hosts }}" + - "{{ osd_to_replace_disks.results }}" + delegate_to: "{{ item.0 }}" + when: not containerized_deployment | bool + + - name: Destroying osd(s) + ansible.builtin.command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap + run_once: true + with_together: + - "{{ osd_hosts }}" + - "{{ 
osd_to_replace.split(',') }}" + delegate_to: "{{ item.0 }}" + when: not containerized_deployment | bool + + - name: Replace osd(s) - prepare - non container + ansible.builtin.command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen) + run_once: true + delegate_to: "{{ item.0 }}" + with_together: + - "{{ osd_hosts }}" + - "{{ osd_to_replace_disks.results }}" + - "{{ osd_to_replace.split(',') }}" + + - name: Replace osd(s) - prepare - container + ansible.builtin.shell: > + docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk + {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} + prepare {{ item.1 }} + run_once: true + delegate_to: "{{ item.0 }}" + with_together: + - "{{ osd_hosts }}" + - "{{ osd_to_replace_disks.results }}" + + - name: Replace osd(s) - activate - non container + ansible.builtin.command: ceph-disk activate {{ item.1 }}1 + run_once: true + delegate_to: "{{ item.0 }}" + with_together: + - "{{ osd_hosts }}" + - "{{ osd_to_replace_disks.results }}" + + - name: Replace osd(s) - activate - container + ansible.builtin.shell: > + docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk + {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} + activate {{ item.1 }}1 + run_once: true + delegate_to: "{{ item.0 }}" + with_together: + - "{{ osd_hosts }}" + - "{{ osd_to_replace_disks.results }}" + + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Show ceph osd tree + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree" + delegate_to: "{{ groups[mon_group_name][0] }}" diff --git a/infrastructure-playbooks/vars/lv_vars.yaml.sample b/infrastructure-playbooks/vars/lv_vars.yaml.sample new file mode 100644 index 0000000..790d277 --- /dev/null +++ 
b/infrastructure-playbooks/vars/lv_vars.yaml.sample @@ -0,0 +1,57 @@ +# This file configures logical volume creation for FS Journals on NVMe, a NVMe based bucket index, and HDD based OSDs. +# This playbook configures one NVMe device at a time. If your OSD systems contain multiple NVMe devices, you will need to edit the key variables ("nvme_device", "hdd_devices") for each run. +# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml. +# The LVs for journals are created first then the LVs for data. All LVs for journals correspond to a LV for data. +# +## CHANGE THESE VARS ## +# +# The NVMe device and the hdd devices must be raw and not have any GPT, FS, or RAID signatures. +# GPT, FS, & RAID signatures should be removed from a device prior to running the lv-create.yml playbook. +# +# Having leftover signatures can result in ansible errors that say "device $device_name excluded by a filter" after running the lv-create.yml playbook. +# This can be done by running `wipefs -a $device_name`. + +# Path of nvme device primed for LV creation for journals and data. Only one NVMe device is allowed at a time. Providing a list will not work in this case. +nvme_device: dummy + +# Path of hdd devices designated for LV creation. +hdd_devices: + - /dev/sdd + - /dev/sde + - /dev/sdf + - /dev/sdg + - /dev/sdh + +# Per the lvol module documentation, "size" and "journal_size" is the size of the logical volume, according to lvcreate(8) --size. +# This is by default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; Float values must begin with a digit. +# For further reading and examples see: https://docs.ansible.com/ansible/2.6/modules/lvol_module.html + +# Suggested journal size is 5500 +journal_size: 5500 + +# This var is a list of bucket index LVs created on the NVMe device. 
We recommend one be created but you can add others +nvme_device_lvs: + - lv_name: "ceph-bucket-index-1" + size: 100%FREE + journal_name: "ceph-journal-bucket-index-1-{{ nvme_device_basename }}" + +## TYPICAL USERS WILL NOT NEED TO CHANGE VARS FROM HERE DOWN ## + +# the path to where to save the logfile for lv-create.yml +logfile_path: ./lv-create.log + +# all hdd's have to be the same size and the LVs on them are dedicated for OSD data +hdd_lv_size: 100%FREE + +# Since this playbook can be run multiple times across different devices, {{ var.split('/')[-1] }} is used quite frequently in this play-book. +# This is used to strip the device name away from its path (ex: sdc from /dev/sdc) to differenciate the names of vgs, journals, or lvs if the prefixes are not changed across multiple runs. +nvme_device_basename: "{{ nvme_device.split('/')[-1] }}" + +# Only one volume group is created in the playbook for all the LVs on NVMe. This volume group takes up the entire device specified in "nvme_device". 
+nvme_vg_name: "ceph-nvme-vg-{{ nvme_device_basename }}" + +hdd_vg_prefix: "ceph-hdd-vg" +hdd_lv_prefix: "ceph-hdd-lv" +hdd_journal_prefix: "ceph-journal" + +# Journals are created on NVMe device diff --git a/library/__init__.py b/library/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/library/__pycache__/ceph_config.cpython-314.pyc b/library/__pycache__/ceph_config.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e95c9d90d080539387e034880e90d7b1cf1631da GIT binary patch literal 6677 zcmb_AT~HjynLRtRf3v%=>@EwEfE@%#UfC=NOTv*I*%%>6IV{wdl=AQEW7Pk*cZdz6s>!jD>W*@H)WFesCEiOh|XtDM1I zbr_DTyuruFEx~ZYykpdL)or+AmC2HX4oWZ_o(J zPT3{9Wlw^Wy|UQH8%?s$2+4jUEC-B;+$=Y=kSu33!>EysqA&`wQH$KvLX1{9)I#Ji z%mj^Nw~ot^m);~K1KhNcTgN)dIh*(xoZH2I61f#{kxURF7(M)G!;^HjlWR$hyv(-~ zjf+SBgjnLd*{ICwg>1=CQ(CTApoZzmXp5Sz74l|7k!pD(HCM`~4fu|8W?(3Ry%x-Uw)YA_JV2CYH}tw5RBk>#R76;L^>=|wfI&1q@|Sd~s&LOfkZeiWa`e@2~C zN?GHgbQ%9!n2rDTAwSRD0AZUeAYrzP#qMg;^!bzHP%q z5k85QLD)h&E8gY1Kp{>$f#4rE{2-c=M}L5>OcJE%Ogbk)!-v~+D9I=IbM{J%%-}B8 zWXPTgN79)jlR|B7|L-K8aHz?S8lI#e^Mm%Tq6;v14}rpG(p7`L4QJ9jMep=t(lhCM z60(*Vlin90GeUZDS?+G~U~8s3ZAIFT1k9P|c4<>E8EWMacCVkohI4IO1 z#N;tMI%wMD3njzkRZ4ZNR8C>Ta3`7H0i(;Qee-Hg%WHj;wD7)~HuOGbgo>4c`?A_B zRp=6%v{PTAxMYgJY05g8o(8Pb@YA!${VE2~L^FWN= z_pJ9+V*S-NsT%9~^@qRwa68t&5$pf>*kgfx6Zr=A&d3aYOk>lIpo{ezN{q$z| z%=fRAUthlQl`pV5{EN|l9{uUq{cy$CT@kt;!FeQo&=~4w_IpMOn3<3ijyAJI-8hag zGU0DBGX8*(p_v|KWX{KoY`*B3R+H|h8kxs3GTHt?~Cz&VIhJQ_H*45*-u%+%E?^GT{>aPM0WE^YE7yo3%P(QB$P%&^>E~Vy2WUzLyxG6!UA`VY+jwu0v2^CJ`4b zn-LJ|uBW#b(q&FDOyPYUs%#evH}p90MNeSYh8^-o+o6MXW>(CYjo)w694h@<>4wuhl;PQBD7h?R8JdlIF9l62rauzl8zv`@&X}6KFQ1E1(bl@ zk_4|n^8njM#voe5I``N*cVNAXtve2`mmD$DMZBZ~u5c_dxOIrAPyw0(mNB#w&YAcK zGRD5Qnm$X#mdwz{bpbkz(Ux4Bm|QJ$NLexd0st%{bB#c?1pt zz^Ba1LC|avolSc#$lk&@gV!=Cov=+ zYBd^@fv2r9tUzHKJnc+kBtO)!z^>CFj!(~E;lSE7H{F#)e(fY$<5%X2?(t!uSYWsz zM1W2LbIt6GU{REtk3Q#|?Ttm>c=uGGD2{gm4j01#+#!xS*C205tQa0^l=J`{wq+iq zW2UPPBPKo5ze>82ZaG@>DR`=iK#XdILfoKJkPii8si>Ns+KvB;mFYr&gPws70`Vmr 
zzX~1H?F$7ht-`~QeP1#KOto=-ABGO$CO3=TrpvluQVB62>85~>E~b-}^;TLRuC2if zSOD$-aP5VcE_|La!}hBaJXDSY=IKC~~rIAWOu2TGQzN@fYK$qDJk z2U@YH<>wObyuqS%zfMM-QKlhQoCB9&(t!b^^zDqQ`fFk+q)n?yX?oPPk?5=K9UgUYV@Co!Sb&XMIm? zg$H&*(Y2BFk*(0cPE+&R`StT#O=l0xMOxR?b@h|vR^;+tqo+mO4UzWFYP93NbKQM^ zsnUG55*gTQ@i(~2S9UvyujyXzO7FIKVnaN!HnJ(6gjy*OzISWo)^?y{BhaySXEX3z z`AXFvx;MTuzU^<{@VBqcZ~9M_M~;Gf5IwaSeXe}{D|cYq-3tFJ?$)ZOVcXNX;c2Z# zj#b0W)nEww#$YuR!Eq?E+eEy__K4Rj?nOyM)H--OaB?GXax>5khc|@FZ#-x{z3$p- zo!k(b@wx{L|KiNQoLRf^)4uZ9P6Td(AHTdAd9^$aC$BdD;Ge$qwXWU#^*g_O=U2DZ zbK8lbjl|GqVtA{4xEi{=9U9vRjs3@P<*j!&L#dVF2hy4K3!k_u({o$WJn*;t`s$5p z^!UmfRd3_!`Jcq9zShrt5)ir!y9c*Jmo`F|KG8QrV=Kd7g<>ni2zKr#9dP816kk8} z@#2=0+DEhe#si^ob#Z;58fjUNR$F6VI~~qWR?nE8R0?e&m5Or{@e`()f>&;+o3J3_ z(W)@QQ4F^%#+U&Bhd1kVR?+oT$~5@mLZpsq}{0^@G%rBWUpfs=y2Ieu9F$q}?<9WrR;}V^N z1#lOl{|35Up5wT$MZyhn8|1ZWFj5ZuH#xWK;JLTCF9l^&P#$>*7yKjf?YfAwWzAU? z{L4$rA5?|V>e#MZh&U=Fxa;q363T(yYh1HmD0_AjL=2Z*d#)hYvv;Zyx^6Gm{y(h( z7yRD_C`Wf+a}dCCcZ)yvpMvLyS03_2IR59|Q5W%q_Xx*Me(Q7LEu`&j?k_sl5%F~(r94aNf=+!%X1><%#Te&x+9 zHaM>K+Ni>bB6HWO$5p-7RZpr@=_D$(lB@ocwEg4$skCcw*i5OCQ`1)W9|@e2rvLQ! 
z-ps&)Npe-6*f(!xe((2wU+?$vhTjSW1q3bf!QW{AybGa!CpS*>*n`zY7!=WMBr(0H zhcTEQm*MJR4Ymp0;tV%DclCODyoR?4?IuucFW=)cd`)D`^#+W9qZaNX^gars8aj$eweY4+ibB8RR=pHEeFLEs zP~M1c?bwG-*n~F0dd>7FqFaK;hM;t?y?%?PJ?P364SkQ@gES_-<5y&6oHfJBsGds~ z3^k!;@;Pjnd`g?tbS;-PLwT%ajl@_Xn>64)&X~cDtgelw)xKP+kXGS=drUEubiWzN zsD_e)abi-%Wb!n7wC$+r(GA5Y=%zQ1)k#ggYXw`Vwgr;HzCnu%^!KO8#q*7|?jJW?;+o}D6 zP(5D|p|y(Vr^JG;WyeKhLKP=+x-qF>5q_Ei!VnMU^pr(vBArVrWZ{E!;sy_5lG34z zqlM%h)zC#Hn-cSBC8_EJXGYYr6gSq!CyW;f%~5XjGZxl#K{r%v-4Oa-AecNPtyLTX zdI=R`(wZb@m5ka((GUQvzERLXglEJtC9SLL&r|A{Qb-$?7U)xlvEOLb7v&+Qw4@=9 z;arBYsbweCtdYZ0;+U3J+r(Z&Oy;tNqGk1U8VG$v+*C>#02gy(qN*e(r~({;Svt)q z`R$l8aKYlqMy_IQ8kk*Q;u{1vtZJ}07IRq4<_sW2(?$4G96Bvjj#%5MpeL1dK?SIF z{DcKX*R}DinsP9u)V!Ka5nIY-fs?xqk5i+$+#L}Yvif$Me2segb6HhT+zi<}RJEY7 zTy{(wFJRC+**sO|E>`WS&6XtYzklChMSSpJ|7MNTx3?*7 zc1yh~PN?_PWFnbK+0b`E&Iy84hYbj1jN@D(uiqplM287tu@36H6DsU!%2)z}2TEAn zif@V_y|ggRHUpDx+b5C>4?plM+$wlYaO4srxnywD5+^gVOJ>{nyj$iD5=dyN2I5v_ zZUE%DwKK1+(4FDDIx#FI-f!;8(!a5T8bNcUsG;qLC8`zu;a4lOr4PD_?7? 
zy%$Xum^9s#8{$g!d|ET&9@Fc*7xcoYjm`8}j>Gg@*Mu>ZSIyuiL7OZ{RVSgVg*4GT zePkk+QICwP87-?F8Nj*M)uf>xA!M{FmLors)<(e_PSJ;!4{OU$nSpVe8=@!-(GhjQ zPd^7;5v>Tw7q0Np3J-md_O%s01W#6Ke11={^NSeb>nfq$OQC(`(7uIG^W)_6P<)21 z_(R1@tAGO~U8J|07p_!Ce+_b1*diKPm%ofo*yG!-)3uWafm8s|k&8 z&hzgv*SJe4)kvTrG`i}Mj3Vp7vO9o~qw{gV)rc^2Z~MjKbhk~c#RvW^^wswBDxNxr=6RhQ(0Q zP+1WTq3#_By-~B7Oo<`+kF7K?2${KuitZ+K8MZD}ovRt0WFfy{F%hrH5x>>{9*Vn6 zSKD_?jyf9hrbWa%34re`G4*?S+-@GyN%rt!>}oqp7)liEz#W|CAEZy{2X{zZ~2{dkj_d3xSxZQJ>80)`Ac=#-Uk!Ym<19Tv- zH6HQ)nbW`jVD^ngZ^JSlEQL&o#SOx>e|!CI$>09W-%eqV z&YxQ3&#Z7^caQ6Hmw&HN;&-hzpvaEdiNym$i{a}f{`!~8{<<~900f=VyA&EIhXzX5 z4-+~)E3Ns`MTjdc%VODlI ze$|e`uI;Sd@m(^zDGIYP8mL|mRv}Gho!xL3-r}yjXSj1%J`ROaoS%+D#3p$i)CROH zfD}@|A((}U&%_bZ(fz_1SXHO_)>g+NUe}=(a#5Qw{gALh5<(+>+>6hUS!CDZdeVVY zLZ%BRA<`ENlLcGRiPe$d49&8W(70;Y#RFLgUjP6De)?Bzyd8h(H(bX(tes&3Nb<-6xgKM;U6sGh zR6EwZ=kh%(#>ieUAn)`k!Ky9w>f+M!T2^_dV60@vAqONi<9gA#DfJ;7P$&O745*W5 zrA_V}BzyoQ?2=v5CTe6<6$b$WymwVoD_6(KW<-)8w%Ll1#Qfc+JDY>*$K*(MZF0oR z4t3)upaR^SlVg&1n0R%(4?3vUNkNf-jpDdEJ7NSat|7D6bVE0fTeWTilHf!pH>oCU zp(N7qdLx)_n)EI#pp)GV4zUv3GdI2zYbnKAW-e61(b?{I-<-SuVffG!W4`<6Z$7>M z;gL&b{qE1)sP5or9u%pwY}vL3z+vSRRIU}cA##;a9weLR20feQ2=WDgOw#x*@}u&# z&RO6cTP<5~R$HN~rF{sr;g-1S8M3QNTi_#x+F9^yqw}e!r~WQo>b~*esT&pXz>k8T`M^-V2%yN$zY2a7 zZTKYI@Z`|*#*^jn$r68(+D`wp07pQr7852%jMikn4>_Yp$&+fGRzPvT<=KhwsTeFD ze}&v$ht3SsL#|vww`B~9$fT&zTcV>oiReNnuC}FRj7EX=K+yP2BEusvmdtW;Q z>jGrG=U}C{$BM-_VNAm??jWP{r0ayvE|WLHaL_uIup+sRZv)IcfjbIa5v}qFB+FLn z_bk=N%k}Xm%?tHy4+Ar_i>u-^C!N2iYb{vVnN4gcq1 z!NCkXgg5jr^CKh~p-uKUyjDF6ZSySbJd?e$Z=l(kzj2kK>kz`XybpqWVz1a1pO36t ziDLn=4Xn-x&XRV(2c7T{Mvk0kI7r4owg;TI8&?8%1UWDgNi*wBRyS}rEll7{g9Lb! zcTVk1uYFE$a^!e@2#OK{fEDQa0h(k$n1(B2U=YrNkOCCQQF?+L%$QO8V~n0iPN>N{ zkgTQDEt5<#%`>V(?W3A*Welq6=~yH6dq9AD9BfPaUshCv-#-t~Wq> zrW~g%Hc4%`5s$2df<`NHmSS=sl@!ieOR742i_wX*BJpZdvua2#17A*X5di)ke)>n? 
zIf`gGxNCXGuH{(6occt4Dlf(^EZ6Ovo1Ets>rO83+WWX`zU%!Hi@SQ(yj;BiXS#d# ztp&XyPqBNY7KQ5`WgcecdY^|{iWe)fT|e&oVc+9J&tt8{zU6Ss&k9d&Ew!F2x1L*Q zJ-=|U^L=?Ce5Kgqz#VxWJ67!b)E}P3k82nG2Z|S#gLO;6z2)HErQrT@aQ{N^K(Twx zgLWQ#Joz-Z(9m7x>S%fD_03#+>+bC2QvJ*DSE_#*%G@Qsq0Bdw_8foe`h{NN8y5M& z6%XPC0tqWF#SWBX2ObYD#NrQqpVT$Zjn4MX_yBq2@RN&ArxwCDiano(YGz-4Co`v% zLruktAA3WUhP{6t`bp^Vy@iJF%y2Wq4}+C(-BMVDzfxFS7F(Z;Er_p{1AAu9&kla- zk1qKe%l^hCzgYH*3;zAZ3zb0R-M(Vir@@*^AiNCdTi&^|90<>H@AQ(pm)?0DO6^Lx z_R;ji>A9AL@V-ssNPQ((vlbPoup1m+D67v1x9z}fiEp2ue8#tb!U;=U>=_p;b9-ke z7rDJO#WMK>)M1b|C*n+hyulO_@Rb(!qMArxk~Clv88E4&Ete4laQHx*OvABRB4O^b zPygGjQ>;Y6(9(Jvd|TC$bZ41+w*^Ql^r$0epPl)8=?7!$yllrNR2z^6&|pS7`UiW@ zU+GHpbq#e~=osoSgBPxLUhnJbAL9(~t zB}vMtl@sAGF%d8-cm15Z!UblgX6{tD+S$u1UM}V;p~y<0sfH^CSI#qhfGhHl>k84L zXU!8~!k@S7g#QUxJ6+4{Svz!qiGAMBc$ht(*PdpYRwfw)$fg(BogZ=hA9O>a$nE-a n<*FC)Q8xe5$gi&7qYzReMaON^Ng@&Ao?EBH=#eP^symH@8r2)l7aSAOY2tb-i=5s)VO5b44qS`etP5TTI;k%~F=SfWv&M;2oX zaT?E|Z*eYhQC>*U#DYSVg(OWbq-ZL~>C$3`X7s2Y)8o2a6!e6yOp7$DCuxtKqP@CG z`}7_?J%p+P?bkCyhz>j+)U($gA!LJ>qv-L#JLp=+sT}B=2tFEmtj3U?aWVZ( z`xnvOTMqi6co8{5zW483OI~CdbKP^RKE;MpYq*3m*><)a&vEN4-5^e#8XJDyqHxa( zOfA(tXT6H=xwc=$d5Ig4&5s1(Z10!e7(MeR$W)7Zo?K z>&*ttb+d*;jEjCl7bkKkXq-`VH#EA1&-%)>7PZ9Ii5Df07h^3y4@0vzW-7p@5*tr7V@q#`F$qApfMT`GzVX)W5-UPO9qS*S>r zNJY@2Gg8Mwv=W^_`S|XLatBfiP3lpoi8J|CSiF-ECh6^ zi+lch(9V)cHa*e<`dQ|rtzl-En;52u=z2Db2Ia#fUOzyKHB4gGJSIYQ5g7#YF%}D3 zl?=i)P)2$=Ho(=2Ff}|raW7rs6~JyyZ@D!*y@_j1-I-n??nl_7-ZW2J!3^inbk$iW zCfNyy=R3dH*kLl*4rT$#f#NcJya#YJQCmTofzNB7)eh6S{dDekLU^G++-^ZAX> zZX6~@_miW0m4oDDbN(->-e>cB(=U^koAYm!!9V3@TBBMkcd0e?{o}ZpyxyDxU8zj- zM@OI-S^Vfi5)fVqM{AJzCgNuZYbW(_is`Ui^vF!7*3Qu(?=u}4$a05=uaPd^;FZ|P ze%A{Bp6Xoe5xyLNhu7~p0K2j-!+L@f{t`1Co*Y`^X$xTG1nW`|TH*1XgYI%_2)fXS ztf}3fPw3)Wrc0}!HC|5vJQQ`M8_Ts$d`}=AQYM>geJ5P8hvkQ#R3duv)JHu9m98=o z#@_9Ggh_O#ftlQ$T1?{gk{8JY=pd7Duy|*-#QBv zVq4hym@hfZpr@d5w#@n_>@(O@w(Ha28xz7J#9~pH%CJO5T!((Jag~~RA*l9zoQ!ho zB1n(|cjjVn<$`?W5o(jmKv~Kwgf{^+q+?O0oH3pVmMw2gSkf^2sRMm0*4-y0!S(Ui 
zn4ruF&tF~+?2+$rDpwr#+=*p!h10IW;qlb+j^jYa;KHODY;E!bKp%33-@(yDuhk2$ z2QR$t8Q6R8`FpQ=uDtFa-owxFmz7uj^T$zfNNLOH;ycI5SUT3c-OeI8eHb5x|CjOM z7U1&mzC7IOA8GXs@c&G<)!X07_QKiDpv1^Ak`u~tKS~cCs$=`=*r7VUuZ|z6lg-<$ zbYJt%aSRPj9SpwPTz)e=`9;X zL<66hY#2Ne!^jIu`5~ql@G?VTyBh}K-9&i95I#$T@?a{k!#jypH9gNT*oDrNDTYJK z@TpVvitDE5SYe9B-pXK^!8?vy5AqO9zS9J;f-Velgp1v_g3zf@3WX*UVWf1h1ut6OL1Rypa= z+mgRVWOk)yq?Ly0{b)YP*Y-*)Z9Yb$>5rB66GRw1HE6n}?bT}kJV|C}H2bsXRFxfv zWa#YPgv+gS&pr3tbIkblRIT60)~=PzJTCLa@lnI=<=#!T5X z+Z3y@J>)K@*#T~wc1$@nXAkK_EOy#8<<{I&9?di5)x1+a&DVo)ZaSz1jo%?HB-jOq z;1pa*MsN$BAy#V=yjocBX%WG%H4805pq1nptp!FwV-$r^$QZQ>O|3+06T;B93lZo$ z?sf{zBex03!f9gUZbvT}wwUjMeUBMGJh|(4kZcGs41U<@!0UW=k*g`0oMXF)%*3Pr zg;e6K9uyZ;C0Ejb@ljh9c0*x!{*3x-n@vI)oOPZJ! zH8Fi(qPXEIdn$QKcc_}El~mnXq|$v^dZ7E%QV}_mvgw6~x*Lg=3bOW4{8x?;Pa=`< zm=s6(j8t4s!+u4oEXp~_<58DkT{V;_Hhn zZqi~lD`iLdzSGH(K98r7h=+%k_HLo9EP_R z@dYt2B@Hr&K&2m*WQxo!iaAxP!?V(&SjuUpAPgw3d#fE0kyjRZ4R*F9@uvMWS~9o8 zOL`m1(~JB7T%;-$v;kgL`I0JSPa3e0^IyDy|0&{fo;V$hjOyGAC)$ z_woL!tTM}roRQQ~{!X30ll84&Bo`KyfxEAZ);f$ap=zL3QW~}Kt%Qu%D(00^K|@ia z>hp2|PHv1c%ca7~Nxmpi-lY2``M*5F%3JL?Tgop0L2I{!v4?61N?EukWi;N9--5)e z#hk1GH}wO*)Uo=V=ih3}aV1{_cfx0stXx>)tt}T7%1dHqnYZ=@cL2v!!1zGp^1M>u zv-0AiMB$?AEa3g~{I%;7>YGGSXDdTX>M|JleYnqbCMT+v!QtW~Y)p_)%pihdPRvMo z*qGN8Sb$_yl$Z!Ykuvh4oB=Zgs{G(`Han!M*?679mqIcx#v_(Aa5<)&68z;7Xs1{L z%NkptvQqklKfffJqxZplmqjfJ-t3}h^-jR^(C;&Ek&LsOPez@enCg4fUtymUVVX z(pLS61ZYxT)GpjpAwsc=n*DRLC8-2TDq-P2=5(i8S}=kTC(Kq3(rK)Z!C`w1h0fk7^35>LLE{`tU~2wl`j$_|6mEv~Lew z*zUPliSd=5{>s36FPyA*q&yCYzF_&o=dhGykhNN?OaCcAOz9_WMSF^wt4qIzO6qw; zh+H~B$O9W3cqwI{YrIVhS+LDD?qWf2Ee*pOY{M4y9&%-X5I5;2DH{xomJMT8@)zi` zFIUbZ??w|i!4CRi2Myr_M_oT$DHrI6b2aQ4w```1G??r=cPliNeN& zMzCv!4w4nStVwxQcZkKJ1lfqg)Fk8b<8ImsNV*T=6Rq8kZU+n?bXXx)q=&lwzL+aX zGzN=QH*}yJs2&iTyO2)6wBs5j8VBG5#Kr3_6YHkeE|e!K(Z2oY;7)Y#*U@(#dDhs9 z%lEnWk$3&_&t`u*yLo1}Ir(eX(C=B&`h)*;`f8KYPhH*LuOGArHhZ&m{?K3ptrPU4 z(Kc-YTenTFE_>59?I8OJGt}0jwMVzn{2@`(cA!o>pnGXImRL+5R2R!h!1)Q{4MAs$ 
zrD^^6+WEK9e)DvVdW|}Y+WY(qgIW?i%{8XT43VNO#ekMy1TVv;*f-gNzzDXQHiIW3 zbCzZUp?Z6h4Wzi527{H^YdXIKT*SzgQ9^DBDYhRdTzS7{7ofwO?kMI;OL9SH5;~KP z`)CqSbY?|o@;Z~%nVimKbZ`7obi`cvh`GoS zbInK0wHz@QJz}o)h`F{S=Gq(PI^e!+jZ))Ck<44SM+jsd4#5RC5)?v7$U&eU@2uTz zk%5@V)pevkn5##jI*17Nx{ZS$dFxg6s0cpi?N_<#dvX7VRqgdv_IZp(MoTeur~_yj zuOYQL^LNc(Wx-)XjArKiug>EaNH`|M;B5nX_}6vTl28ws57e;d>Z2NjkU4ZIhg$9B zqUdb6MWotYv#X*@uod0&7`2U7U*UmVz-_#PY#^`mEj5f3Gmr7vXt$1~JVH z0o*hF1^pewfndt}F)^sX8lZw%p3&woS{-V=B;~J5FnH^~N0uOdxKr-B)PzGcFc~=1 zfJU%JBjpl!j0Fw5zIlbCVMVX8qVcpC)Qy(1r5r|6d)Beo)+R<1CG;r8K9H6>^Q7P}GXbTcrgF)-_95Kdb`IBf=D-3-^Sl&uu+TIUuNB?rgyL)8MMBRs%i8LCID zS1@>QTb42__=+a!-rAwl@}dsOF%&t=VqpoM;ouc5tCZk`@LQr3vb2nGdU)VtG5NJa zCZ>sTo9ySNCiQ*p+ud_KaH+<^`r{aogTG_x9kat9yQ_)kBBH>kXFU&(Ia#Uhky=Ch8KIUq__k<`L~Rm;U)^c)_K1+Hic$)oZEGr$}^`B5!PuOo!Rhk)6~ zBp$(GMx&TlP{^f-k);?Z1jzTL5ZuPAqEQdJ;3Y-UC`{L1`=;4YmzkG7EuAh=(^tQEO=$yT8T`xi_U( zKOVh{u#cgGsc~MFZ6zu1;BUyruEXMw&y=twZrEN2MI|6Fq?8N3L+nqmr68{6s1SgrQ z6K6f7rF}!%lJ=U1t6l{72oP-9YI)+>3yxR)2nrByaD8%fa?kTlHHd&v1E2{3;RZki z0nH7776e2a0IdjUBh76aH@9x?Mb1>)5zs**ts9eDlY8OQ)lLM&8Zf#L&`rAgHcMZX z_PQ=qdl0~r7{7V*tDAeB=c>I3=)pPz|F*NO;tK4$+IC!R zmFD(Jqy_&6LzQrIB@~9f8YJ%a7sTcEfcOH@eSi0kzkA=`x8v{I^&c-^I)GcCo8hM> zZ;|q4xZ2i%-RNNX+P6;My8EfK{UA2D75(SVCpWg|SDv2A?Z)yuuJ$$CdZf}3TMJga z{`IE6JA2UG_vO7W?rlZ>L0P-@ZMb(Qd}_`4jW>MI)%)c~UwpI`-0d1!n^$j2H#Q8A)<-i3{K>7akBZI%42}!@_^N@VyV7aI zNT(sBd;pWP@tE#O!=FbqsIAgziknjoI}FA~MA7}Q2L8%nRAcG1-eJ{FNz1>KG&!dx z7erOg7;aAQej`FgH2#F56iknXG0}8u=&p3{wDE_VNlF!}M_-}*a-oE4mJmHOHhX>g zgR7J2naLYt6Js~VbpOP)@tZS~vp2?WOkbPTU6Z%RX6CL=Ue_JMQyJS7%! 
zrUX$~C>MM;0|^`wsO!AR%tiVs;9;e%{v&i%mSLDbb`xfd*&**&Ld|9We~{s-&CQJb z(Z<@(Jtx?O2^0D)@m5{L-nwD0aK5#NYbzBlyndzX;M1& literal 0 HcmV?d00001 diff --git a/library/__pycache__/ceph_key.cpython-314.pyc b/library/__pycache__/ceph_key.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..687c0276e453a30da58dc9c640f90dc55693bb9e GIT binary patch literal 20192 zcmcJ1YjB&_mDu;;{eBQ20g@p3@gY7WJ|vkIC0UY9@gY(aAA%@QwkaVX0g|vt0RBFZ zl;Fr=vY8EJI}x?b#BkSMb9T2a)7_bJJDIGuyEAdN>8v}`A0R=Q$QRB;Gj(U%Uu7h9 zlDhrTbME&5NJ0{wOs~z0``zcc=bm%!x#ym%BNnrPf?)gk--Q3Km!keFen>$hCZ2qQ zrl=+A2IZyusXjVR_bK9vK4n~4MO|0L)sU~~*Ys)Q+A69XYbpD6efqe*&k#5C8RN!2 zQ`}UA`Ko?v+)93JaU1!y$L-{|C|*Q<9dQTwb;h0Kw>VzxReLpFtykAdd-YyJn=)SF zHO5_DQ{3$}$4gr&Z^bYjFY{VlRJ`14h1lb@L0sXj^x9q2EFG_eR1ry4LCQf=45XYS zRSl_PlB$7J2}#v@UC=|F*A4L@Zz;s}-ZF?AyyXx#UT^YxPF|y^AYjfzU2m+W+C@ZF z!5nMJA46R?Yp9?NYmm7s`z`x%QbAq%AWXfYtf0blQ`P^#UYeA=H88uIJUcdBAU$+?OtdVOn7(@vDfnYEcJk8V`Z#_|CFyyy0Fc4buTVG=)*-!vGZDxX@ z*$}3fKqSb_$6(eWu9Y|3L2W7kS`HTo8@ZqNVK>M>cucl73Lh^xaedUMhyaO#{v`K*)Z^+Nbop6421?G z#2;X|5O5A~0LR4G=t3A+E)R%3L(a|u?<@vx15iPs8ce>G8Jh`D&MUXeUAX#Gb@%>}TQe8mE zbA}ZsAan%Xq8KKk6QxzI;~b~w*a-3QM!}7g=UY92;3oVhTNdc^uC^m8M=SWIfR_!9<{hbd*BRM%)Xp#Z~4ic%4wE!4X(3sE}Dum~IZfhE;g2uOxyqAU}M#!(^(?Fk`~T?|-I=937N zBXV*}i!hAN0Kei^HW5O>F*QGn%))Tdxey=-dJ4puXaop-F%XFp@hdHy)H5gJ7WJz;*an2$hHTPCro5l74+Ls2vIFenpx(q=1Hrklpk7Qwqj4_I24aZU z0&4`zMAjh^3ZZ1hMH&KqDGW-4Ab%&K3was=uEiK)fn$Qv#fY?*;rzMVpmxj=A`zJq zhvXP(VS~)XZBjZH1)hzvtqfBOYx;KJG}H6)(PO)oTOc3M?NDSP84V9zdYxhCBg_;V zodYzBeL(wQ9&|6HfZPfQ^2v`XLf~MuFe>a0qKHIHK}%+F2GmKAp==oRQ{2CyR=%;S@!MVu7!1=`NwczEmXKRp-K@dV$fLLf4n4OI- zGVJ0lD47dFftYi+hPlH)7GNFnj-EJr6f^d&<35lkXG6~z0s4<}s6z`Q9;APRB(wK| z3dqaDvJN+6U2vge;&4iUW+J5vX`OFjaOfON{_o6^zoIDoAjTu@O!Ufb82=xFs80_!zAr3Bi`-WKG z(Kp%$ug%B2i|q@iE?zzXL`DF7E)IxTW@_3N0&LrCctVhg@wr%A%>gHeQy{v=Gp1AX z&O1aVzFRhNO-n6X!pz@bi=a&|Dy7c(km4~G#GnP|n-KpO_wNj?CrxdNO+P2u2Yu}%_ z7r7hBSSsIB?kFMiE6xhtf5&!$`e|Kfh4SYf8sbUFl7k*nu!^cD{~Z!b)VN$QBCm{` 
zMO9NbY|`q*sBxql5=Qb?SIVmfy0}4)WB(+K6JsSNN92@Hf^p6eC-r}W8aGSjKA8^M zMcRnb;})rO1Wa{%Lp3#KYtGA*dW|W3idw2kktj7>5RGk2Ks2TZrwc(a!59PPB(2ld#8w zHBbo)&ueBvfgsSBM(CacWK$p}SU3FgR!PD03#DuWgUP>d%$U2DdZE&mxnjdy zk(&BsHe)`r)ceS2UmAP@9a59|i;AcuOaD_cRx1U_p`XWSHmdhLMJ+-fZ2LIQg9M*a zLhg+`p)la0P$k92Bjz)V}H&b;jCT6}dVC-4vy#Dxoecq%u*H zTSye`U@Ocsln_Ip;VU!B#uhYX#)0<|>XsI$1Pi1FWqgifs{tHd6S@@-MS@LgUhR+H zj)iy??lHU$V?U^eJbja=7dV_keZ9PZW_baz9(x>0x8aZTK?GcFpln4;eUEI;4-NMX zKQi4_CF!Rc%4AKx{r-vN%OAY*OOrEev@H*B8f&vA=e^;(!>O@U|E8%vtFzs+-nFLe zDa{wUs^3^Cd&N!>WpXYJvyIU8WIn@Tnylkrl0kiHBq9t!V15+OG0nIv&c{OSA`Qf} zgK96`3ZQ{s)^}*+7Wq$h2Ajs>2m8(xJFKRdugByajg7SuxjeW z@!YC4>{^@#u;~d$mszlT;~5D}-5{9_>|p>PVS}|J3Tca|D&%UaP&%d1dRW$Ur16(U}RniFr$nbyN|V6_H@hO;Ysn^&SKUWeR`MtKhV zttUYyfwCM40G0VAD#PmRD^UG^!XFogXo>o*fzr9Mb|$IWws^9pq9;n)dgv|+dq+*$ zUX#>hElwi)_2pTe321=Cj;zkIW7gUOx*uR?11v4+=j^AqIKM-^bFITlxdSE1g_ z>m==&?SRZC4cQ@bl+9}bu^5OgLTN0btI5P`1S2!)hV6ztwg;n^G0G9XA*ZRbC$Iq zPITm_lCbcyy=7z>+-_ND6mcO8B}`C{ub1vBB*#{v-pT8GCwx0POVrncZJ;!?5w7?D-Nnxaq= z^a{98!%@fr4ME{?Tp+A3?@1tY^cKMMj#_EbEOo<9r8Xm$e|TxNe$}0=Kb&nkit*v& z*<-H(`ijpZ`qW*!(I@f-bPPlQe?-Q>{i+~iG>AUjSXxyvtxxj;6eKYU zYl>^CYwDK}hct-M9ziRVv_>XD=z>%0-c?L6t7?>c_mOHuMDv) zCm;;eIPolIMnzL<%&~rGaSxG(d*>4M1XOu&4XH?D3ep&v`^zz;fYOL z2smQFag4-Clx&gNUVdF(zmYEC042ff1(fQdRxpWMM&=|syLmoqFpj>LmzcGia zlRHDa9t-`!`MDSz#fdvg;z)y>*MK9E4#lek95nCN{WS0B>KWL0q`zdF+0 zIoi{kXvhN?Ez5B81_0x2sCeKbGda!hG}B~ZQFB3?U?9k`NJA{1p?HmOUBw>34?7vA zf2e=7zw?rRpy%}~g0fbP)sehV9d;yazNKJ}8iqSEwrJW0N96&7|J5g3a5R?$lELj#mo}T5Ke7U0tH2)Z-mjxma z`6i|MD!uJ+eR%p$PNydS#j8o(j*ckmS#8ns%%-+7>nKkRY&ja!j>cqH)?NOo{v-XL z8Skr>>8!(<(*5Xc*5%&xw0z-e$-0?Otshxe=`Y-epJ=Jly2nb&+4R%|`efSj$^+k~ zuKP=^A$jtT?tH9-78!TzrndDb@BHHUKb`*h>3=%+^K)tMbUGZ#bVav1VjCSXJpC3Y zwZDe5LTA=$`@+g(ZLSY3_bsWnzpz#RMoksfg3Saw?|4xAv&O$`{H!B$df=gD;By#i zG;4G$4gc=x5z178CjywH&YZS%Zdv-)Eq$NcGgxg)H@cx4{Xb|Cy-I(@xd1ocDd{eu zeoioeuK{64`3$%WBeZs)giF!z^;Ws zgi8yXvMzDhaDF}vY>%8uoV9`>JenfciK;o4pl!{-tH~ANS9%+e(X7I&xfr-b!FG>38jCszJCFG`7!G;fk0%1O`tUl)@nb{Y 
zB#0P4X}V7puuR}YP+fn>s$Ni2E&)h$=qRKcrfZyEY*tz4P48KY|gHe zOL@U4h}N7AaT)iNs5->WVNbpqbm7;A!I`&1C5)C=Zh#(*&+O6YHF?biT3Mi#>6xvp z&n&S$v*g(@*a7;^eCWN74$(#icNrokU^AxxB9qrC;U)%bUx+y&7uB~tK~f_KYZfT? z7Vlz6Ft`@#rO@W>Es^_HeT`NwxfKYH1CQmFTWP*Vt5Ug@_G`2%lUwP&MyqnUmHunA z^2n_WU!zrp+zRZ12gYHg+{y%2kn?2g00jZmbd{`Rg4zoztgM`St9(imes6%gx~?|LXE{%+ zz~y}$qs3b#}%?`GIKUM%HG$BUS4R2#Fy6+_VtI_B^hC3vR6L;%}p?Feqi zd&1Zo((GeSZ(Xc-952gA7|A={$a!#Yyf^rYeNMTLl0n6&F;)Vr(2c9mqYQ#92fRPO z*Q&jJf^5)BYbwtIB%y~I)yG_eecnSpJy~T~PgrHOw|>`F>mGDLUPFGKZ`WqLxBd&@ zD>Q4p&n?f|<#WM-u(2Md;nPX*_RzsUL+IdOQ#xKP&Co}W*GlQJQJ=wA;xqYdvSnS| z4HjXpv9efs%oD2^ErYexNWD(O(FGqtzX z;eD+@JqP&DlHThict>!8dqOqk%W-wl{KF43`*1#1bdmoJkzLpMu&XERGeUycoMMHbbaL)Tkt^=F1G9 zL^~h?VRpw0%`RvDk>=*BfYeI|r6E|u7p}9{6f^hGgHK^U&=5`=KzRNTX-H;<7_HgY z(=i!O0;ky|50*k05+VCHH$ct;gnt1LY7s(^dT^ov7}39ggb`_bl``Nr=R-5nk(Vtd zy$v^HTHh`IVwK)@ybt!iylZc&%>VM9$p4DDU%&+g_9kyf-riKQ3){G9l0c-9tzA4_ z6vB{9Lj=>_le?(o6jI49o_`@PjKg7G2yk`hkw%PFd5dBuUlkbqehPaYGY)%>SWE3= zM0mu*Isuzf7cppcmbXYNsDSpib?W22;2@<@eANA^NOz;t|M!5yEWW6Fnjmw zSgo&i_X>~T4cMOGtC7Y5jW2I+@X1DpUA+7Ioq?)l4deiPIq*O<6E^_e>j-B8T!7t9 z33Q3*N4lh(+^=?ZWV30 zu~~RN+hh<-LoZ{;YT_A7e18OI2*i`%0MU&pTvB!vBABEZyfNnIU;x#@%=3L zgUy5{Z~D=JoOgoL5wS3ltD|VdPdqr1*b9y)jNFPTluqokMoXA#2w*k%Sg$7GsUiN$ zHE6aXS!l8b6oXGxu$8BpwBXQ0-qi)sF)@gjTKEh1S}fWb$t>adu@dZWyc(PV;(>(u zoja}Yzi1R=-$rQS7;zY}7|lbJc)2Uzfo={S#NtCF`TZbyv_U{cNacrSiB|>$Ya6k! 
zqt|ul78v@QncLBMhIAtKz%&T1jt%6AOar>jH3)WZ1Uykgt`4xh;Npxo`T#QX&e1yz zJQ;cC*qv7P2UxM3l#34<_tZ!nC)H=cKPPxw07Q6^gVulS8(-|8ZyCO@B5#O;t%)lI z+Q20PjJSUe!W5Xp;2L^Z@I@zR$$8WuZr(Y5Cs83!M{t?JhfczXI3xBR&P0VT1$eq8 zp=%QtCsDM^=-CDp+7h0r0N61J*y5WR@lgge;$ZcI@Nk(;H1^81h!In^#x}#P2P0TL zncJcGtHdiJp)ec36#_S4b{V7l82t%GxEci01W5M#@X0IZxaov55{m0UZ2cXKa2EsS1uceRzXuVoyfrt=t8NG8X2DNy3A0J%A3^HJ;F&9A z;aAfk_PbcDfru}|TbaFlMtBqE9t*Gm@Z%i8+T_Z{kjJ2UAjG&I?~33xKD=ZRoOUD8 zMOKIPr!c}h8$$h}MLEpDTQqV7%wEMJ0(}Tl;L*jQIhnnTAC(+9f=qG#faE^T`NLCi ztur+hVgc2}H;Nr1&EOsH%}|^-g6qgkl#3H5#0Bj7b&Mhyp_N(i>_VLJ8sX}U$X#B; zf=5<3#_N#?$Yh%8d86RxB}g3HN}w2E`#mL)u*4d8sRPf`QJMtUcu;T((~9oN;2Mkk z%IksT;GZA>{^sqHe_GDUZj!SAiM(04^Qtyt5vb(ly z$JTAf*6RPZz1k&ea3Ql=|aJJ(9`w1*SE}->*mVTt&F*GsW)qPeOPh7V#{8$Zm(H6 zld&INx>&Gnp>eIn_wL-ivt?!0t;|YU#@f8pmvxqXICy_>%Xw(sd1xh?ah_NjggL;K zo~>Z0KsfG#3l=+A`Oyn`<)Wx}{#YbaXXjN*b4j z!IQ&U3`jBA0E0IBQvW|29a+zjEzj|F&+)a6jHffLE6uuUlBR8U*{9l%w4a(jGOdiK zU+T`dd+wSaxtXN-5zN6}|4>&C-P(%oE#6(k$)`rQtTh|fnylM%xBro~EEU|e*1&}8 zj;y}%u(o5}#yqM$yn5wdon32(&j*Je)^?|DO#UdN=_9XX+-H*gk35Hx`sMm{UFkm? zi<76b?y_Y6Hq(-|m3*vQajmJ==03Zg9-Yj%gWx_{Jw>mZDw5}ymCILBwU4mwuYorE zSM47UZQF{|C4-+YY}$NTt2@>C!MiKoKa3$QHMZ%fTXn7#|8>Q7dBvw4A9Z|s?xS<7O6zJdWN`;}Pr6_6grd6Wi}=$rpjdY|r4*^I701eCwxKDdNyR^c z3$=+byk$%&k4!o6VyP4`;Il1m>&U$LLJk1-P=5^1B)rAeuiR_WS zts}$hM~2fQqiLT%bL1P)(XDlNeQC9m#eetiyQ%P&wQqR{Yl>yf@+}~RY!w5k_6VSzurcel!z(8?Y=^U^ z;*|Y^p%u+)!zOcVZS=v_pIrZ3pT6q*FBTwy)v2Gn1x;3*D|gm{pLN50(t#KdK?h4e zq3CLE7b#fn@GfC7TYG-1wtv00|MR*`?XWa{ZM~G*4?xkD>DapI*jm$rtDB~-Y)M)2 z!vB7xw=6eq>dUr0P2lYQ!@ktmYWG^pXBFuYf5!O@fS_W626{~@l3mM=<;h3rNA*F6 zNVJBP4yfz=>{}W47*g!(yTB|dd+)*{d(nI6VT^Q9{X>2Iwxe{*acIMFDC20@a! 
zR8{W3L07=`j~3P^c-fSHgD%+^;7$(w5`Rn&HPI~U3hYN1HDct3h*!nu?}CrW$;eli%=_#E9PJ+-;&naO zItNEC^<3dK-k#B`-XV4x8<%3#fDzhVg(nslF@-8UIROZ#1ERvCzi6HwPsjo(02dpz zeZb=$D$$)0rgV<|9O{9(NpX)M0yia^{%s{ichc+BYgwCf$^74__8o?(axv!rL^_w$vQ|4(GG~4iy=Bn zA}8&9+-jgJ9+y=>>NT2tk0|M?$5R@bwgVIZKw?u_`b(AWk9&Wkq*P_U+X;aR=6Fh} j#A*C5oW_n4KELAZFzp|j&bz38bUDtqDF3mUhWP&h$1Cu} literal 0 HcmV?d00001 diff --git a/library/__pycache__/ceph_key_info.cpython-314.pyc b/library/__pycache__/ceph_key_info.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fb0b81677afafda8ca2754f95924011f2c92999 GIT binary patch literal 6830 zcmbt3TWlNGm3PSDYdCyK)WfnKW7?L@#G>LyqO99EmTlS+qE{yht)gBSilU738pX>G=l>;#*c^1F#C;|5!oYqWuNR% zadJQwhTUdV4w^ApG~48m*`6YDe3~=ka)%u5A_dOufL4UHI^}2=F}vg#)ZKC$)JJah z$nEdW5t0W!ddaOL$H)bT>;b@foP8y7OZ1X_7*{ZA+~c7aDeNN?A8O=#?mnV%$2wE2W$X{Um3FMoWgaP*5k!`AR{B z-Qtp3QmF#FbH%(B)r@SeTrw362DPf1w>*oAsT8KHXi+tlJhZcSRf@o??z5@0me(*9 zvtn32ovL>=b=eY)ijGvM`Ru}q708u~#Y#ytSCkVTLWH5AA)&}nUzBpHemi?dUCC;t z#j+qM74vqPUX*6la#1Ns(^I2Al)isiF(%*2tCi{{K`>z2%;r@iM>QSUxhPF4C1pvK zt^%Y_B>;8WND6{OUslQI)%-;%aW3^vLJ(T_2@;f{*4=Ybj;aa*JtO7Sf{HCkDdnY# z4m_(yiftJ;r5uo^6pXS|&}{1vNpdzxC8em^q~HRo{D zU8Ynu^l~Y0;0B~ZwD92|vRT2?^eUPfUzARv1)h>LgP}dc1Y{pAX;~{290P?0at25Y zX015&0{K)mD->*6`osW}2h-In<^di67aI6qz~unZnsDsnfT&+U)?({uWs0<<-BnAC z!?KTd-_Z88iL%b!E^9f}xG4R|=KhRB*{vp)rtPD2*r?4?)~4u{Okvg*m?I3y;br+Y zPzsDv4ICYs0<3C1_sCDFNe5DEoBVfk)ub6!ASfXY9HlVC4*Nxhf;j=^-~fKZZQ87` zlPx8RD`@D61jBC|5!-nv93@`_O~HEUv|a#DkEVAR)LcOW7E)|E1N@>WCARq`jOdq^ zK=|Mnz;7W0@Wg-+3EU@#xlcsO*fL(Cz~O;uvFbh!1UBM4M!g_>-GuPZA*4dgPZ8ZK za~V!{W%v}Y`!e1ckm12H4?FB4yHi}ommwKf#+~uV{D=elg0pLYWW2{o(o;Py+&I*` zD=p5^azh08)-|+f@fgA$do7<)SzytWSLhOiM#pL>w|C9lW$@D#<}|0%X>jj#}#gm zcsy4*SSiM9)6W4U$?;e_rK#ar7+@OaA>(pSbGj$Pe}{HC(5?rx%b|W{*CE=4j4R1k zJA?_47=-h}zB&zowgE=k4i#iau7W{h(6o3wn-p?SlJqjQ>??$6fcE1qCVVs|lsz1T z0o)ouy`TsUdJ_8g;cJXS)s*2go@Y`MUsG!I7(mInplTFIvFvlP0F4?*AWE*EAVha% z$YC^y|8k%p>9o`dam-*eZy`nJ208(qmq$2YrD_r-gz)%JQYv>N-x 
zNWHyd?dp2_iKpU;Z+x&AtW^x(GghyzwXX+z*ZE#sb^xteAn!)mk>`1kVvWo~k2tzGFpl>KPs%0xMtB?BfqDO*g&6p=GXk83{%LL>rW|b8xlZ;y zU53aa{Dm46>)<7494eZeLqiUqLECl;*vp}-X3#AiC~~A>L%RDlc14@J_}ADKYwq&A z#;&&JF7IpXYH#lHWnA*WUjg5F@U^`p<38ocUO#5nr6g z-Z3Ie!z2^B!trpEoJY%MWv1C`j1V$|?(INn+)efHJo-Uanq!fSw?V_w+em}lJ%fJl zK)IQ2NI;LuNA%b{#xhoBFp3>0Ge-{$KsOW1M4K?$CS9{$y$v+sVfW}hCV8mslHumj z*Rpc+DtQ04n?JeGi0Y0emcj1nWBS|4V*tg=pa53TUn>-JsWy~!rY#e>;GC5P9Ss=& z51F=S+B4xZEt=KiE!Gg2Ki;4q6K+^w%UIJYTgKvzbr~TOoH^OpzyB-uG*^hG*fl=K`_7LN&u#bZoO^84fTJWl$Y3&OWzYwkKlPrzg&ZujCs56drG zVmR8sEYJ{wl&w<4q}9;XoYn5UX27e+Z8di%3t>#P;_$MNUDOKjWC8)MT*_jaN8beq z6oDU14-j-!OfZZIysssyy$NXn9tjed*|DKg*~Du*yt?F57D@V$6Q#5S?=SGclE*NK zzYrVy@(oj{UWxVj<^)Lr~gSb(SmWSp2miyCU!QxkxVj;;< z^bU_nGH&sCEoV|Zd6rjKs8Td&6gm`rq$RWnF`#~`nOV%L;Jpeiy{1-FEqAHBOws%- zuYK#XTxG-J@m>xYSXnEvOn^!V5$&7~v$Yh=NYkyrqM{WT$Yh5Vv@;G{H>Yv!UsMod z-WC#*%A2_3j165i;5CK4bWt;J=i9MH63^@a)o6*$p}a<|P_t(f51Ps|p&RHcG0}Oc z*qM)jiIbBp{Ro!+2EN8Gz=hSwPFv?rG`@E7(aA46x1yJKLb27#!^&1@XeZpZ=6U3K za`*S%^_k52jhkEHIcN4sX)Bc8iFK^K{pjs4Pi)2B-|6gm=y~G#^6t~=@>Zw57xJ}x zYai^jk?_%a=dpUPRPQ^s=i|fP+I2|IL+uYH?@z9s`h%FLrR%}igZJ*gw;k-=2==aJ zHiIW>*XrT;gW~<-c6eYTJn%5E8GfU7-Y||#I=vsZmb`Fe?5HpcY|C0@vp<}fItvy zSL*?xcExUC_m81iJ=#@|#PAao>)~i!3`4ya@iSiH#2?=Fb;D=f*IkFUzx%1bd&8et zd*`V?@yAfdcBp?N)W01{Y=jbs#P9{6{cdUCjZ*LS%LD?+2Wv<7-QgZhbku8K1y~Q;mf{j2O2i zza5k|g3@L%0n!(vtMB~7WG#){i;3R7GUD(B!1!E4}QAbBAH9;edpKxm!8~x>c8|gFKqMir+j>a?_a&U#rMCQ zKVy_-`7x)c!#&5qf;M_Uztc} zC)2Z|*G6YYtp9wuD$-$-EBPdr^~o;okyTe)|q&bO_8u7&-6`tiTm*kR5Q6#NDThdx`#jv#~J9TSSi377XZK*Y7VXd$RB4x4Sj>K)a z5sjyB7iX3$4!AJMlTacS?IZBWKu>&0n)IGF^f~gF+=&3WiONmw;AQ?!H zGjHGf-kW)E-kYB!WeL!cdhuuTpE3Y{V~g$(gURvNbkYJJ1C?6<^Bm^pLpU_gV}2a0 z3OG#pp#^b1f+OSLJd@%VqVq8vo0qUOAII@=#uFBjIH`tJQH`k40;k4QX@u#fp={nC5x!7>b zwK^;}4Yv-db{sx2v1WNolGuO^h;#~7wG5In9j)4IVBMrla|;@aBxOOY8#>mtM-VX+ zTm03+RU$fA$F4&nHiC~#_?RRd*Ji#zLtEP7hGwo>W7(pg&_e{|CaDY)mUmP-(6!xCTAfdmJiwNK6dD-?=7YPGRIik6o?1 z4unWyLIdzsb`c$<@;@GVO`0=hE5YBr6BEv4tvlYs(0pE^up&p{ZuhA 
zw3N?2%k^3a5GkwPY}(y@=T7|hg{PIAjhgft*P!d(|LE+t^BsI!p_-7giO_J0g2qS{{( zR9;PA4^EGEp6nz$VJd%iRK)HMAJYh;Cw5HDRARrNVg`-J1=45ze5C;?Rg&cQ+$-BghYbNOho@OQ-)tX^1`VU z%6g(fd7!;aVAYvKg0e}%Uj3s6WgTs}7A;JgW5X)R1kH(dw;HT|!a5H`L(KLtBO?rz zrB;vShUQZM#l;g>fzLVZa8!v581mq)V%u{ZZ7>$|al~FHL@>-MCZeq)-Es)O0a;I{r5u}ddwiq$7{!>Q zEZ>OKvj-Mf5=U->wr$JO8wLer4xQN?Vwr+2GV&uvE-^C6$P^>*P~xz>Wko$0m z7LAz=XqgRjW(hTa3#-_fVS$_0{U^mt-CRRD+Vq$`ExllG62YR62y~T#sti3(uWAb% z$s;%BTX#BXkVt=iWBbNo{K8)R!cJvBeyMf4oy>l2ZCi)Q@xA2uv)F#}$E~|< zDp%evA0|fk5~DjC`-!Pm>7{(`P#)Wp#}4I*J$YhZzSO$&a$stI;GNd}m(swq@Utho z<5$|F6FcSh_*6Uh^H&i*KHIuQJ%}a`BSZ9WH!^e-&9pPa?NqwmmujanY(kgW;n#i9 znAFLFp|Qik{N7;x`Q-j!VLS0O^hKr}AAk1KgZNZ?;QaR8zYdH%z1s!JwEOx!5DznZ z-UB(NCk9AV)7Tkm8ofx8MxS)pMNrcaE3k;wNEBv-T_GY_bJqEZUw+C*vQ`wgMcW|u}ig2^9L!t0Z?pwg< zaT0K;P85U(cfxHU@pSX)Cv749#jiUNAsgBSsZL^~UuemlSveuJqMb1yWm@9D#TicO z7#yGqTL=8`UxeuI?|jPxVdOuZk9ZKxyaK#$X7-p`neFg&`Co@c;Y+C~f)`?cafE*{ H%u)J3y!tsK literal 0 HcmV?d00001 diff --git a/library/__pycache__/ceph_osd_flag.cpython-314.pyc b/library/__pycache__/ceph_osd_flag.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dda3e2d6ccca7fef2f92248371e36abc146833c8 GIT binary patch literal 3415 zcmb7GO>7&-6`tiTcgf|CD3aD+Wo!KtD{L$ZX`I@X9m8rwS86R;Sj%kC%2}*-N7BaR zE;Bo8M+yS)RM@8&MGiXp=pK71gq-4=j}|0A>OhA8bq~EUu>lu7_029xNvT~V18MkX z_WjSB_su)mjEZ37p8VDMTNa^z@I`0HZQ{k(Ks3?ENEc?%v_OUF7>!MfR2)IKB^rnQ z*o-`_P-O)DjB|-IiRmOwPODU%PSMl|-;-vtG^@vTSy%K#Sjk}m|6YJu; z<#(CvQ45b8cSMUy&kv@T{DX!Lh9Jqm>OA}&@9|oH9X%o zmTTsU>aT*3VPoGSPJ_EVq0O4Exq`J1fls?}b4m;O+SU!zwy`~-o%%)j;wkm8n5w}T zd%dt1YW62W9@nhnh~g6eqk?bpF{W`w&0E&!Dh{3QI#Ukya3+F34IIK9q9nlGZEMwY zEbLEce;jqapfP$*gJ0Xb=SGwl(1=(vtvkz3tro2aUNUQ@YvIvzT}|9bI6wixmIrBw zm?2-mQbgJqH^!p%?NHFF1wO^(M+Ldi39c;^M*X?~m(7}g5TT8i&7el3(+rLBg7|TH zomtjKp=^BY(oxOvwZO;rxiHI7iD*8iez@+O{((O{7hE20z#GNyK}NN}1ubt-a&T%# zgF19<9sl6u+NPq^a{Jxb+f1q%P^jW4-DBF6Q+FtFw43H#xJG86UAFy*yb6`FYSJ=X zjw|Z=H)>R36Fgpx*Gad)AfZQY$)Umx8P%?66lNWc!fDiSSAqZkG>m@!x#mz@*S`2- 
z^w801QJ}C>^SnkUny7$7wlQAC2Suu`i>JZQc2^63GBCq+M=w@mywffUosWxrHyj6) z4t=zR0`-mt%V3f)wf1lO0Z7(A5iIpN|HrqWLyQv=hzq z%HdT+>#6f?e;dM2kV2(vTK;#>3Iwi&sTm z4w0w8ab>Zm!=Y#Oo^q@zl!f2FrRO?%Ysj;`M|gT)#{-e&I(iO%N86-*EVKmnqESV9 z{$o^?^?X&*MZIvLooztA;%R;&!2iQhl{*=^2em*xYa|yB>8mEHN>#2VL7(tLeWgAo z`jjjHv`KWWfmxz+znRj$(bt&7?^8)6yaHvvcDz!IWlXXX)Io#cH?YMDZ6pLJUB%WNjvkmi_TCjf5S0nFdEBPE0`Of* zg4fVlm>l5`i@P2dQ(dovJuG$TQnO?iW|-o7A*igULADDqoE`cdd@|uRks|zY#U$IY zAb$;$n023tE0{k?01l`F2@_rK9!c`hd3iJSP*!+duoMYgcy-k4rfZi}a)xsrO&2Ng zy_0<6B}dNk=`B9J4U^Abe!MS7gIyY5_3C(h1=k(d8J{QKC)lF?I4^)PGkOb+*PJC{ zlC^NN`xYrT)|ga>w-31l8gFvd??FdsqW#R!e&5i3@3D>dAD{mop$RAU5~2JjvHjw} z27Zh;t2@Q1{e0g><#A>6S5McT{bA|ZoVAm;Un)|++EUT+lP@z$R&HKvc$sJ{IKRfiO^7#EnS2l~A@>69e z`{@JmdAN7s+3EK-&u^Z6I=Ykf?~C6mnO$W7{+=lV`-#F{VQ?>(-|NZk(difG`(ZhvX3zx4R@c7ORn`hM(Tp(9e*>*)_!b8)lnZ9orY^Z%u<*3$;fI2n5ykz>06;<4c*D_dU zSV*2rcx8q47FHfh&4dr-3Lze0eMgiY$o!H!lAD~nIrHo56=SxtFgZ23Fv&7gH$J>I zTbWy!T$s5r#}bu~CTHibS8g&{uPofs=fY~?!SeQC7=JZumpXhumrU(~b#bWWR!gq+zFS1C;wGt@azY*V)()ZWy z-`SJ$4}aHEq+;wD%C*wNy;3vNx|~i+%|vS$sfDKez5I@#wg7rTV0}j%{JWI+)3xtJ sBn|z$bxTBv!b>DZW-h(}D;HZLEdS&4Q}{}ql+hEpcXCL4GAO|OUm~S*K>z>% literal 0 HcmV?d00001 diff --git a/library/__pycache__/ceph_pool.cpython-314.pyc b/library/__pycache__/ceph_pool.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab31b9335ed0ddc62496e8b5c94b514e73efdf0c GIT binary patch literal 19935 zcmcJ1TX0)Pme{>`KM8;U2)=nqiVsKxD2b#7SJYwk6-`)kMH|*$ z(S>za^kIDyev@4_hK=Oc6gH9H%5WtsXBDiHRXJ%^&1$-(VKb`@TUcG#%Id>aY$ID? 
zqvq(ajcsBLBv%b(Mv|+6oQdRW;cF$y)j`foa`lk2kX!@QvyxmRTVrZ{u8EFIMqwUQ9K}dF~VDQ`z1?D1Q&h48I2Kca`^7<%U)ViQz}*mx^IX{D^@Kg{M;wnREJ{x}PY8-o z*b|O~1SNofET(Rc*UNbanU<5zffkLXxRr*1$5GN2G3pH2nQ5N$AmlE_ z%gu3^V}L45BnU*{LQYM9^cxx^Ag1Rcp)kjbF9>r-Cs>G&gSkD+k+Cz`MvUJx&p8Pg ztiW?0M0_yVLFR#HF2v=_z1#y&WG*byjg(@$1NDd%nZN@k3_TP25UOKm_!K$}^AWK> zoYaGWzWRXafQb)r{%{B53o(%p=j|fhh?6W5kqa>%2D@Z@4;WvV@dmh%zaz{5e}%&N z@gXV!K1*M$CRX;$!8~{$iShE-uN^jsi( z=}b@027J?8Xpng)Pu#BjKp{sky_fGa&luU6ARj`opbx|wA&v|IxF9&^oAv;?7W2GP zN~pci&G+9!dYZ*a!#kIWy6R$`NX4h%)eW%vcZ1A?T@KM#fkbQy|nkl=yPv}cY(g>g8}3(he4 z)-dhb-A4Sh=vAGfOi!uS3*ByQw zJS0I-RAQ?xhVy}V;hB!?>IC`GfkLta^m-`5bK;l}gi|pHGLkF^caRS}@XZ|z!^v6B zPvjaI6mzbCNEee6Bw4sj7H%;Q9-M+5*(ECukOzatePC^OD=X2a`ABG%!L~%hGM-S# zH{<8LC>u}{;^Z6xqrw!p5oQOoP)ZC?fB9))IMCcoo(;GUgWNP)L?CF~f&1^nOV)0{ zsoTd?V3^!sb_4NG7M2~yMf!SzMD`OqDo>Gp(8?i&6l97T7q6DrtGT@1AawG)gpGwp zgCGfQ9Lz^#+Q1A71c19xpD{gM#oY}u{CzMt3X7~{opfbK0wOgpUPR%WLN8e}DC`M6%ubUdiwdy`xvVvlg`YQL3zCMgD%0}OM_|Tq^G;%Z zJ$cfSC#`5+!3rUkQ=yoQG2e`6;^JmSlm=rq2fe72DXv{amVDiOS!vCk6e_eR|5ns; z0fdtqM+g+e24wEY_eE(d!%XM)jJTUo!+$A7$zBaWCJMVp7tKmsGN&x4n1YEHA6ClB zoD!G9MY$v{sY}kv`?D~DN|&;gvMUxFH6yd!^g}cbJnQC1#h%|neKN7{z|QQ~+Y zNJ0M#GDPYtg`f;Y?vs6x3={-xSF*OZAOn+?7gU(KA99ZcLw-jDdoI{dd`0t0K<+CQ zUjsja4Cj)sg)Co(?|QHRgAv$=5%Z3(#}C!`K?-}WkP33`%xbWQcLew*uThg8w0rm|GS~Z&|(_ z>y0-gwcXK+-)O4h^7y0J_cQfOrl}oMrZdxVYFjJU4n#+`Dkz;HdhHbepr+-RW*^-B z!yHksu0Rr{isprK$ymByBGwDaVdf;P)CIEzvq<*qF3PD1DkfcBVil5R=S(HYaEw4m zq|Tzm;fMbZ7u60&ZVnVc^9Q~eK?<6CQ9d8=JCE9F9*JjH336f)66zP|PCyAVoKQk; zz8NYP$c;L>1xf^4e#47Kf!Gacgn0f(Y%>B$l-kx%y876qm76K;#puONl`fP0XUwR~ z8I>WUf{dPI^jlV~P7%GhRZVFPzjrJ;lF?Q~ukl9!&h899NP1wA-Dx12%qcHnc^NA& zVR=Ov%PSY1n#-WkikCUs1`Ox-z;54*GPigoXasFhRo)K0^JYvem{>8X!lVI{Mo0=| zYtNC*g48O4LU@nwhRQFofjA=My4qM@{76EUm``fY9+dA6Kq{0C--%Rhgj7X}pHQlt zLWC%ShEyZE{Lln46Lv!Tg0vUua}sFt%ToGOF89XjL=bO-#v%z&U6j)X*W6c1QI-(`<`qdn zbBLl0@yIF;QIhGg7dLb&^4b~rt^vIn>Vr&_+Nz}VrlnJaR=N~Z(xn$jy)U956A9&;gw&jJ3Pxw*%VdCuc^ z_`NoieA^y0RH(%Ry$*PJ5h 
zvaAB|C`t?eL37Gp{Xfu=;tB=%C5JVtEMhoO)GL9gNH_&sAe?FnT8q!5syC}=7i7Re zG5%*R^}7`DG>@Slc?wF!XQJ4?%9b@#F3kkmT4hhpH{F}HwT#pqT}0xfB9cp>7e4s! ztg;JvG5feA*xwA`R8Nh8_u8eqAsGX$p{5$4MNs!HRa`r$DPZrSrjQooLFUPhRMnqd zm?dNTWzY~bmbBF$qOB=tWYsPWt0~51!jOYd_|1*|G!%J30oDVJN?Wn&Vi@pM-41of z<*YWBpEBj(y_WulgV29JJacmtteir{O&(Wet~ARFz1XrD_vUFB*UtWu^ z=^*&B6=f8vz+NX~lRxW$SVu(CMEJr-*$s;f(ZQuY(ozFOG8YmQVujr_ub?K|6k=(2 zKre!6hg(KaX1(t>gsc`i0Tt~UVyp9L#|koZy9)BTfX5rcT>-=ihy}x=nigbW>xTpx zdQt`H9Ot*21%>Fc-c>p?(56g83le@Bwipp;&+)uK2L-8{pXQGtbYr>g1g0uf;t@%r zG($MC;?4k+W8o2Hy%DVoLW3+7`P)#vAH~-vkbt~W4Ub?lZR4={xy9*n&jb9%9gQM|V|Msmvee2KON!6ZMtGK>edt&*`m5=`O zyD!44ePho)So#1;Rx7T*d^25fb4mJ5XYYz@qwng=j?A?i6nf4%X;PFY-iSO7glW-)@;LTl_SxyZ_kdgpJ`}Xk}i#Z zt*YH@=uDXY_+5NkRn>k|UH_%#4>a+?_12!%)}FQ2zP0L8D<7w;F9L=-Al;@`zdVxE z)+c54zxnkYnlg;jAsx)j&#i{h)3Tr26_8G6g*Oya6be0}7XF+}TQ?O1k;}?&(m~if z8L3NG!P1Zw6vU*Zc#-86{G_b7l%6Xq)qv# z%3;Vv$i&fbO*0ahhrK6BRmgbDDXD={khmrt_C>99j8-!Otxe+b4n{Fy}2Uuqy4Y7M^)v*k0&hMpl z#}b#*y54Omt?t{*m#pi0Q@Y+vQ}dF1X?|5zpE)`F;#~UVn@RSq^vSoeFs-UjHaJ&R z-J6|`#C*E*Z1UWVbmz_Nr`Y7Gsx5Q!(u*hQlXpt1YmL$C`lOI}=7S_3zWY&Eeiui@O^ zmhU?Ez$2wrb}1BA1>Z~T(9-W`=p2X5n6MY%pnc&vWyQS4))4%_UkJP@Ewg@0#+&QK z3l21>yZczQ`@?V+aWK@}hq|ne>}-HL_v#DU>-VNTTLIq<`+PUT_kHHhDKDaulr`-Q zDNmOi4PQU1*1bo?Rsvr8^j1v4@jrsP`}JlfG+f%dH)vmCT9k5@lP;mbKGe?Bpn2cE z_JL5+*Y3RaPo2>szAx-8Su0zWb@ib1l(Dul=~=sI){NkZA9VA8iq5)u6k^1O9|n=a zyC4x{=u+gTAiJn?I7CIwzlE>h(GJd#;5eLy5;H_CFPa^WojVv!A)ZL|Hu^ty3-Sq; z-AWgY4o5z!<-m|T{|@4ya5#vCpv*>L1w&bwx6GYlm%tw8!2?0I;Nku7A_@3;9st=T z`~&#eT^I20;-`WNKw^~XfxOY66$wZ*0Uw5nE2s!8U?q!QD{Hc0e2 zGHo5mSQ&ixrLD(TY-#K1Zz;O!^cE&1)wa|!>))rQK-S@IxB&|oGakBk%+B*2WKW!ZWp=*Cqn8Z<07A9$3H+`{I4 zezBp<;dlVQBSt<#N=Qn4vs#`27kM0ZWYqsg9`A>YXb_v0k0cw~PG-TzlGz8an6`#FO5g*uHZo}?U=bf3Vyg=0yjMhm_iNo(N_`h;#!GVSN02S-0$-kKVs~#D;k6Nj5f1w?? z5s}K|nXCk3w{!WYJ-tbQWzD9w!& z_T-)W%|I`306xWVg37V~EAH8ED}%bb+PfN;?yer4a`uYF8qFPYS``kimhK8P{3yT? 
zH>6THxFcw%=X`F+xWTVR^jw*q;K3hE@u>g#Nk{}*v)KI66K2*Ef>RR^gBMT5!kJYl zgo7~tzyjp#3LZQhq7oraIUe2cJg#IxAx6yY75sH9tHGo!B=R1Xp_UO8pey0LFMl6P zsxbirgyO-CK=C&)xd}M(ibIvPf_pEqY`+#EE&U}8Ob9Ftn5<@l2;Vf{Gnt| z(BCj!30JD``pe1WlqY#Vke&)cbDkbSnD+oQyj_x9!=DF|2!Z*Cz+BiH=BCLOJMvjW zviahA^Vn+h*vpH_$*J@tfcK>*-(PEfm^MB9vEo<#l(BwmkkXnkI<8*|Esw;^qTfuE zY%mA;2WKAn`qc`|U-6-Xf?td!Ax~D?pXL9OFR(4n9ZV=Gr&$H~+RomDYXto=vA3e{ z8VI67G=5izaHL@mh`+6J?VdgB_WYuU(VW|3G)N!Qo-Ox=S$U8)thqlMR0&cA`42uj z8EeVWDG0}TCbCq{&n#pn@>0A|%35>VjUdDq58SFM-%5IjRRa3r|?hvi=815I! zV$Ty72yiLbLE4~Zyyv&x!y83CFwld9Z4MrWW-YnVvj(!I7je}G_2cIcfolPZu?R{9 zK&ksKP=fkHv}()GqwuST z-!}L)!0$2mt$;empiVpFjZjttzX~X;fwEf2E4%4#UC=azflTsDh>kdlZAh23_>FZIzXhwNjJZxGz_nfeOh7=r{9(IYRXIC6 zXe-jX$JyiTiM-CJ1~-fXxnfk}T|vH>vgIJT=qa=d$U_bhSY0-kg+003F@j)Cur^p1 ztasJG&SD1kJZ6_U$9<#p%)cj)@vx={O+1v4gQpNngD{IzcshqXu7-C@_ZvOMJAfQ- zzcqNa^RNg*Mi^eImTg6&=`3PA56lg$u!My}DXf4#3Xv^gbzqy&r zVG(R9?+x75a<;b^;~X`-u;YN$SL13b$SGGNTcZLuFWXSug4L4M3jM>{ZJfvfD4;>X zdeG&@1+M7b>(rOjIk?8juk~EVGZ4Ah6O&^ly)^DeHP{uy9wNxNs@c9Xk~5{Vd}_j8 zG{9`u#lZP6rkFOlQFj%szlby}1Ly)QaLH%z8YVoS>&i5d3A#@W! zAJ}(Qo7kpex|C8Whrjp39X;V$3-S>8u`d*Y2}+l&G|0`cINBx%wFH}qbOBlA9U;`* z7an_>tR6TWStiqZif}Tq9ea-}k5SeYR*F$p-kI3iy&>c&1OB3VxQadg8Aw|d!3&P4 zl;PD^gqM``Ehn?SD?N&&E&6dpjXoHx8}~vFO40-Phs%2SZqocp87ITh9gqV1^Bl9I z@BSU5dyOWULh(o*7k#(xZq41=yY;S0ml>qSnSPb{1ul8(z{hfdzX$J^cvJy)DgO|D z1Xb>S7{*M=#lzd+PRCDt6u;wDX}q%vfmd>OmA?k1c3GhY{{vDjdks{O-G?g-dJ z@bi%J0sKs#5a=GUCVkv)D0ooBD-aw%gHnpTiHtXAitgWlvj?s78>y z=Wko_eyj;>4B-Gl5BDbEVm6#Z2*G70!JNJ34Ohs??F15nz*!c(I8QN`$AJGazP^P? 
zY3Bqu{{eo$s005KOg@1`FciWg;t_4cO+;uIbD%6Kei0KwSX}LkI&fkEf=-VY1}xCd zMU@kdF~B|fnMF-E7oH~9D!Pg53kIaP&w&q$6Xc!;aO^?!ZM_X(AmAciu7mpmUO4O^ zUXSBH#U7L&JwX?jhW~wdgGns}LI&`2*GM2T=f$hd82u(8B)mzUo$uW3sO+@|2(e#> zMxq029CK(K5LXttRnLdLa7|^_`7*EX!2@8h-@z*X0+TG$JGKQZ{cv?UOCZAse!XO z**~_xC!k%61RQ|xDA6ajQCYQNw8YvHZO^Mx#tR#8_Ae3-r}SML6_qi0Lca1?s2hM| zF^0i!pjvyP?|ECkS|p`+Y?!REzC>TjbYjC`j#(4dl%YEdFl9UrrvT%lE2A%ZQ?*x0 z@|9Jw(ZuNUvqIA_05@Nx;ecGcZKdr+O{#hfa3WuOQ>A7`n{i+tifNR<#(Tf>f<+Jgppy)^zRB(M*NqU~Qp~Cr_WO8<RJENC&g;(A9rTz>3dbNGE+MYamZms$}#vOMr zt*rjs@u_3IvURnxH9o#pc``b-sj7IUf2vf>z1}vOI!Tmn&sq@db5P$@2qR>Uu*3d`^lBLlzkYgy_D_IuB!g?`A_H9t2$Pz zI+C5I*Qy4iH#2q3U!MEo+sT%lU-k6nt7uM8wB z&t_Dn<;$_j`25OSN%I>SIQcn~>=?*29!okdKxnmbnBLsA%oykD{MpdzFi`n5= zSq4J4m*ehaU0+6JSzd^%6X%|HC95uHRE=?4^62>_1K_dpk+?qbC}}*EQB^Ka#%7b? z$Zs%IE0QwHb_-QeC9=f2zGYS4vZg-S4X~rFF>e0sNj#jk z9ZS5Qwhch*4TI^~!qbIkA3yy#c0GA?aLsTgdgU9fVVRFtrjUm>mAYkhT3NGE-7snB*nN(#W!Tc^a08*&3ri?O0O&t)XgZVWYKY zb1$)P(**JOI=Zt|^%>7lu`bMwi-$6FNj zefT3i@Y8!$lGMRbxb9Wm@eK>i+d!uC?DIF%oo~E?VD=EbH2RI9a=Gs(i?N~K``(7F zE;jn*wJ)yy;q^6}eOZ>NbtXPY!`WbE#$21MJF#v)nKGYTdGx~apT3u=ZiqdI>B$FY z+T6X;n=&rEm zOI%D@PHfgOUk-jT_$OzVM<5IntN#2({QdRz{?+#W=hEksFK+$i)LQ$swB_2rx`Cf< zpO44iUT-_K+IDIs^nCP1>t9}bv5-D{bFFP6ZJGFN{8w_Sz7xVMb&WCZ50Az__`-=SeCEDy(|v0L%h-$01AvU=Ur0{@bxmJJND zY&am4o;iIkb9yLq?d{CfyD+EDw{cFJ-=W3$jCA={tQDro$i($&gEO(PZs<=L`ZpSn zCQfY}%bkzDmAvgqk3azE{q)Gg!f|P>ybF&@E1Ed^Q|+>3d1(2ASTo>Zsfw9?+K-`s zeY)xV^M!QN<(GY_ril&lj5dJ^Al>Pa_mZC3^oXwj63%Or6WDe*fn6A!h@d8IfRmQ% zhTfE+chhM3hfQ_Ga{DJuS@wma48VZTk7Rn!B0B+10JQTO=h z?V*c9w}%A%#hW8{#z$}59=d(?<_$qLdUt4i;`-<$WCTMpFs#=_H6XJ&S1u7$9oQ0ITc*JH`UL=*-V%zJ@fZCBt~PwHO1&>=)B^(AnOf zJ*V!2S2Qh4nIxENt5~<{)a%r#Q|FvIb@y5=MhYJL&wd*IFFuO;Cwvhftq^%WprEKF z>H~^VoTtty5{h=J8Q+xWmFH9m)j4%SeNL0mv}2m;ygs2PzlMZ?{2CKR@@q<%$geqJ zX4H&^(K5OL1*2ySgQ|puF(#~xDPd#U7_*0pC=zz2ow1OZgRy$3L>*&;(8<^#bTJMH z>jAcoK)501Bry-;f}94X9>PY(4Pg`Gfv}lrfUt#Wgs_!qg0PKghOnJ!fsnr5!L%NE 
zhoVA&ix%oS-9ZfrLvM!>?IM3PbzQ5aLRN$zBUPT)Z619qb@4l4>bRavRphIolmOv59}*xZuia4Z4UAEQS&HjrQm7=7vT z**JYGoVY<=y<#ajype{-OJ9kH=V$4}4ZffNNiQVA5$FURSPXy%1+ZbRtRztGki75lGMhj-{sq!J7axeVdL%XT!ljgy#Dd9}xO- zX+BcbB#xOD5E!Rn=;8V3LY$6r^ekzH8!#H9Wc=}Cq>^A{A)a74{sl+)(vUF`A1^&6 z44M`S(er^hmhTG!aO}4h!WnkJw-$o``bnwi}j} zp`$Zo0$|c!-+J&yG#q5($LRNary~n2X|(tH%bk-lNmd|8Xha59Asi{Of)ySdXEPDF z$qw+Og>*vb3J9N024XQxfSLH#0?XaTcnByOx>jop32vc+K-my@V(IOtd!a)7nGPg^ zH!uS0BL+l+QS8kkkT%96;dlan!}E*mJRtmbt+Dg9l5yb*-1eMoCQCvNI6!_}hz+1| zdInN(xd19?tbNPj@(f^hTRk{GL+YbpeFf;qV!yD|fSz%AVUtLoBC+sG2bMu}Ax4K0 zBodvUjYBVMb*MtY(Rj!|6Ay>B)g@dZIL@)l=g&%HQfpmctvjsqH8qB&x7EWlG*C$* zjMR-&Rci)i^EMrc25%08rng2>XxhKH{VALNmroc?@1p0U-wGU~M-T4Xza^q?1tPyU zyg=PsBL)C=fEj@PgQ6zLEyQoYmWWKqAB+Uz!j^z+ZHrytYB&<1<7|SgFj9xO;7J|w z^}aQBLUTdxb}X^2>4CLQnvBv@z!@ceatr$ci=CY(Rl#OS_Hd!*t(zyDQyBjCMt+7y zZi);=;M~aKVt_Zo_+jby;`S?w1d+u#*ujxI)A5CAK>(_BiQhc<9Xu9`!1hk0h8bGe zOMt5h%)L_P6`UmQkFlIEl5JrV+0^{P+%(JKV8|W?BM}4aNE5{U=*>YZzos~Z)Rv|`-T|sEIKp|TY_wB*Wj6Gm*e7@GzdMA#34vgjX<@YO0)w@{3ulfT!tD zH5kLjK^{kahi`{t6ARotv;+(4qc{i#KtOYWM0K%$Ij=(Yxh?!$L_QmfCO~8j!{ic0 z>Y{HPRx7U~($L9(z@@g_{w1+@`^}<;5ST?ifg2;>L=c=S=%{AH#=t@X)JwhzFMVb{ z#Id*N%OJM1^Yrf1ApXO9oV@mt-g@cF>~tWW2t)ok&<5X(v(u+Ra{|S005*z~#=v*L z1Yz_N0rnW#`9Nq5$QCuokNj2_pt-;;v9GxBQG$j>a3>O!U%p>5{pnbxBGMfq^V5Sc z;K6t(NRkBoN7xgL#_jcG9(XR<|FK3I)gDRMjFWU;p2YQvnWA+0k~p{X&SQh#$f7hH z6uSs2yoktV4rTOvNaCDn{7@8H@^I zY7)~xOhaN?i0Md74>3K786ajPF%!hh81pG-)M#ayQpJ?x4xefg>aTu$${``Oow_FV z3Sc$Wkd$Q0u~S#nS2PQ%0ct@x)<99WloZ8{G8V=fCNX-wzSF?Pnm)Gx zU}`cZZG!+ah_b;TNM^^9Nz0}L$X^U_BxwN_0?C&rB-OY+lBPj65gbJN1PFullGO8+ zEzSw`M8eYo)HfDVv6vv{M`y)agnS_(sq`);HFxeFyK}cxKQcOb&OdT#V(QHKiBZNs ze175#^KMB614*i3%a}`=z1|oXUJOUrSvHge>(@M+IB_EyPhdP83-WP`*9(jkWbJ5p zK5?Q?ThcDXV8O5@ReWKtqzNb3xp+y-GyRe>3h`MMSnpz=p`@J-&j+~MC7Wcl^M~hv zJC?L_(S`X0*ya3UB+do>p)gm{h2mgn!-!z5D%mTAh3d@0O!?6`7&ic2^z`(gh(^<; zqk}i1bL`+O3%0B9;3O9%wjzQpFUV?WTpR>!a?mZ3C~&!f*zJ`BwHoEryWc?D%JDt*%Rg}q=(mk|3 zu&&(7+4toP`yuNq1SuXm@?4{|9b0~1R}QTkI?|^y`UAflXw{_pd8s6VcS 
z!T-$NHauhu%Gm`b6NdI9f*fE!8lR#U;`oGY$dax}tmx`~pPEq|7FbVAI}U7aKoL_x zDXmY#D2X}Qg=Ku&Ndbzli4TE6DhFgttTvR75}8Lk)U%7~QzzSHOCFbkzCU>mxpDr+fc zW2#|UTeP~CRU0~M%A41<7xkw5#(T!pg|sK*?91!-Y^Zhr_U-Syy}X!LyEjbMrMI6$ zwNy~x${43$pTH@jo*#i=iIPoM9ipD0YV93Y?GpEVTblSobX*ylE@_CtY!i}^32|0FDqx*K{T%+I0D=& zR1Ia`1f_XpZ5BBkV`Xdu%9!~Y@@?{5{h3cSByL+4pJft-jjD&S`_%nNlfpBgkZnUs zsb#`rp$60hsvgE6D^b}*w<=L5E1?zJ2=QBJ?Or;4+D%)iQ7W}nJ1$u}i0dB$u3O1Q zUAvX~YcLc1<1>k6H?^x~++@3NCbj4o&yibD=T<#$@aZ4{R^%T{`TK{ZCSnU?I*D{r!hmrTgD#TT7m4KLZiXdpKgCl6L7&~w7s znPi&^%gxc9q-&kY5eBgUhi!- zo&-Tp!0R_IkooXH!^@8w1VFH!p(H2_Kk4v#vEyE_ZwoLW%!wz$-zdHNlQyrHpwb(f z_M(9q)hmb!h)jYS1$Y%-QIrJhMgUYN6>Nwc@(&^sa@{zoS~%|offH1$1lMOpbqW0; zc&Ss%8~e!YbEmM}X$;O_FpR+n24^uC#b6AB9t`>*h=ZR)MO`8~6A{$7a|nS!8KIto z><&Vy8HCdNbNA*}&aYd0K=-L@c<6oLO>X7O&>O{ zVq^Pft$)#4Xxx)++>_aR>I+w{aV(>DzXICR`ta_9y9LkgtY>$o@A#vsoM#wS7rv!J z-L7oiuJriYP_FI>v}koY1s50E~PAZx=z%wz*PQGnUq+u_s1n zYHamt&UkQXq{s`_DeX%0y1gr7>&~ltzBD;R0sL@Y-wZ;#_QK z8Rx0I{`51DcLkvxfyDkukiCEFT=nPrPGp=X^ZHX8y1LZOysjfXx0cBF9G9TF(|0q@ zgL(ZSQQp6sSGPXXc~&m09?!QN5?g>OI>j>1U3q=iGpO%Q`exp|FilhQzRJO;Elxt=F(%`x@3hEWJQp4umEg94*h*A%IgRa&09mVt^<$(UqK zh)sH+n&^SM1U*ou=7E_9&auIkDC&Y7><$#yg~3}8^r?vMN7f^U5@w&KWUL&;k+sOL zL8ASUwS`iOOxv?zbAo|^=zKoN?}wEYe+ME<)YnEz?!#;WKO!O(nHuOM$k|XWCTCERA$4QQRrV9&j}mShpqxu@FvfgcInO z1Vmm$7=$5l`yiO$4r3HeK-_T*-o$_i4@6o(*2kTPAgKlM1?@H$@dXUR;(1HbB*syT ze;eU1K+vb>CNO>p1GM#2#2LaqiE)A`{e4!B!F(czmte$Q=Qs!JM@dY&8PVc=|xb z*_+qz-kuax5jy0SN*&A;*|7GJnX1j$sw_Op+E6vwpAv)!p}kjGhH92m%gU)`o6}IG zHuTj7W~!+$Qy&<{eM&~h=m!;y;jkLU1`im%RaMJgTg$otj2Li$4;r;=K`=%+1TY3u z@|2Mrfs+&Z5FEw?6Z9<>JO#jEhXdC)@DCQxd~bl4$_Pj)G@g%-3+Lm!X361NoFGht z2t*|{9>88YG)&+Aul_4+Bgb`@RC#g@Gp@2O1l z=@&YcX;`r|^0l7Qn?-sb%Il>C^e1_ByG#U=Q#8&xq3E-uzeF@v$^_?*3I)NrEI?L2 zjQscmyzKi_$wq#bMUMlSt3k;iy>RbCO`j`yW=v48mZ83aYwTd$+QKnq7f0s z3P(;xxGrLXsWT@%#*rH=c;2DeXCAl_&3+7PJ#IuR%6#+&8wi2Vdpv0seVv5!W;HUJ zq~7a|vk_o($@<8mAN{sZ+_~GoI4efc1QhR6@aJ@R-d0jZ7N^GgwB!JbUmG|X9E*9H zNHh=vyG(?g?=zCcQc^C?aWQ@VkZK53|D`W&*+7@QsWqyma;{ 
zSR&4@b?1#omPVd7bftZ{hW%M}{nGIA4r6zA1qsmY3*e51*aFO z+$)Y{YPP4AmD|Zd4V*7a?d?o=td!fyL5)t<-cEE*y{z0$bWVe;+)i{(BV&^AVr0yW z1(>i&#xZ^><(hrfhXv+ZPC$%YzpI$wdDFhRrs!hx={IRY70qm`QuZpXt72UuM-vsU zb9I+A2BoiNpXIJmKD8u}wN>euqf$L;sK)?Hx9ah!Hy!&_j%4_nY$Ev8S*a%ND{9hi zs>ux8(Gjo!$1q++hf8!-ddZ&?Z}SkRMgS1(&Hd=WT;)V(thzKN?2iNFY8GgFaJ`9( z-sZhX@mfseR^T=`G4p<;@hDp66GomvqnddOWXC8Mo&_JD2;3wS+(m0R7J|?zdGetn zpRpGe%U;}{!S8AwE~r6Q$-T8(EL>a@D}tMAa(f-#Y2(YS|yTuOurP)rBi_GP7ZIK^ifUsh#DQebFrl*wy= za_K>Ffyis$hsdk;%_N}qyJ6Il)9DX@f9_#H1rX+Vm?3&P5n>Z%h-5_pC_ zIZ%#OXP}BBoe*F`)db@%mt@QAQlrsMB!+R90N^p??IgV00MA>l1w{&g8Cpq zmu#^5u|7ZFLOhPBL3cb}?x`2E!}EU99Ju_2;0hxEeTjt3D*>QM0AYXuWaY@Zr3(b; zDsxDE&N8?(1`3+qn)2jz4WIdbVRLSHT37C5+7V#9+x!3KYbr@S!B2bP)kXfTqBgPgKuD zh*>9L6^h#G9MZl|Ijm5FRp}aPE99A!VqyyTX-kIoO4^VR1>2co63>&Xp3N#ED5wq* zKq{$Wy1@dbW(>&IW{L4q$A=ZA?1&1NXx8SKB>oqjCk!KR;zKNtI-4PJs#5sU=#-JS|wlk^-{WG>SK*cP~Sv$Os zod>$M*2eT?pV)roml>>yZAr;s_KEF_9Si-gaxeb`$DxkasY_?liKg}$n+&x*Mh z{wMr>N$9vn{k3rVTg)1ayanjv0?=CRanx$3*4)jCH64MPYGRS;7T0P!wg^XCcu(aT z3hd;m@RykW8cO)&@ma)D65ipg>{VQ&R&SZkmnm&_*ADC0^>Q5{pcJ51X_a1W5Z8pb zCYez?H3pn9W(V9J5Jyz`_1T&Iauxgn@J__ZsABv>=8c$v>5Ul~WekjkF&(7&V0~K& zA0etX=2TkW9;b#{4nQsZVQ%eO(BqmsmHDV17t{iNmyAi^HHIDHvZI2_n1eaM41w@m zcMa7wp&;n2t|4d3TF@=~%CEvi9)90MTgVn0XC-c~pW6bBQ9$7Q&R1%%> z0{$T|rEY(BbNL)n`8$Xg=EyrPR0~2Q1WPXFh@AJpK3p+Z0L{zk31(D=6S1e_uSjD% z-H*l($T9GHX+b&Vgjtj1nH@_u4Za4Ss$VJ(>!-0I_r0UP2AJfZ%K4lutwH1$vNXxR z^c~$hhDne*svqX4Sn3_)*7-gybMmktSAe9b1C#nKm|c*P2v0g)T{BWBbBcMBIWY() zk-XFg+~Bp63A1k(MN`)#KYIA06OC#s4OL^{J}n z-|#gvho=lktFd~B8A;4NRexOaik>AaqMe#@$2?OWklttb$s{qQe;xGe z`Y@%N(lW;nYj`xplrrqX{sP$Df?}owssfnPRcMCS3iXL_Knco=Qz9kwjqx5Je*~~& zB{drT;L~5~#O)Yc(u>!n(SS!j{lGi4pjQgmBK{bDxVsR*Wi0+vP29IJ35A!EmbWBw z3z%|_012KU+%zUh1`c9P1S^MB8-MCu%X)VP3&QRXr$E!uXoNeB?_>C`g6omI1MWDc zA&20-a4%zQv(;~^PhGOWDH42KY(C+S$Jk)WDSVj>zHo*{8Db?V)y*v=7SOs1R~_-P zS$L5xnM92E!!soc9EoA%fqA&WiC;Sl;aL>^!f_NA!ER~b6KEWsPk}qJCd5V(f#lFg zbOG+cq9Z_v4T3d>zqb!XV+-)1tvh$=87?|Uj!(rqv3Kq!z44o2@JbH#)9?+mAWOq7 
zFy5L3p9Txv=08XV#$7zV1M4aGT^u#`AnANR;7y+KzPHbNbnoExI|q*3J#ivAGKF6@ zga6oz8_`?fhACXO?FGv-e>RI>8M;9{q2v7s2gkSY?X;+v&Eid0xL!1o)PeUlobIwo z&t`%u|{ zSyswgms&_KtXsTg8z$H(M?F@(Zr@jSU;-Rq*i%>1SJq8?%T7#iQ8wp_W>vFp9Vpjh zf}659Qf!)CH}{u4n9v}!w{9COH)28))k3GQtX)}eK3;CdgcfY2XZ7GCXy-Z1YetCTIqvYxV>RY0`f_`+haYL~_U zC)T>6y}oF3;D3|3Xtfv3RtR6%^axOPfuX#hYk>b5T|>U((Ar|5SToFaD!-AzVYw65c!2Aw|_D$w^fb#U_+`E@9p^ubv>8%Z&CDoAE zHG*%|?R@LLr!Bj|+3|cvP)$MDxP?nA11$_Oz--^pDdpP z_bIch=xls=;=zfud)>Kv`E0R$qR@UN+kPeEdoS1i{_m^Y;J+r(7lULb-~>7 z*xZse_pB^FHupexTKgb1@9JCac{G@Fy^U>NR0xeewc78`e0L^Ozi0KWwaYo{(eJ7@ z47Sv_ipywyP5ac%EUTV^8%g5+(eEBz(SBz63rnG)FWbo68zigK^pX z0-W4jsj;=QdF#<-)syD-&yIe2G=2FmPiDaLPv20`w`cY3X>(rh6-g=M>cS>_#5JI9 zHH;{}bh^PSETwwVLVx!5r*Eg@e|c$n{Oco>-3`~^SH{7a7BJAfd)2Vs^hQeerL*bj z?xA8s+h1$aGdcPs)Yo@f0r#v@!z-7cHnn`#^J!0}K*2l>yAS04zZWqj0c`}l7JH678p-yY%9wY_qwR;j zrWEw z!lVan{wN1NM|EF*PAZ<+z}KD0_a7!edPcc{OJM+tHpR;o>N)r1qN5(I(;JECYhvhMz=^55Q-x!a-g}K_n{zXrVqT$8KdWG$)eJX|hE3-=B#K4My?S=U$1RpD$klo)4 z!IXkP8X($Wf}q8&aD3IX7h=~GRz>$$ogtN?{i_L`Lg4@o3+*TKs>WZa^?x?@D;1?~ s`b~KjScdZjrQ-YVeGdKimR0cjP22%vTs96<)ZbH%VVCOfoeBv5KNt$+6951J literal 0 HcmV?d00001 diff --git a/library/__pycache__/radosgw_user.cpython-314.pyc b/library/__pycache__/radosgw_user.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b2558577f0c95785cd1db2001e7c4ca0b538851 GIT binary patch literal 19023 zcmeG^ZERcDb&urZ*W>q>D9MsVS+Zm%`tY|M*@`X8vMt+^m1mhw8`%s^KFN$Fl6{Yq zBW+ok4rt}IMU-w?m<>Z%Y(eCq1Cn9b9IQV&Y#6ek0jQFynHOX6whY03RgDd_H3n?w z+($lsm~zBAXjh;Y*5!To-E;1}=bd}bx#yniC(R~?f@J-Ncf&WHrl|kKhhowc3ip%> zib_(KDONE`ol?XVr<8H!DOFt6MqQ%gYN%I^YEEh6+BR&X8r8*h%GstfXXzl8uxctR31o z;n_i+T~Kn8QUjD+q~wND11U8^$xTX4P--NlW_ByvL`qxOX28|LZh^e@(pI+R;IkAJ z0&28Sms;DYy@koQ!Hhb{pSSJ5vCgX!ErBaSbT$#^{NcG+l#h$LP2t|dN>;8nm@eCte z1t5Qj6M}p=hGQ7;j0GZr8P3BF9Y1&8b21R;t_K!87f9>E82o`yhzkvPw(agaxQ$`T zp&1XP-r6{@#~pXdx7cGcqvakCtjJb_5alZe4QIiZhV1Xpl2A;dXedO+k4 z^@f$0dOed@IZu#_UG)@zJdwa0*GJG`1D^XzBFy8!rvtMBC)bC#=|EyOK0sheEy8W8 
zM{MYcPJ80eb0xuWvYs7@aL8M00C7Gc^^f0iGhK~Nd%EFU3tS}L?FkDWKp*NM-Adn8 zA}S|%0{Dvegr_~>xF;0lgh+Q>o+(m+doe7;JyE_uZ)_$t4$Q)L2`xws;4@(3V#4l$ zjW_K5dU$r$BLIKTE=V7hBV)s%Jd7wDnU2<;1VO`d@dOVn0T@HT{NY)c)Rkx=jub~O<10|Ve`Ca?yk7}HB+3}`o0yBsKpN<}3s07VH& zK%kWy2U2H5uz^^O5^-0czlZ2A88fJ$@btn%n32qzz$r;D?BeK^0N)pudMXnxp0ii3 zAD@WrK6Wg+`+R6NE*yO7!kjdm((3n|2+W_|pWvRk7`&Lc7}@!yXBVE?GZ{I(XKr%O zg%|dnKj}L)eBs&QeP<*MAQ@dT9xs}p~W@Ta0E`rZ^2k`(4rnka8osA~t~d@t|@rkK*FWRCm!|i2~gY~ zv_8?I3xh2i!Wjpn5m4k2Ush2dWtnR76l`G(5n=cQ9;xI3viKnFD+`Wgd+idLA1!D2Lm1~W6@fwC^Rt^X6)x)Phqc3p5xIl)}8<6aLoB$8K^p~%^zSx3jE_H%P`36Ax z2k866W!;gSW@8Mne-WnGCGLaE6-vZTtimh-vm!#GiO1Jed>~A-J zw>hVM@=kNho8CY1W;zG5&4-pO>pIG~B||r_tEnw*OP0HaW_aM;02nMREGWWygg+oC zeeWO?k}%zFtc4VUF#^ftwyeTO?JX?Tn8K$3@+kTgQpu}a>}F1Kuzpa07dWL?QY|3D zro%7dk_*6+V-`&nue}PwI9?^0y{HP#g#_%Rt3ZQB0Qb)nF#C%<2(9=wXte@=f)}zR z^)nr%bKfzyr2H$k z4iuhZDpu)(ZF8S0M*CC~%m zoZ+_s(g*t9-oiFfq)`UnRFXN18tz390HK>|Q7gX>@G}CTAiotrMctKv!1l_x3?Xmad`Wog+DBI61toq4dtY&XU)e*R`}&lRUL^D2Sxq>mjnZz z7XwHfkOxF_dDWsu+E+^-T*(JT6>LF8702_1Nx;@v`sgO)t0JVpqwL7TpMbL_Ld4m8 z+p&Gsu{|BkI-Yp>Z1ULB`MUVO09stjkW1I{dSf{C@TR-dX|RI@f8UH%Lr z-t1Es`6bkh{NgiDpdhVEtlg)sfRtA?h<(kbFgSdg4KZjpg~935Ziqp*DGV;3ZbJ2HS1c>;r{4d+w1DsGyMvC3yv#>3db-PvL$}?bS>Zz?%%o?+7z%{Mx)(L$PQ|Z@d zEskd+N=zAwt$j+L*{5LJ4uCYn6tnqk9Wd55_={P>l-KjX!3-C-(-H2vw0-8gpsh&b zj#)I8I|fBH+2f9jOlemuG9}lQs3r~}Q7=1ZL>i0W<|qD+6QY_Jdpr(Z)JPlo2b$hq za4!!&u=MtpcQ*Jy2m2cI2o64g9gfsvFSy#sqtvPNXoT(7%=KQ<_uvDo^dt{q@PVOJ zLy$^?d7uTP=m!z$_(F^m6@wxj}$CO1fwB}XGXobOQofe<^v{vdkX;(BH7#@ual z+p4)OttB%g}<*Z|G^6bY3 zciz2a$(T3VUm3eGcH7vxYHUr-q~lp*PjV!0bHCpDYU^#=wpH7<^x=1$S=;{P=?!XY zW3V>7ZhF;p+uFWrZBIYB63<%qCP%AAm!8ZTo`CV09Isruap|^c>#AvM>ctgT*3_Ro zQ9ZIm*4PUpt1-X3M(6U?wDYxSru*Qn!)x6qGlo;isn%Ak_NN!uT)Q&P-8tQ!4Xb4&m($#u>&c9>C#UQERBJ6D$+)^#ylbvK8Ry=d zZr`U`LlOC<^yHe$n{hso(>?jA)>N!Mozu0Ijchch+X_G*9vRL$kG~hcrAnRnGsE&r z>7jIRrR%M0=}TGHp7-KQ`i%2<5yO|#j<*}X+qmM*w(nhY?aes%<#hW$VYa5P=R0@g z`=7qARvL~fz>=5tL%;aULb*m10-lEcOJD05H61Jn5vZUj6s_<~-}`IARCOp>S^3pv 
zq?Js&dJHn~1<8g$JqDS&46Yu7Oj8D@1^<(-%4AT+bPt&fx|$|~z7CV2HU_2+lc6>S z!y`-v1t>m!nc{ncHC8G)*5uQ(=E}MX#mB_-zS{kQ={14;Rh4|q=PEu~UHVi$9qh|( zvf?xN46@?WgW|LE`vHBKXhtzSUXsJ23XX>PW7y^-W~VS4#S9cb#gAb&j@dcP&=TgK zf$XN5XR&-9Gt>?I1RZw_=w{$AVn)Qh5lUo(UXb^apw~%C z0fu@B-fw{|q|gCbT~hwBcUOuzjlsf67{P?fITVDA@)p1eblk(zw1KV#VT8zYDK%RsP-127a!#I*V${Tt#n z+q@^}w;r;KN9^Z-6jmU@rk4L#x{+fcbMv6L-oHZe6 z^&G0t59>AKYdCAFT_OM9oirtfjG+q^xI=c9bgi_kx%Oq8`*XSjWp0rF-wv9MohuI? zmB~uq`OEuL;N%%MqSdLUL0|Hx)OA!==HIBt4^!?>smBjfzU_c8dhn-I9?9t85Jvlu zqSDq>R7@R;sx}5g9g3o8|X6R;DxBjtLH}lx5 z+fc{qHdd`})1rl8xw+^(Mj4Vw$})qfYGw1!1Nmz1;c+GNGnie4OsKIO4Ls>YT9MbF z(O=<;yaR<_^@{xB_v)Xw9%Ts&v&AD)*kn`p%*4)`f})f*t3fe322)IXG10?FW%8_> zl0yJ|sxqmrE}GV5b=n$rx?&yZ0Y$9?V3v|w(4X^O1CePuX>8zarzXnASsXD$GphP2 zpZ=JFrr^?-5;P6Hs5CKNofI*ohIn1L>ZQW-O*KUR!qYB*#fw`Z3?#x|h$Vs;v#Wu~ z3>O-|Nx>PZ4sSrk!*G7AibSuAbSNAI973EIH9X{DIL?GiFL6JrBQGN-c)%o4AVy6gtx1BQOd3q80_`vwb)rD0VhjAOxhF0~1>u6+ zER?0VNa2`s^H6eZ2gIFK$D%2rZu6LIa{uZ&pzJb~rVc2F3%aXxnwAP z9Z(H26f;#2i14-LQi%s9D=U{DJ zFjSw~=PZ;;(OpG|hAM~#0>J=;RMOzC0#j&|^ihg(8zRpdXlW-fS|~Y4$<8*GW3^Hz zyr7_6J{M~*ulEtUOA(%DKqA9?N5y+Beuu*Q4tNhe=`eTbKt+xzF>cG}@Hr2Hr4=)l zMUj0#My8m+D-KnuIGdTj$2JxG0+C=+`W${K(eMq@4^XOn)Nml*P&kl>TLG~KUjuxH ziWX0S9xR)614N-g%o>S8b3^;8)-~`|_4o*UjexJQIOkC7LvzN!b&`AvU!%_i>!cv5 zi@aI3PTY^c1DItz?nmPp`x;Bb*Lw8VX?bX*czuARM}M8M^OvcZd29k!HQ}DnY4VNC z(wYwKBu|g-8)#C+8SR_v8|O9V!2a2$3EIZ-MgGi$C$%E`FbFKA@Fq z!2sMMhehgopb}a5DwNAVgj9YOkm*Tq>S~8yfhEPvU#L)sJ_r zMU8ab%S)HPEL>aWUxuayj1rh867|nUXGHo+G&(ENvvAMbYbx!BM6^laD!uF{ezcpI zKMMFsG#%WGGC=@cVh)Dn7h+scbQJ!#0SMX0Yde0h8abYX>MbEqtsbfZ^pKN98ln<~ zMb`zqVD~#O6>qCct{Sg)L-Uqj#7@w*#k)RV!-wC;Y#K7JgP+H;1~V2j5>pb#QUWtz zRf@#u^XOQC*h4U-MfGen5E4YDY&Icm7r+cdL`XCsj~hQxBe{flG?BecqCv8lr3gdd zSE@cb$WxK7m*5^jvg3DS35|Jv8)UFgNbo$eggPKx<3geeNXPr|jd3n42yh+g>cYx}TYGbkqZMTd z7F*aGQX?xPIs0x2b0TNzy<>5t8lXQ{&T<5CX;xgf)H(Ch*kXP~oip{_-(q%Xl4tIF zDD#$lLwml(b6>3@+Xy2C0XYwaVpay}WQ|=A(pbaezd?AW)V?w}Gi%%VP-v!9R~c5B zHSKsPs8YgLBc$>ZlS>N6ylvXHYTA}Hbpou}kvw@{L%F-N4Lg$OK55*Zp8np|cjj|_ 
z{%qs(tF#O6kt}!QwZ>)rnzjixTg$d@9{^&%KJcA^RPYavEFHgV-I}J;m)AUd^VY^x z=bJm;*pWWCav|HiCvSD9oNqS1(U|UD*_UnF1(%Q9=B1PCM#|QfHe%~8Y~q4?mw9RA zXAEU^)UB?6>VCf~ z!%k%)FXfK?Ubg3}tBu~2GVMqQKX!LW4R*Z0KXcx{4hvW@q`Xg2!-`|ddlc0>td!gS z{BA?@(n(-Qh!uu^8AoUBIlK|P>uPvac~@^<-jUNclTZDv1D_9<3g#P{Z=Aeqvo9UX z+r78#J6G*HA-dSUe`)v=+Oj;K-m|j%*6{n9+w}9l_^g-ed|vSjvHgIuVEeQKS!U@P&htmLR&}TL_NZ5z#s{ zetz`W*%ANP$mG!Rp~)f9bo|`#g|QL1u{=3?Zd}xjJUcWtadzaqs9{GYFR&9_~jZT`@pI5q#bW*pzn_?VE&1V|3qVcnBjAG0EX1&64ztI9&CuE;>Y89>b zo%`XDSF}QYw2<$H@(YS)$OlMfR +# Copyright 2018 Red Hat, Inc. +# +# GNU General Public License v3.0+ + +from ansible.module_utils.basic import AnsibleModule +from socket import error as socket_error +import boto +import radosgw + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_add_users_buckets +short_description: bulk create user and buckets +description: + - Bulk create Ceph Object Storage users and buckets + +option: + rgw_host: + description: + - a radosgw host in the ceph cluster + required: true + port: + description: + - tcp port of the radosgw host + required: true + is_secure: + description: + - boolean indicating whether the instance is running over https + required: false + default: false + admin_access_key: + description: + - radosgw admin user's access key + required: true + admin_secret_key: + description: + - radosgw admin user's secret key + required: true + users: + description: + - list of users to be created containing sub options + required: false + sub_options: + username: + description: + - username for new user + required: true + fullname: + description: + - fullname for new user + required: true + email: + description: + - email for new user + required: false + maxbucket: + description: + - max bucket for new user + required: false + default: 1000 + suspend: + description: + - suspend a new user apon creation + required: false + default: false + autogenkey: + 
description: + - auto generate keys for new user + required: false + default: true + accesskey: + description: + - access key for new user + required: false + secretkey: + description: + - secret key for new user + required: false + userquota: + description: + - enable/disable user quota for new user + required: false + default: false + usermaxsize: + description: + - with user quota enabled specify quota size in kb + required: false + default: unlimited + usermaxobjects: + description: + - with user quota enabled specify maximum number of objects + required: false + default: unlimited + bucketquota: + description: + - enable/disable bucket quota for new user + required: false + default: false + bucketmaxsize: + description: + - with bucket quota enabled specify bucket size in kb + required: false + default: unlimited + bucketmaxobjects: + description: + - with bucket quota enabled specify maximum number of objects # noqa: E501 + required: false + default: unlimited + buckets: + description: + - list of buckets to be created containing sub options + required: false + sub_options: + bucket: + description: + - name for new bucket + required: true + user: + description: + - user new bucket will be linked too + required: true + + +requirements: ['radosgw', 'boto'] + +author: + - 'Daniel Pivonka' + +''' + +EXAMPLES = ''' +# single basic user +- name: single basic user + ceph_add_users_buckets: + rgw_host: '172.16.0.12' + port: 8080 + admin_access_key: 'N61I8625V4XTWGDTLBLL' + admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV' + users: + - username: 'test1' + fullname: 'tester' + + +# single complex user +- name: single complex user + ceph_add_users_buckets: + rgw_host: '172.16.0.12' + port: 8080 + admin_access_key: 'N61I8625V4XTWGDTLBLL' + admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV' + users: + - username: 'test1' + fullname: 'tester' + email: 'dan@email.com' + maxbucket: 666 + suspend: true + autogenkey: true + accesskey: 
'B3AR4Q33L59YV56A9A2F' + secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76' + userquota: true + usermaxsize: '1000' + usermaxobjects: 3 + bucketquota: true + bucketmaxsize: '1000' + bucketmaxobjects: 3 + +# multi user +- name: multi user + ceph_add_users_buckets: + rgw_host: '172.16.0.12' + port: 8080 + admin_access_key: 'N61I8625V4XTWGDTLBLL' + admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV' + users: + - username: 'test1' + fullname: 'tester' + email: 'dan@email.com' + maxbucket: 666 + suspend: true + autogenkey: true + accesskey: 'B3AR4Q33L59YV56A9A2F' + secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76' + userquota: true + usermaxsize: '1000K' + usermaxobjects: 3 + bucketquota: true + bucketmaxsize: '1000K' + bucketmaxobjects: 3 + - username: 'test2' + fullname: 'tester' + +# single bucket +- name: single basic user + ceph_add_users_buckets: + rgw_host: '172.16.0.12' + port: 8080 + admin_access_key: 'N61I8625V4XTWGDTLBLL' + admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV' + buckets: + - bucket: 'heyimabucket1' + user: 'test1' + +# multi bucket +- name: single basic user + ceph_add_users_buckets: + rgw_host: '172.16.0.12' + port: 8080 + admin_access_key: 'N61I8625V4XTWGDTLBLL' + admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV' + buckets: + - bucket: 'heyimabucket1' + user: 'test1' + - bucket: 'heyimabucket2' + user: 'test2' + - bucket: 'heyimabucket3' + user: 'test2' + +# buckets and users +- name: single basic user + ceph_add_users_buckets: + rgw_host: '172.16.0.12' + port: 8080 + admin_access_key: 'N61I8625V4XTWGDTLBLL' + admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV' + users: + - username: 'test1' + fullname: 'tester' + email: 'dan@email.com' + maxbucket: 666 + - username: 'test2' + fullname: 'tester' + email: 'dan1@email.com' + accesskey: 'B3AR4Q33L59YV56A9A2F' + secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76' + userquota: true + usermaxsize: '1000' + usermaxobjects: 3 + bucketquota: true + 
bucketmaxsize: '1000' + bucketmaxobjects: 3 + buckets: + - bucket: 'heyimabucket1' + user: 'test1' + - bucket: 'heyimabucket2' + user: 'test2' + - bucket: 'heyimabucket3' + user: 'test2' + +''' + +RETURN = ''' +error_messages: + description: error for failed user or bucket. + returned: always + type: list + sample: [ + "test2: could not modify user: unable to modify user, cannot add duplicate email\n" # noqa: E501 + ] + +failed_users: + description: users that were not created. + returned: always + type: str + sample: "test2" + +added_users: + description: users that were created. + returned: always + type: str + sample: "test1" + +failed_buckets: + description: buckets that were not created. + returned: always + type: str + sample: "heyimabucket3" + +added_buckets: + description: buckets that were created. + returned: always + type: str + sample: "heyimabucket1, heyimabucket2" + +''' + + +def create_users(rgw, users, result): + + added_users = [] + failed_users = [] + + for user in users: + + # get info + username = user['username'] + fullname = user['fullname'] + email = user['email'] + maxbucket = user['maxbucket'] + suspend = user['suspend'] + autogenkey = user['autogenkey'] + accesskey = user['accesskey'] + secretkey = user['secretkey'] + userquota = user['userquota'] + usermaxsize = user['usermaxsize'] + usermaxobjects = user['usermaxobjects'] + bucketquota = user['bucketquota'] + bucketmaxsize = user['bucketmaxsize'] + bucketmaxobjects = user['bucketmaxobjects'] + + fail_flag = False + + # check if user exists + try: + user_info = rgw.get_user(uid=username) + except radosgw.exception.RadosGWAdminError: + # it doesnt exist + user_info = None + + # user exists can not create + if user_info: + result['error_messages'].append(username + ' UserExists') + failed_users.append(username) + else: + # user doesnt exist create it + if email: + if autogenkey: + try: + rgw.create_user(username, fullname, email=email, key_type='s3', # noqa: E501 + generate_key=autogenkey, 
+ max_buckets=maxbucket, suspended=suspend) # noqa: E501 + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + else: + try: + rgw.create_user(username, fullname, email=email, key_type='s3', # noqa: E501 + access_key=accesskey, secret_key=secretkey, # noqa: E501 + max_buckets=maxbucket, suspended=suspend) # noqa: E501 + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + else: + if autogenkey: + try: + rgw.create_user(username, fullname, key_type='s3', + generate_key=autogenkey, + max_buckets=maxbucket, suspended=suspend) # noqa: E501 + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + else: + try: + rgw.create_user(username, fullname, key_type='s3', + access_key=accesskey, secret_key=secretkey, # noqa: E501 + max_buckets=maxbucket, suspended=suspend) # noqa: E501 + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + + if not fail_flag and userquota: + try: + rgw.set_quota(username, 'user', max_objects=usermaxobjects, + max_size_kb=usermaxsize, enabled=True) + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + + if not fail_flag and bucketquota: + try: + rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects, # noqa: E501 + max_size_kb=bucketmaxsize, enabled=True) + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501 + fail_flag = True + + if fail_flag: + try: + rgw.delete_user(username) + except radosgw.exception.RadosGWAdminError: + pass + failed_users.append(username) + else: + added_users.append(username) + 
+ result['added_users'] = ", ".join(added_users) + result['failed_users'] = ", ".join(failed_users) + + +def create_buckets(rgw, buckets, result): + + added_buckets = [] + failed_buckets = [] + + for bucket_info in buckets: + bucket = bucket_info['bucket'] + user = bucket_info['user'] + + # check if bucket exists + try: + bucket_info = rgw.get_bucket(bucket_name=bucket) + except TypeError: + # it doesnt exist + bucket_info = None + + # if it exists add to failed list + if bucket_info: + failed_buckets.append(bucket) + result['error_messages'].append(bucket + ' BucketExists') + else: + # bucket doesn't exist, so we need to create it + bucket_info = create_bucket(rgw, bucket) + if bucket_info: + # bucket created ok, link to user + + # check if user exists + try: + user_info = rgw.get_user(uid=user) + except radosgw.exception.RadosGWAdminError: + # it doesnt exist + user_info = None + + # user exists, link + if user_info: + try: + rgw.link_bucket(bucket_name=bucket, + bucket_id=bucket_info.id, + uid=user) + added_buckets.append(bucket) + except radosgw.exception.RadosGWAdminError as e: + result['error_messages'].append(bucket + e.get_code()) + try: + rgw.delete_bucket(bucket, purge_objects=True) + except radosgw.exception.RadosGWAdminError: + pass + failed_buckets.append(bucket) + + else: + # user doesnt exist cant be link delete bucket + try: + rgw.delete_bucket(bucket, purge_objects=True) + except radosgw.exception.RadosGWAdminError: + pass + failed_buckets.append(bucket) + result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user) # noqa: E501 + + else: + # something went wrong + failed_buckets.append(bucket) + result['error_messages'].append(bucket + ' could not be created') # noqa: E501 + + result['added_buckets'] = ", ".join(added_buckets) + result['failed_buckets'] = ", ".join(failed_buckets) + + +def create_bucket(rgw, bucket): + conn = boto.connect_s3(aws_access_key_id=rgw.provider._access_key, + 
aws_secret_access_key=rgw.provider._secret_key, + host=rgw._connection[0], + port=rgw.port, + is_secure=rgw.is_secure, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), # noqa: E501 + ) + + try: + conn.create_bucket(bucket_name=bucket) + bucket_info = rgw.get_bucket(bucket_name=bucket) + except boto.exception.S3ResponseError: + return None + else: + return bucket_info + + +def main(): + # arguments/parameters that a user can pass to the module + fields = dict(rgw_host=dict(type='str', required=True), + port=dict(type='int', required=True), + is_secure=dict(type='bool', + required=False, + default=False), + admin_access_key=dict(type='str', required=True), + admin_secret_key=dict(type='str', required=True), + buckets=dict(type='list', required=False, elements='dict', + options=dict(bucket=dict(type='str', required=True), # noqa: E501 + user=dict(type='str', required=True))), # noqa: E501 + users=dict(type='list', required=False, elements='dict', + options=dict(username=dict(type='str', required=True), # noqa: E501 + fullname=dict(type='str', required=True), # noqa: E501 + email=dict(type='str', required=False), # noqa: E501 + maxbucket=dict(type='int', required=False, default=1000), # noqa: E501 + suspend=dict(type='bool', required=False, default=False), # noqa: E501 + autogenkey=dict(type='bool', required=False, default=True), # noqa: E501 + accesskey=dict(type='str', required=False), # noqa: E501 + secretkey=dict(type='str', required=False), # noqa: E501 + userquota=dict(type='bool', required=False, default=False), # noqa: E501 + usermaxsize=dict(type='str', required=False, default='-1'), # noqa: E501 + usermaxobjects=dict(type='int', required=False, default=-1), # noqa: E501 + bucketquota=dict(type='bool', required=False, default=False), # noqa: E501 + bucketmaxsize=dict(type='str', required=False, default='-1'), # noqa: E501 + bucketmaxobjects=dict(type='int', required=False, default=-1)))) # noqa: E501 + + # the AnsibleModule object + module = 
AnsibleModule(argument_spec=fields, + supports_check_mode=False) + + # get vars + rgw_host = module.params.get('rgw_host') + port = module.params.get('port') + is_secure = module.params.get('is_secure') + admin_access_key = module.params.get('admin_access_key') + admin_secret_key = module.params.get('admin_secret_key') + users = module.params['users'] + buckets = module.params.get('buckets') + + # seed the result dict in the object + result = dict( + changed=False, + error_messages=[], + added_users='', + failed_users='', + added_buckets='', + failed_buckets='', + ) + + # radosgw connection + rgw = radosgw.connection.RadosGWAdminConnection(host=rgw_host, + port=port, + access_key=admin_access_key, # noqa: E501 + secret_key=admin_secret_key, # noqa: E501 + aws_signature='AWS4', + is_secure=is_secure) + + # test connection + connected = True + try: + rgw.get_usage() + except radosgw.exception.RadosGWAdminError as e: + connected = False + result['error_messages'] = e.get_code() + except socket_error as e: + connected = False + result['error_messages'] = str(e) + + if connected and users: + create_users(rgw, users, result) + + if connected and buckets: + create_buckets(rgw, buckets, result) + + if result['added_users'] != '' or result['added_buckets'] != '': + result['changed'] = True + + # conditional state caused a failure + if result['added_users'] == '' and result['added_buckets'] == '': + module.fail_json(msg='No users or buckets were added successfully', + **result) + + # EXIT + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/library/ceph_authtool.py b/library/ceph_authtool.py new file mode 100644 index 0000000..e469bcb --- /dev/null +++ b/library/ceph_authtool.py @@ -0,0 +1,131 @@ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import container_exec, \ + is_containerized +except ImportError: 
+ from module_utils.ca_common import container_exec, \ + is_containerized +import datetime +import os + + +class KeyringExists(Exception): + pass + + +def build_cmd(create_keyring=False, + gen_key=False, + add_key=False, + import_keyring=None, + caps={}, + name=None, + path=None, + container_image=None, + **a): + + auth_tool_binary: str = 'ceph-authtool' + + if container_image: + c = container_exec(auth_tool_binary, + container_image) + else: + c = [auth_tool_binary] + + if name: + c.extend(['-n', name]) + if create_keyring: + if os.path.exists(path): + raise KeyringExists + c.append('-C') + if gen_key: + c.append('-g') + if caps: + for k, v in caps.items(): + c.extend(['--cap'] + [k] + [v]) + + c.append(path) + + if import_keyring: + c.extend(['--import-keyring', import_keyring]) + + return c + + +def run_module(): + module_args = dict( + name=dict(type='str', required=False), + create_keyring=dict(type='bool', required=False, default=False), + gen_key=dict(type='bool', required=False, default=False), + add_key=dict(type='str', required=False, default=None), + import_keyring=dict(type='str', required=False, default=None), + caps=dict(type='dict', required=False, default=None), + path=dict(type='str', required=True) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + add_file_common_args=True, + ) + + cmd = [] + changed = False + + result = dict( + changed=changed, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + if module.check_mode: + module.exit_json(**result) + + startd = datetime.datetime.now() + + # will return either the image name or None + container_image = is_containerized() + try: + cmd = build_cmd(**module.params, container_image=container_image) + except KeyringExists: + rc = 0 + out = f"{module.params['path']} already exists. 
Skipping" + err = "" + else: + rc, out, err = module.run_command(cmd) + if rc == 0: + changed = True + + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + ) + if rc != 0: + module.fail_json(msg='non-zero return code', **result) + + # file_args = module.load_file_common_arguments(module.params) + # module.set_fs_attributes_if_different(file_args, False) + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/library/ceph_config.py b/library/ceph_config.py new file mode 100644 index 0000000..c82c8cf --- /dev/null +++ b/library/ceph_config.py @@ -0,0 +1,208 @@ +# Copyright Red Hat +# SPDX-License-Identifier: Apache-2.0 +# Author: Guillaume Abrioux + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule # type: ignore +try: + from ansible.module_utils.ca_common import exit_module, generate_cmd, fatal, is_containerized # type: ignore +except ImportError: + from module_utils.ca_common import exit_module, generate_cmd, fatal, is_containerized # type: ignore + +import datetime +import json + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_config +short_description: set ceph config +version_added: "2.10" +description: + - Set Ceph config options. +options: + fsid: + description: + - the fsid of the Ceph cluster to interact with. + required: false + image: + description: + - The Ceph container image to use. 
+ required: false + action: + description: + - whether to get or set the parameter specified in 'option' + required: false + default: 'set' + who: + description: + - which daemon the configuration should be set to + required: true + option: + description: + - name of the parameter to be set + required: true + value: + description: + - value of the parameter + required: true if action is 'set' + +author: + - Guillaume Abrioux +''' + +EXAMPLES = ''' +- name: set osd_memory_target for osd.0 + ceph_config: + action: set + who: osd.0 + option: osd_memory_target + value: 5368709120 + +- name: set osd_memory_target for host ceph-osd-02 + ceph_config: + action: set + who: osd/host:ceph-osd-02 + option: osd_memory_target + value: 5368709120 + +- name: get osd_pool_default_size value + ceph_config: + action: get + who: global + option: osd_pool_default_size + value: 1 +''' + +RETURN = '''# ''' + + +def set_option(module, + who, + option, + value, + container_image=None): + + args = [] + args.extend([who, option, value]) + + cmd = generate_cmd(sub_cmd=['config', 'set'], + args=args, + cluster=module.params.get('cluster'), + container_image=container_image) + + rc, out, err = module.run_command(cmd) + + return rc, cmd, out.strip(), err + + +def rm_option(module, + who, + option, + container_image=None): + + args = [] + args.extend([who, option]) + + cmd = generate_cmd(sub_cmd=['config', 'rm'], + args=args, + cluster=module.params.get('cluster'), + container_image=container_image) + + rc, out, err = module.run_command(cmd) + + return rc, cmd, out.strip(), err + + +def get_config_dump(module, container_image=None): + cmd = generate_cmd(sub_cmd=['config', 'dump', '--format', 'json'], + args=[], + cluster=module.params.get('cluster'), + container_image=container_image) + rc, out, err = module.run_command(cmd) + if rc: + fatal(message=f"Can't get current configuration via `ceph config dump`.Error:\n{err}", module=module) + out = out.strip() + return rc, cmd, out, err + + +def 
get_current_value(who, option, config_dump): + for config in config_dump: + if config['section'] == who and config['name'] == option: + return config['value'] + return None + + +def main() -> None: + module = AnsibleModule( + argument_spec=dict( + who=dict(type='str', required=True), + action=dict(type='str', required=False, choices=['get', 'set', 'rm'], default='set'), + option=dict(type='str', required=True), + value=dict(type='str', required=False), + fsid=dict(type='str', required=False), + image=dict(type='str', required=False), + cluster=dict(type='str', required=False, default='ceph') + ), + supports_check_mode=True, + required_if=[['action', 'set', ['value']]] + ) + + # Gather module parameters in variables + who = module.params.get('who') + option = module.params.get('option') + value = module.params.get('value') + action = module.params.get('action') + + container_image = is_containerized() + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + cmd=[], + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + changed = False + + rc, cmd, out, err = get_config_dump(module, container_image=container_image) + config_dump = json.loads(out) + current_value = get_current_value(who, option, config_dump) + + if action == 'set': + if current_value and value.lower() == current_value.lower(): + out = 'who={} option={} value={} already set. 
Skipping.'.format(who, option, value) + else: + rc, cmd, out, err = set_option(module, who, option, value, container_image=container_image) + changed = True + elif action == 'get': + if current_value is None: + out = '' + err = 'No value found for who={} option={}'.format(who, option) + else: + out = current_value + elif action == 'rm': + if current_value: + rc, cmd, out, err = rm_option(module, who, option, container_image=container_image) + changed = True + + exit_module(module=module, out=out, rc=rc, + cmd=cmd, err=err, startd=startd, + changed=changed) + + +if __name__ == '__main__': + main() diff --git a/library/ceph_crush.py b/library/ceph_crush.py new file mode 100644 index 0000000..8b2e2a9 --- /dev/null +++ b/library/ceph_crush.py @@ -0,0 +1,245 @@ +#!/usr/bin/python + +# Copyright (c) 2018 Red Hat, Inc. +# +# GNU General Public License v3.0+ + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import fatal +except ImportError: + from module_utils.ca_common import fatal +import datetime + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_crush + +author: Sebastien Han + +short_description: Create Ceph CRUSH hierarchy + +version_added: "2.6" + +description: + - By using the hostvar variable 'osd_crush_location' + ceph_crush creates buckets and places them in the right CRUSH hierarchy + +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + location: + description: + - osd_crush_location dict from the inventory file. It contains + the placement of each host in the CRUSH map. + required: true + containerized: + description: + - Weither or not this is a containerized cluster. The value is + assigned or not depending on how the playbook runs. 
+ required: false + default: None +''' + +EXAMPLES = ''' +- name: configure crush hierarchy + ceph_crush: + cluster: "{{ cluster }}" + location: "{{ hostvars[item]['osd_crush_location'] }}" + containerized: "{{ container_exec_cmd }}" + with_items: "{{ groups[osd_group_name] }}" + when: crush_rule_config | bool +''' + +RETURN = '''# ''' + + +def generate_cmd(cluster, subcommand, bucket, bucket_type, containerized=None): + ''' + Generate command line to execute + ''' + cmd = [ + 'ceph', + '--cluster', + cluster, + 'osd', + 'crush', + subcommand, + bucket, + bucket_type, + ] + if containerized: + cmd = containerized.split() + cmd + return cmd + + +def sort_osd_crush_location(location, module): + ''' + Sort location tuple + ''' + if len(location) < 2: + fatal("You must specify at least 2 buckets.", module) + + if not any(item for item in location if item[0] == "host"): + fatal("You must specify a 'host' bucket.", module) + + try: + crush_bucket_types = [ + "host", + "chassis", + "rack", + "row", + "pdu", + "pod", + "room", + "datacenter", + "region", + "root", + ] + return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0])) # noqa: E501 + except ValueError as error: + fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module) # noqa: E501 + + +def get_crush_tree(module, cluster, containerized=None): + ''' + Get the CRUSH map + ''' + cmd = [ + 'ceph', + '--cluster', + cluster, + 'osd', + 'crush', + 'tree', + '--format', + 'json', + ] + if containerized: + cmd = containerized.split() + cmd + + rc, out, err = module.run_command(cmd) + return rc, cmd, out, err + + +def create_and_move_buckets_list(cluster, location, crush_map, containerized=None): # noqa: E501 + ''' + Creates Ceph CRUSH buckets and arrange the hierarchy + ''' + def bucket_exists(bucket_name, bucket_type): + for item in crush_map['nodes']: + if item['name'] == bucket_name and item['type'] == bucket_type: + return True + 
return False + + def bucket_in_place(bucket_name, target_bucket_name, target_bucket_type): # noqa: E501 + bucket_id = None + target_bucket = None + for item in crush_map['nodes']: + if item['name'] == bucket_name: + bucket_id = item['id'] + if item['name'] == target_bucket_name and item['type'] == target_bucket_type: # noqa: E501 + target_bucket = item + + if not bucket_id or not target_bucket: + return False + + return bucket_id in target_bucket['children'] + + previous_bucket = None + cmd_list = [] + for item in location: + bucket_type, bucket_name = item + # ceph osd crush add-bucket maroot root + if not bucket_exists(bucket_name, bucket_type): + cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized)) # noqa: E501 + if previous_bucket: + # ceph osd crush move monrack root=maroot + if not bucket_in_place(previous_bucket, bucket_name, bucket_type): # noqa: E501 + cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized)) # noqa: E501 + previous_bucket = item[1] + return cmd_list + + +def exec_commands(module, cmd_list): + ''' + Creates Ceph commands + ''' + for cmd in cmd_list: + rc, out, err = module.run_command(cmd) + return rc, cmd, out, err + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cluster=dict(type='str', required=False, default='ceph'), + location=dict(type='dict', required=True), + containerized=dict(type='str', required=False, default=None), + ), + supports_check_mode=True, + ) + + cluster = module.params['cluster'] + location_dict = module.params['location'] + location = sort_osd_crush_location(tuple(location_dict.items()), module) + containerized = module.params['containerized'] + + diff = dict(before="", after="") + startd = datetime.datetime.now() + + # get the CRUSH map + rc, cmd, out, err = get_crush_tree(module, cluster, containerized) + if rc != 0 and not module.check_mode: + module.fail_json(msg='non-zero return code', rc=rc, 
stdout=out, stderr=err) # noqa: E501 + + # parse the JSON output + if rc == 0: + crush_map = module.from_json(out) + else: + crush_map = {"nodes": []} + + # run the Ceph command to add buckets + cmd_list = create_and_move_buckets_list(cluster, location, crush_map, containerized) # noqa: E501 + + changed = len(cmd_list) > 0 + if changed: + diff['after'] = module.jsonify(cmd_list) + if not module.check_mode: + rc, cmd, out, err = exec_commands(module, cmd_list) # noqa: E501 + + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + diff=diff + ) + + if rc != 0: + module.fail_json(msg='non-zero return code', **result) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/library/ceph_crush_rule.py b/library/ceph_crush_rule.py new file mode 100644 index 0000000..c55eb5b --- /dev/null +++ b/library/ceph_crush_rule.py @@ -0,0 +1,256 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module, \ + generate_cmd, \ + is_containerized, \ + exec_command +except ImportError: + from module_utils.ca_common import exit_module, \ + generate_cmd, \ + is_containerized, \ + exec_command +import datetime +import json + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_crush_rule +short_description: Manage Ceph Crush Replicated/Erasure Rule +version_added: "2.8" +description: + - Manage Ceph Crush rule(s) creation, deletion and updates. +options: + name: + description: + - name of the Ceph Crush rule. If state is 'info' - empty string + can be provided as a value to get all crush rules + required: true + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + state: + description: + If 'present' is used, the module creates a rule if it doesn't + exist or update it if it already exists. + If 'absent' is used, the module will simply delete the rule. + required: false + choices: ['present', 'absent'] + default: present + rule_type: + description: + - The ceph CRUSH rule type. + required: false + choices: ['replicated', 'erasure'] + required: false + bucket_root: + description: + - The ceph bucket root for replicated rule. + required: false + bucket_type: + description: + - The ceph bucket type for replicated rule. + required: false + choices: ['osd', 'host', 'chassis', 'rack', 'row', 'pdu', 'pod', + 'room', 'datacenter', 'zone', 'region', 'root'] + device_class: + description: + - The ceph device class for replicated rule. + required: false + profile: + description: + - The ceph erasure profile for erasure rule. 
+ required: false +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: create a Ceph Crush replicated rule + ceph_crush_rule: + name: foo + bucket_root: default + bucket_type: host + device_class: ssd + rule_type: replicated + +- name: create a Ceph Crush erasure rule + ceph_crush_rule: + name: foo + profile: bar + rule_type: erasure + +- name: get a Ceph Crush rule information + ceph_crush_rule: + name: foo + state: info + +- name: delete a Ceph Crush rule + ceph_crush_rule: + name: foo + state: absent +''' + +RETURN = '''# ''' + + +def create_rule(module, container_image=None): + ''' + Create a new crush replicated/erasure rule + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + rule_type = module.params.get('rule_type') + bucket_root = module.params.get('bucket_root') + bucket_type = module.params.get('bucket_type') + device_class = module.params.get('device_class') + profile = module.params.get('profile') + + if rule_type == 'replicated': + args = ['create-replicated', name, bucket_root, bucket_type] + if device_class: + args.append(device_class) + else: + args = ['create-erasure', name] + if profile: + args.append(profile) + + cmd = generate_cmd(sub_cmd=['osd', 'crush', 'rule'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def get_rule(module, container_image=None): + ''' + Get existing crush rule + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['dump', name, '--format=json'] + + cmd = generate_cmd(sub_cmd=['osd', 'crush', 'rule'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def remove_rule(module, container_image=None): + ''' + Remove a crush rule + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['rm', name] + + cmd = generate_cmd(sub_cmd=['osd', 'crush', 'rule'], + args=args, + cluster=cluster, + container_image=container_image) + + return 
cmd + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=False), + cluster=dict(type='str', required=False, default='ceph'), + state=dict(type='str', required=False, choices=['present', 'absent'], default='present'), # noqa: E501 + rule_type=dict(type='str', required=False, choices=['replicated', 'erasure']), # noqa: E501 + bucket_root=dict(type='str', required=False), + bucket_type=dict(type='str', required=False, choices=['osd', 'host', 'chassis', 'rack', 'row', 'pdu', 'pod', # noqa: E501 + 'room', 'datacenter', 'zone', 'region', 'root']), # noqa: E501 + device_class=dict(type='str', required=False), + profile=dict(type='str', required=False) + ), + supports_check_mode=True, + required_if=[ + ('state', 'present', ['rule_type']), + ('state', 'present', ['name']), + ('state', 'absent', ['name']), + ('rule_type', 'replicated', ['bucket_root', 'bucket_type']), + ('rule_type', 'erasure', ['profile']) + ] + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + rule_type = module.params.get('rule_type') + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + rc, cmd, out, err = exec_command(module, get_rule(module, container_image=container_image)) # noqa: E501 + if state == "present": + if rc != 0: + rc, cmd, out, err = exec_command(module, create_rule(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rule = json.loads(out) + if (rule['type'] == 1 and rule_type == 'erasure') or (rule['type'] == 3 and rule_type == 'replicated'): # noqa: E501 + module.fail_json(msg="Can not convert crush rule {} to {}".format(name, rule_type), changed=False, rc=1) # noqa: E501 + elif state == "absent": + if rc == 0: + rc, cmd, 
out, err = exec_command(module, remove_rule(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "Crush Rule {} doesn't exist".format(name) + else: + pass + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +if __name__ == '__main__': + main() diff --git a/library/ceph_crush_rule_info.py b/library/ceph_crush_rule_info.py new file mode 100644 index 0000000..78e1d6c --- /dev/null +++ b/library/ceph_crush_rule_info.py @@ -0,0 +1,119 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module, \ + generate_cmd, \ + is_containerized, \ + exec_command +except ImportError: + from module_utils.ca_common import exit_module, \ + generate_cmd, \ + is_containerized, \ + exec_command +import datetime + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_crush_rule_info +short_description: Lists Ceph Crush Replicated/Erasure Rules +version_added: "2.8" +description: + - Retrieces Ceph Crush rule(s). +options: + name: + description: + - name of the Ceph Crush rule. 
If state is 'info' - empty string + can be provided as a value to get all crush rules + required: true + cluster: + description: + - The ceph cluster name. + required: false + default: ceph +author: + - Teoman ONAY <@asm0deuz> +''' + +EXAMPLES = ''' +- name: get a Ceph Crush rule information + ceph_crush_rule_info: + name: foo +''' + +RETURN = '''# ''' + + +def get_rule(module, container_image=None): + ''' + Get existing crush rule + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['dump', name, '--format=json'] + + cmd = generate_cmd(sub_cmd=['osd', 'crush', 'rule'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=False), + cluster=dict(type='str', required=False, default='ceph'), + ), + supports_check_mode=True, + ) + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + rc, cmd, out, err = exec_command(module, get_rule(module, container_image=container_image)) # noqa: E501 + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +if __name__ == '__main__': + main() diff --git a/library/ceph_dashboard_user.py b/library/ceph_dashboard_user.py new file mode 100644 index 0000000..89fdaff --- /dev/null +++ b/library/ceph_dashboard_user.py @@ -0,0 +1,289 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import generate_cmd, \ + is_containerized, \ + exec_command, \ + exit_module, \ + fatal +except ImportError: + from module_utils.ca_common import generate_cmd, is_containerized, exec_command, exit_module, fatal # noqa: E501 + +import datetime +import json + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_dashboard_user + +short_description: Manage Ceph Dashboard User + +version_added: "2.8" + +description: + - Manage Ceph Dashboard user(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the Ceph Dashboard user. + required: true + state: + description: + If 'present' is used, the module creates a user if it doesn't + exist or update it if it already exists. + If 'absent' is used, the module will simply delete the user. + If 'info' is used, the module will return all details about the + existing user (json formatted). + required: false + choices: ['present', 'absent', 'info'] + default: present + password: + description: + - password of the Ceph Dashboard user. + required: false + roles: + description: + - roles of the Ceph Dashboard user. 
+ required: false + default: [] + +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: create a Ceph Dashboard user + ceph_dashboard_user: + name: foo + password: bar + +- name: create a read-only/block-manager Ceph Dashboard user + ceph_dashboard_user: + name: foo + password: bar + roles: + - 'read-only' + - 'block-manager' + +- name: create a Ceph Dashboard admin user + ceph_dashboard_user: + name: foo + password: bar + roles: ['administrator'] + +- name: get a Ceph Dashboard user information + ceph_dashboard_user: + name: foo + state: info + +- name: delete a Ceph Dashboard user + ceph_dashboard_user: + name: foo + state: absent +''' + +RETURN = '''# ''' + + +def create_user(module, container_image=None): + ''' + Create a new user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['ac-user-create', '-i', '-', name] + + cmd = generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image, + interactive=True) + + return cmd + + +def set_roles(module, container_image=None): + ''' + Set user roles + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + roles = module.params.get('roles') + + args = ['ac-user-set-roles', name] + + args.extend(roles) + + cmd = generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def set_password(module, container_image=None): + ''' + Set user password + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['ac-user-set-password', '-i', '-', name] + + cmd = generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image, + interactive=True) + + return cmd + + +def get_user(module, container_image=None): + ''' + Get existing user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['ac-user-show', name, '--format=json'] + + cmd = 
generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def remove_user(module, container_image=None): + ''' + Remove a user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['ac-user-delete', name] + + cmd = generate_cmd(sub_cmd=['dashboard'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501 + password=dict(type='str', required=False, no_log=True), + roles=dict(type='list', + required=False, + choices=['administrator', 'read-only', 'block-manager', 'rgw-manager', 'cluster-manager', 'pool-manager', 'cephfs-manager'], # noqa: E501 + default=[]), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_if=[['state', 'present', ['password']]] + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + roles = module.params.get('roles') + password = module.params.get('password') + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + if state == "present": + rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image)) # noqa: E501 + if rc == 0: + user = json.loads(out) + user['roles'].sort() + roles.sort() + if user['roles'] != roles: + rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image)) # noqa: E501 + changed = True + rc, cmd, out, err = exec_command(module, 
set_password(module, container_image=container_image), stdin=password) # noqa: E501 + else: + rc, cmd, out, err = exec_command(module, create_user(module, container_image=container_image), stdin=password) # noqa: E501 + if rc != 0: + fatal(err, module) + rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image)) # noqa: E501 + changed = True + + elif state == "absent": + rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image)) # noqa: E501 + if rc == 0: + rc, cmd, out, err = exec_command(module, remove_user(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "Dashboard User {} doesn't exist".format(name) + + elif state == "info": + rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image)) # noqa: E501 + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/library/ceph_ec_profile.py b/library/ceph_ec_profile.py new file mode 100644 index 0000000..778a7a7 --- /dev/null +++ b/library/ceph_ec_profile.py @@ -0,0 +1,256 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import is_containerized, \ + generate_cmd, \ + exec_command, \ + exit_module +except ImportError: + from module_utils.ca_common import is_containerized, \ + generate_cmd, \ + exec_command, \ + exit_module +import datetime +import json + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_ec_profile + +short_description: Manage Ceph Erasure Code profile + +version_added: "2.8" + +description: + - Manage Ceph Erasure Code profile +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the profile. + required: true + state: + description: + If 'present' is used, the module creates a profile. + If 'absent' is used, the module will delete the profile. + required: false + choices: ['present', 'absent', 'info'] + default: present + stripe_unit: + description: + - The amount of data in a data chunk, per stripe. + required: false + k: + description: + - Number of data-chunks the object will be split in + required: true + m: + description: + - Compute coding chunks for each object and store them on different + OSDs. 
+ required: true + crush_device_class: + description: + - Restrict placement to devices of a specific class (hdd/ssd) + required: false + +author: + - Guillaume Abrioux +''' + +EXAMPLES = ''' +- name: create an erasure code profile + ceph_ec_profile: + name: foo + k: 4 + m: 2 + +- name: delete an erassure code profile + ceph_ec_profile: + name: foo + state: absent +''' + +RETURN = '''# ''' + + +def get_profile(name, cluster='ceph', container_image=None): + ''' + Get existing profile + ''' + + args = ['get', name, '--format=json'] + + cmd = generate_cmd(sub_cmd=['osd', 'erasure-code-profile'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def create_profile(name, user_profile, force, cluster='ceph', container_image=None): # noqa: E501 + ''' + Create a profile + ''' + + args = ['set', name] + for key, value in user_profile.items(): + args.append('{}={}'.format(key, value)) + if force: + args.append('--force') + + cmd = generate_cmd(sub_cmd=['osd', 'erasure-code-profile'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def delete_profile(name, cluster='ceph', container_image=None): + ''' + Delete a profile + ''' + + args = ['rm', name] + + cmd = generate_cmd(sub_cmd=['osd', 'erasure-code-profile'], + args=args, + cluster=cluster, + container_image=container_image) + + return cmd + + +def parse_user_profile(module): + profile_keys = ['plugin', + 'k', 'm', 'd', 'l', 'c', + 'stripe_unit', 'scalar_mds', 'technique', + 'crush-root', 'crush-device-class', 'crush-failure-domain'] + + profile = {} + for key in profile_keys: + ansible_lookup_key = key.replace('-', '_') + value = module.params.get(ansible_lookup_key) + if value: + profile[key] = value + + return profile + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, + choices=['present', 'absent'], 
default='present'), + stripe_unit=dict(type='str', required=False), + plugin=dict(type='str', required=False, default='jerasure'), + k=dict(type='str', required=False), + m=dict(type='str', required=False), + d=dict(type='str', required=False), + l=dict(type='str', required=False), + c=dict(type='str', required=False), + scalar_mds=dict(type='str', required=False), + technique=dict(type='str', required=False), + crush_root=dict(type='str', required=False), + crush_failure_domain=dict(type='str', required=False), + crush_device_class=dict(type='str', required=False), + force=dict(type='bool', required=False, default=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_if=[['state', 'present', ['k', 'm']]], + ) + + # Gather module parameters in variables + name = module.params.get('name') + cluster = module.params.get('cluster') + state = module.params.get('state') + force = module.params.get('force') + user_profile = parse_user_profile(module) + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + diff = dict(before="", after="") + changed = False + + # will return either the image name or None + container_image = is_containerized() + + if state == "present": + rc, cmd, out, err = exec_command(module, get_profile(name, cluster, container_image=container_image)) # noqa: E501 + current_profile = {} + if rc == 0: + current_profile = json.loads(out) + + changed = current_profile != user_profile + if changed: + diff['before'] = json.dumps(current_profile) + diff['after'] = json.dumps(user_profile) + rc, cmd, out, err = exec_command(module, + create_profile(name, + user_profile, + force, + cluster, + container_image=container_image), # noqa: E501 + check_rc=True) + + elif state == "absent": + rc, cmd, out, err = exec_command(module, delete_profile(name, cluster, container_image=container_image)) # 
noqa: E501 + if not err: + out = 'Profile {} removed.'.format(name) + changed = True + else: + rc = 0 + out = "Skipping, the profile {} doesn't exist".format(name) + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed, diff=diff) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/library/ceph_fs.py b/library/ceph_fs.py new file mode 100644 index 0000000..10db2b1 --- /dev/null +++ b/library/ceph_fs.py @@ -0,0 +1,278 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import is_containerized, \ + exec_command, \ + generate_cmd, \ + exit_module +except ImportError: + from module_utils.ca_common import is_containerized, \ + exec_command, \ + generate_cmd, \ + exit_module + +import datetime +import json + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_fs + +short_description: Manage Ceph File System + +version_added: "2.8" + +description: + - Manage Ceph File System(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the Ceph File System. 
def create_fs(module, container_image=None):
    '''
    Build the `ceph fs new` command creating the filesystem named by the
    'name' module parameter from the 'metadata' and 'data' pools.
    '''

    cluster = module.params.get('cluster')
    name = module.params.get('name')
    data = module.params.get('data')
    metadata = module.params.get('metadata')

    args = ['new', name, metadata, data]

    cmd = generate_cmd(sub_cmd=['fs'],
                       args=args,
                       cluster=cluster,
                       container_image=container_image)

    return cmd


def get_fs(module, container_image=None):
    '''
    Build the `ceph fs get` command returning the filesystem details
    in JSON.
    '''

    cluster = module.params.get('cluster')
    name = module.params.get('name')

    args = ['get', name, '--format=json']

    cmd = generate_cmd(sub_cmd=['fs'],
                       args=args,
                       cluster=cluster,
                       container_image=container_image)

    return cmd


def remove_fs(module, container_image=None):
    '''
    Build the `ceph fs rm` command deleting the filesystem.
    '''

    cluster = module.params.get('cluster')
    name = module.params.get('name')

    args = ['rm', name, '--yes-i-really-mean-it']

    cmd = generate_cmd(sub_cmd=['fs'],
                       args=args,
                       cluster=cluster,
                       container_image=container_image)

    return cmd


def fail_fs(module, container_image=None):
    '''
    Build the `ceph fs fail` command marking the filesystem as failed
    (required before it can be removed).
    '''

    cluster = module.params.get('cluster')
    name = module.params.get('name')

    args = ['fail', name]

    cmd = generate_cmd(sub_cmd=['fs'],
                       args=args,
                       cluster=cluster,
                       container_image=container_image)

    return cmd


def set_fs(module, container_image=None):
    '''
    Build the `ceph fs set <name> max_mds <n>` command applying the
    'max_mds' module parameter.
    '''

    cluster = module.params.get('cluster')
    name = module.params.get('name')
    max_mds = module.params.get('max_mds')

    args = ['set', name, 'max_mds', str(max_mds)]

    cmd = generate_cmd(sub_cmd=['fs'],
                       args=args,
                       cluster=cluster,
                       container_image=container_image)

    return cmd


def run_module():
    '''
    Entry point: create, update, delete or query a Ceph File System
    depending on the 'state' parameter.
    '''
    module_args = dict(
        cluster=dict(type='str', required=False, default='ceph'),
        name=dict(type='str', required=True),
        state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'),  # noqa: E501
        data=dict(type='str', required=False),
        metadata=dict(type='str', required=False),
        max_mds=dict(type='int', required=False),
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_if=[['state', 'present', ['data', 'metadata']]],
    )

    # Gather module parameters in variables
    name = module.params.get('name')
    state = module.params.get('state')
    max_mds = module.params.get('max_mds')

    if module.check_mode:
        module.exit_json(
            changed=False,
            stdout='',
            stderr='',
            rc=0,
            start='',
            end='',
            delta='',
        )

    startd = datetime.datetime.now()
    changed = False

    # will return either the image name or None
    container_image = is_containerized()

    if state == "present":
        rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image))  # noqa: E501
        if rc == 0:
            fs = json.loads(out)
            # only update when the live max_mds differs from the request
            if max_mds and fs["mdsmap"]["max_mds"] != max_mds:
                rc, cmd, out, err = exec_command(module, set_fs(module, container_image=container_image))  # noqa: E501
                if rc == 0:
                    changed = True
        else:
            rc, cmd, out, err = exec_command(module, create_fs(module, container_image=container_image))  # noqa: E501
            if rc == 0:
                changed = True
                if max_mds and max_mds > 1:
                    # capture the result instead of discarding it so a
                    # failed max_mds update is reported to the caller
                    rc, cmd, out, err = exec_command(module, set_fs(module, container_image=container_image))  # noqa: E501

    elif state == "absent":
        rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image))  # noqa: E501
        if rc == 0:
            # a filesystem must be failed before `fs rm` will succeed;
            # a failure here will surface through the subsequent remove
            exec_command(module, fail_fs(module, container_image=container_image))  # noqa: E501
            rc, cmd, out, err = exec_command(module, remove_fs(module, container_image=container_image))  # noqa: E501
            if rc == 0:
                changed = True
        else:
            rc = 0
            out = "Ceph File System {} doesn't exist".format(name)

    elif state == "info":
        rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image))  # noqa: E501

    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed)  # noqa: E501


def main():
    run_module()
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import generate_cmd, \ + is_containerized, \ + container_exec, \ + fatal +except ImportError: + from module_utils.ca_common import generate_cmd, \ + is_containerized, \ + container_exec, \ + fatal +import datetime +import json +import os +import struct +import time +import base64 +import socket + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_key + +author: Sebastien Han + +short_description: Manage Cephx key(s) + +version_added: "2.6" + +description: + - Manage CephX creation, deletion and updates. + It can also list and get information about keyring(s). +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the CephX key + required: true + user: + description: + - entity used to perform operation. + It corresponds to the -n option (--name) + required: false + user_key: + description: + - the path to the keyring corresponding to the + user being used. + It corresponds to the -k option (--keyring) + state: + description: + - If 'present' is used, the module creates a keyring + with the associated capabilities. + If 'present' is used and a secret is provided the module + will always add the key. Which means it will update + the keyring if the secret changes, the same goes for + the capabilities. + If 'absent' is used, the module will simply delete the keyring. + If 'list' is used, the module will list all the keys and will + return a json output. + If 'generate_secret' is used, the module will simply output a cephx keyring. 
+ required: false + choices: ['present', 'update', 'absent', 'fetch_initial_keys', 'generate_secret'] + default: present + caps: + description: + - CephX key capabilities + default: None + required: false + secret: + description: + - keyring's secret value + required: false + default: None + import_key: + description: + - Wether or not to import the created keyring into Ceph. + This can be useful for someone that only wants to generate keyrings + but not add them into Ceph. + required: false + default: True + dest: + description: + - Destination to write the keyring, can a file or a directory + required: false + default: /etc/ceph/ + fetch_initial_keys: + description: + - Fetch client.admin and bootstrap key. + This is only needed for Nautilus and above. + Writes down to the filesystem the initial keys generated by the monitor. # noqa: E501 + This command can ONLY run from a monitor node. + required: false + default: false + output_format: + description: + - The key output format when retrieving the information of an + entity. + required: false + default: json +''' + +EXAMPLES = ''' + +keys_to_create: + - { name: client.key, key: "AQAin8tUUK84ExAA/QgBtI7gEMWdmnvKBzlXdQ==", caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" } # noqa: E501 + - { name: client.cle, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" } # noqa: E501 + +caps: + mon: "allow rwx" + mds: "allow *" + +- name: create ceph admin key + ceph_key: + name: client.admin + state: present + secret: AQAin8tU2DsKFBAAFIAzVTzkL3+gtAjjpQiomw== + caps: + mon: allow * + osd: allow * + mgr: allow * + mds: allow + mode: 0400 + import_key: False + +- name: create monitor initial keyring + ceph_key: + name: mon. 
CEPH_INITIAL_KEYS = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr',  # noqa: E501
                     'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw']  # noqa: E501


def str_to_bool(val):
    '''
    Interpret *val* as a boolean.

    Accepts 'true'/'false' in any case (or any object whose str() form,
    lowercased, is one of those) and raises ValueError for anything else.
    '''
    normalized = val.lower() if isinstance(val, str) else str(val).lower()
    if normalized == 'true':
        return True
    if normalized == 'false':
        return False
    raise ValueError("Invalid input value: %s" % normalized)
CEPH_INITIAL_KEYS = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr',  # noqa: E501
                     'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw']  # noqa: E501


def info_key(cluster, name, user, user_key, output_format, container_image=None):  # noqa: E501
    '''
    Build the `ceph auth get <name>` command returning the details of a
    CephX key in the requested output format.
    '''

    cmd_list = []

    args = [
        'get',
        name,
        '-f',
        output_format,
    ]

    cmd_list.append(generate_cmd(sub_cmd=['auth'],
                                 args=args,
                                 cluster=cluster,
                                 user=user,
                                 user_key=user_key,
                                 container_image=container_image))

    return cmd_list


def list_keys(cluster, user, user_key, container_image=None):
    '''
    Build the `ceph auth ls` command listing all CephX keys (JSON).
    '''

    cmd_list = []

    args = [
        'ls',
        '-f',
        'json',
    ]

    cmd_list.append(generate_cmd(sub_cmd=['auth'],
                                 args=args,
                                 cluster=cluster,
                                 user=user,
                                 user_key=user_key,
                                 container_image=container_image))

    return cmd_list


def exec_commands(module, cmd_list):
    '''
    Execute each command in *cmd_list*, stopping at the first failure.

    Returns the (rc, cmd, out, err) of the last command run. An empty
    command list is reported as a successful no-op instead of raising
    UnboundLocalError.
    '''

    rc, cmd, out, err = 0, [], '', ''
    for cmd in cmd_list:
        rc, out, err = module.run_command(cmd)
        if rc != 0:
            break

    return rc, cmd, out, err


def run_module():
    '''
    Entry point: list all CephX keys or show one key's details depending
    on the 'state' parameter.
    '''
    module_args = dict(
        cluster=dict(type='str', required=False, default='ceph'),
        name=dict(type='str', required=False),
        state=dict(type='str', required=False, default='info', choices=['list', 'info']),  # noqa: E501
        user=dict(type='str', required=False, default='client.admin'),
        user_key=dict(type='str', required=False, default=None),
        output_format=dict(type='str', required=False, default='json', choices=['json', 'plain', 'xml', 'yaml'])  # noqa: E501
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        add_file_common_args=True,
    )

    # Gather module parameters in variables
    state = module.params['state']
    name = module.params.get('name')
    cluster = module.params.get('cluster')
    user = module.params.get('user')
    user_key = module.params.get('user_key')
    output_format = module.params.get('output_format')

    # Can't use required_if with 'name' for some reason...
    if state in ['info'] and not name:
        fatal(f'"state" is "{state}" but "name" is not defined.', module)

    changed = False
    cmd = ''
    rc = 0
    out = ''
    err = ''

    result = dict(
        changed=changed,
        stdout='',
        stderr='',
        rc=0,
        start='',
        end='',
        delta='',
    )

    if module.check_mode:
        module.exit_json(**result)

    startd = datetime.datetime.now()

    # will return either the image name or None
    container_image = is_containerized()

    # default to the cluster's keyring for *user* when no key path is given
    if not user_key:
        user_key_filename = '{}.{}.keyring'.format(cluster, user)
        user_key_dir = '/etc/ceph'
        user_key_path = os.path.join(user_key_dir, user_key_filename)
    else:
        user_key_path = user_key

    if state == "info":
        rc, cmd, out, err = exec_commands(
            module, info_key(cluster, name, user, user_key_path, output_format, container_image))  # noqa: E501

    elif state == "list":
        rc, cmd, out, err = exec_commands(
            module, list_keys(cluster, user, user_key_path, container_image))

    endd = datetime.datetime.now()
    delta = endd - startd

    result = dict(
        cmd=cmd,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        rc=rc,
        stdout=out.rstrip("\r\n"),
        stderr=err.rstrip("\r\n"),
        changed=changed,
    )

    if rc != 0:
        module.fail_json(msg='non-zero return code', **result)

    module.exit_json(**result)


def main():
    run_module()
def main():
    '''
    Enable or disable a Ceph MGR module by running
    `ceph mgr module <enable|disable> <name>`.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            cluster=dict(type='str', required=False, default='ceph'),
            state=dict(type='str', required=False, default='enable', choices=['enable', 'disable']),  # noqa: E501
        ),
        supports_check_mode=True,
    )

    params = module.params
    startd = datetime.datetime.now()

    cmd = generate_cmd(sub_cmd=['mgr', 'module'],
                       args=[params.get('state'), params.get('name')],
                       cluster=params.get('cluster'),
                       container_image=is_containerized())

    # in check mode report what would run without touching the cluster
    if module.check_mode:
        exit_module(
            module=module,
            out='',
            rc=0,
            cmd=cmd,
            err='',
            startd=startd,
            changed=False
        )
        return

    rc, out, err = module.run_command(cmd)
    # ceph reports "is already enabled" on stderr: treat that as a no-op
    exit_module(
        module=module,
        out=out,
        rc=rc,
        cmd=cmd,
        err=err,
        startd=startd,
        changed='is already enabled' not in err
    )
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +from typing import List, Tuple, Dict +__metaclass__ = type + +import datetime +import yaml + +from ansible.module_utils.basic import AnsibleModule # type: ignore +try: + from ansible.module_utils.ca_common import exit_module, build_base_cmd # type: ignore +except ImportError: + from module_utils.ca_common import exit_module, build_base_cmd + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_orch_apply +short_description: apply service spec +version_added: "2.9" +description: + - apply a service spec +options: + fsid: + description: + - the fsid of the Ceph cluster to interact with. + required: false + image: + description: + - The Ceph container image to use. + required: false + cluster: + description: + - The Ceph cluster name. 
def parse_spec(spec: str) -> Dict:
    """Parse a YAML service spec string into a dict."""
    return yaml.safe_load(spec)


def _cluster_conf_args(module: "AnsibleModule") -> List[str]:
    """Extra --config/--keyring arguments for a non-default cluster name."""
    cluster = module.params.get('cluster')
    if cluster == 'ceph':
        return []
    return ['--config', f"/etc/ceph/{cluster}.conf",
            '--keyring', f"/etc/ceph/{cluster}.client.admin.keyring"]


def retrieve_current_spec(module: "AnsibleModule", expected_spec: Dict) -> Dict:
    """Return the currently applied spec for the service, or {} if absent.

    NOTE: the original checked `isinstance(out, str)` on the result of
    module.run_command(), which always returns a tuple — that branch was
    dead code, so a missing service was never detected. Inspect the rc and
    the 'No services reported' message instead.
    """
    service: str = expected_spec["service_type"]
    cmd = build_base_cmd(module)
    cmd.extend(_cluster_conf_args(module))
    cmd.extend(['ceph', 'orch', 'ls', service])
    if 'service_name' in expected_spec:
        cmd.append(expected_spec["service_name"])
    else:
        cmd.append(expected_spec["service_type"] + "."
                   + expected_spec["service_id"])
    cmd.append('--format=yaml')
    rc, out, err = module.run_command(cmd)
    # cephadm prints 'No services reported' when the service doesn't exist
    if rc != 0 or 'No services reported' in out or 'No services reported' in err:  # noqa: E501
        return {}
    return yaml.safe_load(out) or {}


def apply_spec(module: "AnsibleModule",
               data: str) -> Tuple[int, List[str], str, str]:
    """Apply *data* via `ceph orch apply -i -`; raise RuntimeError on failure."""
    cmd = build_base_cmd(module)
    cmd.extend(_cluster_conf_args(module))
    cmd.extend(['ceph', 'orch', 'apply', '-i', '-'])
    rc, out, err = module.run_command(cmd, data=data)

    if rc:
        raise RuntimeError(err)

    return rc, cmd, out, err


def change_required(current: Dict, expected: Dict) -> bool:
    """Return True when *expected* has a key missing from or differing in *current*."""
    if not current:
        return True

    for key, value in expected.items():
        if key not in current or current[key] != value:
            return True
    return False


def run_module() -> None:
    """Entry point: apply the service spec only when it differs from the live one."""
    module_args = dict(
        spec=dict(type='str', required=True),
        fsid=dict(type='str', required=False),
        # 'bool' (the string) is the correct Ansible type specifier; the
        # builtin `bool` would coerce the string 'false' to True.
        docker=dict(type='bool',
                    required=False,
                    default=False),
        image=dict(type='str', required=False),
        cluster=dict(type='str', required=False,
                     default='ceph')
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    startd = datetime.datetime.now()
    spec = module.params.get('spec')

    if module.check_mode:
        exit_module(
            module=module,
            out='',
            rc=0,
            cmd=[],
            err='',
            startd=startd,
            changed=False
        )

    # Idempotency check
    expected = parse_spec(module.params.get('spec'))
    current_spec = retrieve_current_spec(module, expected)

    if change_required(current_spec, expected):
        rc, cmd, out, err = apply_spec(module, spec)
        changed = True
    else:
        rc = 0
        cmd = []
        out = ''
        err = ''
        changed = False

    exit_module(
        module=module,
        out=out,
        rc=rc,
        cmd=cmd,
        err=err,
        startd=startd,
        changed=changed
    )


def main() -> None:
    run_module()
def main():
    '''
    Apply a state transition (destroy/down/in/out/purge/rm) to one or
    more Ceph OSD ids via `ceph osd <state> <ids...>`.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            ids=dict(type='list', required=True),
            cluster=dict(type='str', required=False, default='ceph'),
            state=dict(type='str', required=True, choices=['destroy', 'down', 'in', 'out', 'purge', 'rm']),  # noqa: E501
        ),
        supports_check_mode=True,
    )

    ids = module.params.get('ids')
    cluster = module.params.get('cluster')
    state = module.params.get('state')

    # destroy/purge are irreversible and only accept a single OSD id
    # (error message fixed: was "at at time")
    if state in ['destroy', 'purge'] and len(ids) > 1:
        module.fail_json(msg='destroy and purge only support one OSD at a time', rc=1)  # noqa: E501

    startd = datetime.datetime.now()

    # will return either the image name or None
    container_image = is_containerized()

    cmd = generate_cmd(sub_cmd=['osd', state], args=ids, cluster=cluster, container_image=container_image)  # noqa: E501

    if state in ['destroy', 'purge']:
        cmd.append('--yes-i-really-mean-it')

    if module.check_mode:
        exit_module(
            module=module,
            out='',
            rc=0,
            cmd=cmd,
            err='',
            startd=startd,
            changed=False
        )
    else:
        rc, out, err = module.run_command(cmd)
        changed = True
        # ceph only prints 'marked ...' when a state actually changed
        if state in ['down', 'in', 'out'] and 'marked' not in err:
            changed = False
        exit_module(
            module=module,
            out=out,
            rc=rc,
            cmd=cmd,
            err=err,
            startd=startd,
            changed=changed
        )
def main():
    '''
    Set ('present') or unset ('absent') a Ceph OSD flag via
    `ceph osd set|unset <flag>`.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, choices=['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub', 'noautoscale']),  # noqa: E501
            cluster=dict(type='str', required=False, default='ceph'),
            state=dict(type='str', required=False, default='present', choices=['present', 'absent']),  # noqa: E501
        ),
        supports_check_mode=True,
    )

    flag = module.params.get('name')
    cluster = module.params.get('cluster')
    state = module.params.get('state')

    startd = datetime.datetime.now()
    container_image = is_containerized()

    # 'present' maps to `osd set`, 'absent' to `osd unset`
    action = 'set' if state == 'present' else 'unset'
    cmd = generate_cmd(sub_cmd=['osd', action], args=[flag], cluster=cluster, container_image=container_image)  # noqa: E501

    # in check mode only report the command that would have run
    if module.check_mode:
        exit_module(
            module=module,
            out='',
            rc=0,
            cmd=cmd,
            err='',
            startd=startd,
            changed=False
        )
        return

    rc, out, err = module.run_command(cmd)
    exit_module(
        module=module,
        out=out,
        rc=rc,
        cmd=cmd,
        err=err,
        startd=startd,
        changed=True
    )
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import generate_cmd, \ + pre_generate_cmd, \ + is_containerized, \ + exec_command, \ + exit_module +except ImportError: + from module_utils.ca_common import generate_cmd, \ + pre_generate_cmd, \ + is_containerized, \ + exec_command, \ + exit_module + + +import datetime +import json +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_pool + +author: Guillaume Abrioux + +short_description: Manage Ceph Pools + +version_added: "2.8" + +description: + - Manage Ceph pool(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the Ceph pool + required: true + state: + description: + If 'present' is used, the module creates a pool if it doesn't exist + or update it if it already exists. + If 'absent' is used, the module will simply delete the pool. + If 'list' is used, the module will return all details about the + existing pools. (json formatted). + required: false + choices: ['present', 'absent', 'list'] + default: present + size: + description: + - set the replica size of the pool. + required: false + default: 3 + min_size: + description: + - set the min_size parameter of the pool. 
+ required: false + default: default to `osd_pool_default_min_size` (ceph) + pg_num: + description: + - set the pg_num of the pool. + required: false + default: default to `osd_pool_default_pg_num` (ceph) + pgp_num: + description: + - set the pgp_num of the pool. + required: false + default: default to `osd_pool_default_pgp_num` (ceph) + pg_autoscale_mode: + description: + - set the pg autoscaler on the pool. + required: false + default: 'on' + target_size_ratio: + description: + - set the target_size_ratio on the pool + required: false + default: None + pool_type: + description: + - set the pool type, either 'replicated' or 'erasure' + required: false + default: 'replicated' + erasure_profile: + description: + - When pool_type = 'erasure', set the erasure profile of the pool + required: false + default: 'default' + rule_name: + description: + - Set the crush rule name assigned to the pool + required: false + default: 'replicated_rule' when pool_type is 'erasure' else None + expected_num_objects: + description: + - Set the expected_num_objects parameter of the pool. + required: false + default: '0' + application: + description: + - Set the pool application on the pool. 
+ required: false + default: None +''' + +EXAMPLES = ''' + +pools: + - { name: foo, size: 3, application: rbd, pool_type: 'replicated', + pg_autoscale_mode: 'on' } + +- hosts: all + become: true + tasks: + - name: create a pool + ceph_pool: + name: "{{ item.name }}" + state: present + size: "{{ item.size }}" + application: "{{ item.application }}" + pool_type: "{{ item.pool_type }}" + pg_autoscale_mode: "{{ item.pg_autoscale_mode }}" + with_items: "{{ pools }}" +''' + +RETURN = '''# ''' + + +def check_pool_exist(cluster, + name, + user, + user_key, + output_format='json', + container_image=None): + ''' + Check if a given pool exists + ''' + + args = ['stats', name, '-f', output_format] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def generate_get_config_cmd(param, + cluster, + user, + user_key, + container_image=None): + _cmd = pre_generate_cmd('ceph', container_image=container_image) + args = [ + '-n', + user, + '-k', + user_key, + '--cluster', + cluster, + 'config', + 'get', + 'mon.*', + param + ] + cmd = _cmd + args + return cmd + + +def get_application_pool(cluster, + name, + user, + user_key, + output_format='json', + container_image=None): + ''' + Get application type enabled on a given pool + ''' + + args = ['application', 'get', name, '-f', output_format] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def get_crush_rule_pool(cluster, + name, + user, + user_key, + output_format='json', + container_image=None): + ''' + Get crush rule type on a given pool + ''' + + args = ['get', name, 'crush_rule', '-f', output_format] + + cmd = generate_cmd(sub_cmd=['osd', 'pool'], + args=args, + cluster=cluster, + user=user, + user_key=user_key, + container_image=container_image) + + return cmd + + +def 
def disable_application_pool(cluster,
                             name,
                             application,
                             user,
                             user_key,
                             container_image=None):
    '''
    Build the command that disables `application` on pool `name`
    (`ceph osd pool application disable ... --yes-i-really-mean-it`).
    Returns the command as a list for the caller to execute.
    '''
    cli_args = [
        'application',
        'disable',
        name,
        application,
        '--yes-i-really-mean-it',
    ]
    return generate_cmd(sub_cmd=['osd', 'pool'],
                        args=cli_args,
                        cluster=cluster,
                        user=user,
                        user_key=user_key,
                        container_image=container_image)
container_image=container_image)) # noqa: E501 + + # This is a trick because "target_size_ratio" isn't present at the same + # level in the dict + # ie: + # { + # 'pg_num': 8, + # 'pgp_num': 8, + # 'pg_autoscale_mode': 'on', + # 'options': { + # 'target_size_ratio': 0.1 + # } + # } + # If 'target_size_ratio' is present in 'options', we set it, this way we + # end up with a dict containing all needed keys at the same level. + if 'target_size_ratio' in out['options'].keys(): + out['target_size_ratio'] = out['options']['target_size_ratio'] + else: + out['target_size_ratio'] = None + + application = list(json.loads(application_pool.strip()).keys()) + + if len(application) == 0: + out['application'] = '' + else: + out['application'] = application[0] + + out['crush_rule'] = json.loads(crush_rule.strip())['crush_rule'] + + return rc, cmd, out, err + + +def compare_pool_config(user_pool_config, running_pool_details): + ''' + Compare user input config pool details with current running pool details + ''' + + delta = {} + filter_keys = ['pg_num', 'pg_placement_num', 'size', + 'pg_autoscale_mode', 'target_size_ratio', + 'crush_rule'] + for key in filter_keys: + if (str(running_pool_details[key]) != user_pool_config[key]['value'] and # noqa: E501 + user_pool_config[key]['value']): + delta[key] = user_pool_config[key] + + if (running_pool_details['application'] != + user_pool_config['application']['value'] and + user_pool_config['application']['value']): + delta['application'] = {} + delta['application']['new_application'] = user_pool_config['application']['value'] # noqa: E501 + # to be improved (for update_pools()...) 
def list_pools(cluster,
               user,
               user_key,
               details,
               output_format='json',
               container_image=None):
    '''
    Build the `ceph osd pool ls` command, inserting `detail` when
    `details` is truthy, and return it as a list for the caller to run.

    NOTE(review): the `state == "list"` branch of run_module() calls this
    as `list_pools(cluster, name, user, user_key, details, ...)`, passing
    the pool name positionally between `cluster` and `user`.  That shifts
    every argument here (the pool name becomes the client user, the
    keyring path lands in `details`, the `details` flag lands in
    `output_format`) — the call site needs fixing.
    '''
    cli_args = ['ls', 'detail'] if details else ['ls']
    cli_args += ['-f', output_format]
    return generate_cmd(sub_cmd=['osd', 'pool'],
                        args=cli_args,
                        cluster=cluster,
                        user=user,
                        user_key=user_key,
                        container_image=container_image)
def update_pool(module, cluster, name,
                user, user_key, delta, container_image=None):
    '''
    Apply the configuration changes collected in `delta` to an existing
    pool.

    Every key except 'application' maps to one `ceph osd pool set`
    invocation; 'application' is handled by disabling the old
    application and enabling the new one.  Execution stops at the first
    failing command, whose (rc, cmd, out, err) is returned unchanged;
    on success `out` carries a human-readable report of the updates.
    '''
    report = ""

    for setting in delta:
        if setting == 'application':
            # Swap application: drop the old one, then enable the new one.
            rc, cmd, out, err = exec_command(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image))  # noqa: E501
            if rc != 0:
                return rc, cmd, out, err

            rc, cmd, out, err = exec_command(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image))  # noqa: E501
            if rc != 0:
                return rc, cmd, out, err
        else:
            set_cmd = generate_cmd(sub_cmd=['osd', 'pool'],
                                   args=['set',
                                         name,
                                         delta[setting]['cli_set_opt'],
                                         delta[setting]['value']],
                                   cluster=cluster,
                                   user=user,
                                   user_key=user_key,
                                   container_image=container_image)
            rc, cmd, out, err = exec_command(module, set_cmd)
            if rc != 0:
                return rc, cmd, out, err

        report = report + "\n" + "{} has been updated: {} is now {}".format(name, setting, delta[setting]['value'])  # noqa: E501

    out = report
    return rc, cmd, out, err
'absent', 'list']), + details=dict(type='bool', required=False, default=False), + size=dict(type='str', required=False), + min_size=dict(type='str', required=False), + pg_num=dict(type='str', required=False), + pgp_num=dict(type='str', required=False), + pg_autoscale_mode=dict(type='str', required=False, default='on'), + target_size_ratio=dict(type='str', required=False, default=None), + pool_type=dict(type='str', required=False, default='replicated', + choices=['replicated', 'erasure', '1', '3']), + erasure_profile=dict(type='str', required=False, default='default'), + rule_name=dict(type='str', required=False, default=None), + expected_num_objects=dict(type='str', required=False, default="0"), + application=dict(type='str', required=False, default=None), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + # Gather module parameters in variables + cluster = module.params.get('cluster') + name = module.params.get('name') + state = module.params.get('state') + details = module.params.get('details') + size = module.params.get('size') + min_size = module.params.get('min_size') + pg_num = module.params.get('pg_num') + pgp_num = module.params.get('pgp_num') + pg_autoscale_mode = module.params.get('pg_autoscale_mode') + target_size_ratio = module.params.get('target_size_ratio') + application = module.params.get('application') + + if (module.params.get('pg_autoscale_mode').lower() in + ['true', 'on', 'yes']): + pg_autoscale_mode = 'on' + elif (module.params.get('pg_autoscale_mode').lower() in + ['false', 'off', 'no']): + pg_autoscale_mode = 'off' + else: + pg_autoscale_mode = 'warn' + + if module.params.get('pool_type') == '1': + pool_type = 'replicated' + elif module.params.get('pool_type') == '3': + pool_type = 'erasure' + else: + pool_type = module.params.get('pool_type') + + if not module.params.get('rule_name'): + rule_name = 'replicated_rule' if pool_type == 'replicated' else None + else: + rule_name = 
module.params.get('rule_name') + + erasure_profile = module.params.get('erasure_profile') + expected_num_objects = module.params.get('expected_num_objects') + user_pool_config = { + 'pool_name': {'value': name}, + 'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'}, + 'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'}, + 'pg_autoscale_mode': {'value': pg_autoscale_mode, + 'cli_set_opt': 'pg_autoscale_mode'}, + 'target_size_ratio': {'value': target_size_ratio, + 'cli_set_opt': 'target_size_ratio'}, + 'application': {'value': application}, + 'type': {'value': pool_type}, + 'erasure_profile': {'value': erasure_profile}, + 'crush_rule': {'value': rule_name, 'cli_set_opt': 'crush_rule'}, + 'expected_num_objects': {'value': expected_num_objects}, + 'size': {'value': size, 'cli_set_opt': 'size'}, + 'min_size': {'value': min_size} + } + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + user = "client.admin" + keyring_filename = cluster + '.' 
+ user + '.keyring' + user_key = os.path.join("/etc/ceph/", keyring_filename) + + diff = dict(before="", after="") + + if state == "present": + rc, cmd, out, err = exec_command(module, + check_pool_exist(cluster, + name, + user, + user_key, + container_image=container_image)) # noqa: E501 + changed = rc != 0 + if not changed: + running_pool_details = get_pool_details(module, + cluster, + name, + user, + user_key, + container_image=container_image) # noqa: E501 + user_pool_config['pg_placement_num'] = {'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num'} # noqa: E501 + delta = compare_pool_config(user_pool_config, + running_pool_details[2]) + if len(delta) > 0: + keys = list(delta.keys()) + details = running_pool_details[2] + if details['erasure_code_profile'] and 'size' in keys: + del delta['size'] + if details['pg_autoscale_mode'] == 'on': + delta.pop('pg_num', None) + delta.pop('pgp_num', None) + if not module.params.get('rule_name'): + delta.pop('crush_rule', None) + + for key in delta.keys(): + diff['before'] += "{}: {}\n".format(key, details[key]) + diff['after'] += "{}: {}\n".format(key, delta[key]['value']) + + changed = len(delta) > 0 + if changed and not module.check_mode: + rc, cmd, out, err = update_pool(module, + cluster, + name, + user, + user_key, + delta, + container_image=container_image) # noqa: E501 + elif not module.check_mode: + rc, cmd, out, err = exec_command(module, + create_pool(cluster, + user, + user_key, + user_pool_config=user_pool_config, # noqa: E501 + container_image=container_image)) # noqa: E501 + if user_pool_config['application']['value']: + rc, _, _, _ = exec_command(module, + enable_application_pool(cluster, + name, + user_pool_config['application']['value'], # noqa: E501 + user, + user_key, + container_image=container_image)) # noqa: E501 + if rc == 0 and user_pool_config['application']['value'] == 'rbd': # noqa: E501 + rc, cmd, out, err = exec_command(module, + init_rbd_pool(cluster, + 
user_pool_config['pool_name']['value'], # noqa: E501 + user, + user_key, + container_image=container_image)) # noqa: E501 + if user_pool_config['min_size']['value']: + # not implemented yet + pass + + elif state == "list": + rc, cmd, out, err = exec_command(module, + list_pools(cluster, + name, user, + user_key, + details, + container_image=container_image)) # noqa: E501 + if rc != 0: + out = "Couldn't list pool(s) present on the cluster" + + elif state == "absent": + rc, cmd, out, err = exec_command(module, + check_pool_exist(cluster, + name, user, + user_key, + container_image=container_image)) # noqa: E501 + changed = rc == 0 + if changed and not module.check_mode: + rc, cmd, out, err = exec_command(module, + remove_pool(cluster, + name, + user, + user_key, + container_image=container_image)) # noqa: E501 + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, + changed=changed, diff=diff) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/library/ceph_volume.py b/library/ceph_volume.py new file mode 100644 index 0000000..ade0d55 --- /dev/null +++ b/library/ceph_volume.py @@ -0,0 +1,732 @@ +#!/usr/bin/python + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exec_command, \ + is_containerized, \ + fatal +except ImportError: + from module_utils.ca_common import exec_command, \ + is_containerized, \ + fatal +import datetime +import copy +import json +import os +import re + +ANSIBLE_METADATA = { + 'metadata_version': '1.0', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_volume + +short_description: Create ceph OSDs with ceph-volume + +description: + - Using the ceph-volume utility available in Ceph this module + can be used to create ceph OSDs that are backed by logical volumes. + - Only available in ceph versions luminous or greater. + +options: + cluster: + description: + - The ceph cluster name. 
+ required: false + default: ceph + objectstore: + description: + - The objectstore of the OSD. + required: false + choices: ['bluestore'] + default: bluestore + action: + description: + - The action to take. Creating OSDs and zapping or querying devices. + required: true + choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list', 'inventory'] + default: create + data: + description: + - The logical volume name or device to use for the OSD data. + required: true + data_vg: + description: + - If data is a lv, this must be the name of the volume group it belongs to. + required: false + osd_fsid: + description: + - The OSD FSID + required: false + osd_id: + description: + - The OSD ID + required: false + db: + description: + - A partition or logical volume name to use for block.db. + required: false + db_vg: + description: + - If db is a lv, this must be the name of the volume group it belongs to. # noqa: E501 + required: false + wal: + description: + - A partition or logical volume name to use for block.wal. + required: false + wal_vg: + description: + - If wal is a lv, this must be the name of the volume group it belongs to. # noqa: E501 + required: false + crush_device_class: + description: + - Will set the crush device class for the OSD. + required: false + dmcrypt: + description: + - If set to True the OSD will be encrypted with dmcrypt. + required: false + batch_devices: + description: + - A list of devices to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. + required: false + osds_per_device: + description: + - The number of OSDs to create per device. + - Only applicable if action is 'batch'. + required: false + default: 1 + block_db_size: + description: + - The size in bytes of bluestore block db lvs. + - The default of -1 means to create them as big as possible. + - Only applicable if action is 'batch'. 
+ required: false + default: -1 + block_db_devices: + description: + - A list of devices for bluestore block db to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. + required: false + wal_devices: + description: + - A list of devices for bluestore block wal to pass to the 'ceph-volume lvm batch' subcommand. + - Only applicable if action is 'batch'. + required: false + report: + description: + - If provided the --report flag will be passed to 'ceph-volume lvm batch'. + - No OSDs will be created. + - Results will be returned in json format. + - Only applicable if action is 'batch'. + required: false + list: + description: + - List potential Ceph LVM metadata on a device + required: false + inventory: + description: + - List storage device inventory. + required: false + +author: + - Andrew Schoen (@andrewschoen) + - Sebastien Han +''' + +EXAMPLES = ''' +- name: set up a bluestore osd with a raw device for data + ceph_volume: + objectstore: bluestore + data: /dev/sdc + action: create + + +- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa: E501 + ceph_volume: + objectstore: bluestore + data: data-lv + data_vg: data-vg + db: /dev/sdc1 + wal: /dev/sdc2 + action: create +''' + + +def container_exec(binary, container_image, mounts=None): + ''' + Build the docker CLI to run a command inside a container + ''' + _mounts = {} + _mounts['/run/lock/lvm'] = '/run/lock/lvm:z' + _mounts['/var/run/udev'] = '/var/run/udev:z' + _mounts['/dev'] = '/dev' + _mounts['/etc/ceph'] = '/etc/ceph:z' + _mounts['/run/lvm'] = '/run/lvm' + _mounts['/var/lib/ceph'] = '/var/lib/ceph:z' + _mounts['/var/log/ceph'] = '/var/log/ceph:z' + if mounts is None: + mounts = _mounts + else: + _mounts.update(mounts) + + volumes = sum( + [['-v', '{}:{}'.format(src_dir, dst_dir)] + for src_dir, dst_dir in _mounts.items()], []) + + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + command_exec = [container_binary, 'run', 
def get_data(data, data_vg):
    '''
    Return the ceph-volume data argument: `vg/lv` when a volume-group
    name is given, otherwise `data` untouched (e.g. a raw device path).
    '''
    return '{0}/{1}'.format(data_vg, data) if data_vg else data
def ceph_volume_cmd(subcommand, container_image, cluster=None):
    '''
    Build the initial `ceph-volume lvm <subcommand>` command line,
    wrapped in a container invocation when `container_image` is set.
    The optional `--cluster` flag is inserted before the subcommand.
    '''
    if container_image:
        cmd = container_exec('ceph-volume', container_image)
    else:
        cmd = ['ceph-volume']

    if cluster:
        cmd.extend(['--cluster', cluster])

    cmd.extend(['lvm', subcommand])

    return cmd
def list_osd(module, container_image):
    '''
    Build the `ceph-volume lvm list --format=json` command used to
    detect whether a device already carries Ceph LVM metadata.  When the
    module was given a data device/lv only that target is listed,
    otherwise everything is.  /var/lib/ceph is mounted read-only for the
    containerized case.
    '''
    cluster = module.params['cluster']
    target = get_data(module.params.get('data', None),
                      module.params.get('data_vg', None))

    cmd = build_cmd(['lvm', 'list'],
                    container_image,
                    cluster,
                    mounts={'/var/lib/ceph': '/var/lib/ceph:ro'})

    if target:
        cmd.append(target)

    cmd.append('--format=json')

    return cmd
def allowed_in_check_mode(module):
    '''
    Decide whether the requested action may run under Ansible check
    mode.

    Read-only actions ('list', 'inventory') always may; 'batch' may only
    when the report flag is set, since `batch --report` creates nothing.
    Everything else (create/prepare/zap/activate) is refused.
    '''
    action = module.params['action']

    if action in ('list', 'inventory'):
        return True

    return action == 'batch' and bool(module.params.get('report', False))
default=[]), + osds_per_device=dict(type='int', required=False, default=1), + block_db_size=dict(type='str', required=False, default='-1'), + block_db_devices=dict(type='list', required=False, default=[]), + wal_devices=dict(type='list', required=False, default=[]), + report=dict(type='bool', required=False, default=False), + osd_fsid=dict(type='str', required=False), + osd_id=dict(type='str', required=False), + destroy=dict(type='bool', required=False, default=True), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + mutually_exclusive=[ + ('data', 'osd_fsid', 'osd_id'), + ], + required_if=[ + ('action', 'zap', ('data', 'osd_fsid', 'osd_id'), True) + ] + ) + + result = dict( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + if module.check_mode and not allowed_in_check_mode(module): + module.exit_json(**result) + + # start execution + startd = datetime.datetime.now() + + # get the desired action + action = module.params['action'] + + # will return either the image name or None + container_image = is_containerized() + + # Assume the task's status will be 'changed' + changed = True + + if action == 'create' or action == 'prepare': + # First test if the device has Ceph LVM Metadata + rc, cmd, out, err = exec_command( + module, list_osd(module, container_image)) + + # list_osd returns a dict, if the dict is empty this means + # we can not check the return code since it's not consistent + # with the plain output + # see: http://tracker.ceph.com/issues/36329 + # FIXME: it's probably less confusing to check for rc + + # convert out to json, ansible returns a string... 
+ try: + out_dict = json.loads(out) + except ValueError: + fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa: E501 + + if out_dict: + data = module.params['data'] + result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data) # noqa: E501 + result['rc'] = 0 + module.exit_json(**result) + + # Prepare or create the OSD + rc, cmd, out, err = exec_command( + module, prepare_or_create_osd(module, action, container_image)) + err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) + + elif action == 'activate': + if container_image: + fatal( + "This is not how container's activation happens, nothing to activate", module) # noqa: E501 + + # Activate the OSD + rc, cmd, out, err = exec_command( + module, activate_osd()) + + elif action == 'zap': + # Zap the OSD + skip = [] + for device_type in ['journal', 'data', 'db', 'wal']: + # 1/ if we passed vg/lv + if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501 + # 2/ check this is an actual lv/vg + ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image) # noqa: E501 + skip.append(ret) + # 3/ This isn't a lv/vg device + if not ret: + module.params['{}_vg'.format(device_type)] = False + module.params[device_type] = False + # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device # noqa: E501 + elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501 + skip.append(True) + + cmd = zap_devices(module, container_image) + + if any(skip) or module.params.get('osd_fsid', None) \ + or module.params.get('osd_id', None): + rc, cmd, out, err = exec_command( + module, cmd) + for scan_cmd in ['vgscan', 'lvscan']: + module.run_command([scan_cmd, '--cache']) + else: + out = 'Skipped, nothing to zap' + err = '' + changed = False + rc = 0 + + elif action == 'list': + # List Ceph LVM Metadata on a device + 
changed = False + rc, cmd, out, err = exec_command( + module, list_osd(module, container_image)) + + elif action == 'inventory': + # List storage device inventory. + changed = False + rc, cmd, out, err = exec_command( + module, list_storage_inventory(module, container_image)) + + elif action == 'batch': + # Batch prepare AND activate OSDs + report = module.params.get('report', None) + + # Add --report flag for the idempotency test + report_flags = [ + '--report', + '--format=json', + ] + + cmd = batch(module, container_image, report=True) + batch_report_cmd = copy.copy(cmd) + batch_report_cmd.extend(report_flags) + + # Run batch --report to see what's going to happen + # Do not run the batch command if there is nothing to do + rc, cmd, out, err = exec_command( + module, batch_report_cmd) + try: + if not out: + out = '{}' + report_result = json.loads(out) + except ValueError: + strategy_changed_in_out = "strategy changed" in out + strategy_changed_in_err = "strategy changed" in err + strategy_changed = strategy_changed_in_out or \ + strategy_changed_in_err + if strategy_changed: + if strategy_changed_in_out: + out = json.dumps({"changed": False, + "stdout": out.rstrip("\r\n")}) + elif strategy_changed_in_err: + out = json.dumps({"changed": False, + "stderr": err.rstrip("\r\n")}) + rc = 0 + changed = False + else: + out = out.rstrip("\r\n") + result = dict( + cmd=cmd, + stdout=out.rstrip('\r\n'), + stderr=err.rstrip('\r\n'), + rc=rc, + changed=changed, + ) + if strategy_changed: + module.exit_json(**result) + module.fail_json(msg='non-zero return code', **result) + + if not report: + if 'changed' in report_result: + # we have the old batch implementation + # if not asking for a report, let's just run the batch command + changed = report_result['changed'] + if changed: + # Batch prepare the OSD + rc, cmd, out, err = exec_command( + module, batch(module, container_image)) + err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) + else: + # we have the refactored batch, its 
idempotent so lets just + # run it + rc, cmd, out, err = exec_command( + module, batch(module, container_image)) + err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err) + else: + cmd = batch_report_cmd + + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip('\r\n'), + stderr=err.rstrip('\r\n'), + changed=changed, + ) + + if rc != 0: + module.fail_json(msg='non-zero return code', **result) + + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/library/ceph_volume_simple_activate.py b/library/ceph_volume_simple_activate.py new file mode 100644 index 0000000..4825f0c --- /dev/null +++ b/library/ceph_volume_simple_activate.py @@ -0,0 +1,190 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module +except ImportError: + from module_utils.ca_common import exit_module +import datetime +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_volume_simple_activate +short_description: Activate legacy OSD with ceph-volume +version_added: "2.8" +description: + - Activate legacy OSD with ceph-volume by providing the JSON file from + the scan operation or by passing the OSD ID and OSD FSID. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + path: + description: + - The OSD metadata as JSON file in /etc/ceph/osd directory, it + must exist. + required: false + osd_id: + description: + - The legacy OSD ID. + required: false + osd_fsid: + description: + - The legacy OSD FSID. + required: false + osd_all: + description: + - Activate all legacy OSDs. + required: false + systemd: + description: + - Using systemd unit during the OSD activation. 
+ required: false + default: true +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: activate all legacy OSDs + ceph_volume_simple_activate: + cluster: ceph + osd_all: true + +- name: activate a legacy OSD via OSD ID and OSD FSID + ceph_volume_simple_activate: + cluster: ceph + osd_id: 3 + osd_fsid: 0c4a7eca-0c2a-4c12-beff-08a80f064c52 + +- name: activate a legacy OSD via the JSON file + ceph_volume_simple_activate: + cluster: ceph + path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json + +- name: activate a legacy OSD via the JSON file without systemd + ceph_volume_simple_activate: + cluster: ceph + path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json + systemd: false +''' + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cluster=dict(type='str', required=False, default='ceph'), + path=dict(type='path', required=False), + systemd=dict(type='bool', required=False, default=True), + osd_id=dict(type='str', required=False), + osd_fsid=dict(type='str', required=False), + osd_all=dict(type='bool', required=False), + ), + supports_check_mode=True, + mutually_exclusive=[ + ('osd_all', 'osd_id'), + ('osd_all', 'osd_fsid'), + ('path', 'osd_id'), + ('path', 'osd_fsid'), + ], + required_together=[ + ('osd_id', 'osd_fsid') + ], + required_one_of=[ + ('path', 'osd_id', 'osd_all'), + ('path', 'osd_fsid', 'osd_all'), + ], + ) + + path = module.params.get('path') + cluster = module.params.get('cluster') + systemd = module.params.get('systemd') + osd_id = module.params.get('osd_id') + osd_fsid = module.params.get('osd_fsid') + osd_all = module.params.get('osd_all') + + if path and not os.path.exists(path): + module.fail_json(msg='{} does not exist'.format(path), rc=1) + + startd = datetime.datetime.now() + + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + if container_binary and container_image: + cmd = [container_binary, + 'run', '--rm', 
'--privileged', + '--ipc=host', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '-v', '/run/lvm/:/run/lvm/', + '-v', '/run/lock/lvm/:/run/lock/lvm/', + '--entrypoint=ceph-volume', container_image] + else: + cmd = ['ceph-volume'] + + cmd.extend(['--cluster', cluster, 'simple', 'activate']) + + if osd_all: + cmd.append('--all') + else: + if path: + cmd.extend(['--file', path]) + else: + cmd.extend([osd_id, osd_fsid]) + + if not systemd: + cmd.append('--no-systemd') + + if module.check_mode: + exit_module( + module=module, + out='', + rc=0, + cmd=cmd, + err='', + startd=startd, + changed=False + ) + else: + rc, out, err = module.run_command(cmd) + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/library/ceph_volume_simple_scan.py b/library/ceph_volume_simple_scan.py new file mode 100644 index 0000000..e76f279 --- /dev/null +++ b/library/ceph_volume_simple_scan.py @@ -0,0 +1,163 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module +except ImportError: + from module_utils.ca_common import exit_module +import datetime +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: ceph_volume_simple_scan +short_description: Scan legacy OSD with ceph-volume +version_added: "2.8" +description: + - Scan legacy OSD with ceph-volume and store the output as JSON file + in /etc/ceph/osd directory with {OSD_ID}-{OSD_FSID}.json format. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + path: + description: + - The OSD directory or metadata partition. The directory or + partition must exist. + required: false + force: + description: + - Force re-scanning an OSD and overwriting the JSON content. + required: false + default: false + stdout: + description: + - Do not store the output to JSON file but stdout instead. 
+ required: false + default: false +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: scan all running OSDs + ceph_volume_simple_scan: + cluster: ceph + +- name: scan an OSD with the directory + ceph_volume_simple_scan: + cluster: ceph + path: /var/lib/ceph/osd/ceph-3 + +- name: scan an OSD with the partition + ceph_volume_simple_scan: + cluster: ceph + path: /dev/sdb1 + +- name: rescan an OSD and print the result on stdout + ceph_volume_simple_scan: + cluster: ceph + path: /dev/nvme0n1p1 + force: true + stdout: true +''' + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cluster=dict(type='str', required=False, default='ceph'), + path=dict(type='path', required=False), + force=dict(type='bool', required=False, default=False), + stdout=dict(type='bool', required=False, default=False), + ), + supports_check_mode=True, + ) + + path = module.params.get('path') + cluster = module.params.get('cluster') + force = module.params.get('force') + stdout = module.params.get('stdout') + + if path and not os.path.exists(path): + module.fail_json(msg='{} does not exist'.format(path), rc=1) + + startd = datetime.datetime.now() + + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + if container_binary and container_image: + cmd = [container_binary, + 'run', '--rm', '--privileged', + '--ipc=host', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '-v', '/run/lvm/:/run/lvm/', + '-v', '/run/lock/lvm/:/run/lock/lvm/', + '--entrypoint=ceph-volume', container_image] + else: + cmd = ['ceph-volume'] + + cmd.extend(['--cluster', cluster, 'simple', 'scan']) + + if force: + cmd.append('--force') + + if stdout: + cmd.append('--stdout') + + if path: + cmd.append(path) + + if module.check_mode: + exit_module( + module=module, + out='', + rc=0, + cmd=cmd, + err='', + startd=startd, + changed=False + ) + 
else: + rc, out, err = module.run_command(cmd) + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/library/cephadm_adopt.py b/library/cephadm_adopt.py new file mode 100644 index 0000000..0f4977a --- /dev/null +++ b/library/cephadm_adopt.py @@ -0,0 +1,184 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module +except ImportError: + from module_utils.ca_common import exit_module +import datetime +import json + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: cephadm_adopt +short_description: Adopt a Ceph cluster with cephadm +version_added: "2.8" +description: + - Adopt a Ceph cluster with cephadm +options: + name: + description: + - The ceph daemon name. + required: true + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + style: + description: + - Cep deployment style. + required: false + default: legacy + image: + description: + - Ceph container image. + required: false + docker: + description: + - Use docker instead of podman. 
+ required: false + pull: + description: + - Pull the Ceph container image. + required: false + default: true + firewalld: + description: + - Manage firewall rules with firewalld. + required: false + default: true +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: adopt a ceph monitor with cephadm (default values) + cephadm_adopt: + name: mon.foo + style: legacy + +- name: adopt a ceph monitor with cephadm (with custom values) + cephadm_adopt: + name: mon.foo + style: legacy + image: quay.io/ceph/ceph:v19 + pull: false + firewalld: false + +- name: adopt a ceph monitor with cephadm with custom image via env var + cephadm_adopt: + name: mon.foo + style: legacy + environment: + CEPHADM_IMAGE: quay.io/ceph/ceph:v19 +''' + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + cluster=dict(type='str', required=False, default='ceph'), + style=dict(type='str', required=False, default='legacy'), + image=dict(type='str', required=False), + docker=dict(type='bool', required=False, default=False), + pull=dict(type='bool', required=False, default=True), + firewalld=dict(type='bool', required=False, default=True), + ), + supports_check_mode=True, + ) + + name = module.params.get('name') + cluster = module.params.get('cluster') + style = module.params.get('style') + docker = module.params.get('docker') + image = module.params.get('image') + pull = module.params.get('pull') + firewalld = module.params.get('firewalld') + + startd = datetime.datetime.now() + + cmd = ['cephadm', 'ls', '--no-detail'] + + if module.check_mode: + exit_module( + module=module, + out='', + rc=0, + cmd=cmd, + err='', + startd=startd, + changed=False + ) + else: + rc, out, err = module.run_command(cmd) + + if rc == 0: + if name in [x["name"] for x in json.loads(out) if x["style"] == "cephadm:v1"]: # noqa: E501 + exit_module( + module=module, + out='{} is already adopted'.format(name), + rc=0, + cmd=cmd, + err='', + 
startd=startd, + changed=False + ) + else: + module.fail_json(msg=err, rc=rc) + + cmd = ['cephadm'] + + if docker: + cmd.append('--docker') + + if image: + cmd.extend(['--image', image]) + + cmd.extend(['adopt', '--cluster', cluster, '--name', name, '--style', style]) # noqa: E501 + + if not pull: + cmd.append('--skip-pull') + + if not firewalld: + cmd.append('--skip-firewalld') + + rc, out, err = module.run_command(cmd) + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/library/cephadm_bootstrap.py b/library/cephadm_bootstrap.py new file mode 100644 index 0000000..05e779c --- /dev/null +++ b/library/cephadm_bootstrap.py @@ -0,0 +1,265 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import exit_module +except ImportError: + from module_utils.ca_common import exit_module +import datetime + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: cephadm_bootstrap +short_description: Bootstrap a Ceph cluster via cephadm +version_added: "2.8" +description: + - Bootstrap a Ceph cluster via cephadm +options: + mon_ip: + description: + - Ceph monitor IP address. + required: true + image: + description: + - Ceph container image. + required: false + docker: + description: + - Use docker instead of podman. + required: false + fsid: + description: + - Ceph FSID. + required: false + pull: + description: + - Pull the Ceph container image. + required: false + default: true + dashboard: + description: + - Deploy the Ceph dashboard. + required: false + default: true + dashboard_user: + description: + - Ceph dashboard user. + required: false + dashboard_password: + description: + - Ceph dashboard password. + required: false + monitoring: + description: + - Deploy the monitoring stack. + required: false + default: true + firewalld: + description: + - Manage firewall rules with firewalld. + required: false + default: true + allow_overwrite: + description: + - allow overwrite of existing –output-* config/keyring/ssh files. + required: false + default: false + registry_url: + description: + - URL for custom registry. + required: false + registry_username: + description: + - Username for custom registry. + required: false + registry_password: + description: + - Password for custom registry. + required: false + registry_json: + description: + - JSON file with custom registry login info (URL, + username, password). 
+ required: false + ssh_user: + description: + - SSH user used for cephadm ssh to the hosts. + required: false + ssh_config: + description: + - SSH config file path for cephadm ssh client. + required: false +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: bootstrap a cluster via cephadm (with default values) + cephadm_bootstrap: + mon_ip: 192.168.42.1 + +- name: bootstrap a cluster via cephadm (with custom values) + cephadm_bootstrap: + mon_ip: 192.168.42.1 + fsid: 3c9ba63a-c7df-4476-a1e7-317dfc711f82 + image: quay.io/ceph/ceph:v19 + dashboard: false + monitoring: false + firewalld: false + +- name: bootstrap a cluster via cephadm with custom image via env var + cephadm_bootstrap: + mon_ip: 192.168.42.1 + environment: + CEPHADM_IMAGE: quay.io/ceph/ceph:v19 +''' + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + mon_ip=dict(type='str', required=True), + image=dict(type='str', required=False), + docker=dict(type='bool', required=False, default=False), + fsid=dict(type='str', required=False), + pull=dict(type='bool', required=False, default=True), + dashboard=dict(type='bool', required=False, default=True), + dashboard_user=dict(type='str', required=False), + dashboard_password=dict(type='str', required=False, no_log=True), + monitoring=dict(type='bool', required=False, default=True), + firewalld=dict(type='bool', required=False, default=True), + allow_overwrite=dict(type='bool', required=False, default=False), + registry_url=dict(type='str', require=False), + registry_username=dict(type='str', require=False), + registry_password=dict(type='str', require=False, no_log=True), + registry_json=dict(type='path', require=False), + ssh_user=dict(type='str', required=False), + ssh_config=dict(type='str', required=False), + ), + supports_check_mode=True, + mutually_exclusive=[ + ('registry_json', 'registry_url'), + ('registry_json', 'registry_username'), + ('registry_json', 'registry_password'), + ], + required_together=[ + 
('registry_url', 'registry_username', 'registry_password') + ], + ) + + mon_ip = module.params.get('mon_ip') + docker = module.params.get('docker') + image = module.params.get('image') + fsid = module.params.get('fsid') + pull = module.params.get('pull') + dashboard = module.params.get('dashboard') + dashboard_user = module.params.get('dashboard_user') + dashboard_password = module.params.get('dashboard_password') + monitoring = module.params.get('monitoring') + firewalld = module.params.get('firewalld') + allow_overwrite = module.params.get('allow_overwrite') + registry_url = module.params.get('registry_url') + registry_username = module.params.get('registry_username') + registry_password = module.params.get('registry_password') + registry_json = module.params.get('registry_json') + ssh_user = module.params.get('ssh_user') + ssh_config = module.params.get('ssh_config') + + startd = datetime.datetime.now() + + cmd = ['cephadm'] + + if docker: + cmd.append('--docker') + + if image: + cmd.extend(['--image', image]) + + cmd.extend(['bootstrap', '--mon-ip', mon_ip]) + + if fsid: + cmd.extend(['--fsid', fsid]) + + if not pull: + cmd.append('--skip-pull') + + if dashboard: + if dashboard_user: + cmd.extend(['--initial-dashboard-user', dashboard_user]) + if dashboard_password: + cmd.extend(['--initial-dashboard-password', dashboard_password]) + else: + cmd.append('--skip-dashboard') + + if not monitoring: + cmd.append('--skip-monitoring-stack') + + if not firewalld: + cmd.append('--skip-firewalld') + + if allow_overwrite: + cmd.append('--allow-overwrite') + + if registry_url and registry_username and registry_password: + cmd.extend(['--registry-url', registry_url, + '--registry-username', registry_username, + '--registry-password', registry_password]) + + if registry_json: + cmd.extend(['--registry-json', registry_json]) + + if ssh_user: + cmd.extend(['--ssh-user', ssh_user]) + + if ssh_config: + cmd.extend(['--ssh-config', ssh_config]) + + if module.check_mode: + 
exit_module( + module=module, + out='', + rc=0, + cmd=cmd, + err='', + startd=startd, + changed=False + ) + else: + rc, out, err = module.run_command(cmd) + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=True + ) + + +if __name__ == '__main__': + main() diff --git a/library/radosgw_caps.py b/library/radosgw_caps.py new file mode 100644 index 0000000..1d5f6b4 --- /dev/null +++ b/library/radosgw_caps.py @@ -0,0 +1,378 @@ +# Copyright 2022, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule + +try: + from ansible.module_utils.ca_common import ( + exit_module, + exec_command, + is_containerized, + container_exec, + ) +except ImportError: + from module_utils.ca_common import ( + exit_module, + exec_command, + is_containerized, + container_exec, + ) +import datetime +import json +import re +from enum import IntFlag + + +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} + +DOCUMENTATION = """ +--- +module: radosgw_caps + +short_description: Manage RADOS Gateway Admin capabilities + +version_added: "2.10" + +description: + - Manage RADOS Gateway capabilities addition and deletion. +options: + cluster: + description: + - The ceph cluster name. 
+ required: false + default: ceph + type: str + name: + description: + - name of the RADOS Gateway user (uid). + required: true + type: str + state: + description: + If 'present' is used, the module will assign capabilities + defined in `caps`. + If 'absent' is used, the module will remove the capabilities. + required: false + choices: ['present', 'absent'] + default: present + type: str + caps: + description: + - The set of capabilities to assign or remove. + required: true + type: list + elements: str + +author: + - Mathias Chapelain +""" + +EXAMPLES = """ +- name: add users read capabilties to a user + radosgw_caps: + name: foo + state: present + caps: + - users=read + +- name: add users read write and all buckets capabilities + radosgw_caps: + name: foo + state: present + caps: + - users=read,write + - buckets=* + +- name: remove usage write capabilities + radosgw_caps: + name: foo + state: absent + caps: + - usage=write +""" + +RETURN = """ +--- +cmd: + description: The radosgw-admin command being run by the module to apply caps settings. + returned: always + type: str +start: + description: Timestamp of module execution start. + returned: always + type: str +end: + description: Timestamp of module execution end. + returned: always + type: str +delta: + description: Time of module execution between start and end. + returned: always + type: str +diff: + description: Dict containing the user capabilities before and after modifications. + returned: always + type: dict + contains: + before: + description: Contains user capabilities, json-formatted, as returned by `radosgw-admin user info`. + returned: always + type: str + after: + description: Contains user capabilities, json-formatted, as returned by `radosgw-admin caps add/rm`. + returned: success + type: str +rc: + description: Return code of the module command executed, see `cmd` return value. + returned: always + type: int +stdout: + description: Output of the executed command. 
+ returned: always + type: str +stderr: + description: Error output of the executed command. + returned: always + type: str +changed: + description: Specify if user capabilities has been changed during module execution. + returned: always + type: bool +""" + + +def pre_generate_radosgw_cmd(container_image=None): + """ + Generate radosgw-admin prefix comaand + """ + if container_image: + cmd = container_exec("radosgw-admin", container_image) + else: + cmd = ["radosgw-admin"] + + return cmd + + +def generate_radosgw_cmd(cluster, args, container_image=None): + """ + Generate 'radosgw' command line to execute + """ + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + base_cmd = ["--cluster", cluster, "caps"] + + cmd.extend(base_cmd + args) + + return cmd + + +def add_caps(module, container_image=None): + """ + Add capabilities + """ + + cluster = module.params.get("cluster") + name = module.params.get("name") + caps = module.params.get("caps") + + args = ["add", "--uid=" + name, "--caps=" + ";".join(caps)] + + cmd = generate_radosgw_cmd( + cluster=cluster, args=args, container_image=container_image + ) + + return cmd + + +def remove_caps(module, container_image=None): + """ + Remove capabilities + """ + + cluster = module.params.get("cluster") + name = module.params.get("name") + caps = module.params.get("caps") + + args = ["rm", "--uid=" + name, "--caps=" + ";".join(caps)] + + cmd = generate_radosgw_cmd( + cluster=cluster, args=args, container_image=container_image + ) + + return cmd + + +def get_user(module, container_image=None): + """ + Get existing user + """ + + cluster = module.params.get("cluster") + name = module.params.get("name") + + args = ["info", "--uid=" + name, "--format=json"] + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + base_cmd = ["--cluster", cluster, "user"] + + cmd.extend(base_cmd + args) + + return cmd + + +class RGWUserCaps(IntFlag): + INVALID = 0x0 + READ = 0x1 + WRITE = 0x2 + ALL = READ | WRITE + + 
+def perm_string_to_flag(perm): + splitted = re.split(",|=| |\t", perm) + if ("read" in splitted and "write" in splitted) or "*" in splitted: + return RGWUserCaps.ALL + elif "read" in splitted: + return RGWUserCaps.READ + elif "write" in splitted: + return RGWUserCaps.WRITE + return RGWUserCaps.INVALID + + +def perm_flag_to_string(perm): + if perm == RGWUserCaps.ALL: + return "*" + elif perm == RGWUserCaps.READ: + return "read" + elif perm == RGWUserCaps.WRITE: + return "write" + else: + return "invalid" + + +def params_to_caps_output(current_caps, params, deletion=False): + out_caps = current_caps + for param in params: + splitted = param.split("=", maxsplit=1) + cap = splitted[0] + + new_perm = perm_string_to_flag(splitted[1]) + current = next((item for item in out_caps if item["type"] == cap), None) + + if not current: + if not deletion: + out_caps.append(dict(type=cap, perm=perm_flag_to_string(new_perm))) + continue + + current_perm = perm_string_to_flag(current["perm"]) + + new_perm = current_perm & ~new_perm if deletion else new_perm | current_perm + + if new_perm == 0x0: + out_caps.remove(current) + + current["perm"] = perm_flag_to_string(new_perm) + + return out_caps + + +def run_module(): + module_args = dict( + cluster=dict(type="str", required=False, default="ceph"), + name=dict(type="str", required=True), + state=dict( + type="str", required=False, choices=["present", "absent"], default="present" + ), + caps=dict(type="list", required=True), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + # Gather module parameters in variables + name = module.params.get("name") + state = module.params.get("state") + caps = module.params.get("caps") + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + diff = dict(before="", after="") + + # get user infos for diff + rc, cmd, out, err = exec_command( + module, get_user(module, 
container_image=container_image) + ) + + if rc == 0: + before_user = json.loads(out) + before_caps = sorted(before_user["caps"], key=lambda d: d["type"]) + diff["before"] = json.dumps(before_caps, indent=4) + + out = "" + err = "" + + if state == "present": + cmd = add_caps(module, container_image=container_image) + elif state == "absent": + cmd = remove_caps(module, container_image=container_image) + + if not module.check_mode: + rc, cmd, out, err = exec_command(module, cmd) + else: + out_caps = params_to_caps_output( + before_user["caps"], caps, deletion=(state == "absent") + ) + out = json.dumps(dict(caps=out_caps)) + + if rc == 0: + after_user = json.loads(out)["caps"] + after_user = sorted(after_user, key=lambda d: d["type"]) + diff["after"] = json.dumps(after_user, indent=4) + changed = diff["before"] != diff["after"] + else: + out = "User {} doesn't exist".format(name) + + exit_module( + module=module, + out=out, + rc=rc, + cmd=cmd, + err=err, + startd=startd, + changed=changed, + diff=diff, + ) + + +def main(): + run_module() + + +if __name__ == "__main__": + main() diff --git a/library/radosgw_realm.py b/library/radosgw_realm.py new file mode 100644 index 0000000..2d4ae51 --- /dev/null +++ b/library/radosgw_realm.py @@ -0,0 +1,339 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +import datetime +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: radosgw_realm + +short_description: Manage RADOS Gateway Realm + +version_added: "2.8" + +description: + - Manage RADOS Gateway realm(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the RADOS Gateway realm. + required: true + state: + description: + If 'present' is used, the module creates a realm if it doesn't + exist or update it if it already exists. + If 'absent' is used, the module will simply delete the realm. + If 'info' is used, the module will return all details about the + existing realm (json formatted). + required: false + choices: ['present', 'absent', 'info'] + default: present + default: + description: + - set the default flag on the realm. + required: false + default: false + url: + description: + - URL to the master RADOS Gateway zone. + required: false + access_key: + description: + - S3 access key of the master RADOS Gateway zone. + required: false + secret_key: + description: + - S3 secret key of the master RADOS Gateway zone. 
+ required: false + +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: create a RADOS Gateway default realm + radosgw_realm: + name: foo + default: true + +- name: get a RADOS Gateway realm information + radosgw_realm: + name: foo + state: info + +- name: delete a RADOS Gateway realm + radosgw_realm: + name: foo + state: absent +''' + +RETURN = '''# ''' + + +def container_exec(binary, container_image): + ''' + Build the docker CLI to run a command inside a container + ''' + + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + command_exec = [container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + binary, container_image] + return command_exec + + +def is_containerized(): + ''' + Check if we are running on a containerized cluster + ''' + + if 'CEPH_CONTAINER_IMAGE' in os.environ: + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + else: + container_image = None + + return container_image + + +def pre_generate_radosgw_cmd(container_image=None): + ''' + Generate radosgw-admin prefix comaand + ''' + if container_image: + cmd = container_exec('radosgw-admin', container_image) + else: + cmd = ['radosgw-admin'] + + return cmd + + +def generate_radosgw_cmd(cluster, args, container_image=None): + ''' + Generate 'radosgw' command line to execute + ''' + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + base_cmd = [ + '--cluster', + cluster, + 'realm' + ] + + cmd.extend(base_cmd + args) + + return cmd + + +def exec_commands(module, cmd): + ''' + Execute command(s) + ''' + + rc, out, err = module.run_command(cmd) + + return rc, cmd, out, err + + +def create_realm(module, container_image=None): + ''' + Create a new realm + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + default = module.params.get('default', False) + + args = ['create', '--rgw-realm=' + name] + + if 
default: + args.append('--default') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def get_realm(module, container_image=None): + ''' + Get existing realm + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['get', '--rgw-realm=' + name, '--format=json'] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def remove_realm(module, container_image=None): + ''' + Remove a realm + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + + args = ['delete', '--rgw-realm=' + name] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def pull_realm(module, container_image=None): + ''' + Pull a realm + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + url = module.params.get('url') + access_key = module.params.get('access_key') + secret_key = module.params.get('secret_key') + default = module.params.get('default', False) + + args = [ + 'pull', + '--rgw-realm=' + name, + '--url=' + url, + '--access-key=' + access_key, + '--secret=' + secret_key + ] + if default: + args.append('--default') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def exit_module(module, out, rc, cmd, err, startd, changed=False): + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + ) + module.exit_json(**result) + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, choices=['present', 'absent', 'info', 'pull'], default='present'), # 
noqa: E501 + default=dict(type='bool', required=False, default=False), + url=dict(type='str', required=False), + access_key=dict(type='str', required=False), + secret_key=dict(type='str', required=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + required_if=[['state', 'pull', ['url', 'access_key', 'secret_key']]], + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + if state == "present": + rc, cmd, out, err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501 + if rc != 0: + rc, cmd, out, err = exec_commands(module, create_realm(module, container_image=container_image)) # noqa: E501 + changed = True + + elif state == "absent": + rc, cmd, out, err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501 + if rc == 0: + rc, cmd, out, err = exec_commands(module, remove_realm(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "Realm {} doesn't exist".format(name) + + elif state == "info": + rc, cmd, out, err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501 + + elif state == "pull": + rc, cmd, out, err = exec_commands(module, pull_realm(module, container_image=container_image)) # noqa: E501 + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/library/radosgw_user.py b/library/radosgw_user.py new file mode 100644 index 0000000..d5b107c --- /dev/null +++ b/library/radosgw_user.py @@ -0,0 
+1,581 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +import datetime +import json +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: radosgw_user + +short_description: Manage RADOS Gateway User + +version_added: "2.8" + +description: + - Manage RADOS Gateway user(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the RADOS Gateway user (uid). + required: true + state: + description: + If 'present' is used, the module creates a user if it doesn't + exist or update it if it already exists. + If 'absent' is used, the module will simply delete the user. + If 'info' is used, the module will return all details about the + existing user (json formatted). + required: false + choices: ['present', 'absent', 'info'] + default: present + display_name: + description: + - set the display name of the user. + required: false + default: None + email: + description: + - set the email of the user. + required: false + default: None + access_key: + description: + - set the S3 access key of the user. 
+ required: false + default: None + secret_key: + description: + - set the S3 secret key of the user. + required: false + default: None + realm: + description: + - set the realm of the user. + required: false + default: None + zonegroup: + description: + - set the zonegroup of the user. + required: false + default: None + zone: + description: + - set the zone of the user. + required: false + default: None + system: + description: + - set the system flag on the user. + required: false + default: false + admin: + description: + - set the admin flag on the user. + required: false + default: false + +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: create a RADOS Gateway sytem user + radosgw_user: + name: foo + system: true + +- name: modify a RADOS Gateway user + radosgw_user: + name: foo + email: foo@bar.io + access_key: LbwDPp2BBo2Sdlts89Um + secret_key: FavL6ueQWcWuWn0YXyQ3TnJ3mT3Uj5SGVHCUXC5K + state: present + +- name: get a RADOS Gateway user information + radosgw_user: + name: foo + state: info + +- name: delete a RADOS Gateway user + radosgw_user: + name: foo + state: absent +''' + +RETURN = '''# ''' + + +def container_exec(binary, container_image): + ''' + Build the docker CLI to run a command inside a container + ''' + + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + command_exec = [container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + binary, container_image] + return command_exec + + +def is_containerized(): + ''' + Check if we are running on a containerized cluster + ''' + + if 'CEPH_CONTAINER_IMAGE' in os.environ: + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + else: + container_image = None + + return container_image + + +def pre_generate_radosgw_cmd(container_image=None): + ''' + Generate radosgw-admin prefix comaand + ''' + if container_image: + cmd = container_exec('radosgw-admin', 
container_image) + else: + cmd = ['radosgw-admin'] + + return cmd + + +def generate_radosgw_cmd(cluster, args, container_image=None): + ''' + Generate 'radosgw' command line to execute + ''' + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + base_cmd = [ + '--cluster', + cluster, + 'user' + ] + + cmd.extend(base_cmd + args) + + return cmd + + +def generate_caps_cmd(cluster, args, container_image=None): + ''' + Generate 'radosgw' command line to execute for caps + ''' + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + base_cmd = [ + '--cluster', + cluster, + 'caps' + ] + + cmd.extend(base_cmd + args) + + return cmd + + +def exec_commands(module, cmd): + ''' + Execute command(s) + ''' + + rc, out, err = module.run_command(cmd) + + return rc, cmd, out, err + + +def create_user(module, container_image=None): + ''' + Create a new user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + display_name = module.params.get('display_name') + if not display_name: + display_name = name + email = module.params.get('email', None) + access_key = module.params.get('access_key', None) + secret_key = module.params.get('secret_key', None) + realm = module.params.get('realm', None) + zonegroup = module.params.get('zonegroup', None) + zone = module.params.get('zone', None) + system = module.params.get('system', False) + admin = module.params.get('admin', False) + caps = module.params.get('caps') + + args = ['create', '--uid=' + name, '--display_name=' + display_name] + + if email: + args.extend(['--email=' + email]) + + if access_key: + args.extend(['--access-key=' + access_key]) + + if secret_key: + args.extend(['--secret-key=' + secret_key]) + + if realm: + args.extend(['--rgw-realm=' + realm]) + + if zonegroup: + args.extend(['--rgw-zonegroup=' + zonegroup]) + + if zone: + args.extend(['--rgw-zone=' + zone]) + + if system: + args.append('--system') + + if admin: + args.append('--admin') + + if caps: + 
caps_args = [f"{cap['type']}={cap['perm']}" for cap in caps] + args.extend(['--caps=' + ';'.join(caps_args)]) + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def caps_add(module, caps, container_image=None): + ''' + Create a new user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm', None) + zonegroup = module.params.get('zonegroup', None) + zone = module.params.get('zone', None) + + args = ['add', '--uid=' + name] + + if realm: + args.extend(['--rgw-realm=' + realm]) + + if zonegroup: + args.extend(['--rgw-zonegroup=' + zonegroup]) + + if zone: + args.extend(['--rgw-zone=' + zone]) + + caps_args = [f"{cap['type']}={cap['perm']}" for cap in caps] + args.extend(['--caps=' + ';'.join(caps_args)]) + + cmd = generate_caps_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def caps_rm(module, caps, container_image=None): + ''' + Create a new user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm', None) + zonegroup = module.params.get('zonegroup', None) + zone = module.params.get('zone', None) + + args = ['rm', '--uid=' + name] + + if realm: + args.extend(['--rgw-realm=' + realm]) + + if zonegroup: + args.extend(['--rgw-zonegroup=' + zonegroup]) + + if zone: + args.extend(['--rgw-zone=' + zone]) + + caps_args = [f"{cap['type']}={cap['perm']}" for cap in caps] + args.extend(['--caps=' + ';'.join(caps_args)]) + + cmd = generate_caps_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def modify_user(module, container_image=None): + ''' + Modify an existing user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + display_name = module.params.get('display_name') + email = module.params.get('email', None) + access_key = module.params.get('access_key', None) + 
secret_key = module.params.get('secret_key', None) + realm = module.params.get('realm', None) + zonegroup = module.params.get('zonegroup', None) + zone = module.params.get('zone', None) + system = module.params.get('system', False) + admin = module.params.get('admin', False) + + args = ['modify', '--uid=' + name] + + if display_name: + args.extend(['--display_name=' + display_name]) + + if email: + args.extend(['--email=' + email]) + + if access_key: + args.extend(['--access-key=' + access_key]) + + if secret_key: + args.extend(['--secret-key=' + secret_key]) + + if realm: + args.extend(['--rgw-realm=' + realm]) + + if zonegroup: + args.extend(['--rgw-zonegroup=' + zonegroup]) + + if zone: + args.extend(['--rgw-zone=' + zone]) + + if system: + args.append('--system') + + if admin: + args.append('--admin') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def get_user(module, container_image=None): + ''' + Get existing user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm', None) + zonegroup = module.params.get('zonegroup', None) + zone = module.params.get('zone', None) + + args = ['info', '--uid=' + name, '--format=json'] + + if realm: + args.extend(['--rgw-realm=' + realm]) + + if zonegroup: + args.extend(['--rgw-zonegroup=' + zonegroup]) + + if zone: + args.extend(['--rgw-zone=' + zone]) + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def remove_user(module, container_image=None): + ''' + Remove a user + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm', None) + zonegroup = module.params.get('zonegroup', None) + zone = module.params.get('zone', None) + + args = ['rm', '--uid=' + name] + + if realm: + args.extend(['--rgw-realm=' + realm]) + + if zonegroup: + args.extend(['--rgw-zonegroup=' + 
zonegroup]) + + if zone: + args.extend(['--rgw-zone=' + zone]) + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def exit_module(module, out, rc, cmd, err, startd, changed=False): + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + ) + module.exit_json(**result) + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501 + display_name=dict(type='str', required=False), + email=dict(type='str', required=False), + access_key=dict(type='str', required=False, no_log=True), + secret_key=dict(type='str', required=False, no_log=True), + realm=dict(type='str', required=False), + zonegroup=dict(type='str', required=False), + zone=dict(type='str', required=False), + system=dict(type='bool', required=False, default=False), + admin=dict(type='bool', required=False, default=False), + caps=dict(type='list', required=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + display_name = module.params.get('display_name') + if not display_name: + display_name = name + email = module.params.get('email') + access_key = module.params.get('access_key') + secret_key = module.params.get('secret_key') + system = module.params.get('system') + admin = module.params.get('admin') + caps = module.params.get('caps') + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + rc, cmd, out, err = 
exec_commands(module, get_user(module, container_image=container_image)) # noqa: E501 + if state == "present": + if rc == 0: + user = json.loads(out) + current = { + 'display_name': user['display_name'], + 'system': user.get('system', False), + 'admin': user.get('admin', False), + } + asked = { + 'display_name': display_name, + 'system': system, + 'admin': admin, + } + if email: + current['email'] = user['email'] + asked['email'] = email + if caps: + current['caps'] = user['caps'] + asked['caps'] = caps + + if access_key and secret_key: + asked['access_key'] = access_key + asked['secret_key'] = secret_key + for key in user['keys']: + if key['access_key'] == access_key and key['secret_key'] == secret_key: # noqa: E501 + del asked['access_key'] + del asked['secret_key'] + break + + changed = current != asked + if changed and not module.check_mode: + rc, cmd, out, err = exec_commands(module, modify_user(module, container_image=container_image)) + + if caps: + missing_caps = [cap for cap in asked['caps'] if cap not in current['caps']] + extra_caps = [cap for cap in current['caps'] if cap not in asked['caps']] + if extra_caps: + rc, cmd, out, err = exec_commands(module, caps_rm(module, extra_caps, container_image=container_image)) + if missing_caps: + rc, cmd, out, err = exec_commands(module, caps_add(module, missing_caps, container_image=container_image)) + else: + changed = True + if not module.check_mode: + rc, cmd, out, err = exec_commands(module, create_user(module, container_image=container_image)) # noqa: E501 + else: + rc = 0 + + elif state == "absent": + if rc == 0: + if not module.check_mode: + rc, cmd, out, err = exec_commands(module, remove_user(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "User {} doesn't exist".format(name) + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() 
diff --git a/library/radosgw_zone.py b/library/radosgw_zone.py new file mode 100644 index 0000000..8ddab13 --- /dev/null +++ b/library/radosgw_zone.py @@ -0,0 +1,543 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import fatal +except ImportError: + from module_utils.ca_common import fatal +import datetime +import json +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: radosgw_zone + +short_description: Manage RADOS Gateway Zone + +version_added: "2.8" + +description: + - Manage RADOS Gateway zone(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the RADOS Gateway zone. + required: true + state: + description: + If 'present' is used, the module creates a zone if it doesn't + exist or update it if it already exists. + If 'absent' is used, the module will simply delete the zone. + If 'info' is used, the module will return all details about the + existing zone (json formatted). + required: false + choices: ['present', 'absent', 'info'] + default: present + realm: + description: + - name of the RADOS Gateway realm. 
+ required: true + zonegroup: + description: + - name of the RADOS Gateway zonegroup. + required: true + endpoints: + description: + - endpoints of the RADOS Gateway zone. + required: false + default: [] + access_key: + description: + - set the S3 access key of the user. + required: false + default: None + secret_key: + description: + - set the S3 secret key of the user. + required: false + default: None + default: + description: + - set the default flag on the zone. + required: false + default: false + master: + description: + - set the master flag on the zone. + required: false + default: false + +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: create a RADOS Gateway default zone + radosgw_zone: + name: z1 + realm: foo + zonegroup: bar + endpoints: + - http://192.168.1.10:8080 + - http://192.168.1.11:8080 + default: true + +- name: get a RADOS Gateway zone information + radosgw_zone: + name: z1 + state: info + +- name: delete a RADOS Gateway zone + radosgw_zone: + name: z1 + state: absent +''' + +RETURN = '''# ''' + + +def container_exec(binary, container_image, container_args=[]): + ''' + Build the docker CLI to run a command inside a container + ''' + + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + + command_exec = [container_binary, 'run', '--rm', '--net=host'] + command_exec.extend(container_args) + command_exec.extend([ + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + binary, + container_image, + ]) + + return command_exec + + +def is_containerized(): + ''' + Check if we are running on a containerized cluster + ''' + + if 'CEPH_CONTAINER_IMAGE' in os.environ: + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + else: + container_image = None + + return container_image + + +def pre_generate_radosgw_cmd(container_image=None, container_args=[]): + ''' + Generate radosgw-admin prefix comaand + ''' + if container_image: + cmd = 
container_exec('radosgw-admin', container_image, container_args) + else: + cmd = ['radosgw-admin'] + + return cmd + + +def generate_radosgw_cmd(cluster, args, container_image=None, container_args=[]): + ''' + Generate 'radosgw' command line to execute + ''' + + cmd = pre_generate_radosgw_cmd(container_image=container_image, container_args=container_args) # noqa: E501 + + base_cmd = [ + '--cluster', + cluster, + 'zone' + ] + + cmd.extend(base_cmd + args) + + return cmd + + +def exec_commands(module, cmd): + ''' + Execute command(s) + ''' + + rc, out, err = module.run_command(cmd) + + return rc, cmd, out, err + + +def create_zone(module, container_image=None): + ''' + Create a new zone + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + zonegroup = module.params.get('zonegroup') + endpoints = module.params.get('endpoints') + access_key = module.params.get('access_key') + secret_key = module.params.get('secret_key') + default = module.params.get('default') + master = module.params.get('master') + + args = [ + 'create', + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + zonegroup, + '--rgw-zone=' + name + ] + + if endpoints: + args.extend(['--endpoints=' + ','.join(endpoints)]) + + if access_key: + args.extend(['--access-key=' + access_key]) + + if secret_key: + args.extend(['--secret-key=' + secret_key]) + + if default: + args.append('--default') + + if master: + args.append('--master') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def modify_zone(module, container_image=None): + ''' + Modify a new zone + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + zonegroup = module.params.get('zonegroup') + endpoints = module.params.get('endpoints') + access_key = module.params.get('access_key') + secret_key = module.params.get('secret_key') + default = 
module.params.get('default') + master = module.params.get('master') + + args = [ + 'modify', + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + zonegroup, + '--rgw-zone=' + name + ] + + if endpoints: + args.extend(['--endpoints=' + ','.join(endpoints)]) + + if access_key: + args.extend(['--access-key=' + access_key]) + + if secret_key: + args.extend(['--secret-key=' + secret_key]) + + if default: + args.append('--default') + + if master: + args.append('--master') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def get_zone(module, container_image=None): + ''' + Get existing zone + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + zonegroup = module.params.get('zonegroup') + + args = [ + 'get', + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + zonegroup, + '--rgw-zone=' + name, + '--format=json' + ] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def get_zonegroup(module, container_image=None): + ''' + Get existing zonegroup + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + zonegroup = module.params.get('zonegroup') + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + args = [ + '--cluster', + cluster, + 'zonegroup', + 'get', + '--rgw-zone=' + name, + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + zonegroup, + '--format=json' + ] + + cmd.extend(args) + + return cmd + + +def get_realm(module, container_image=None): + ''' + Get existing realm + ''' + + cluster = module.params.get('cluster') + realm = module.params.get('realm') + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + args = [ + '--cluster', + cluster, + 'realm', + 'get', + '--rgw-realm=' + realm, + '--format=json' + ] + + cmd.extend(args) + + return cmd + + +def remove_zone(module, 
container_image=None): + ''' + Remove a zone + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + zonegroup = module.params.get('zonegroup') + + args = [ + 'delete', + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + zonegroup, + '--rgw-zone=' + name + ] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def set_zone(module, container_image=None): + ''' + Set a zone + ''' + + cluster = module.params.get('cluster') + realm = module.params.get('realm') + zone_doc = module.params.get('zone_doc') + + # store the zone_doc in a file + filename = module.tmpdir + 'zone_doc.json' + with open(filename, 'w') as f: + json.dump(zone_doc, f) + + container_args = [ + '-v', filename + ':' + filename + ':ro' + ] + args = [ + 'set', + '--rgw-realm=' + realm, + '--infile=' + filename, + ] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image, + container_args=container_args) + + return cmd + + +def exit_module(module, out, rc, cmd, err, startd, changed=False): + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + ) + module.exit_json(**result) + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, choices=['present', 'absent', 'info', 'set'], default='present'), # noqa: E501 + realm=dict(type='str', require=True), + zonegroup=dict(type='str', require=True), + endpoints=dict(type='list', require=False, default=[]), + access_key=dict(type='str', required=False, no_log=True), + secret_key=dict(type='str', required=False, no_log=True), + default=dict(type='bool', required=False, default=False), + 
master=dict(type='bool', required=False, default=False), + zone_doc=dict(type='dict', required=False, default={}) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + endpoints = module.params.get('endpoints') + access_key = module.params.get('access_key') + secret_key = module.params.get('secret_key') + + if module.check_mode: + module.exit_json( + changed=False, + stdout='', + stderr='', + rc=0, + start='', + end='', + delta='', + ) + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + rc, cmd, out, err = exec_commands(module, get_zone(module, container_image=container_image)) # noqa: E501 + + if state == "set": + zone = json.loads(out) if rc == 0 else {} + zone_doc = module.params.get('zone_doc') + if not zone_doc: + fatal("zone_doc is required when state is set", module) + + changed = zone_doc != zone + if changed: + rc, cmd, out, err = exec_commands(module, set_zone(module, container_image=container_image)) + + if state == "present": + if rc == 0: + zone = json.loads(out) + _rc, _cmd, _out, _err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501 + if _rc != 0: + fatal(_err, module) + realm = json.loads(_out) + _rc, _cmd, _out, _err = exec_commands(module, get_zonegroup(module, container_image=container_image)) # noqa: E501 + if _rc != 0: + fatal(_err, module) + zonegroup = json.loads(_out) + if not access_key: + access_key = '' + if not secret_key: + secret_key = '' + current = { + 'endpoints': next(zone['endpoints'] for zone in zonegroup['zones'] if zone['name'] == name), # noqa: E501 + 'access_key': zone['system_key']['access_key'], + 'secret_key': zone['system_key']['secret_key'], + 'realm_id': zone['realm_id'] + } + asked = { + 'endpoints': endpoints, + 'access_key': 
access_key, + 'secret_key': secret_key, + 'realm_id': realm['id'] + } + if current != asked: + rc, cmd, out, err = exec_commands(module, modify_zone(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc, cmd, out, err = exec_commands(module, create_zone(module, container_image=container_image)) # noqa: E501 + changed = True + + elif state == "absent": + if rc == 0: + rc, cmd, out, err = exec_commands(module, remove_zone(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "Zone {} doesn't exist".format(name) + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/library/radosgw_zonegroup.py b/library/radosgw_zonegroup.py new file mode 100644 index 0000000..533d58f --- /dev/null +++ b/library/radosgw_zonegroup.py @@ -0,0 +1,397 @@ +# Copyright 2020, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +from ansible.module_utils.basic import AnsibleModule +try: + from ansible.module_utils.ca_common import fatal +except ImportError: + from module_utils.ca_common import fatal +import datetime +import json +import os + + +ANSIBLE_METADATA = { + 'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community' +} + +DOCUMENTATION = ''' +--- +module: radosgw_zonegroup + +short_description: Manage RADOS Gateway Zonegroup + +version_added: "2.8" + +description: + - Manage RADOS Gateway zonegroup(s) creation, deletion and updates. +options: + cluster: + description: + - The ceph cluster name. + required: false + default: ceph + name: + description: + - name of the RADOS Gateway zonegroup. + required: true + state: + description: + If 'present' is used, the module creates a zonegroup if it doesn't + exist or update it if it already exists. + If 'absent' is used, the module will simply delete the zonegroup. + If 'info' is used, the module will return all details about the + existing zonegroup (json formatted). + required: false + choices: ['present', 'absent', 'info'] + default: present + realm: + description: + - name of the RADOS Gateway realm. + required: true + endpoints: + description: + - endpoints of the RADOS Gateway zonegroup. + required: false + default: [] + default: + description: + - set the default flag on the zonegroup. + required: false + default: false + master: + description: + - set the master flag on the zonegroup. 
+ required: false + default: false + +author: + - Dimitri Savineau +''' + +EXAMPLES = ''' +- name: create a RADOS Gateway default zonegroup + radosgw_zonegroup: + name: foo + realm: bar + endpoints: + - http://192.168.1.10:8080 + - http://192.168.1.11:8080 + default: true + +- name: get a RADOS Gateway zonegroup information + radosgw_zonegroup: + name: foo + realm: bar + state: info + +- name: delete a RADOS Gateway zonegroup + radosgw_zonegroup: + name: foo + realm: bar + state: absent +''' + +RETURN = '''# ''' + + +def container_exec(binary, container_image): + ''' + Build the docker CLI to run a command inside a container + ''' + + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + command_exec = [container_binary, + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + binary, container_image] + return command_exec + + +def is_containerized(): + ''' + Check if we are running on a containerized cluster + ''' + + if 'CEPH_CONTAINER_IMAGE' in os.environ: + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + else: + container_image = None + + return container_image + + +def pre_generate_radosgw_cmd(container_image=None): + ''' + Generate radosgw-admin prefix comaand + ''' + if container_image: + cmd = container_exec('radosgw-admin', container_image) + else: + cmd = ['radosgw-admin'] + + return cmd + + +def generate_radosgw_cmd(cluster, args, container_image=None): + ''' + Generate 'radosgw' command line to execute + ''' + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + base_cmd = [ + '--cluster', + cluster, + 'zonegroup' + ] + + cmd.extend(base_cmd + args) + + return cmd + + +def exec_commands(module, cmd): + ''' + Execute command(s) + ''' + + rc, out, err = module.run_command(cmd) + + return rc, cmd, out, err + + +def create_zonegroup(module, container_image=None): + ''' + Create a new zonegroup + ''' + + cluster = 
module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + endpoints = module.params.get('endpoints') + default = module.params.get('default') + master = module.params.get('master') + + args = ['create', '--rgw-realm=' + realm, '--rgw-zonegroup=' + name] + + if endpoints: + args.extend(['--endpoints=' + ','.join(endpoints)]) + + if default: + args.append('--default') + + if master: + args.append('--master') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def modify_zonegroup(module, container_image=None): + ''' + Modify a new zonegroup + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + endpoints = module.params.get('endpoints') + default = module.params.get('default') + master = module.params.get('master') + + args = ['modify', '--rgw-realm=' + realm, '--rgw-zonegroup=' + name] + + if endpoints: + args.extend(['--endpoints=' + ','.join(endpoints)]) + + if default: + args.append('--default') + + if master: + args.append('--master') + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def get_zonegroup(module, container_image=None): + ''' + Get existing zonegroup + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + + args = [ + 'get', + '--rgw-realm=' + realm, + '--rgw-zonegroup=' + name, + '--format=json' + ] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def get_realm(module, container_image=None): + ''' + Get existing realm + ''' + + cluster = module.params.get('cluster') + realm = module.params.get('realm') + + cmd = pre_generate_radosgw_cmd(container_image=container_image) + + args = [ + '--cluster', + cluster, + 'realm', + 'get', + '--rgw-realm=' + realm, + '--format=json' + ] + + 
cmd.extend(args) + + return cmd + + +def remove_zonegroup(module, container_image=None): + ''' + Remove a zonegroup + ''' + + cluster = module.params.get('cluster') + name = module.params.get('name') + realm = module.params.get('realm') + + args = ['delete', '--rgw-realm=' + realm, '--rgw-zonegroup=' + name] + + cmd = generate_radosgw_cmd(cluster=cluster, + args=args, + container_image=container_image) + + return cmd + + +def exit_module(module, out, rc, cmd, err, startd, changed=False): + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + ) + module.exit_json(**result) + + +def run_module(): + module_args = dict( + cluster=dict(type='str', required=False, default='ceph'), + name=dict(type='str', required=True), + state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501 + realm=dict(type='str', require=True), + endpoints=dict(type='list', require=False, default=[]), + default=dict(type='bool', required=False, default=False), + master=dict(type='bool', required=False, default=False), + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True, + ) + + # Gather module parameters in variables + name = module.params.get('name') + state = module.params.get('state') + endpoints = module.params.get('endpoints') + master = module.params.get('master') + + startd = datetime.datetime.now() + changed = False + + # will return either the image name or None + container_image = is_containerized() + + rc, cmd, out, err = exec_commands(module, get_zonegroup(module, container_image=container_image)) # noqa: E501 + if state == "present": + if rc == 0: + zonegroup = json.loads(out) + _rc, _cmd, _out, _err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501 + if _rc != 0: + fatal(_err, 
module) + realm = json.loads(_out) + current = { + 'endpoints': zonegroup['endpoints'], + 'master': zonegroup.get('is_master', False), + 'realm_id': zonegroup['realm_id'] + } + asked = { + 'endpoints': endpoints, + 'master': master, + 'realm_id': realm['id'] + } + changed = current != asked + if changed and not module.check_mode: + rc, cmd, out, err = exec_commands(module, modify_zonegroup(module, container_image=container_image)) # noqa: E501 + else: + if not module.check_mode: + rc, cmd, out, err = exec_commands(module, create_zonegroup(module, container_image=container_image)) # noqa: E501 + changed = True + + elif state == "absent": + if rc == 0: + if not module.check_mode: + rc, cmd, out, err = exec_commands(module, remove_zonegroup(module, container_image=container_image)) # noqa: E501 + changed = True + else: + rc = 0 + out = "Zonegroup {} doesn't exist".format(name) + + exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501 + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/module_utils/__init__.py b/module_utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/module_utils/ca_common.py b/module_utils/ca_common.py new file mode 100644 index 0000000..cfbf55a --- /dev/null +++ b/module_utils/ca_common.py @@ -0,0 +1,151 @@ +import os +import datetime +from typing import List +from ansible.module_utils.basic import AnsibleModule + + +def generate_cmd(cmd='ceph', + sub_cmd=None, + args=None, + user_key=None, + cluster='ceph', + user='client.admin', + container_image=None, + interactive=False): + ''' + Generate 'ceph' command line to execute + ''' + + if user_key is None: + user_key = '/etc/ceph/{}.{}.keyring'.format(cluster, user) + + cmd = pre_generate_cmd(cmd, container_image=container_image, interactive=interactive) # noqa: E501 + + base_cmd = [ + '-n', + user, + '-k', + user_key, + '--cluster', + cluster + ] + + if sub_cmd is not None: + 
base_cmd.extend(sub_cmd) + + cmd.extend(base_cmd) if args is None else cmd.extend(base_cmd + args) + + return cmd + + +def container_exec(binary, container_image, interactive=False): + ''' + Build the docker CLI to run a command inside a container + ''' + + container_binary = os.getenv('CEPH_CONTAINER_BINARY') + command_exec = [container_binary, 'run'] + + if interactive: + command_exec.extend(['--interactive']) + + command_exec.extend(['--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + binary, container_image]) + return command_exec + + +def is_containerized(): + ''' + Check if we are running on a containerized cluster + ''' + + if 'CEPH_CONTAINER_IMAGE' in os.environ: + container_image = os.getenv('CEPH_CONTAINER_IMAGE') + else: + container_image = None + + return container_image + + +def pre_generate_cmd(cmd, container_image=None, interactive=False): + ''' + Generate ceph prefix command + ''' + if container_image: + cmd = container_exec(cmd, container_image, interactive=interactive) + else: + cmd = [cmd] + + return cmd + + +def exec_command(module, cmd, stdin=None, check_rc=False): + ''' + Execute command(s) + ''' + + binary_data = False + if stdin: + binary_data = True + rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data, check_rc=check_rc) # noqa: E501 + + return rc, cmd, out, err + + +def build_base_cmd(module: "AnsibleModule") -> List[str]: + cmd = ['cephadm'] + docker = module.params.get('docker') + image = module.params.get('image') + fsid = module.params.get('fsid') + + if docker: + cmd.append('--docker') + if image: + cmd.extend(['--image', image]) + + cmd.append('shell') + + if fsid: + cmd.extend(['--fsid', fsid]) + + return cmd + + +def build_base_cmd_orch(module: "AnsibleModule") -> List[str]: + cmd = build_base_cmd(module) + cmd.extend(['ceph', 'orch']) + + return cmd + + +def exit_module(module, out, rc, cmd, err, 
startd, changed=False, diff=dict(before="", after="")): # noqa: E501 + endd = datetime.datetime.now() + delta = endd - startd + + result = dict( + cmd=cmd, + start=str(startd), + end=str(endd), + delta=str(delta), + rc=rc, + stdout=out.rstrip("\r\n"), + stderr=err.rstrip("\r\n"), + changed=changed, + diff=diff + ) + module.exit_json(**result) + + +def fatal(message, module): + ''' + Report a fatal error and exit + ''' + + if module: + module.fail_json(msg=message, rc=1) + else: + raise Exception(message) diff --git a/plugins/callback/__pycache__/installer_checkpoint.cpython-310.pyc b/plugins/callback/__pycache__/installer_checkpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2d89bdba98170ef2cc031297b88a72ddd208232 GIT binary patch literal 3938 zcmb_f-E!N;72d@kL5fmoS+RZ+Iq;8?Fs`IdJMBfr?O3K5M-z#1Nu{1}rp16*kb(sQ z^a8Xa2%L6Cp60647s!RwyIiGxgkJXz_O@4f1!u;|*##&QtmLbbgWbiRJ?HG%{r2pK zV0N~I;8*$gzudz`g#OE4CVvXNJb)+MhJv6rBA7@X?&CJbye)fDUvA5yt$0dbZEF}k zLPRCnOGGqBK9$=AQXu+E)YeI{g{r0ai!DENJDy|MmgjXW`^Xr0!=CFKQD6+H>qmxV zgu{N{qQ{0E^aq|3xq)wlkrfR?Bj_5g4{gu0cz0=VXoXI=QTiSxh^md;kpxwJ`)M5!{xE)K-YxR*BNqh}tfEjk=gv!bbklTwH!@9FCna>e?zgaBrfGXt7@Fp{=+$pFZD(+}Zl#;IneJJ*Io`z0 z%jUH)IHpCgevQvWf?^)Wi}{t=82h!keEj36tiV%)CyXGV9-!?Xcq9>t$VB-@;s_Hp zU`y0|6L|j&DT{iR%!&GEq$28TWM0(Q$%3fgAvZ+5NR~vsOjbnwE_qMXtK_Dr@i%iL znY=$z5_uc_adxCeC{bWJm^`wNt!=$el$rDkLh`B)66kXn!|u3;TDM(MXRjh6%vhn zkQ5eBq9?_3B|&F;qMu{(;ol_it&@+AB(SKG>X8gJpO;2H2Ca1V_mT?t4{C`t_W8O#mx4CR9_b zO{=Zu)F3Wn7a;dAgIxUagaHV0eG-q;rf%d+ZI59Y-Z2+8&dIsnC!wGc`^z|`Xr;W_#jMO;Z-|-WC2RzZdB$ z6ihMc&{q$oS}FRDFdFxEWPD@B;d3 zX2&-k(*W+|F#O-+%kNRw^5Axl?-j+ppI+;>te&UBAcO(3{*l zIsi8_p-mmfHv`{0t`?cxx4vSs=lD#shhY@-;kG7j$WJfRI+pK45~-xgnKK`gI9_D2 z!o`qoK>Jq2X0rUvL#UQ0zhi*{0IdpH#ZB`FunI8$r?7cn;6QUxP=-+Uts}VFj14^rP~l^p^!CzB9#FE}L_X zyKv6TDV5+ZkDClu((!1cE>9v$aJF&6jKL=ov9M3d} z({+7^a12%p^d46!ys#+8CEet2xCS=%clVp-{^x8aZPlC2{U$4=FHg3bjjAFLMQ?G9 zvT!3dbQscKK-bjXi@p=+4_Ba&WWL3^3R@t-@@x7JP%7GMMT%FZmOsUo2pecNuvXs! 
z#G9;S0w>{A8`@>ld^WVaOj0mS64=mPslj1?xK;by{6oFj+SzZg*^8coFCW(@UK?9` zb+&XNZPe@AE%V7|I|udMoz{WiwDc1m6%UQgW<1$;JkK;IBAwwwIMfBl=1f@daUCgG zk>G)US<>Z7Psnp5o+F4OX#zzf906_;M5JZF$2lO1$O!^OU59?4_9NNXvk`JOLXH4U ziL?Z)5g4fg%n%qPa9IUD0krtO0gG!7;Y-RfqC>|ZZnx`D2WSuEspth{pf^CF@LA>) z!-0n%<!AO4ctZZBNEJB6C8;9afEA`()~FU~ z9qM!{OWF6!M)v)(VP~I+lz%O;LMEns>FIBI!8bw}ns6-gm`Gvz{S~G%8doyq5&6_uKq{Zt(kbHmsBDdUu&_77QNLC%Zyb53yF-S(&5JQbqS16h4CD=4> zx?+~iDmIT>u2^MTFKR=5h_Q4a#`=MI#H`7X?ToDh$qvRofFh34yQ6}XTgdZthRf#{ zxXg9Bm|t1S3A8NIMVu34nxmzaLV?4#=!{q>=6N|M3bZ71@`^-@i*!x^oX>NF>?$sE z5--JE5lV4nIT_j(c)*@9UHC<)hO#pw5-vfOGKPlaZ_*1d82}vOs8#dN7 z+mT2ylx$&4vXwE*HpT+FSY<~VouG(Ld57#|?4X;AaX{{7oU$iP$=)H()#99>BeT9e`=TeSn>S`vDICb^$&JI1Jbg_z>Vhz#hPd0Y?CP z0S^H_5;w(LisoymXi1xW=yDjLqIJHy!fDGqSz$HJog5At7=doJr4gNDM4y+UWnvH^ zC+b1WaGIL7?D84t41ffcgwX1G>oqQMf(P6$mT5NfVvJxAJF%g@AWOGU>#u}3xi za9M^tkkSsLcjnkBef?o| z9L(|4Kd#QuQ-kD&=oLX}<2w#yZk$tPQK~tDR3{Ovb+#Y-OTTv&kRO?@8tj z=G3lM%xUmkZz_RZm!f)6mN+1`APRfcEa5$BL@B#RO(84oQG=Im>`@~uN=i@DvEhYm zA&0Sum9D1snS72HWF_3R5;|n_B9~p@@|=+2@g99-Ica%8Vl* zf|%u5{$^3cGV~B^+9rdc3#GehU6s`;=VUkq{o~phPLILb&`A-F7SP^N%yYLEMDcp5 z8;f~fKRRIL;xZ>)m*NF3C&X?Q@{Q~;?j;`AGcDmpCSP5jQ!|(97+|em&yc({Q^x=% z@OlR3?YTMzSg6-Cq%PO@00z^@&{3xYbMX?LR1K$eux7%*oT8dNHL@S5V;@t^szleY zYL?c>)?LR2deM_$a2IuhD-G2M041Zf*4QYjy-w66!zfDwH8QHE+Q?jQHE(McTk5M} z)vdUZF<94Y3XGXf=*C`6Yn-&R|ytnV%zVjx1`wo5h_MM1A(GccZGh$xE|8b8C zG{j#JDoWD-?p6}W<+wZqouwmAH5}EV%?u~d3p@=>G6(xSOE27_VF*G_SR&mhXcl^K zOBu}b0xJsnTd7D3ZiNE}A0U~ZWNjpaQy*ctP#_#288~)?Npym{NERYlH_3WPMgp{m zQ!y8~n~FKl3yLkXBFSO_Le6YXA|64pEpUPWe#fWza}wgSSw1gwioM!oQN#sKR@}PF zC({sdVGjt0I2NuBlH$v0J_iD3wGmLX#+)qU+`@{?ONt9d!NMG_6a>Yp)*2F3zopoV zTs8~yp?EY_Z7CG(7Q&K&NhEMNbcogyZXqM^e|VfVd>hOhs|{$NA-ZkWh)k;VF&x~h z=!I=x$hF}J>>NVg!HTbAqcdLd1vf%xcdX_}%Z}sq)FVyX{y)^HI)>w_+>0=VxH^U* zl^Rp2=Ycw`Qb$zkX_a~fsBfrLO~NsiI$D(gW2)-*BcMjrw$KWwC)KtIpq@~v5tRyZ zU>@EA>JgPXtWuAv)C)k3wd`D{&S;Z&lS1tscU^yN3*SvV>kNJR{vQr(MBge$FMT;z zjwZJvsjbebUtImxjM@+F*pR1n?cGo2Kb|kU`zjva{p7vmwuj#I&>MXxo_I!{bw<{# 
zm7Z9+=j5ZMa?klScO`i6x6=CAt-({};Hk%b<>1*hd*wjj;pBtK_4ab;=+gs7D+7mr zckVam9@)wRryg6&1Lq&#EDu~+n|#)H>{0lS!(R+P{;+)f@|P>+1{Or!tj*-W08yBxVJ#(!R2tD*Z@NNg9n}KL0bagxQ&SvPH3VpOppL*;n(_=5q zRCj{<-fZf;OnuvifQ%1;W{2`cyen>Ez1uqmr z^VQ4Bw8P-q{TIz?^8n$XnsU@!It(6-qed!mDGf<$6S0)S3@@)>0iG?u{Xh=q4EQDE zX3t~oT8|F_zv39v-ZuCWJuZ9*?l@a_>8G>&Vou<*#Pde%I7(RTH#khZF+N60)ELN^ znw(I#3RMoR>Xj9Bg&qz+ z7`{94d|wdSs)qM_6xl}}B?J!60#r{n-ka*K78%>|zc}avmmA5TV-6 zvRN?$z&8fR>c!Eq2{xTzX2z#dinmIceQ!EZQ%sE}6H1_3n@S`u&9HM<#%B|g<1@4B zTLnG`)1pma8$5}?iIdB)yC)0uw_krS$>uV0hR^4L-Vf~@!8h9&f!g|fz9*m%l1@_ov@ z-`e#8L8i{2DyHtah}Z(`M9WJT>{qgXi_|=jv@~7&sG`&Q;F8FgYA<%MKaEEQcs@xZ zb>~AQ0Q=GqgqmI6`>GF6MRT&JrtMz6n)m3{Ja0ETnmbAxjYhDnn4yJ2#UfTrR8$<2jEjUpF$>}i#SBGgS4U%2ah=GnLAEc*tx zJcqYEu}x2GUE1=Tcr;q}ys?^i?)I%ts+N|DIPoMIf~sjie+rE0Pl2(F{uGGGDWKT( zYD}yVkCIF^dR2$FNs*j8T3F>cr)~Q4I0H2D$|Xf0+c8rV^{vT7S-!I&s^=SY>>Jeo X$~I=Ay4L1jA@YAUX>n8gRZ;&1Nk9Y3 literal 0 HcmV?d00001 diff --git a/plugins/callback/installer_checkpoint.py b/plugins/callback/installer_checkpoint.py new file mode 100644 index 0000000..de9234d --- /dev/null +++ b/plugins/callback/installer_checkpoint.py @@ -0,0 +1,154 @@ +"""Ansible callback plugin to print a summary completion status of installation +phases. 
+""" +from datetime import datetime +from ansible.plugins.callback import CallbackBase +from ansible import constants as C + + +class CallbackModule(CallbackBase): + """This callback summarizes installation phase status.""" + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'installer_checkpoint' + CALLBACK_NEEDS_WHITELIST = False + + def __init__(self): + super(CallbackModule, self).__init__() + + def v2_playbook_on_stats(self, stats): + + # Set the order of the installer phases + installer_phases = [ + 'installer_phase_ceph_mon', + 'installer_phase_ceph_mgr', + 'installer_phase_ceph_osd', + 'installer_phase_ceph_mds', + 'installer_phase_ceph_rgw', + 'installer_phase_ceph_nfs', + 'installer_phase_ceph_rbdmirror', + 'installer_phase_ceph_client', + 'installer_phase_ceph_rgw_loadbalancer', + 'installer_phase_ceph_dashboard', + 'installer_phase_ceph_grafana', + 'installer_phase_ceph_node_exporter', + 'installer_phase_ceph_crash', + 'installer_phase_ceph_exporter', + ] + + # Define the attributes of the installer phases + phase_attributes = { + 'installer_phase_ceph_mon': { + 'title': 'Install Ceph Monitor', + 'playbook': 'roles/ceph-mon/tasks/main.yml' + }, + 'installer_phase_ceph_mgr': { + 'title': 'Install Ceph Manager', + 'playbook': 'roles/ceph-mgr/tasks/main.yml' + }, + 'installer_phase_ceph_osd': { + 'title': 'Install Ceph OSD', + 'playbook': 'roles/ceph-osd/tasks/main.yml' + }, + 'installer_phase_ceph_mds': { + 'title': 'Install Ceph MDS', + 'playbook': 'roles/ceph-mds/tasks/main.yml' + }, + 'installer_phase_ceph_rgw': { + 'title': 'Install Ceph RGW', + 'playbook': 'roles/ceph-rgw/tasks/main.yml' + }, + 'installer_phase_ceph_nfs': { + 'title': 'Install Ceph NFS', + 'playbook': 'roles/ceph-nfs/tasks/main.yml' + }, + 'installer_phase_ceph_rbdmirror': { + 'title': 'Install Ceph RBD Mirror', + 'playbook': 'roles/ceph-rbd-mirror/tasks/main.yml' + }, + 'installer_phase_ceph_client': { + 'title': 'Install Ceph Client', + 'playbook': 
'roles/ceph-client/tasks/main.yml' + }, + 'installer_phase_ceph_rgw_loadbalancer': { + 'title': 'Install Ceph RGW LoadBalancer', + 'playbook': 'roles/ceph-rgw-loadbalancer/tasks/main.yml' + }, + 'installer_phase_ceph_dashboard': { + 'title': 'Install Ceph Dashboard', + 'playbook': 'roles/ceph-dashboard/tasks/main.yml' + }, + 'installer_phase_ceph_grafana': { + 'title': 'Install Ceph Grafana', + 'playbook': 'roles/ceph-grafana/tasks/main.yml' + }, + 'installer_phase_ceph_node_exporter': { + 'title': 'Install Ceph Node Exporter', + 'playbook': 'roles/ceph-node-exporter/tasks/main.yml' + }, + 'installer_phase_ceph_crash': { + 'title': 'Install Ceph Crash', + 'playbook': 'roles/ceph-crash/tasks/main.yml' + }, + 'installer_phase_ceph_exporter': { + 'title': 'Install Ceph Exporter', + 'playbook': 'roles/ceph-exporter/tasks/main.yml' + }, + } + + # Find the longest phase title + max_column = 0 + for phase in phase_attributes: + max_column = max(max_column, len(phase_attributes[phase]['title'])) + + if '_run' in stats.custom: + self._display.banner('INSTALLER STATUS') + for phase in installer_phases: + phase_title = phase_attributes[phase]['title'] + padding = max_column - len(phase_title) + 2 + if phase in stats.custom['_run']: + phase_status = stats.custom['_run'][phase]['status'] + phase_time = phase_time_delta(stats.custom['_run'][phase]) + self._display.display( + '{}{}: {} ({})'.format(phase_title, ' ' * padding, phase_status, phase_time), + color=self.phase_color(phase_status)) + if phase_status == 'In Progress' and phase != 'installer_phase_initialize': + self._display.display( + '\tThis phase can be restarted by running: {}'.format( + phase_attributes[phase]['playbook'])) + + self._display.display("", screen_only=True) + + def phase_color(self, status): + """ Return color code for installer phase""" + valid_status = [ + 'In Progress', + 'Complete', + ] + + if status not in valid_status: + self._display.warning('Invalid phase status defined: {}'.format(status)) + + 
if status == 'Complete': + phase_color = C.COLOR_OK + elif status == 'In Progress': + phase_color = C.COLOR_ERROR + else: + phase_color = C.COLOR_WARN + + return phase_color + + +def phase_time_delta(phase): + """ Calculate the difference between phase start and end times """ + time_format = '%Y%m%d%H%M%SZ' + phase_start = datetime.strptime(phase['start'], time_format) + if 'end' not in phase: + # The phase failed so set the end time to now + phase_end = datetime.now() + else: + phase_end = datetime.strptime(phase['end'], time_format) + delta = str(phase_end - phase_start).split(".")[0] # Trim microseconds + + return delta diff --git a/plugins/filter/__init__.py b/plugins/filter/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/plugins/filter/__pycache__/dict2dict.cpython-310.pyc b/plugins/filter/__pycache__/dict2dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01c31d51519023db94d9f6534020fe2bd8d1de2f GIT binary patch literal 878 zcmY*X&5qMB5VjpB4NJS*l`0Ng=rs~-CC&&TftCyO!Yu+>Y3wfE&EMKdrB>;QeGCq} zM~*xKui+~vUV#H;Chb<;i9DXq&iMOg>~b(TB4EQ`KXu9o`HkSvIDj`W>~nCENScwH z#x!R!%e~m6MDf^vPb8Dx4UwJ-zIky^@=ql4|3C(bJa>}JY?(E+5<0I+Q@c>=g|@mZ z+;L@eQHyy~q&46aGY$0-4P{5D?7%5_tt|bgz*-JJqLF|)~uzshe3!>OGV$Qo#OXn|=tWkh`U8~%>AgL-< zNRZ8~%I5CzR--_T2Bnw)9SU>w%F)FSVvYg)jV7t8zP?Bbt7n;-R9SPO3p<&2wNEhp zCAza}=^pJ79%x74I32>rX~;s#{skMh;l0(9!^WNMlTr!^_l7Zctm_cI8cADnNi^-M zG(%X4=>3HIo!YHC0qA;Lg`EnE4=Z4Nvl>01-u*I-0v8HVB)Jm89SMTE&CF0wGWYL5dav(P|FPX}2{yZYNE~GedWK z(Q@*Q<(8Zv7qSNq$&sJHKS(ZM`a&d5+zgHaN2+I>)xw23d|lODT~%Lo&1|2mRRE>_ z;}1Nv0Dd#Z3vn09yobsJ_Q59~!46Sm$5Pggt?Uc1=P0)UuYzx_gKzH@dUk$C6@BLt zbll&K*|GKc;6NtPSTV{+X(E&^4fr9KJc;$WRPb2Q;W!Qz+F>3bN3t;HucH!R4SS%7 z56aqimAwWDR-_!0Te-fCSnwTH?1L)#E@m(2>NOrICO%CDwhaGMDWis5O7rhgnLrm(t50@c)N|?vV0vSu4%glR_yS{yUf=m2&(^}T z-?w%b9m|3&z=yu=lfL6wsJW;WJnK!Xx5$10u3rQwLldsQo3DKf*;?AZ!;Z9j7(`=+ zmdh0zNnHril*NNiQ9F`FL%oz!r-r&%!i2(56=n)FxueOUETYyjyI< zWA5#W4hx4eL09FKZux@M&nyNmfkSywPymqVW68H>GG+i*Ur90_jJF6+2lUL 
z1g7!tUo_#;_+Sv;?v*g%m z*fmc75szGgM?|dPwpc^x5K+TLnow=AjHrt_bE#MVMRBpaYP=f=6L?x(|9163^L}$? zTlLBeta3$|<416#Q`RL)<6y)n)fGxd*~p?^rS$W05G_v5Q##~As))xdPVl%$=^zOy z%?n&NxSn@z;j!A_IhsLFRd*Q!=Fw$7(E)hA@*Y90MkXcGOYIOvVvYK~@S@ zRY(rZNag7^jjw4)-q+H5c^9sTHtw0)9 literal 0 HcmV?d00001 diff --git a/plugins/filter/__pycache__/ipaddrs_in_ranges.cpython-310.pyc b/plugins/filter/__pycache__/ipaddrs_in_ranges.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e08db1d178fb02bf847e49b753d15424aa644bb GIT binary patch literal 1149 zcmY*Y&2H2%5Vjp}lI~_pTdfdqArc3a16^@N2({u*J+P`&Bt!&?(%5ZXvQBDyTXnUS z5cEOXBS#*A*YK4SufU;YoNUo{B2Q+<_RKe*?W*7JB9N6|KgBXd=(lqgB?M;!hW-SK zA%;sd#wi|?l#IR9!-&(g194LN;~)(%n)={*WtfH^knB;!mrfsH+xfn)A;SrrV7Vs#cHjv!7;&y3kd$R@M%n>&ymHJ}+yso)qiEpXq z<(`d1o&CpZm%wXL+PK0>g$Iakitfq3#|S2C0DI z89KxQ?;t}i;JhyJH=KB*rsOnzf578g3F8)KtoYGHz=6f}sX3_RV;#?FjCWy&)Fv-W z&f-eO22#v>jf+YeRl$r}PpAq2LQ_X|z_M$bEhB9=uiaHkuc!)Xp)!nnIOwpRb*LQo^0rjghXKzO(K_lZ(=42Dk=9elp|figGZK{)0*2pl!r@Kl)2V`AehtB z(9p1Adt1$GY)G(vwR^}5)1+UZqJh-#5k7CbdEri2yU+k2MTC$DM*xjn4Lbh-%^oF_ literal 0 HcmV?d00001 diff --git a/plugins/filter/__pycache__/ipaddrs_in_ranges.cpython-314.pyc b/plugins/filter/__pycache__/ipaddrs_in_ranges.cpython-314.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d56e8ab62bd47f7e2cf1f8c47f33eaa64e285cf0 GIT binary patch literal 1643 zcmZux-ESL35TE<7eNJr0vE4dNljfqNX-(7^t5F-ohiIfWu@Zu1AtRxtD}P+#h4bBY zZ%vgxS-en*mpcHs0Wz>DfKtK*HNh= zgiYX58Mu5e&Xx0EBOtDpqQYPV4f-SwS? 
zD9HnLpiDzWUF>5x;fXYzUFaD)_Iv}4+n9R-))oOON-1*I8&3yur(kUbAPSdYtrUDK zNKsDO$f;h$GPnTjMT@0d0dS#i82o|FKJmg;$1$u1_DOXObIXfShYb4TMR6B&#*0(O zCDc0JM*4o9HgFN`nwy)`b=|v@Z`v&*ziqUrMe`+Qe{9sbo39&A^IFw%Y0Wh9j@jO( zmYd)5>B&<^q&<;h!P#M>7}6U*N#Me%GLmR_-$&7bgR${P@x$!Qqm{#n%a65R(&PK- z*}e4alMkP!ulToLNGb^hXP}>T@8|x zKWh^`3<>q)$rBqf7yc~NQ0c&bOkNRIT#XUIRv9Y)I|X@Z2JQ|xFNs#2S$NT;WA>X; zPWG`s{OE(Gp`U`R2mR8SalcJ*jCE&+H*M>RtM`f4Yxsxd@~UYX4c)eMjv@L@*Xy>$ znT-~HT*nG6y&9D1=IxnzAf%zPGSi(xuO+wEF-)kK>@CpImJu=bWC{PK+%C0 z!>KQBzD!O%$?Q*G-<`hRRpd;f3vxVhGz94j`>FZ8)O=T#MsM~MIlUw-OYA)U`VDO4 zqMk;GRc#rBcteD={M(CqjF9{7s@b=U5VA#?%S~z-mW}2JAq~4uNPy~ssP`Np*bGkV zq{=y?HGGR)^c*FmZdP5lLF>G3m?k0k0L4!n;%@@=yFO~Bq3Vk`DsC3VtIrYRb2ay; z8V*hVos5PM0SX_1@0p8<%D39Q%@73>6U4*_F!3aND&KJok+6mL)2~F7-{;Ba!>7j^ zF8c4L&xRMaYvLva)m#*@M85)Fw6}sLy~AF^Gf}$xHj1tyNzw~g-i76ZWTq4U4aU2w zDy0wA)KM%WEj_;R7vSercNnzv5y(>dWg_+2N;ibsKW+>oe5qcBuc8IzTdAP@3&b0a Ar2qf` literal 0 HcmV?d00001 diff --git a/plugins/filter/dict2dict.py b/plugins/filter/dict2dict.py new file mode 100644 index 0000000..5cf842f --- /dev/null +++ b/plugins/filter/dict2dict.py @@ -0,0 +1,23 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class FilterModule(object): + ''' Loop over nested dictionaries ''' + + def dict2dict(self, nested_dict): + items = [] + for key, value in nested_dict.items(): + for k, v in value.items(): + items.append( + ( + {'key': key, 'value': value}, + {'key': k, 'value': v}, + ), + ) + return items + + def filters(self): + return { + 'dict2dict': self.dict2dict + } diff --git a/plugins/filter/ipaddrs_in_ranges.py b/plugins/filter/ipaddrs_in_ranges.py new file mode 100644 index 0000000..eabdbc3 --- /dev/null +++ b/plugins/filter/ipaddrs_in_ranges.py @@ -0,0 +1,33 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible import errors + +try: + import netaddr +except ImportError: + # in this case, we'll make the filter return an error message (see 
bottom) + netaddr = None + + +class FilterModule(object): + ''' IP addresses within IP ranges ''' + + def ips_in_ranges(self, ip_addresses, ip_ranges): + ips_in_ranges = [] + for ip_addr in ip_addresses: + for ip_range in ip_ranges: + if netaddr.IPAddress(ip_addr) in netaddr.IPNetwork(ip_range): + ips_in_ranges.append(ip_addr) + return ips_in_ranges + + def filters(self): + if netaddr: + return { + 'ips_in_ranges': self.ips_in_ranges + } + else: + # Need to install python's netaddr for these filters to work + raise errors.AnsibleFilterError( + "The ips_in_ranges filter requires python's netaddr be " + "installed on the ansible controller.") diff --git a/profiles/rgw-keystone-v2 b/profiles/rgw-keystone-v2 new file mode 100644 index 0000000..05fc815 --- /dev/null +++ b/profiles/rgw-keystone-v2 @@ -0,0 +1,30 @@ +--- +# THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLE FOR A PARTICULAR PURPOSE +# GOAL: CONFIGURE RADOS GATEWAY WITH KEYSTONE V2 +# +# The following variables should be added in your group_vars/rgws.yml file +# The double quotes are important, do NOT remove them. 
+ + +ceph_conf_overrides: + "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}": + "rgw keystone api version": "2" + "rgw keystone url": "http://192.168.0.1:35357" + "rgw keystone admin token": "password" + "rgw keystone admin tenant": "admin" + "rgw keystone accepted roles": "member, _member_, admin" + "rgw keystone token cache size": "10000" + "rgw keystone revocation interval": "900" + "rgw s3 auth use keystone": "true" + "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss" + + +# NOTE (leseb): to authentivate with Keystone you have two options: +# * using a token (like shown above) +# - "rgw keystone admin token" = admin" +# - "rgw keystone token cache size" = 10000" +# +# * use credential: +# - "rgw keystone admin user" = "admin" +# - "rgw keystone admin password" = "password" +# diff --git a/profiles/rgw-keystone-v3 b/profiles/rgw-keystone-v3 new file mode 100644 index 0000000..a82d96b --- /dev/null +++ b/profiles/rgw-keystone-v3 @@ -0,0 +1,31 @@ +--- +# THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLE FOR A PARTICULAR PURPOSE +# GOAL: CONFIGURE RADOS GATEWAY WITH KEYSTONE V3 +# +# The following variables should be added in your group_vars/rgws.yml file +# The double quotes are important, do NOT remove them. 
+ + +ceph_conf_overrides: + "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}": + "rgw keystone api version": "3" + "rgw keystone url": "http://192.168.0.1:35357" + "rgw keystone admin token": "password" + "rgw keystone admin project": "admin" + "rgw keystone admin domain": "default" + "rgw keystone accepted roles": "member, _member_, admin" + "rgw keystone token cache size": "10000" + "rgw keystone revocation interval": "900" + "rgw s3 auth use keystone": "true" + "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss" + + +# NOTE (leseb): to authentivate with Keystone you have two options: +# * using a token (like shown above) +# - "rgw keystone admin token" = admin" +# - "rgw keystone token cache size" = 10000" +# +# * use credential: +# - "rgw keystone admin user" = "admin" +# - "rgw keystone admin password" = "password" +# diff --git a/profiles/rgw-radosgw-static-website b/profiles/rgw-radosgw-static-website new file mode 100644 index 0000000..fc1ff89 --- /dev/null +++ b/profiles/rgw-radosgw-static-website @@ -0,0 +1,11 @@ +--- +# THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLE FOR A PARTICULAR PURPOSE +# GOAL: CONFIGURE RADOS GATEWAY WITH STATIC WEBSITE +# +# The following variables should be added in your group_vars/rgws.yml file +# The double quotes are important, do NOT remove them. 
+ +ceph_conf_overrides: + "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}": + rgw enable static website = true + rgw dns s3website name = objects-website-region.domain.com diff --git a/profiles/rgw-usage-log b/profiles/rgw-usage-log new file mode 100644 index 0000000..2f0a602 --- /dev/null +++ b/profiles/rgw-usage-log @@ -0,0 +1,15 @@ +--- +# THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLE FOR A PARTICULAR PURPOSE +# GOAL: CONFIGURE RADOS GATEWAY WITH USAGE LOG +# +# The following variables should be added in your group_vars/rgws.yml file +# The double quotes are important, do NOT remove them. + +ceph_conf_overrides: + "client.rgw.{{ rgw_zone }}.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}": + rgw enable usage log = true + rgw usage log tick interval = 30 + rgw usage log flush threshold = 1024 + rgw usage max shards = 32 + rgw usage max user shards = 1 + diff --git a/raw_install_python.yml b/raw_install_python.yml new file mode 100644 index 0000000..97be5a3 --- /dev/null +++ b/raw_install_python.yml @@ -0,0 +1,69 @@ +--- +- name: check for python + stat: + path: "{{ item }}" + changed_when: false + failed_when: false + register: systempython + with_items: + - /usr/bin/python + - /usr/bin/python3 + - /usr/libexec/platform-python + +- block: + - name: check for dnf-3 package manager (RedHat/Fedora/CentOS) + raw: stat /bin/dnf-3 + changed_when: false + failed_when: false + register: stat_dnf3 + + - name: check for yum package manager (RedHat/Fedora/CentOS) + raw: stat /bin/yum + changed_when: false + failed_when: false + register: stat_yum + + - name: check for apt package manager (Debian/Ubuntu) + raw: stat /usr/bin/apt-get + changed_when: false + failed_when: false + register: stat_apt + + - name: check for zypper package manager (SUSE/OpenSUSE) + raw: stat /usr/bin/zypper + changed_when: false + failed_when: false + register: stat_zypper + + - name: install python for RedHat based OS - dnf + raw: 
> + {{ 'dnf' if stat_dnf3.rc == 0 else 'yum' }} -y install python3; + ln -sf /usr/bin/python3 /usr/bin/python + creates=/usr/bin/python + register: result + until: (result is succeeded) and ('Failed' not in result.stdout) + when: stat_dnf3.rc == 0 or stat_yum.rc == 0 + + - name: install python for debian based OS + raw: apt-get -y install python-simplejson + register: result + until: result is succeeded + when: stat_apt.rc == 0 + + - name: install python for SUSE/OpenSUSE + raw: zypper -n install python-base + register: result + until: result is succeeded + when: stat_zypper.rc == 0 + when: not True in (systempython.results | selectattr('stat', 'defined') | map(attribute='stat.exists') | list | unique) + +- name: install python-xml for opensuse only if python2 is installed already + raw: zypper -n install python-xml + register: result + until: result is succeeded + with_items: "{{ systempython.results }}" + when: + - stat_zypper.rc is defined + - stat_zypper.rc == 0 + - item.stat.exists | bool + - item.stat.path == '/usr/bin/python' diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..82a355c --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +# These are Python requirements needed to run ceph-ansible main +ansible-core>=2.15,<2.17,!=2.9.10 +netaddr diff --git a/requirements.yml b/requirements.yml new file mode 100644 index 0000000..8168617 --- /dev/null +++ b/requirements.yml @@ -0,0 +1,10 @@ +--- +# These are Ansible requirements needed to run ceph-ansible main +collections: + - name: https://opendev.org/openstack/ansible-config_template + version: 1.2.1 + type: git + - name: ansible.utils + version: '>=2.5.0' + - name: community.general + - name: ansible.posix diff --git a/roles/ceph-client/LICENSE b/roles/ceph-client/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/roles/ceph-client/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND 
CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-client/README.md b/roles/ceph-client/README.md new file mode 100644 index 0000000..5fda35a --- /dev/null +++ b/roles/ceph-client/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-client + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-client/defaults/main.yml b/roles/ceph-client/defaults/main.yml new file mode 100644 index 0000000..887f512 --- /dev/null +++ b/roles/ceph-client/defaults/main.yml @@ -0,0 +1,41 @@ +--- +########### +# GENERAL # +########### + +# Even though Client nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on Client nodes. 
Setting 'copy_admin_key' to 'true' +# will copy the admin key to the /etc/ceph/ directory +copy_admin_key: false + +user_config: false +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. +# eg: +# test: +# name: "test" +# application: "rbd" +# target_size_ratio: 0.2 +test: + name: "test" + application: "rbd" +test2: + name: "test2" + application: "rbd" +pools: + - "{{ test }}" + - "{{ test2 }}" + +# Generate a keyring using ceph-authtool CLI or python. +# Eg: +# $ ceph-authtool --gen-print-key +# or +# $ python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack(' 0 else inventory_hostname }}" + +- name: Set_fact admin_key_presence + ansible.builtin.set_fact: + admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}" + +- name: Create cephx key(s) + ceph_key: + name: "{{ item.name }}" + caps: "{{ item.caps }}" + secret: "{{ item.key | default('') }}" + cluster: "{{ cluster }}" + dest: "{{ ceph_conf_key_directory }}" + import_key: "{{ admin_key_presence }}" + mode: "{{ item.mode | default(ceph_keyring_permissions) }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: + - "{{ keys }}" + delegate_to: "{{ delegated_node }}" + when: + - cephx | bool + - keys | length > 0 + - inventory_hostname == groups.get('_filtered_clients') | first + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Slurp client cephx key(s) + ansible.builtin.slurp: + src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring" + with_items: "{{ keys 
}}" + register: slurp_client_keys + delegate_to: "{{ delegated_node }}" + when: + - cephx | bool + - keys | length > 0 + - inventory_hostname == groups.get('_filtered_clients') | first + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Pool related tasks + when: + - admin_key_presence | bool + - inventory_hostname == groups.get('_filtered_clients', []) | first + block: + - name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: get_def_crush_rule_name.yml + + - name: Create ceph pool(s) + ceph_pool: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + pg_num: "{{ item.pg_num | default(omit) }}" + pgp_num: "{{ item.pgp_num | default(omit) }}" + size: "{{ item.size | default(omit) }}" + min_size: "{{ item.min_size | default(omit) }}" + pool_type: "{{ item.type | default('replicated') }}" + rule_name: "{{ item.rule_name | default(omit) }}" + erasure_profile: "{{ item.erasure_profile | default(omit) }}" + pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}" + target_size_ratio: "{{ item.target_size_ratio | default(omit) }}" + application: "{{ item.application | default(omit) }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ pools }}" + changed_when: false + delegate_to: "{{ delegated_node }}" + +- name: Get client cephx keys + ansible.builtin.copy: + dest: "{{ item.source }}" + content: "{{ item.content | b64decode }}" + mode: "{{ item.item.get('mode', '0600') }}" + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}" + when: not item.get('skipped', False) + no_log: "{{ no_log_on_ceph_key_tasks }}" diff --git a/roles/ceph-client/tasks/main.yml b/roles/ceph-client/tasks/main.yml new file mode 100644 index 0000000..64e773e --- /dev/null 
+++ b/roles/ceph-client/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Include pre_requisite.yml + ansible.builtin.include_tasks: pre_requisite.yml + when: groups.get(mon_group_name, []) | length > 0 + +- name: Include create_users_keys.yml + ansible.builtin.include_tasks: create_users_keys.yml + when: + - user_config | bool + - not rolling_update | default(False) | bool diff --git a/roles/ceph-client/tasks/pre_requisite.yml b/roles/ceph-client/tasks/pre_requisite.yml new file mode 100644 index 0000000..80e3d91 --- /dev/null +++ b/roles/ceph-client/tasks/pre_requisite.yml @@ -0,0 +1,28 @@ +--- +- name: Copy ceph admin keyring + when: + - cephx | bool + - copy_admin_key | bool + block: + - name: Get keys from monitors + ceph_key_info: + name: client.admin + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _admin_key + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "/etc/ceph/{{ cluster }}.client.admin.keyring" + content: "{{ _admin_key.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" diff --git a/roles/ceph-common/LICENSE b/roles/ceph-common/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/roles/ceph-common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-common/README.md b/roles/ceph-common/README.md new file mode 100644 index 0000000..cd55804 --- /dev/null +++ b/roles/ceph-common/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-common + +Documentation is available at http://docs.ceph.com/ceph-ansible/. 
diff --git a/roles/ceph-common/defaults/main.yml b/roles/ceph-common/defaults/main.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/roles/ceph-common/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/ceph-common/files/cephstable.asc b/roles/ceph-common/files/cephstable.asc new file mode 100644 index 0000000..d2961c5 --- /dev/null +++ b/roles/ceph-common/files/cephstable.asc @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQINBFX4hgkBEADLqn6O+UFp+ZuwccNldwvh5PzEwKUPlXKPLjQfXlQRig1flpCH +E0HJ5wgGlCtYd3Ol9f9+qU24kDNzfbs5bud58BeE7zFaZ4s0JMOMuVm7p8JhsvkU +C/Lo/7NFh25e4kgJpjvnwua7c2YrA44ggRb1QT19ueOZLK5wCQ1mR+0GdrcHRCLr +7Sdw1d7aLxMT+5nvqfzsmbDullsWOD6RnMdcqhOxZZvpay8OeuK+yb8FVQ4sOIzB +FiNi5cNOFFHg+8dZQoDrK3BpwNxYdGHsYIwU9u6DWWqXybBnB9jd2pve9PlzQUbO +eHEa4Z+jPqxY829f4ldaql7ig8e6BaInTfs2wPnHJ+606g2UH86QUmrVAjVzlLCm +nqoGymoAPGA4ObHu9X3kO8viMBId9FzooVqR8a9En7ZE0Dm9O7puzXR7A1f5sHoz +JdYHnr32I+B8iOixhDUtxIY4GA8biGATNaPd8XR2Ca1hPuZRVuIiGG9HDqUEtXhV +fY5qjTjaThIVKtYgEkWMT+Wet3DPPiWT3ftNOE907e6EWEBCHgsEuuZnAbku1GgD +LBH4/a/yo9bNvGZKRaTUM/1TXhM5XgVKjd07B4cChgKypAVHvef3HKfCG2U/DkyA +LjteHt/V807MtSlQyYaXUTGtDCrQPSlMK5TjmqUnDwy6Qdq8dtWN3DtBWQARAQAB +tCpDZXBoLmNvbSAocmVsZWFzZSBrZXkpIDxzZWN1cml0eUBjZXBoLmNvbT6JAjgE +EwECACIFAlX4hgkCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOhKwsBG +DzmUXdIQAI8YPcZMBWdv489q8CzxlfRIRZ3Gv/G/8CH+EOExcmkVZ89mVHngCdAP +DOYCl8twWXC1lwJuLDBtkUOHXNuR5+Jcl5zFOUyldq1Hv8u03vjnGT7lLJkJoqpG +l9QD8nBqRvBU7EM+CU7kP8+09b+088pULil+8x46PwgXkvOQwfVKSOr740Q4J4nm +/nUOyTNtToYntmt2fAVWDTIuyPpAqA6jcqSOC7Xoz9cYxkVWnYMLBUySXmSS0uxl +3p+wK0lMG0my/gb+alke5PAQjcE5dtXYzCn+8Lj0uSfCk8Gy0ZOK2oiUjaCGYN6D +u72qDRFBnR3jaoFqi03bGBIMnglGuAPyBZiI7LJgzuT9xumjKTJW3kN4YJxMNYu1 +FzmIyFZpyvZ7930vB2UpCOiIaRdZiX4Z6ZN2frD3a/vBxBNqiNh/BO+Dex+PDfI4 +TqwF8zlcjt4XZ2teQ8nNMR/D8oiYTUW8hwR4laEmDy7ASxe0p5aijmUApWq5UTsF ++s/QbwugccU0iR5orksM5u9MZH4J/mFGKzOltfGXNLYI6D5Mtwrnyi0BsF5eY0u6 +vkdivtdqrq2DXY+ftuqLOQ7b+t1RctbcMHGPptlxFuN9ufP5TiTWSpfqDwmHCLsT 
+k2vFiMwcHdLpQ1IH8ORVRgPPsiBnBOJ/kIiXG2SxPUTjjEGOVgeA +=/Tod +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/ceph-common/files/cephstablerhcs.asc b/roles/ceph-common/files/cephstablerhcs.asc new file mode 100644 index 0000000..0009a3e --- /dev/null +++ b/roles/ceph-common/files/cephstablerhcs.asc @@ -0,0 +1,34 @@ +pub 4096R/FD431D51 2009-10-22 + Key fingerprint = 567E 347A D004 4ADE 55BA 8A5F 199E 2F91 FD43 1D51 +uid Red Hat, Inc. (release key 2) + +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.5 (GNU/Linux) + +mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF +0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF +0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c +u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh +XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H +5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW +9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj +/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1 +PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY +HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF +buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB +tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0 +LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK +CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC +2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf +C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5 +un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E +0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE +IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh +8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL +Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki +JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25 
+OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq +dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw== +=zbHE +-----END PGP PUBLIC KEY BLOCK----- + diff --git a/roles/ceph-common/meta/main.yml b/roles/ceph-common/meta/main.yml new file mode 100644 index 0000000..f30dfb9 --- /dev/null +++ b/roles/ceph-common/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Installs Ceph + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-common/tasks/configure_cluster_name.yml b/roles/ceph-common/tasks/configure_cluster_name.yml new file mode 100644 index 0000000..150b803 --- /dev/null +++ b/roles/ceph-common/tasks/configure_cluster_name.yml @@ -0,0 +1,52 @@ +--- +- name: Configure cluster name + ansible.builtin.lineinfile: + dest: /etc/sysconfig/ceph + insertafter: EOF + create: true + line: "CLUSTER={{ cluster }}" + regexp: "^CLUSTER=" + mode: "0644" + when: ansible_facts['os_family'] in ["RedHat", "Suse"] + +# NOTE(leseb): we are performing the following check +# to ensure any Jewel installation will not fail. +# The following commit https://github.com/ceph/ceph/commit/791eba81a5467dd5de4f1680ed0deb647eb3fb8b +# fixed a package issue where the path was the wrong. 
+# This bug is not yet on all the distros package so we are working around it +# Impacted versions: +# - Jewel from UCA: https://bugs.launchpad.net/ubuntu/+source/ceph/+bug/1582773 +# - Jewel from latest Canonical 16.04 distro +# - All previous versions from Canonical +# - Infernalis from ceph.com +- name: Debian based systems - configure cluster name + when: ansible_facts['os_family'] == "Debian" + block: + - name: Check /etc/default/ceph exist + ansible.builtin.stat: + path: /etc/default/ceph + register: etc_default_ceph + check_mode: false + + - name: Configure cluster name + when: etc_default_ceph.stat.exists + block: + - name: When /etc/default/ceph is not dir + ansible.builtin.lineinfile: + dest: /etc/default/ceph + insertafter: EOF + create: true + regexp: "^CLUSTER=" + line: "CLUSTER={{ cluster }}" + mode: "0644" + when: not etc_default_ceph.stat.isdir + + - name: When /etc/default/ceph is dir + ansible.builtin.lineinfile: + dest: /etc/default/ceph/ceph + insertafter: EOF + create: true + regexp: "^CLUSTER=" + line: "CLUSTER={{ cluster }}" + mode: "0644" + when: etc_default_ceph.stat.isdir diff --git a/roles/ceph-common/tasks/configure_memory_allocator.yml b/roles/ceph-common/tasks/configure_memory_allocator.yml new file mode 100644 index 0000000..4db07a0 --- /dev/null +++ b/roles/ceph-common/tasks/configure_memory_allocator.yml @@ -0,0 +1,36 @@ +--- +- name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian + ansible.builtin.lineinfile: + dest: "{{ etc_default_ceph.stat.isdir | ternary('/etc/default/ceph/ceph', '/etc/default/ceph') }}" + insertafter: EOF + create: true + regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=" + line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}" + mode: "0644" + when: + - ansible_facts['os_family'] == 'Debian' + - etc_default_ceph.stat.exists + notify: + - Restart ceph mons + - Restart ceph mgrs + - Restart ceph osds + - Restart ceph mdss + - Restart ceph rgws + - Restart ceph 
rbdmirrors + +- name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat + ansible.builtin.lineinfile: + dest: "/etc/sysconfig/ceph" + insertafter: EOF + create: true + regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=" + line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}" + mode: "0644" + when: ansible_facts['os_family'] == 'RedHat' + notify: + - Restart ceph mons + - Restart ceph mgrs + - Restart ceph osds + - Restart ceph mdss + - Restart ceph rgws + - Restart ceph rbdmirrors diff --git a/roles/ceph-common/tasks/configure_repository.yml b/roles/ceph-common/tasks/configure_repository.yml new file mode 100644 index 0000000..3558376 --- /dev/null +++ b/roles/ceph-common/tasks/configure_repository.yml @@ -0,0 +1,32 @@ +--- +- name: Config repository for Red Hat based OS + when: ansible_facts['os_family'] == 'RedHat' + block: + - name: Include installs/configure_redhat_repository_installation.yml + ansible.builtin.include_tasks: installs/configure_redhat_repository_installation.yml + when: ceph_origin == 'repository' + + - name: Include installs/configure_redhat_local_installation.yml + ansible.builtin.include_tasks: installs/configure_redhat_local_installation.yml + when: ceph_origin == 'local' + +- name: Config repository for Debian based OS + when: ansible_facts['os_family'] == 'Debian' + tags: package-install + block: + - name: Include installs/configure_debian_repository_installation.yml + ansible.builtin.include_tasks: installs/configure_debian_repository_installation.yml + when: ceph_origin == 'repository' + + - name: Update apt cache if cache_valid_time has expired + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + register: result + until: result is succeeded + +- name: Include installs/configure_suse_repository_installation.yml + ansible.builtin.include_tasks: installs/configure_suse_repository_installation.yml + when: + - ansible_facts['os_family'] == 'Suse' + - ceph_origin == 'repository' 
diff --git a/roles/ceph-common/tasks/create_rbd_client_dir.yml b/roles/ceph-common/tasks/create_rbd_client_dir.yml new file mode 100644 index 0000000..271452f --- /dev/null +++ b/roles/ceph-common/tasks/create_rbd_client_dir.yml @@ -0,0 +1,12 @@ +--- +- name: Create rbd client directory + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ rbd_client_directory_owner }}" + group: "{{ rbd_client_directory_group }}" + mode: "{{ rbd_client_directory_mode }}" + with_items: + - "{{ rbd_client_admin_socket_path }}" + - "{{ rbd_client_log_path }}" + when: rbd_client_directories | bool diff --git a/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml new file mode 100644 index 0000000..caedf4d --- /dev/null +++ b/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml @@ -0,0 +1,16 @@ +--- +- name: Include debian_community_repository.yml + ansible.builtin.include_tasks: debian_community_repository.yml + when: ceph_repository == 'community' + +- name: Include debian_dev_repository.yml + ansible.builtin.include_tasks: debian_dev_repository.yml + when: ceph_repository == 'dev' + +- name: Include debian_custom_repository.yml + ansible.builtin.include_tasks: debian_custom_repository.yml + when: ceph_repository == 'custom' + +- name: Include debian_uca_repository.yml + ansible.builtin.include_tasks: debian_uca_repository.yml + when: ceph_repository == 'uca' diff --git a/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml b/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml new file mode 100644 index 0000000..3f2ddd5 --- /dev/null +++ b/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml @@ -0,0 +1,45 @@ +--- +- name: Make sure /tmp exists + ansible.builtin.file: + path: /tmp + state: directory + mode: "0755" + when: use_installer | bool + +- name: Use mktemp to create 
name for rundep + ansible.builtin.tempfile: + path: /tmp + prefix: rundep. + register: rundep_location + when: use_installer | bool + +- name: Copy rundep + ansible.builtin.copy: + src: "{{ ansible_dir }}/rundep" + dest: "{{ rundep_location.path }}" + mode: preserve + when: use_installer | bool + +- name: Install ceph dependencies + ansible.builtin.script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}" + when: use_installer | bool + +- name: Ensure rsync is installed + ansible.builtin.package: + name: rsync + state: present + register: result + until: result is succeeded + +- name: Synchronize ceph install + ansible.posix.synchronize: + src: "{{ ceph_installation_dir }}/" + dest: "/" + +- name: Create user group ceph + ansible.builtin.group: + name: 'ceph' + +- name: Create user ceph + ansible.builtin.user: + name: 'ceph' diff --git a/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml new file mode 100644 index 0000000..43d0dd6 --- /dev/null +++ b/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml @@ -0,0 +1,34 @@ +--- +- name: Configure epel repository + ansible.builtin.package: + name: epel-release + register: result + until: result is succeeded + tags: with_pkg + when: + - ansible_facts['distribution'] == 'Rocky' + +- name: Install python3-packaging + ansible.builtin.yum: + name: python3-packaging + enablerepo: powertools + state: present + when: ansible_facts['distribution_major_version'] | int < 9 + +- name: Include redhat_community_repository.yml + ansible.builtin.include_tasks: redhat_community_repository.yml + when: ceph_repository == 'community' + +- name: Include redhat_dev_repository.yml + ansible.builtin.include_tasks: redhat_dev_repository.yml + when: ceph_repository == 'dev' + +- name: Include redhat_custom_repository.yml + ansible.builtin.include_tasks: redhat_custom_repository.yml + when: 
ceph_repository == 'custom' + +# Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version +- name: Purge yum cache + ansible.builtin.command: yum clean all # noqa: [303] + changed_when: false + when: ansible_facts['pkg_mgr'] == 'yum' diff --git a/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml new file mode 100644 index 0000000..32d9e51 --- /dev/null +++ b/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml @@ -0,0 +1,4 @@ +--- +- name: Include suse_obs_repository.yml + ansible.builtin.include_tasks: suse_obs_repository.yml + when: ceph_repository == 'obs' diff --git a/roles/ceph-common/tasks/installs/debian_community_repository.yml b/roles/ceph-common/tasks/installs/debian_community_repository.yml new file mode 100644 index 0000000..c334521 --- /dev/null +++ b/roles/ceph-common/tasks/installs/debian_community_repository.yml @@ -0,0 +1,20 @@ +--- +- name: Install dependencies for apt modules + ansible.builtin.package: + name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common'] + update_cache: true + register: result + until: result is succeeded + +- name: Configure debian ceph community repository stable key + ansible.builtin.apt_key: + data: "{{ lookup('file', role_path + '/files/cephstable.asc') }}" + state: present + register: result + until: result is succeeded + +- name: Configure debian ceph stable community repository + ansible.builtin.apt_repository: + repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main" + state: present + update_cache: true diff --git a/roles/ceph-common/tasks/installs/debian_custom_repository.yml b/roles/ceph-common/tasks/installs/debian_custom_repository.yml new file mode 100644 index 0000000..2d1fb07 --- /dev/null +++ 
b/roles/ceph-common/tasks/installs/debian_custom_repository.yml @@ -0,0 +1,14 @@ +--- +- name: Configure debian custom apt key + ansible.builtin.apt_key: + url: "{{ ceph_custom_key }}" + state: present + register: result + until: result is succeeded + when: ceph_custom_key is defined + +- name: Configure debian custom repository + ansible.builtin.apt_repository: + repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main" + state: present + update_cache: true diff --git a/roles/ceph-common/tasks/installs/debian_dev_repository.yml b/roles/ceph-common/tasks/installs/debian_dev_repository.yml new file mode 100644 index 0000000..9533fcf --- /dev/null +++ b/roles/ceph-common/tasks/installs/debian_dev_repository.yml @@ -0,0 +1,12 @@ +--- +- name: Fetch ceph debian development repository + ansible.builtin.uri: + url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo?arch={{ ansible_facts['architecture'] }}" + return_content: true + register: ceph_dev_deb_repo + +- name: Configure ceph debian development repository + ansible.builtin.apt_repository: + repo: "{{ ceph_dev_deb_repo.content }}" + state: present + update_cache: true diff --git a/roles/ceph-common/tasks/installs/debian_uca_repository.yml b/roles/ceph-common/tasks/installs/debian_uca_repository.yml new file mode 100644 index 0000000..be1a562 --- /dev/null +++ b/roles/ceph-common/tasks/installs/debian_uca_repository.yml @@ -0,0 +1,12 @@ +--- +- name: Add ubuntu cloud archive key package + ansible.builtin.package: + name: ubuntu-cloud-keyring + register: result + until: result is succeeded + +- name: Add ubuntu cloud archive repository + ansible.builtin.apt_repository: + repo: "deb {{ ceph_stable_repo_uca }} {{ ceph_stable_release_uca }} main" + state: present + update_cache: true diff --git a/roles/ceph-common/tasks/installs/install_debian_packages.yml 
b/roles/ceph-common/tasks/installs/install_debian_packages.yml new file mode 100644 index 0000000..edb4a74 --- /dev/null +++ b/roles/ceph-common/tasks/installs/install_debian_packages.yml @@ -0,0 +1,9 @@ +--- +- name: Install ceph for debian + ansible.builtin.apt: + name: "{{ debian_ceph_pkgs | unique }}" + update_cache: false + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}" + register: result + until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/install_on_clear.yml b/roles/ceph-common/tasks/installs/install_on_clear.yml new file mode 100644 index 0000000..84619f9 --- /dev/null +++ b/roles/ceph-common/tasks/installs/install_on_clear.yml @@ -0,0 +1,7 @@ +--- +- name: Install ceph bundle + community.general.swupd: + name: storage-cluster + state: present + register: result + until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/install_on_debian.yml b/roles/ceph-common/tasks/installs/install_on_debian.yml new file mode 100644 index 0000000..7db001e --- /dev/null +++ b/roles/ceph-common/tasks/installs/install_on_debian.yml @@ -0,0 +1,13 @@ +- name: Install dependencies + ansible.builtin.apt: + name: "{{ debian_package_dependencies }}" + state: present + update_cache: true + cache_valid_time: 3600 + register: result + until: result is succeeded + +- name: Include install_debian_packages.yml + ansible.builtin.include_tasks: install_debian_packages.yml + when: + - (ceph_origin == 'repository' or ceph_origin == 'distro') diff --git a/roles/ceph-common/tasks/installs/install_redhat_packages.yml b/roles/ceph-common/tasks/installs/install_redhat_packages.yml new file mode 100644 index 0000000..08769de --- /dev/null +++ 
b/roles/ceph-common/tasks/installs/install_redhat_packages.yml @@ -0,0 +1,23 @@ +--- +- name: Install redhat dependencies + ansible.builtin.package: + name: "{{ redhat_package_dependencies }}" + state: present + register: result + until: result is succeeded + when: ansible_facts['distribution'] == 'RedHat' + +- name: Install centos dependencies + ansible.builtin.yum: + name: "{{ centos_package_dependencies }}" + state: present + register: result + until: result is succeeded + when: ansible_facts['distribution'] == 'CentOS' + +- name: Install redhat ceph packages + ansible.builtin.package: + name: "{{ redhat_ceph_pkgs | unique }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + register: result + until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/install_suse_packages.yml b/roles/ceph-common/tasks/installs/install_suse_packages.yml new file mode 100644 index 0000000..5adb39d --- /dev/null +++ b/roles/ceph-common/tasks/installs/install_suse_packages.yml @@ -0,0 +1,14 @@ +--- +- name: Install SUSE/openSUSE dependencies + ansible.builtin.package: + name: "{{ suse_package_dependencies }}" + state: present + register: result + until: result is succeeded + +- name: Install SUSE/openSUSE ceph packages + ansible.builtin.package: + name: "{{ suse_ceph_pkgs | unique }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + register: result + until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/redhat_community_repository.yml b/roles/ceph-common/tasks/installs/redhat_community_repository.yml new file mode 100644 index 0000000..91adb28 --- /dev/null +++ b/roles/ceph-common/tasks/installs/redhat_community_repository.yml @@ -0,0 +1,41 @@ +--- +- name: Install yum plugin priorities + ansible.builtin.package: + name: yum-plugin-priorities + register: result + until: result is succeeded + tags: with_pkg + when: ansible_facts['distribution_major_version'] | int == 7 + +- name: 
Configure red hat ceph community repository stable key + ansible.builtin.rpm_key: + key: "{{ ceph_stable_key }}" + state: present + register: result + until: result is succeeded + +- name: Configure red hat ceph stable community repository + ansible.builtin.yum_repository: + name: ceph_stable + description: Ceph Stable $basearch repo + gpgcheck: true + state: present + gpgkey: "{{ ceph_stable_key }}" + baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch" + file: ceph_stable + priority: 2 + register: result + until: result is succeeded + +- name: Configure red hat ceph stable noarch community repository + ansible.builtin.yum_repository: + name: ceph_stable_noarch + description: Ceph Stable noarch repo + gpgcheck: true + state: present + gpgkey: "{{ ceph_stable_key }}" + baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch" + file: ceph_stable + priority: 2 + register: result + until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/redhat_custom_repository.yml b/roles/ceph-common/tasks/installs/redhat_custom_repository.yml new file mode 100644 index 0000000..c944d3a --- /dev/null +++ b/roles/ceph-common/tasks/installs/redhat_custom_repository.yml @@ -0,0 +1,16 @@ +--- +- name: Configure red hat custom rpm key + ansible.builtin.rpm_key: + key: "{{ ceph_custom_key }}" + state: present + register: result + until: result is succeeded + when: ceph_custom_key is defined + +- name: Configure red hat custom repository + ansible.builtin.get_url: + url: "{{ ceph_custom_repo }}" + dest: /etc/yum.repos.d + owner: root + group: root + mode: "0644" diff --git a/roles/ceph-common/tasks/installs/redhat_dev_repository.yml b/roles/ceph-common/tasks/installs/redhat_dev_repository.yml new file mode 100644 index 0000000..541552a --- /dev/null +++ b/roles/ceph-common/tasks/installs/redhat_dev_repository.yml @@ -0,0 +1,25 @@ +--- +- name: 
Fetch ceph red hat development repository + ansible.builtin.uri: + # Use the centos repo since we don't currently have a dedicated red hat repo + url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/centos/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}" + return_content: true + register: ceph_dev_yum_repo + +- name: Configure ceph red hat development repository + ansible.builtin.copy: + content: "{{ ceph_dev_yum_repo.content }}" + dest: /etc/yum.repos.d/ceph-dev.repo + owner: root + group: root + mode: "0644" + backup: true + +- name: Remove ceph_stable repositories + ansible.builtin.yum_repository: + name: '{{ item }}' + file: ceph_stable + state: absent + with_items: + - ceph_stable + - ceph_stable_noarch diff --git a/roles/ceph-common/tasks/installs/suse_obs_repository.yml b/roles/ceph-common/tasks/installs/suse_obs_repository.yml new file mode 100644 index 0000000..327f15e --- /dev/null +++ b/roles/ceph-common/tasks/installs/suse_obs_repository.yml @@ -0,0 +1,8 @@ +--- +- name: Configure openSUSE ceph OBS repository + community.general.zypper_repository: + name: "OBS:filesystems:ceph:{{ ceph_release }}" + state: present + repo: "{{ ceph_obs_repo }}" + auto_import_keys: true + autorefresh: true diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml new file mode 100644 index 0000000..1fdb3bb --- /dev/null +++ b/roles/ceph-common/tasks/main.yml @@ -0,0 +1,63 @@ +--- +- name: Include configure_repository.yml + ansible.builtin.include_tasks: configure_repository.yml + tags: package-configure + +- name: Include installs/install_redhat_packages.yml + ansible.builtin.include_tasks: installs/install_redhat_packages.yml + when: + - ansible_facts['os_family'] == 'RedHat' + - (ceph_origin == 'repository' or ceph_origin == 'distro') + tags: package-install + +- name: Include installs/install_suse_packages.yml + ansible.builtin.include_tasks: 
installs/install_suse_packages.yml + when: ansible_facts['os_family'] == 'Suse' + tags: package-install + +- name: Include installs/install_on_debian.yml + ansible.builtin.include_tasks: installs/install_on_debian.yml + tags: package-install + when: ansible_facts['os_family'] == 'Debian' + +- name: Include_tasks installs/install_on_clear.yml + ansible.builtin.include_tasks: installs/install_on_clear.yml + when: ansible_facts['os_family'] == 'ClearLinux' + tags: package-install + +- name: Get ceph version + ansible.builtin.command: ceph --version + changed_when: false + check_mode: false + register: ceph_version + +- name: Set_fact ceph_version + ansible.builtin.set_fact: + ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}" + +- name: Set_fact ceph_release - override ceph_release with ceph_stable_release + ansible.builtin.set_fact: + ceph_release: "{{ ceph_stable_release }}" + when: + - ceph_origin == 'repository' + - ceph_repository not in ['dev', 'custom'] + tags: always + +- name: Include create_rbd_client_dir.yml + ansible.builtin.include_tasks: create_rbd_client_dir.yml + +- name: Include configure_cluster_name.yml + ansible.builtin.include_tasks: configure_cluster_name.yml + +- name: Include configure_memory_allocator.yml + ansible.builtin.include_tasks: configure_memory_allocator.yml + when: + - (ceph_tcmalloc_max_total_thread_cache | int) > 0 + - (ceph_origin == 'repository' or ceph_origin == 'distro') + +- name: Include selinux.yml + ansible.builtin.include_tasks: selinux.yml + when: + - ansible_facts['os_family'] == 'RedHat' + - inventory_hostname in groups.get(nfs_group_name, []) + or inventory_hostname in groups.get(rgwloadbalancer_group_name, []) diff --git a/roles/ceph-common/tasks/selinux.yml b/roles/ceph-common/tasks/selinux.yml new file mode 100644 index 0000000..65459b5 --- /dev/null +++ b/roles/ceph-common/tasks/selinux.yml @@ -0,0 +1,22 @@ +--- +- name: If selinux is not disabled + when: ansible_facts['selinux']['status'] == 'enabled' + 
block: + - name: Install policycoreutils-python + ansible.builtin.package: + name: policycoreutils-python + state: present + register: result + until: result is succeeded + when: ansible_facts['distribution_major_version'] == '7' + + - name: Install python3-policycoreutils on RHEL 8 + ansible.builtin.package: + name: python3-policycoreutils + state: present + register: result + until: result is succeeded + when: + - inventory_hostname in groups.get(nfs_group_name, []) + or inventory_hostname in groups.get(rgwloadbalancer_group_name, []) + - ansible_facts['distribution_major_version'] == '8' diff --git a/roles/ceph-common/vars/main.yml b/roles/ceph-common/vars/main.yml new file mode 100644 index 0000000..1d832f9 --- /dev/null +++ b/roles/ceph-common/vars/main.yml @@ -0,0 +1,32 @@ +--- +# ceph-common is always installed, if a package isn't to be installed we replace +# it with 'ceph-common' and run the install with the | unique filter. +debian_ceph_pkgs: + - "ceph" + - "ceph-common" + - "{{ (osd_group_name in group_names) | ternary('ceph-volume', 'ceph-common') }}" + - "{{ (ceph_test | bool) | ternary('ceph-test', 'ceph-common') }}" + - "{{ (rgw_group_name in group_names) | ternary('radosgw', 'ceph-common') }}" + - "{{ (rbdmirror_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}" + +redhat_ceph_pkgs: + - "{{ (ceph_test | bool) | ternary('ceph-test', 'ceph-common') }}" + - "ceph-common" + - "{{ (mon_group_name in group_names) | ternary('ceph-mon', 'ceph-common') }}" + - "{{ (osd_group_name in group_names) | ternary('ceph-osd', 'ceph-common') }}" + - "{{ (osd_group_name in group_names) | ternary('ceph-volume', 'ceph-common') }}" + - "{{ (client_group_name in group_names) | ternary('ceph-fuse', 'ceph-common') }}" + - "{{ (client_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}" + - "{{ (rgw_group_name in group_names) | ternary('ceph-radosgw', 'ceph-common') }}" + - "{{ (rbdmirror_group_name in group_names) | ternary('ceph-base', 
'ceph-common') }}" + +suse_ceph_pkgs: + - "{{ (ceph_test | bool) | ternary('ceph-test', 'ceph-common') }}" + - "ceph-common" + - "{{ (mon_group_name in group_names) | ternary('ceph-mon', 'ceph-common') }}" + - "{{ (osd_group_name in group_names) | ternary('ceph-osd', 'ceph-common') }}" + - "{{ (osd_group_name in group_names) | ternary('ceph-volume', 'ceph-common') }}" + - "{{ (client_group_name in group_names) | ternary('ceph-fuse', 'ceph-common') }}" + - "{{ (client_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}" + - "{{ (rgw_group_name in group_names) | ternary('ceph-radosgw', 'ceph-common') }}" + - "{{ (rbdmirror_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}" diff --git a/roles/ceph-config/LICENSE b/roles/ceph-config/LICENSE new file mode 100644 index 0000000..b0d1c9f --- /dev/null +++ b/roles/ceph-config/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [2014] [Guillaume Abrioux] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-config/README.md b/roles/ceph-config/README.md new file mode 100644 index 0000000..319da5b --- /dev/null +++ b/roles/ceph-config/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-config + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-config/meta/main.yml b/roles/ceph-config/meta/main.yml new file mode 100644 index 0000000..8462226 --- /dev/null +++ b/roles/ceph-config/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Guillaume Abrioux + description: Handles ceph-ansible initial configuration + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-config/tasks/create_ceph_initial_dirs.yml b/roles/ceph-config/tasks/create_ceph_initial_dirs.yml new file mode 100644 index 0000000..64936e1 --- /dev/null +++ b/roles/ceph-config/tasks/create_ceph_initial_dirs.yml @@ -0,0 +1,25 @@ +--- +- name: Create ceph initial directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + mode: "0755" + loop: + - /etc/ceph + - /var/lib/ceph/ + - /var/lib/ceph/mon + - /var/lib/ceph/osd + - /var/lib/ceph/mds + - /var/lib/ceph/tmp + - /var/lib/ceph/crash + - /var/lib/ceph/radosgw + - /var/lib/ceph/bootstrap-rgw + - /var/lib/ceph/bootstrap-mgr + - 
/var/lib/ceph/bootstrap-mds + - /var/lib/ceph/bootstrap-osd + - /var/lib/ceph/bootstrap-rbd + - /var/lib/ceph/bootstrap-rbd-mirror + - /var/run/ceph + - /var/log/ceph diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml new file mode 100644 index 0000000..3928bf2 --- /dev/null +++ b/roles/ceph-config/tasks/main.yml @@ -0,0 +1,194 @@ +--- +- name: Include create_ceph_initial_dirs.yml + ansible.builtin.include_tasks: create_ceph_initial_dirs.yml + when: containerized_deployment | bool + +- name: Include_tasks rgw_systemd_environment_file.yml + ansible.builtin.include_tasks: rgw_systemd_environment_file.yml + when: inventory_hostname in groups.get(rgw_group_name, []) + +- name: Config file operations related to OSDs + when: + - inventory_hostname in groups.get(osd_group_name, []) + # the rolling_update.yml playbook sets num_osds to the number of currently + # running osds + - not rolling_update | bool + block: + - name: Reset num_osds + ansible.builtin.set_fact: + num_osds: 0 + + - name: Count number of osds for lvm scenario + ansible.builtin.set_fact: + num_osds: "{{ num_osds | int + (lvm_volumes | length | int) }}" + when: lvm_volumes | default([]) | length > 0 + + - name: Ceph-volume pre-requisites tasks + when: + - devices | default([]) | length > 0 + block: + - name: Look up for ceph-volume rejected devices + ceph_volume: + cluster: "{{ cluster }}" + action: "inventory" + register: rejected_devices + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + PYTHONIOENCODING: utf-8 + + - name: Set_fact rejected_devices + ansible.builtin.set_fact: + _rejected_devices: "{{ _rejected_devices | default([]) + [item.path] }}" + with_items: "{{ rejected_devices.stdout | default('{}') | from_json }}" + when: "'Used by ceph-disk' in 
item.rejected_reasons" + + - name: Set_fact _devices + ansible.builtin.set_fact: + _devices: "{{ devices | difference(_rejected_devices | default([])) }}" + + - name: Run 'ceph-volume lvm batch --report' to see how many osds are to be created + ceph_volume: + cluster: "{{ cluster }}" + objectstore: "{{ osd_objectstore }}" + batch_devices: "{{ _devices }}" + osds_per_device: "{{ osds_per_device | default(1) | int }}" + block_db_size: "{{ block_db_size }}" + report: true + action: "batch" + register: lvm_batch_report + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + PYTHONIOENCODING: utf-8 + when: _devices | default([]) | length > 0 + + - name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report) + ansible.builtin.set_fact: + num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json).osds | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}" + when: + - (lvm_batch_report.stdout | default('{}') | from_json) is mapping + - (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool + + - name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report) + ansible.builtin.set_fact: + num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json) | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}" + when: + - (lvm_batch_report.stdout | default('{}') | from_json) is not mapping + - (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool + + - name: Run 'ceph-volume lvm list' to see how many osds have already been created + ceph_volume: + action: "list" + register: lvm_list + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + 
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + PYTHONIOENCODING: utf-8 + changed_when: false + + - name: Set_fact num_osds (add existing osds) + ansible.builtin.set_fact: + num_osds: "{{ num_osds | int + (lvm_list.stdout | default('{}') | from_json | dict2items | map(attribute='value') | flatten | map(attribute='devices') | sum(start=[]) | difference(lvm_volumes | default([]) | map(attribute='data')) | length | int) }}" + +- name: Set osd related config facts + when: inventory_hostname in groups.get(osd_group_name, []) + block: + - name: Set_fact _osd_memory_target + ansible.builtin.set_fact: + _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}" + when: + - _osd_memory_target is undefined + - num_osds | default(0) | int > 0 + - ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > (osd_memory_target | float) + - ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') == '' + + - name: Set osd_memory_target to cluster host config + ceph_config: + action: set + who: "osd.*/{{ ansible_facts['hostname'] }}:host" + option: "osd_memory_target" + value: "{{ _osd_memory_target }}" + when: + - ceph_config_osd_memory_target | default(true) + - _osd_memory_target is defined + - running_mon is defined + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ running_mon }}" + +- name: Set rgw configs + when: inventory_hostname in groups.get(rgw_group_name, []) + block: + - name: Render rgw configs + vars: + _rgw_binding_socket: "{{ item.radosgw_address | default(_radosgw_address) | string + ':' + item.radosgw_frontend_port | 
default(radosgw_frontend_port) | string }}" + _rgw_beast_endpoint: "{{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}" + _rgw_beast_ssl_option: "{{ ' ssl_certificate=' + radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}" + ansible.builtin.set_fact: + _ceph_ansible_rgw_conf: >- + {{ _ceph_ansible_rgw_conf | default({}) | combine({ + 'client.rgw.' + rgw_zone + '.' + ansible_facts['hostname'] + '.' + item.instance_name: { + 'log_file': '/var/log/ceph/' + cluster + '-rgw-' + rgw_zone + '-' + ansible_facts['hostname'] + '.' + item.instance_name + '.log', + 'rgw_frontends': 'beast ' + _rgw_beast_endpoint + _rgw_beast_ssl_option, + } + }, recursive=true) }} + loop: "{{ rgw_instances }}" + + - name: Set config to cluster + ceph_config: + action: set + who: "{{ item.0.key }}" + option: "{{ item.1.key }}" + value: "{{ item.1.value }}" + loop: "{{ _ceph_ansible_rgw_conf | dict2dict }}" + when: + - rgw_conf_to_cluster | default(true) | bool + - running_mon is defined + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ running_mon }}" + + - name: Set rgw configs to file + ansible.builtin.set_fact: + ceph_conf_overrides: "{{ ceph_conf_overrides | default({}) | combine(_ceph_ansible_rgw_conf, recursive=true) }}" + when: not rgw_conf_to_cluster | default(true) | bool + +- name: Create ceph conf directory + ansible.builtin.file: + path: "/etc/ceph" + state: directory + owner: "ceph" + group: "ceph" + mode: "{{ ceph_directories_mode }}" + when: not containerized_deployment | bool + +- name: Import_role ceph-facts + ansible.builtin.import_role: + name: ceph-facts + tasks_from: set_radosgw_address.yml + when: + - set_radosgw_address | default(true) + - inventory_hostname in groups.get(rgw_group_name, []) + +- name: Generate Ceph 
file + openstack.config_template.config_template: + src: "ceph.conf.j2" + dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0644" + config_overrides: "{{ ceph_conf_overrides }}" + config_type: ini + notify: + - Restart ceph mons + - Restart ceph osds + - Restart ceph mdss + - Restart ceph rgws + - Restart ceph mgrs + - Restart ceph rbdmirrors diff --git a/roles/ceph-config/tasks/rgw_systemd_environment_file.yml b/roles/ceph-config/tasks/rgw_systemd_environment_file.yml new file mode 100644 index 0000000..df52e6a --- /dev/null +++ b/roles/ceph-config/tasks/rgw_systemd_environment_file.yml @@ -0,0 +1,22 @@ +--- +- name: Create rados gateway instance directories + ansible.builtin.file: + path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_directories_mode | default('0755') }}" + with_items: "{{ rgw_instances }}" + +- name: Generate environment file + ansible.builtin.copy: + dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile" + owner: "root" + group: "root" + mode: "0644" + content: | + INST_NAME={{ item.instance_name }} + with_items: "{{ rgw_instances }}" + when: + - containerized_deployment | bool + - rgw_instances is defined diff --git a/roles/ceph-config/templates/ceph.conf.j2 b/roles/ceph-config/templates/ceph.conf.j2 new file mode 100644 index 0000000..c68908b --- /dev/null +++ b/roles/ceph-config/templates/ceph.conf.j2 @@ -0,0 +1,31 @@ +#jinja2: trim_blocks: "true", lstrip_blocks: "true" +# {{ ansible_managed }} + +[global] +{% if not cephx | bool %} +auth cluster required = 
none +auth service required = none +auth client required = none +{% endif %} +{# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #} + +{% set nb_mon = groups.get(mon_group_name, []) | length | int %} + +fsid = {{ fsid }} +mon host = {% if nb_mon > 0 %} +{% for name, addr in _monitor_addresses.items() -%} +{% if mon_host_v1.enabled | bool %} +{% set _v1 = ',v1:' + addr + mon_host_v1.suffix %} +{% endif %} +[{{ "v2:" + addr + mon_host_v2.suffix }}{{ _v1 | default('') }}] +{%- if not loop.last -%},{%- endif %} +{%- endfor %} +{% elif nb_mon == 0 %} +{{ external_cluster_mon_ips }} +{% endif %} + +{% if inventory_hostname in groups.get(client_group_name, []) %} +[client.libvirt] +admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor +log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor +{% endif %} diff --git a/roles/ceph-container-common/README.md b/roles/ceph-container-common/README.md new file mode 100644 index 0000000..0fff59a --- /dev/null +++ b/roles/ceph-container-common/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-container-common + +Documentation is available at http://docs.ceph.com/ceph-ansible/. 
diff --git a/roles/ceph-container-common/defaults/main.yml b/roles/ceph-container-common/defaults/main.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/roles/ceph-container-common/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/ceph-container-common/files/ceph.target b/roles/ceph-container-common/files/ceph.target new file mode 100644 index 0000000..9c09851 --- /dev/null +++ b/roles/ceph-container-common/files/ceph.target @@ -0,0 +1,5 @@ +[Unit] +Description=ceph target allowing to start/stop all ceph*@.service instances at once + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/roles/ceph-container-common/meta/main.yml b/roles/ceph-container-common/meta/main.yml new file mode 100644 index 0000000..f30dfb9 --- /dev/null +++ b/roles/ceph-container-common/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Installs Ceph + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-container-common/tasks/fetch_image.yml b/roles/ceph-container-common/tasks/fetch_image.yml new file mode 100644 index 0000000..f222e4b --- /dev/null +++ b/roles/ceph-container-common/tasks/fetch_image.yml @@ -0,0 +1,81 @@ +--- +- name: Pulling Ceph container image + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + changed_when: false + register: docker_image + until: docker_image.rc == 0 + retries: "{{ docker_pull_retry }}" + delay: 10 + when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image | bool) + environment: + HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}" + HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" + NO_PROXY: "{{ ceph_docker_no_proxy }}" + +- name: Pulling alertmanager/prometheus/grafana container images + 
ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}" + changed_when: false + register: monitoring_images + until: monitoring_images.rc == 0 + retries: "{{ docker_pull_retry }}" + delay: 10 + loop: + - "{{ alertmanager_container_image }}" + - "{{ prometheus_container_image }}" + - "{{ grafana_container_image }}" + when: + - dashboard_enabled | bool + - inventory_hostname in groups.get(monitoring_group_name, []) + environment: + HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}" + HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" + NO_PROXY: "{{ ceph_docker_no_proxy }}" + +- name: Pulling node-exporter container image + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}" + changed_when: false + register: node_exporter_image + until: node_exporter_image.rc == 0 + retries: "{{ docker_pull_retry }}" + delay: 10 + when: + - dashboard_enabled | bool + - inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) or + inventory_hostname in groups.get(nfs_group_name, []) or + inventory_hostname in groups.get(monitoring_group_name, []) + environment: + HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}" + HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" + NO_PROXY: "{{ ceph_docker_no_proxy }}" + +- name: Export local ceph dev image + ansible.builtin.command: > + {{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" + "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}" + delegate_to: localhost + changed_when: false + when: (ceph_docker_dev_image is defined and 
ceph_docker_dev_image) + run_once: true + +- name: Copy ceph dev image file + ansible.builtin.copy: + src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" + dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" + mode: "0644" + when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool) + +- name: Load ceph dev image + ansible.builtin.command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" + changed_when: false + when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool) + +- name: Remove tmp ceph dev image file + ansible.builtin.file: + name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" + state: absent + when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool) diff --git a/roles/ceph-container-common/tasks/main.yml b/roles/ceph-container-common/tasks/main.yml new file mode 100644 index 0000000..6a36e8b --- /dev/null +++ b/roles/ceph-container-common/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: Generate systemd ceph target file + ansible.builtin.copy: + src: ceph.target + dest: /etc/systemd/system/ceph.target + mode: "0644" + +- name: Enable ceph.target + ansible.builtin.service: + name: ceph.target + enabled: true + daemon_reload: true + +- name: Include prerequisites.yml + ansible.builtin.include_tasks: prerequisites.yml + +- name: Include registry.yml + ansible.builtin.include_tasks: registry.yml + when: ceph_docker_registry_auth | bool + +- name: Include fetch_image.yml + ansible.builtin.include_tasks: fetch_image.yml + tags: fetch_container_image + +- name: Get ceph version + ansible.builtin.command: > + {{ container_binary }} run --rm --net=host --entrypoint /usr/bin/ceph + {{ ceph_client_docker_registry }}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }} + --version + 
changed_when: false + check_mode: false + register: ceph_version + +- name: Set_fact ceph_version ceph_version.stdout.split + ansible.builtin.set_fact: + ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}" + +- name: Include release.yml + ansible.builtin.include_tasks: release.yml diff --git a/roles/ceph-container-common/tasks/prerequisites.yml b/roles/ceph-container-common/tasks/prerequisites.yml new file mode 100644 index 0000000..4009a32 --- /dev/null +++ b/roles/ceph-container-common/tasks/prerequisites.yml @@ -0,0 +1,52 @@ +--- +- name: Lvmetad tasks related + when: + - inventory_hostname in groups.get(osd_group_name, []) + - lvmetad_disabled | default(False) | bool + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['distribution_major_version'] | int == 7 + block: + - name: Stop lvmetad + ansible.builtin.service: + name: lvm2-lvmetad + state: stopped + + - name: Disable and mask lvmetad service + ansible.builtin.systemd: + name: lvm2-lvmetad + enabled: false + masked: true + +- name: Remove ceph udev rules + ansible.builtin.file: + path: "{{ item }}" + state: absent + with_items: + - /usr/lib/udev/rules.d/95-ceph-osd.rules + - /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules + +- name: Ensure tmpfiles.d is present + ansible.builtin.lineinfile: + path: /etc/tmpfiles.d/ceph-common.conf + line: "d /run/ceph 0755 167 167 -" + owner: root + group: root + mode: "0644" + state: present + create: true + +- name: Restore certificates selinux context + when: + - ansible_facts['os_family'] == 'RedHat' + - inventory_hostname in groups.get(mon_group_name, []) + or inventory_hostname in groups.get(rgw_group_name, []) + ansible.builtin.command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted + changed_when: false + +- name: Install python3 on osd nodes + ansible.builtin.package: + name: python3 + state: present + when: + - inventory_hostname in groups.get(osd_group_name, []) + - ansible_facts['os_family'] == 'RedHat' diff --git 
a/roles/ceph-container-common/tasks/registry.yml b/roles/ceph-container-common/tasks/registry.yml new file mode 100644 index 0000000..ef9f2ec --- /dev/null +++ b/roles/ceph-container-common/tasks/registry.yml @@ -0,0 +1,11 @@ +--- +- name: Container registry authentication + ansible.builtin.command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}' + args: + stdin: '{{ ceph_docker_registry_password }}' + stdin_add_newline: false + changed_when: false + environment: + HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}" + HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" + NO_PROXY: "{{ ceph_docker_no_proxy }}" diff --git a/roles/ceph-container-common/tasks/release.yml b/roles/ceph-container-common/tasks/release.yml new file mode 100644 index 0000000..0db7fe4 --- /dev/null +++ b/roles/ceph-container-common/tasks/release.yml @@ -0,0 +1,50 @@ +--- +- name: Set_fact ceph_release jewel + ansible.builtin.set_fact: + ceph_release: jewel + when: ceph_version.split('.')[0] is version('10', '==') + +- name: Set_fact ceph_release kraken + ansible.builtin.set_fact: + ceph_release: kraken + when: ceph_version.split('.')[0] is version('11', '==') + +- name: Set_fact ceph_release luminous + ansible.builtin.set_fact: + ceph_release: luminous + when: ceph_version.split('.')[0] is version('12', '==') + +- name: Set_fact ceph_release mimic + ansible.builtin.set_fact: + ceph_release: mimic + when: ceph_version.split('.')[0] is version('13', '==') + +- name: Set_fact ceph_release nautilus + ansible.builtin.set_fact: + ceph_release: nautilus + when: ceph_version.split('.')[0] is version('14', '==') + +- name: Set_fact ceph_release octopus + ansible.builtin.set_fact: + ceph_release: octopus + when: ceph_version.split('.')[0] is version('15', '==') + +- name: Set_fact ceph_release pacific + ansible.builtin.set_fact: + ceph_release: pacific + when: ceph_version.split('.')[0] is version('16', '==') + +- name: 
Set_fact ceph_release quincy + ansible.builtin.set_fact: + ceph_release: quincy + when: ceph_version.split('.')[0] is version('17', '==') + +- name: Set_fact ceph_release reef + ansible.builtin.set_fact: + ceph_release: reef + when: ceph_version.split('.')[0] is version('18', '==') + +- name: Set_fact ceph_release squid + ansible.builtin.set_fact: + ceph_release: squid + when: ceph_version.split('.')[0] is version('19', '==') diff --git a/roles/ceph-container-engine/README.md b/roles/ceph-container-engine/README.md new file mode 100644 index 0000000..6d00c02 --- /dev/null +++ b/roles/ceph-container-engine/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-container-engine + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-container-engine/meta/main.yml b/roles/ceph-container-engine/meta/main.yml new file mode 100644 index 0000000..6de5564 --- /dev/null +++ b/roles/ceph-container-engine/meta/main.yml @@ -0,0 +1,17 @@ +--- +galaxy_info: + company: Red Hat + author: Guillaume Abrioux + description: Handles container installation prerequisites + license: Apache + min_ansible_version: '2.7' + platforms: + - name: Ubuntu + versions: + - xenial + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-container-engine/tasks/main.yml b/roles/ceph-container-engine/tasks/main.yml new file mode 100644 index 0000000..70e9a64 --- /dev/null +++ b/roles/ceph-container-engine/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- name: Include pre_requisites/prerequisites.yml + ansible.builtin.include_tasks: pre_requisites/prerequisites.yml + when: not is_atomic | bool diff --git a/roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml b/roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml new file mode 100644 index 0000000..dfeb2a0 --- /dev/null +++ b/roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml @@ -0,0 +1,32 @@ +--- +- name: Uninstall old 
docker versions + ansible.builtin.package: + name: ['docker', 'docker-engine', 'docker.io', 'containerd', 'runc'] + state: absent + when: container_package_name == 'docker-ce' + +- name: Allow apt to use a repository over https (debian) + ansible.builtin.package: + name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common'] + update_cache: true + register: result + until: result is succeeded + +- name: Add docker's gpg key + ansible.builtin.apt_key: + url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg" + register: result + until: result is succeeded + when: container_package_name == 'docker-ce' + +- name: Add docker repository + ansible.builtin.apt_repository: + repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable" + when: container_package_name == 'docker-ce' + +- name: Add podman ppa repository + ansible.builtin.apt_repository: + repo: "ppa:projectatomic/ppa" + when: + - container_package_name == 'podman' + - ansible_facts['distribution'] == 'Ubuntu' diff --git a/roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml b/roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml new file mode 100644 index 0000000..2ac0512 --- /dev/null +++ b/roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml @@ -0,0 +1,77 @@ +--- +- name: Include specific variables + ansible.builtin.include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml" + - "{{ ansible_facts['os_family'] }}.yml" + when: container_package_name is undefined and container_service_name is undefined + +- name: Debian based systems tasks + ansible.builtin.include_tasks: debian_prerequisites.yml + when: + - ansible_facts['os_family'] == 'Debian' + tags: with_pkg + +- name: Install container packages + ansible.builtin.package: + name: '{{ 
container_package_name }}' + update_cache: true + register: result + until: result is succeeded + tags: with_pkg + +- name: Install lvm2 package + ansible.builtin.package: + name: lvm2 + register: result + until: result is succeeded + tags: with_pkg + when: inventory_hostname in groups.get(osd_group_name, []) + +- name: Extra configuration for docker + when: container_service_name == 'docker' + block: + - name: Create the systemd docker override directory + ansible.builtin.file: + path: /etc/systemd/system/docker.service.d + state: directory + mode: "0755" + when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined + + - name: Create the systemd docker override file + ansible.builtin.template: + src: docker-proxy.conf.j2 + dest: /etc/systemd/system/docker.service.d/proxy.conf + mode: "0600" + owner: root + group: root + register: proxy_created + when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined + + - name: Remove docker proxy configuration + ansible.builtin.file: + path: /etc/systemd/system/docker.service.d/proxy.conf + state: absent + register: proxy_removed + when: + - ceph_docker_http_proxy is not defined + - ceph_docker_https_proxy is not defined + + # using xxx.changed here instead of an ansible handler because we need to + # have an immediate effect and not wait the end of the play. + # using flush_handlers via the meta action plugin isn't enough too because + # it flushes all handlers and not only the one notified in this role. 
+ - name: Restart docker + ansible.builtin.systemd: + name: "{{ container_service_name }}" + state: restarted + daemon_reload: true + when: proxy_created.changed | bool or proxy_removed.changed | bool + + - name: Start container service + ansible.builtin.service: + name: '{{ container_service_name }}' + state: started + enabled: true + tags: + with_pkg diff --git a/roles/ceph-container-engine/templates/docker-proxy.conf.j2 b/roles/ceph-container-engine/templates/docker-proxy.conf.j2 new file mode 100644 index 0000000..22a1cd8 --- /dev/null +++ b/roles/ceph-container-engine/templates/docker-proxy.conf.j2 @@ -0,0 +1,8 @@ +[Service] +{% if ceph_docker_http_proxy is defined %} +Environment="HTTP_PROXY={{ ceph_docker_http_proxy }}" +{% endif %} +{% if ceph_docker_https_proxy is defined %} +Environment="HTTPS_PROXY={{ ceph_docker_https_proxy }}" +{% endif %} +Environment="NO_PROXY={{ ceph_docker_no_proxy }}" diff --git a/roles/ceph-container-engine/vars/CentOS-8.yml b/roles/ceph-container-engine/vars/CentOS-8.yml new file mode 120000 index 0000000..d49e1cd --- /dev/null +++ b/roles/ceph-container-engine/vars/CentOS-8.yml @@ -0,0 +1 @@ +RedHat-8.yml \ No newline at end of file diff --git a/roles/ceph-container-engine/vars/CentOS-9.yml b/roles/ceph-container-engine/vars/CentOS-9.yml new file mode 100644 index 0000000..a46ed44 --- /dev/null +++ b/roles/ceph-container-engine/vars/CentOS-9.yml @@ -0,0 +1,3 @@ +--- +container_package_name: podman +container_service_name: podman diff --git a/roles/ceph-container-engine/vars/Debian.yml b/roles/ceph-container-engine/vars/Debian.yml new file mode 100644 index 0000000..9e2eaf6 --- /dev/null +++ b/roles/ceph-container-engine/vars/Debian.yml @@ -0,0 +1,3 @@ +--- +container_package_name: docker-ce +container_service_name: docker diff --git a/roles/ceph-container-engine/vars/RedHat-8.yml b/roles/ceph-container-engine/vars/RedHat-8.yml new file mode 100644 index 0000000..a46ed44 --- /dev/null +++ 
b/roles/ceph-container-engine/vars/RedHat-8.yml @@ -0,0 +1,3 @@ +--- +container_package_name: podman +container_service_name: podman diff --git a/roles/ceph-container-engine/vars/RedHat.yml b/roles/ceph-container-engine/vars/RedHat.yml new file mode 100644 index 0000000..8fca22c --- /dev/null +++ b/roles/ceph-container-engine/vars/RedHat.yml @@ -0,0 +1,3 @@ +--- +container_package_name: docker +container_service_name: docker diff --git a/roles/ceph-container-engine/vars/Ubuntu.yml b/roles/ceph-container-engine/vars/Ubuntu.yml new file mode 100644 index 0000000..aa5087f --- /dev/null +++ b/roles/ceph-container-engine/vars/Ubuntu.yml @@ -0,0 +1,3 @@ +--- +container_package_name: docker.io +container_service_name: docker diff --git a/roles/ceph-crash/meta/main.yml b/roles/ceph-crash/meta/main.yml new file mode 100644 index 0000000..63bc64d --- /dev/null +++ b/roles/ceph-crash/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Guillaume Abrioux + description: Deploy ceph-crash + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-crash/tasks/main.yml b/roles/ceph-crash/tasks/main.yml new file mode 100644 index 0000000..77b5e90 --- /dev/null +++ b/roles/ceph-crash/tasks/main.yml @@ -0,0 +1,67 @@ +--- +- name: Create and copy client.crash keyring + when: cephx | bool + block: + - name: Create client.crash keyring + ceph_key: + name: "client.crash" + caps: + mon: 'allow profile crash' + mgr: 'allow profile crash' + cluster: "{{ cluster }}" + dest: "{{ ceph_conf_key_directory }}" + import_key: true + mode: "{{ ceph_keyring_permissions }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 
None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" + run_once: true + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Get keys from monitors + ceph_key_info: + name: client.crash + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _crash_keys + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.client.crash.keyring" + content: "{{ _crash_keys.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Start ceph-crash daemon + when: containerized_deployment | bool + block: + - name: Create /var/lib/ceph/crash/posted + ansible.builtin.file: + path: /var/lib/ceph/crash/posted + state: directory + mode: '0755' + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + + - name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + +- name: Start the ceph-crash service + ansible.builtin.systemd: + name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" + state: started + enabled: true + masked: false + daemon_reload: true diff --git a/roles/ceph-crash/tasks/systemd.yml b/roles/ceph-crash/tasks/systemd.yml new file mode 100644 index 0000000..a8c07b1 --- /dev/null +++ b/roles/ceph-crash/tasks/systemd.yml @@ -0,0 +1,9 @@ +--- +- name: Generate systemd unit file for ceph-crash container + ansible.builtin.template: + src: "{{ 
role_path }}/templates/ceph-crash.service.j2" + dest: /etc/systemd/system/ceph-crash@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph crash diff --git a/roles/ceph-crash/templates/ceph-crash.service.j2 b/roles/ceph-crash/templates/ceph-crash.service.j2 new file mode 100644 index 0000000..bba5fe7 --- /dev/null +++ b/roles/ceph-crash/templates/ceph-crash.service.j2 @@ -0,0 +1,52 @@ +[Unit] +Description=Ceph crash dump collector +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-crash-%i +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-crash-%i +ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-crash-%i \ +{% if container_binary == 'podman' %} +-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} +--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ +--security-opt label=disable \ +--net=host \ +{% if cluster != 'ceph' %} +-e CEPH_ARGS="--cluster {{ cluster }}" \ +{% endif %} +{% for v in ceph_common_container_params['volumes'] %} + -v {{ v }} \ +{% endfor %} +{% for k, v in ceph_common_container_params['envs'].items() %} + -e {{ k }}={{ v }} \ +{% endfor %} +--entrypoint=/usr/bin/ceph-crash {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStop=-/usr/bin/{{ container_binary }} stop ceph-crash-%i +{% endif %} +StartLimitInterval=10min +StartLimitBurst=30 +{% if container_binary == 'podman' %} 
+Type=forking +PIDFile=/%t/%n-pid +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=10 + +[Install] +WantedBy=ceph.target diff --git a/roles/ceph-dashboard/meta/main.yml b/roles/ceph-dashboard/meta/main.yml new file mode 100644 index 0000000..8e99090 --- /dev/null +++ b/roles/ceph-dashboard/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Boris Ranto + description: Configures Ceph Dashboard + license: Apache + min_ansible_version: '2.4' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-dashboard/tasks/configure_dashboard.yml b/roles/ceph-dashboard/tasks/configure_dashboard.yml new file mode 100644 index 0000000..bc96e17 --- /dev/null +++ b/roles/ceph-dashboard/tasks/configure_dashboard.yml @@ -0,0 +1,331 @@ +--- +- name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + delegate_to: "{{ groups[mon_group_name][0] }}" + delegate_facts: true + +- name: Set_fact container_exec_cmd + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" + when: containerized_deployment | bool + +- name: Set_fact container_run_cmd + ansible.builtin.set_fact: + ceph_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] + ' run --interactive --net=host --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}" + +- name: Get current mgr backend - ipv4 + ansible.builtin.set_fact: + dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(dashboard_network.split(',')) | first }}" + when: ip_version == 'ipv4' + loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else 
groups.get(mon_group_name) }}" + delegate_to: "{{ item }}" + delegate_facts: true + +- name: Get current mgr backend - ipv6 + ansible.builtin.set_fact: + dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(dashboard_network.split(',')) | last }}" + when: ip_version == 'ipv6' + loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}" + delegate_to: "{{ item }}" + delegate_facts: true + +- name: Include ceph-facts role + ansible.builtin.include_role: + name: ceph-facts + tasks_from: set_radosgw_address.yml + loop: "{{ groups.get(rgw_group_name, []) }}" + run_once: true + loop_control: + loop_var: ceph_dashboard_call_item + when: inventory_hostname in groups.get(rgw_group_name, []) + +- name: Disable SSL for dashboard + when: dashboard_protocol == "http" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + block: + - name: Get SSL status for dashboard + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl" + changed_when: false + register: current_ssl_for_dashboard + + - name: Disable SSL for dashboard + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false" + changed_when: false + when: current_ssl_for_dashboard.stdout == "true" + +- name: With SSL for dashboard + when: dashboard_protocol == "https" + block: + - name: Enable SSL for dashboard + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + + - name: Copy dashboard SSL certificate file + ansible.builtin.copy: + src: "{{ dashboard_crt }}" + dest: "/etc/ceph/ceph-dashboard.crt" + owner: root + group: root + mode: "0440" + remote_src: "{{ dashboard_tls_external | bool }}" + delegate_to: "{{ groups[mon_group_name][0] 
}}" + when: dashboard_crt | length > 0 + + - name: Copy dashboard SSL certificate key + ansible.builtin.copy: + src: "{{ dashboard_key }}" + dest: "/etc/ceph/ceph-dashboard.key" + owner: root + group: root + mode: "0440" + remote_src: "{{ dashboard_tls_external | bool }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + when: dashboard_key | length > 0 + + - name: Generate and copy self-signed certificate + when: dashboard_key | length == 0 or dashboard_crt | length == 0 + run_once: true + block: + - name: Set_fact subj_alt_names + ansible.builtin.set_fact: + subj_alt_names: > + {% for host in groups[mgr_group_name] | default(groups[mon_group_name]) -%} DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %}{%- endfor -%} + + - name: Create tempfile for openssl certificate and key generation + ansible.builtin.tempfile: + state: file + register: openssl_config_file + + - name: Copy the openssl configuration file + ansible.builtin.copy: + src: "{{ '/etc/pki/tls/openssl.cnf' if ansible_facts['os_family'] == 'RedHat' else '/etc/ssl/openssl.cnf' }}" + dest: '{{ openssl_config_file.path }}' + remote_src: true + mode: "0644" + + - name: Add subjectAltName to the openssl configuration + community.general.ini_file: + path: '{{ openssl_config_file.path }}' + section: v3_ca + option: subjectAltName + value: '{{ subj_alt_names | trim }}' + mode: "0644" + + - name: Generate a Self Signed OpenSSL certificate for dashboard + ansible.builtin.shell: | + test -f /etc/ceph/ceph-dashboard.key -a -f /etc/ceph/ceph-dashboard.crt || \ + openssl req -new -nodes -x509 -subj '/O=IT/CN={{ dashboard_certificate_cn }}/' -config {{ openssl_config_file.path }} -days 3650 -keyout /etc/ceph/ceph-dashboard.key -out /etc/ceph/ceph-dashboard.crt -extensions v3_ca + changed_when: false + + - name: Remove the openssl tempfile + ansible.builtin.file: + path: '{{ 
openssl_config_file.path }}' + state: absent + + - name: Slurp self-signed generated certificate for dashboard + ansible.builtin.slurp: + src: "/etc/ceph/{{ item }}" + run_once: true + with_items: + - 'ceph-dashboard.key' + - 'ceph-dashboard.crt' + register: slurp_self_signed_crt + + - name: Copy self-signed generated certificate on mons + ansible.builtin.copy: + dest: "{{ item.0.source }}" + content: "{{ item.0.content | b64decode }}" + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + mode: "{{ '0600' if item.0.source.split('.')[-1] == 'key' else '0664' }}" + delegate_to: "{{ item.1 }}" + with_nested: + - "{{ slurp_self_signed_crt.results }}" + - "{{ groups[mon_group_name] }}" + + - name: Import dashboard certificate file + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + + - name: Import dashboard certificate key + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + +- name: Set the dashboard port + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + +- name: Set the dashboard SSL port + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + failed_when: false # Do not fail if the option does not exist, it only exists post-14.2.0 + +- name: Config the current dashboard backend + ansible.builtin.command: "{{ container_exec_cmd }} ceph 
--cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + run_once: true + with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}' + +- name: Disable mgr dashboard module (restart) + ceph_mgr_module: + name: dashboard + cluster: "{{ cluster }}" + state: disable + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + +- name: Enable mgr dashboard module (restart) + ceph_mgr_module: + name: dashboard + cluster: "{{ cluster }}" + state: enable + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + +- name: Create dashboard admin user + ceph_dashboard_user: + name: "{{ dashboard_admin_user }}" + cluster: "{{ cluster }}" + password: "{{ dashboard_admin_password }}" + roles: ["{{ 'read-only' if dashboard_admin_user_ro | bool else 'administrator' }}"] + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + +- name: Disable unused dashboard features + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + with_items: "{{ dashboard_disabled_features 
}}" + +- name: Set grafana api user + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + +- name: Set grafana api password + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -" + args: + stdin: "{{ grafana_admin_password }}" + stdin_add_newline: false + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + +- name: Disable ssl verification for grafana + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + when: + - dashboard_protocol == "https" + - dashboard_grafana_api_no_ssl_verify | bool + +- name: Set alertmanager host + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + +- name: Set prometheus host + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + +- name: Include grafana layout tasks + ansible.builtin.include_tasks: configure_grafana_layouts.yml + with_items: '{{ grafana_server_addrs }}' + vars: + grafana_server_addr: '{{ item }}' + +- name: Config monitoring api url vip + run_once: true + block: + - name: Config grafana api url vip + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ 
grafana_port }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: dashboard_frontend_vip is defined and dashboard_frontend_vip | length > 0 + + - name: Config alertmanager api url + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: alertmanager_frontend_vip is defined and alertmanager_frontend_vip | length > 0 + + - name: Config prometheus api url + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: prometheus_frontend_vip is defined and prometheus_frontend_vip | length > 0 + +- name: Dashboard object gateway management frontend + when: groups.get(rgw_group_name, []) | length > 0 + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + block: + - name: Set the rgw credentials + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials" + changed_when: false + register: result + until: result is succeeded + retries: 5 + + - name: Set the rgw admin resource + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}" + changed_when: false + when: dashboard_rgw_api_admin_resource | length > 0 + + - name: Disable ssl verification for rgw + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False" + changed_when: false + when: + - dashboard_rgw_api_no_ssl_verify | bool + - radosgw_frontend_ssl_certificate | length > 0 + +- name: Disable mgr dashboard module (restart) + 
ceph_mgr_module: + name: dashboard + cluster: "{{ cluster }}" + state: disable + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + +- name: Enable mgr dashboard module (restart) + ceph_mgr_module: + name: dashboard + cluster: "{{ cluster }}" + state: enable + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true diff --git a/roles/ceph-dashboard/tasks/configure_grafana_layouts.yml b/roles/ceph-dashboard/tasks/configure_grafana_layouts.yml new file mode 100644 index 0000000..2b386ac --- /dev/null +++ b/roles/ceph-dashboard/tasks/configure_grafana_layouts.yml @@ -0,0 +1,13 @@ +--- +- name: Set grafana url + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + +- name: Inject grafana dashboard layouts + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + changed_when: false + when: containerized_deployment | bool diff --git a/roles/ceph-dashboard/tasks/main.yml b/roles/ceph-dashboard/tasks/main.yml new file mode 100644 index 0000000..581cf96 --- /dev/null +++ b/roles/ceph-dashboard/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: Include configure_dashboard.yml + ansible.builtin.include_tasks: configure_dashboard.yml + +- name: Print 
dashboard URL + ansible.builtin.debug: + msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password." + run_once: true diff --git a/roles/ceph-defaults/README.md b/roles/ceph-defaults/README.md new file mode 100644 index 0000000..34f8d5e --- /dev/null +++ b/roles/ceph-defaults/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-defaults + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml new file mode 100644 index 0000000..938ef52 --- /dev/null +++ b/roles/ceph-defaults/defaults/main.yml @@ -0,0 +1,658 @@ +--- +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +###################################### +# Releases name to number dictionary # +###################################### +ceph_release_num: + dumpling: 0.67 + emperor: 0.72 + firefly: 0.80 + giant: 0.87 + hammer: 0.94 + infernalis: 9 + jewel: 10 + kraken: 11 + luminous: 12 + mimic: 13 + nautilus: 14 + octopus: 15 + pacific: 16 + quincy: 17 + reef: 18 + squid: 19 + dev: 99 + + +# The 'cluster' variable determines the name of the cluster. 
+# Changing the default value to something else means that you will +# need to change all the command line calls as well, for example if +# your cluster name is 'foo': +# "ceph health" will become "ceph --cluster foo health" +# +# An easier way to handle this is to use the environment variable CEPH_ARGS +# So run: "export CEPH_ARGS="--cluster foo" +# With that you will be able to run "ceph health" normally +cluster: ceph + +# Inventory host group variables +mon_group_name: mons +osd_group_name: osds +rgw_group_name: rgws +mds_group_name: mdss +nfs_group_name: nfss +rbdmirror_group_name: rbdmirrors +client_group_name: clients +mgr_group_name: mgrs +rgwloadbalancer_group_name: rgwloadbalancers +monitoring_group_name: monitoring +adopt_label_group_names: + - "{{ mon_group_name }}" + - "{{ osd_group_name }}" + - "{{ rgw_group_name }}" + - "{{ mds_group_name }}" + - "{{ nfs_group_name }}" + - "{{ rbdmirror_group_name }}" + - "{{ client_group_name }}" + - "{{ mgr_group_name }}" + - "{{ rgwloadbalancer_group_name }}" + - "{{ monitoring_group_name }}" + +# If configure_firewall is true, then ansible will try to configure the +# appropriate firewalling rules so that Ceph daemons can communicate +# with each others. 
+configure_firewall: true + +# Open ports on corresponding nodes if firewall is installed on it +ceph_mon_firewall_zone: public +ceph_mgr_firewall_zone: public +ceph_osd_firewall_zone: public +ceph_rgw_firewall_zone: public +ceph_mds_firewall_zone: public +ceph_nfs_firewall_zone: public +ceph_rbdmirror_firewall_zone: public +ceph_dashboard_firewall_zone: public +ceph_rgwloadbalancer_firewall_zone: public + +# cephadm account for remote connections +cephadm_ssh_user: root +cephadm_ssh_priv_key_path: "/home/{{ cephadm_ssh_user }}/.ssh/id_rsa" +cephadm_ssh_pub_key_path: "{{ cephadm_ssh_priv_key_path }}.pub" +cephadm_mgmt_network: "{{ public_network }}" + +############ +# PACKAGES # +############ +debian_package_dependencies: [] + +centos_package_dependencies: + - epel-release + - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}" + +redhat_package_dependencies: [] + +suse_package_dependencies: [] + +# Whether or not to install the ceph-test package. +ceph_test: false + +# Enable the ntp service by default to avoid clock skew on ceph nodes +# Disable if an appropriate NTP client is already installed and configured +ntp_service_enabled: true + +# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd +ntp_daemon_type: chronyd + +# This variable determines if ceph packages can be updated. If False, the +# package resources will use "state=present". If True, they will use +# "state=latest". +upgrade_ceph_packages: false + +ceph_use_distro_backports: false # DEBIAN ONLY +ceph_directories_mode: "0755" + +########### +# INSTALL # +########### +# ORIGIN SOURCE +# +# Choose between: +# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'dev' or 'obs' +# - 'distro' means that no separate repo file will be added +# you will get whatever version of Ceph is included in your Linux distro. 
+# 'local' means that the ceph binaries will be copied over from the local machine +ceph_origin: dummy +valid_ceph_origins: + - repository + - distro + - local + + +ceph_repository: dummy +valid_ceph_repository: + - community + - dev + - uca + - custom + - obs + + +# REPOSITORY: COMMUNITY VERSION +# +# Enabled when ceph_repository == 'community' +# +ceph_mirror: https://download.ceph.com +ceph_stable_key: https://download.ceph.com/keys/release.asc +ceph_stable_release: squid +ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}" + +nfs_ganesha_stable: true # use stable repos for nfs-ganesha +centos_release_nfs: centos-release-nfs-ganesha4 +nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu +nfs_ganesha_apt_keyserver: keyserver.ubuntu.com +nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA +libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu + +# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions +# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ +# for more info read: https://github.com/ceph/ceph-ansible/issues/305 +# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}" + +# REPOSITORY: UBUNTU CLOUD ARCHIVE +# +# Enabled when ceph_repository == 'uca' +# +# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive +# usually has newer Ceph releases than the normal distro repository. +# +# +ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu" +ceph_stable_openstack_release_uca: queens +ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}" + +# REPOSITORY: openSUSE OBS +# +# Enabled when ceph_repository == 'obs' +# +# This allows the install of Ceph from the openSUSE OBS repository. 
The OBS repository +# usually has newer Ceph releases than the normal distro repository. +# +# +ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/" + +# REPOSITORY: DEV +# +# Enabled when ceph_repository == 'dev' +# +ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack +ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built) + +nfs_ganesha_dev: false # use development repos for nfs-ganesha + +# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman +# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous +nfs_ganesha_flavor: "ceph_main" + + +# REPOSITORY: CUSTOM +# +# Enabled when ceph_repository == 'custom' +# +# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be +# a URL to the .repo file to be installed on the targets. For deb, +# ceph_custom_repo should be the URL to the repo base. +# +# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc +ceph_custom_repo: https://server.domain.com/ceph-custom-repo + + +# ORIGIN: LOCAL CEPH INSTALLATION +# +# Enabled when ceph_repository == 'local' +# +# Path to DESTDIR of the ceph install +# ceph_installation_dir: "/path/to/ceph_installation/" +# Whether or not to use installer script rundep_installer.sh +# This script takes in rundep and installs the packages line by line onto the machine +# If this is set to false then it is assumed that the machine ceph is being copied onto will already have +# all runtime dependencies installed +# use_installer: false +# Root directory for ceph-ansible +# ansible_dir: "/path/to/ceph-ansible" + + +###################### +# CEPH CONFIGURATION # +###################### + +## Ceph options +# +# Each cluster requires a unique, consistent filesystem ID. By +# default, the playbook generates one for you. 
+# If you want to customize how the fsid is +# generated, you may find it useful to disable fsid generation to +# avoid cluttering up your ansible repo. If you set `generate_fsid` to +# false, you *must* generate `fsid` in another way. +# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT +fsid: "{{ cluster_uuid.stdout }}" +generate_fsid: true + +ceph_conf_key_directory: /etc/ceph + +ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}" + +# Permissions for keyring files in /etc/ceph +ceph_keyring_permissions: '0600' + +cephx: true + +# Cluster configuration +ceph_cluster_conf: + global: + public_network: "{{ public_network | default(omit) }}" + cluster_network: "{{ cluster_network | default(omit) }}" + osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}" + ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}" + ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}" + osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}" + +## Client options +# +rbd_cache: "true" +rbd_cache_writethrough_until_flush: "true" +rbd_concurrent_management_ops: 20 + +rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions + +# Permissions for the rbd_client_log_path and +# rbd_client_admin_socket_path. Depending on your use case for Ceph +# you may want to change these values. The default, which is used if +# any of the variables are unset or set to a false value (like `null` +# or `false`) is to automatically determine what is appropriate for +# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770 +# for infernalis releases, and root:root and 1777 for pre-infernalis +# releases. 
+# +# For other use cases, including running Ceph with OpenStack, you'll +# want to set these differently: +# +# For OpenStack on RHEL, you'll want: +# rbd_client_directory_owner: "qemu" +# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt) +# rbd_client_directory_mode: "0755" +# +# For OpenStack on Ubuntu or Debian, set: +# rbd_client_directory_owner: "libvirt-qemu" +# rbd_client_directory_group: "kvm" +# rbd_client_directory_mode: "0755" +# +# If you set rbd_client_directory_mode, you must use a string (e.g., +# 'rbd_client_directory_mode: "0755"', *not* +# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode +# must be in octal or symbolic form +rbd_client_directory_owner: ceph +rbd_client_directory_group: ceph +rbd_client_directory_mode: "0755" + +rbd_client_log_path: /var/log/ceph +rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor +rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor + +## Monitor options +# set to either ipv4 or ipv6, whichever your network is using +ip_version: ipv4 + +mon_host_v1: + enabled: true + suffix: ':6789' +mon_host_v2: + suffix: ':3300' + +enable_ceph_volume_debug: false + +########## +# CEPHFS # +########## +# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value +# `pg_num` and `pgp_num` keys will be ignored, even if specified. 
+# eg: +# cephfs_data_pool: +# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" +# target_size_ratio: 0.2 +cephfs: cephfs # name of the ceph filesystem +cephfs_data_pool: + name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}" +cephfs_metadata_pool: + name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}" +cephfs_pools: + - "{{ cephfs_data_pool }}" + - "{{ cephfs_metadata_pool }}" + +## OSD options +# +lvmetad_disabled: false +is_hci: false +hci_safety_factor: 0.2 +non_hci_safety_factor: 0.7 +safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}" +osd_memory_target: 4294967296 +journal_size: 5120 # OSD journal size in MB +block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'. +public_network: 0.0.0.0/0 +cluster_network: "{{ public_network | regex_replace(' ', '') }}" +osd_mkfs_type: xfs +osd_mkfs_options_xfs: -f -i size=2048 +osd_mount_options_xfs: noatime,largeio,inode64,swalloc +osd_objectstore: bluestore + +# Any device containing these patterns in their path will be excluded. +osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*" + +## MDS options +# +mds_max_mds: 1 + +## Rados Gateway options +# +radosgw_frontend_type: beast # For additional frontends see: https://docs.ceph.com/en/latest/radosgw/frontends/ + +radosgw_frontend_port: 8080 +# The server private key, public certificate and any other CA or intermediate certificates should be in one file, in PEM format. +radosgw_frontend_ssl_certificate: "" +radosgw_frontend_ssl_certificate_data: "" # certificate contents to be written to path defined by radosgw_frontend_ssl_certificate +radosgw_frontend_options: "" +radosgw_thread_pool_size: 512 + + +# You must define either radosgw_interface or radosgw_address. +# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml). +# Eg. 
If you want to specify for each radosgw node which address the radosgw will bind to you can set it in your **inventory host file** by using 'radosgw_address' variable. +# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined. +radosgw_interface: interface +radosgw_address: x.x.x.x +radosgw_address_block: subnet +radosgw_keystone_ssl: false # activate this when using keystone PKI keys +radosgw_num_instances: 1 +rgw_zone: default # This is used for rgw instance client names. + + +## Testing mode +# enable this mode _only_ when you have a single node +# if you don't want it keep the option commented +# common_single_host_mode: true + +## Handlers - restarting daemons after a config change +# if for whatever reasons the content of your ceph configuration changes +# ceph daemons will be restarted as well. At the moment, we can not detect +# which config option changed so all the daemons will be restarted. Although +# this restart will be serialized for each node, in between a health check +# will be performed so we make sure we don't move to the next node until +# ceph is not healthy +# Obviously between the checks (for monitors to be in quorum and for osd's pgs +# to be clean) we have to wait. These retries and delays can be configurable +# for both monitors and osds. 
+# +# Monitor handler checks +handler_health_mon_check_retries: 10 +handler_health_mon_check_delay: 20 +# +# OSD handler checks +handler_health_osd_check_retries: 40 +handler_health_osd_check_delay: 30 +handler_health_osd_check: true +# +# MDS handler checks +handler_health_mds_check_retries: 5 +handler_health_mds_check_delay: 10 +# +# RGW handler checks +handler_health_rgw_check_retries: 5 +handler_health_rgw_check_delay: 10 +handler_rgw_use_haproxy_maintenance: false + +# NFS handler checks +handler_health_nfs_check_retries: 5 +handler_health_nfs_check_delay: 10 + +# RBD MIRROR handler checks +handler_health_rbd_mirror_check_retries: 5 +handler_health_rbd_mirror_check_delay: 10 + +# MGR handler checks +handler_health_mgr_check_retries: 5 +handler_health_mgr_check_delay: 10 + +## health mon/osds check retries/delay: + +health_mon_check_retries: 20 +health_mon_check_delay: 10 +health_osd_check_retries: 20 +health_osd_check_delay: 10 + +############## +# RBD-MIRROR # +############## + +ceph_rbd_mirror_pool: "rbd" + +############### +# NFS-GANESHA # +############### +# +# Access type options +# +# Enable NFS File access +# If set to true, then ganesha is set up to export the root of the +# Ceph filesystem, and ganesha's attribute and directory caching is disabled +# as much as possible since libcephfs clients also caches the same +# information. +# +# Set this to true to enable File access via NFS. Requires an MDS role. +nfs_file_gw: false +# Set this to true to enable Object access via NFS. Requires an RGW role. +nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}" + + +################### +# CONFIG OVERRIDE # +################### + +# Ceph configuration file override. +# This allows you to specify more configuration options +# using an INI style format. +# +# When configuring RGWs, make sure you use the form [client.rgw.*] +# instead of [client.radosgw.*]. 
+# For more examples check the profiles directory of https://github.com/ceph/ceph-ansible. +# +# The following sections are supported: [global], [mon], [osd], [mds], [client] +# +# Example: +# ceph_conf_overrides: +# global: +# foo: 1234 +# bar: 5678 +# "client.rgw.{{ rgw_zone }}.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}": +# rgw_zone: zone1 +# +ceph_conf_overrides: {} + + +############# +# OS TUNING # +############# + +disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}" +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + - { name: vm.zone_reclaim_mode, value: 0 } + - { name: vm.swappiness, value: 10 } + - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" } + +# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES +# Set this to a byte value (e.g. 134217728) +# A value of 0 will leave the package default. +ceph_tcmalloc_max_total_thread_cache: 134217728 + + +########## +# DOCKER # +########## +ceph_docker_image: "ceph/ceph" +ceph_docker_image_tag: v19 +ceph_docker_registry: quay.io +ceph_docker_registry_auth: false +# ceph_docker_registry_username: +# ceph_docker_registry_password: +# ceph_docker_http_proxy: +# ceph_docker_https_proxy: +ceph_docker_no_proxy: "localhost,127.0.0.1" +## Client only docker image - defaults to {{ ceph_docker_image }} +ceph_client_docker_image: "{{ ceph_docker_image }}" +ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}" +ceph_client_docker_registry: "{{ ceph_docker_registry }}" +containerized_deployment: false +container_binary: +timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}" +ceph_common_container_params: + envs: + NODE_NAME: "{{ ansible_facts['hostname'] }}" + CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + 
TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES: "{{ ceph_tcmalloc_max_total_thread_cache }}" + args: + - --setuser=ceph + - --setgroup=ceph + - --default-log-to-file=false + - --default-log-to-stderr=true + - --default-log-stderr-prefix="debug " + volumes: + - /var/lib/ceph/crash:/var/lib/ceph/crash:z + - /var/run/ceph:/var/run/ceph:z + - /var/log/ceph:/var/log/ceph:z + - /etc/ceph:/etc/ceph:z + - /etc/localtime:/etc/localtime:ro + +# this is only here for usage with the rolling_update.yml playbook +# do not ever change this here +rolling_update: false + +##################### +# Docker pull retry # +##################### +docker_pull_retry: 3 +docker_pull_timeout: "300s" + + +############# +# DASHBOARD # +############# +dashboard_enabled: true +# Choose http or https +# For https, you should set dashboard.crt/key and grafana.crt/key +# If you define the dashboard_crt and dashboard_key variables, but leave them as '', +# then we will autogenerate a cert and keyfile +dashboard_protocol: https +dashboard_port: 8443 +# set this variable to the network you want the dashboard to listen on. 
(Default to public_network) +dashboard_network: "{{ public_network }}" +dashboard_admin_user: admin +dashboard_admin_user_ro: false +# This variable must be set with a strong custom password when dashboard_enabled is True +# dashboard_admin_password: p@ssw0rd +# We only need this for SSL (https) connections +dashboard_crt: '' +dashboard_key: '' +dashboard_certificate_cn: ceph-dashboard +dashboard_tls_external: false +dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}" +dashboard_rgw_api_user_id: ceph-dashboard +dashboard_rgw_api_admin_resource: '' +dashboard_rgw_api_no_ssl_verify: false +dashboard_frontend_vip: '' +dashboard_disabled_features: [] +prometheus_frontend_vip: '' +alertmanager_frontend_vip: '' +node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0" +node_exporter_port: 9100 +grafana_admin_user: admin +# This variable must be set with a strong custom password when dashboard_enabled is True +# grafana_admin_password: admin +# We only need this for SSL (https) connections +grafana_crt: '' +grafana_key: '' +# When using https, please fill with a hostname for which grafana_crt is valid. 
+grafana_server_fqdn: '' +grafana_container_image: "docker.io/grafana/grafana:6.7.4" +grafana_container_cpu_period: 100000 +grafana_container_cpu_cores: 2 +# container_memory is in GB +grafana_container_memory: 4 +grafana_uid: 472 +grafana_datasource: Dashboard +grafana_dashboards_path: "/etc/grafana/dashboards/ceph-dashboard" +grafana_dashboard_version: main +grafana_dashboard_files: + - ceph-cluster.json + - cephfs-overview.json + - host-details.json + - hosts-overview.json + - osd-device-details.json + - osds-overview.json + - pool-detail.json + - pool-overview.json + - radosgw-detail.json + - radosgw-overview.json + - radosgw-sync-overview.json + - rbd-details.json + - rbd-overview.json +grafana_plugins: + - vonage-status-panel + - grafana-piechart-panel +grafana_allow_embedding: true +grafana_port: 3000 +grafana_network: "{{ public_network }}" +grafana_conf_overrides: {} +prometheus_container_image: "docker.io/prom/prometheus:v2.7.2" +prometheus_container_cpu_period: 100000 +prometheus_container_cpu_cores: 2 +# container_memory is in GB +prometheus_container_memory: 4 +prometheus_data_dir: /var/lib/prometheus +prometheus_conf_dir: /etc/prometheus +prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image +prometheus_port: 9092 +prometheus_conf_overrides: {} +# Uncomment out this variable if you need to customize the retention period for prometheus storage. +# set it to '30d' if you want to retain 30 days of data. 
+# prometheus_storage_tsdb_retention_time: 15d +alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2" +alertmanager_container_cpu_period: 100000 +alertmanager_container_cpu_cores: 2 +# container_memory is in GB +alertmanager_container_memory: 4 +alertmanager_data_dir: /var/lib/alertmanager +alertmanager_conf_dir: /etc/alertmanager +alertmanager_port: 9093 +alertmanager_cluster_port: 9094 +alertmanager_conf_overrides: {} +alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}" + +no_log_on_ceph_key_tasks: true + +############### +# DEPRECATION # +############### + + +###################################################### +# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER # +# *DO NOT* MODIFY THEM # +###################################################### + +container_exec_cmd: +docker: false +ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}" diff --git a/roles/ceph-defaults/meta/main.yml b/roles/ceph-defaults/meta/main.yml new file mode 100644 index 0000000..ec8d196 --- /dev/null +++ b/roles/ceph-defaults/meta/main.yml @@ -0,0 +1,17 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Handles ceph-ansible default vars for all roles + license: Apache + min_ansible_version: '2.7' + platforms: + - name: Ubuntu + versions: + - xenial + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-defaults/tasks/main.yml b/roles/ceph-defaults/tasks/main.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/roles/ceph-defaults/tasks/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/ceph-defaults/vars/main.yml b/roles/ceph-defaults/vars/main.yml new file mode 100644 index 0000000..3716c4d --- /dev/null +++ b/roles/ceph-defaults/vars/main.yml @@ -0,0 +1,3 @@ +--- +ceph_osd_pool_default_crush_rule: -1 +ceph_osd_pool_default_crush_rule_name: "replicated_rule" diff --git 
a/roles/ceph-exporter/defaults/main.yml b/roles/ceph-exporter/defaults/main.yml new file mode 100644 index 0000000..2633c6e --- /dev/null +++ b/roles/ceph-exporter/defaults/main.yml @@ -0,0 +1,24 @@ +--- +########### +# GENERAL # +########### + +ceph_exporter_addr: "0.0.0.0" +ceph_exporter_port: 9926 +ceph_exporter_stats_period: 5 # seconds +ceph_exporter_prio_limit: 5 + +########## +# DOCKER # +########## + +# If you want to add parameters, you should retain the existing ones and include the new ones. +ceph_exporter_container_params: + args: + - -f + - -n=client.ceph-exporter + - --sock-dir=/var/run/ceph + - --addrs={{ ceph_exporter_addr }} + - --port={{ ceph_exporter_port }} + - --stats-period={{ ceph_exporter_stats_period }} + - --prio-limit={{ ceph_exporter_prio_limit }} diff --git a/roles/ceph-exporter/meta/main.yml b/roles/ceph-exporter/meta/main.yml new file mode 100644 index 0000000..61fc0ce --- /dev/null +++ b/roles/ceph-exporter/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Guillaume Abrioux + description: Deploy ceph-exporter + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-exporter/tasks/main.yml b/roles/ceph-exporter/tasks/main.yml new file mode 100644 index 0000000..08fa693 --- /dev/null +++ b/roles/ceph-exporter/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: Create and copy client.ceph-exporter keyring + when: cephx | bool + block: + - name: Create client.ceph-exporter keyring + ceph_key: + name: "client.ceph-exporter" + caps: + mon: 'allow r' + mgr: 'allow r' + osd: 'allow r' + cluster: "{{ cluster }}" + dest: "{{ ceph_conf_key_directory }}" + import_key: true + mode: "{{ ceph_keyring_permissions }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry 
+ '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" + run_once: true + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Get keys from monitors + ceph_key_info: + name: client.ceph-exporter + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _exporter_keys + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.client.ceph-exporter.keyring" + content: "{{ _exporter_keys.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + when: containerized_deployment | bool + +- name: Start the ceph-exporter service + ansible.builtin.systemd: + name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}" + state: started + enabled: true + masked: false + daemon_reload: true diff --git a/roles/ceph-exporter/tasks/systemd.yml b/roles/ceph-exporter/tasks/systemd.yml new file mode 100644 index 0000000..4e4733f --- /dev/null +++ b/roles/ceph-exporter/tasks/systemd.yml @@ -0,0 +1,9 @@ +--- +- name: Generate systemd unit file for ceph-exporter container + ansible.builtin.template: + src: "{{ role_path }}/templates/ceph-exporter.service.j2" + dest: 
/etc/systemd/system/ceph-exporter@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph exporter diff --git a/roles/ceph-exporter/templates/ceph-exporter.service.j2 b/roles/ceph-exporter/templates/ceph-exporter.service.j2 new file mode 100644 index 0000000..171bcd9 --- /dev/null +++ b/roles/ceph-exporter/templates/ceph-exporter.service.j2 @@ -0,0 +1,50 @@ +[Unit] +Description=Ceph exporter +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-exporter-%i +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-exporter-%i +ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-exporter-%i \ +{% if container_binary == 'podman' %} +-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} +--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ +--security-opt label=disable \ +--net=host \ +{% for v in ceph_common_container_params['volumes'] + ceph_exporter_container_params['volumes'] | default([]) %} + -v {{ v }} \ +{% endfor %} +{% for k, v in (ceph_common_container_params['envs'] | combine(ceph_exporter_container_params['envs'] | default({}))).items() %} + -e {{ k }}={{ v }} \ +{% endfor %} +--entrypoint=/usr/bin/ceph-exporter {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ + {{ (ceph_common_container_params['args'] + ceph_exporter_container_params['args'] | default([])) | join(' ') }} +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStop=-/usr/bin/{{ 
container_binary }} stop ceph-exporter-%i +{% endif %} +StartLimitInterval=10min +StartLimitBurst=30 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=10 + +[Install] +WantedBy=ceph.target diff --git a/roles/ceph-facts/README.md b/roles/ceph-facts/README.md new file mode 100644 index 0000000..592982d --- /dev/null +++ b/roles/ceph-facts/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-facts + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-facts/meta/main.yml b/roles/ceph-facts/meta/main.yml new file mode 100644 index 0000000..78818ae --- /dev/null +++ b/roles/ceph-facts/meta/main.yml @@ -0,0 +1,17 @@ +--- +galaxy_info: + company: Red Hat + author: Guillaume Abrioux + description: Set some facts for ceph to be deployed + license: Apache + min_ansible_version: '2.7' + platforms: + - name: Ubuntu + versions: + - xenial + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-facts/tasks/container_binary.yml b/roles/ceph-facts/tasks/container_binary.yml new file mode 100644 index 0000000..2f1355c --- /dev/null +++ b/roles/ceph-facts/tasks/container_binary.yml @@ -0,0 +1,10 @@ +--- +- name: Check if podman binary is present + ansible.builtin.stat: + path: /usr/bin/podman + register: podman_binary + +- name: Set_fact container_binary + ansible.builtin.set_fact: + container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] in ['8', '9']) else 'docker' }}" + when: not docker2podman | default(false) | bool diff --git a/roles/ceph-facts/tasks/devices.yml b/roles/ceph-facts/tasks/devices.yml new file mode 100644 index 0000000..7f9b399 --- /dev/null +++ b/roles/ceph-facts/tasks/devices.yml @@ -0,0 +1,81 @@ +--- +- name: Resolve devices 
+ when: + - devices is defined + - not osd_auto_discovery | default(False) | bool + block: + - name: Resolve device link(s) + ansible.builtin.command: readlink -f {{ item }} + changed_when: false + check_mode: false + with_items: "{{ devices }}" + register: devices_prepare_canonicalize + + - name: Set_fact build devices from resolved symlinks + ansible.builtin.set_fact: + devices: "{{ devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search', '/dev/disk') | list | unique }}" + +- name: Resolve dedicated_device + when: + - dedicated_devices is defined + - not osd_auto_discovery | default(False) | bool + block: + - name: Resolve dedicated_device link(s) + ansible.builtin.command: readlink -f {{ item }} + changed_when: false + check_mode: false + with_items: "{{ dedicated_devices }}" + register: dedicated_devices_prepare_canonicalize + + - name: Set_fact build dedicated_devices from resolved symlinks + ansible.builtin.set_fact: + dedicated_devices: "{{ dedicated_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search', '/dev/disk') | list | unique }}" + +- name: Resolve bluestore_wal_device + when: + - bluestore_wal_devices is defined + - not osd_auto_discovery | default(False) | bool + block: + - name: Resolve bluestore_wal_device link(s) + ansible.builtin.command: readlink -f {{ item }} + changed_when: false + check_mode: false + with_items: "{{ bluestore_wal_devices }}" + register: bluestore_wal_devices_prepare_canonicalize + + - name: Set_fact build bluestore_wal_devices from resolved symlinks + ansible.builtin.set_fact: + bluestore_wal_devices: "{{ bluestore_wal_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search', '/dev/disk') | list | unique }}" + +- name: Collect existed devices + vars: + device: "{{ item.key | regex_replace('^', '/dev/') }}" + ansible.builtin.command: test -b {{ device }} + changed_when: false + ignore_errors: true + loop: "{{ ansible_facts['devices'] | dict2items }}" 
+ when: + - osd_auto_discovery | default(False) | bool + - ansible_facts['devices'] is defined + - item.value.removable == "0" + - item.value.sectors != "0" + - item.value.partitions|count == 0 + - item.value.holders|count == 0 + - ansible_facts['mounts'] | selectattr('device', 'equalto', device) | list | length == 0 + - item.key is not match osd_auto_discovery_exclude + - device not in dedicated_devices | default([]) + - device not in bluestore_wal_devices | default([]) + - device not in (lvm_volumes | default([]) | map(attribute='data') | list) + register: devices_check + +- name: Set_fact devices generate device list when osd_auto_discovery + vars: + device: "{{ item.item.key | regex_replace('^', '/dev/') }}" + ansible.builtin.set_fact: + devices: "{{ devices | default([]) | union([device]) }}" + loop: "{{ devices_check.results }}" + when: + - devices_check is defined + - devices_check is not skipped + - not item.skipped | default(false) + - not item.failed | default(false) diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml new file mode 100644 index 0000000..8cedcdd --- /dev/null +++ b/roles/ceph-facts/tasks/facts.yml @@ -0,0 +1,250 @@ +--- +- name: Check if it is atomic host + ansible.builtin.stat: + path: /run/ostree-booted + register: stat_ostree + +- name: Set_fact is_atomic + ansible.builtin.set_fact: + is_atomic: "{{ stat_ostree.stat.exists }}" + +- name: Import_tasks container_binary.yml + ansible.builtin.import_tasks: container_binary.yml + +- name: Set_fact ceph_cmd + ansible.builtin.set_fact: + ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}" + +# In case ansible_python_interpreter is set by the user, +# ansible will not discover python and discovered_interpreter_python +# will 
not be set +- name: Set_fact discovered_interpreter_python + ansible.builtin.set_fact: + discovered_interpreter_python: "{{ ansible_python_interpreter }}" + when: ansible_python_interpreter is defined + +# If ansible_python_interpreter is not defined, this can result in the +# discovered_interpreter_python fact from being set. This fails later in this +# playbook and is used elsewhere. +- name: Set_fact discovered_interpreter_python if not previously set + ansible.builtin.set_fact: + discovered_interpreter_python: "{{ ansible_facts['discovered_interpreter_python'] }}" + when: + - discovered_interpreter_python is not defined + - ansible_facts['discovered_interpreter_python'] is defined + +# Set ceph_release to ceph_stable by default +- name: Set_fact ceph_release ceph_stable_release + ansible.builtin.set_fact: + ceph_release: "{{ ceph_stable_release }}" + +- name: Set_fact monitor_name ansible_facts['hostname'] + ansible.builtin.set_fact: + monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}" + delegate_to: "{{ item }}" + delegate_facts: true + with_items: "{{ groups.get(mon_group_name, []) }}" + run_once: true + when: groups.get(mon_group_name, []) | length > 0 + +- name: Find a running monitor + when: groups.get(mon_group_name, []) | length > 0 + block: + - name: Set_fact container_exec_cmd + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}" + when: + - containerized_deployment | bool + + - name: Find a running mon container + ansible.builtin.command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" + register: find_running_mon_container + failed_when: false + run_once: true + delegate_to: "{{ item }}" + with_items: "{{ groups.get(mon_group_name, []) }}" + changed_when: 
false + when: + - containerized_deployment | bool + + - name: Check for a ceph mon socket + ansible.builtin.shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok + changed_when: false + failed_when: false + check_mode: false + register: mon_socket_stat + run_once: true + delegate_to: "{{ item }}" + with_items: "{{ groups.get(mon_group_name, []) }}" + when: + - not containerized_deployment | bool + + - name: Check if the ceph mon socket is in-use + ansible.builtin.command: grep -q {{ item.stdout }} /proc/net/unix + changed_when: false + failed_when: false + check_mode: false + register: mon_socket + run_once: true + delegate_to: "{{ hostvars[item.item]['inventory_hostname'] }}" + with_items: "{{ mon_socket_stat.results }}" + when: + - not containerized_deployment | bool + - item.rc == 0 + + - name: Set_fact running_mon - non_container + ansible.builtin.set_fact: + running_mon: "{{ hostvars[item.item.item]['inventory_hostname'] }}" + with_items: "{{ mon_socket.results }}" + run_once: true + when: + - not containerized_deployment | bool + - item.rc is defined + - item.rc == 0 + + - name: Set_fact running_mon - container + ansible.builtin.set_fact: + running_mon: "{{ item.item }}" + run_once: true + with_items: "{{ find_running_mon_container.results }}" + when: + - containerized_deployment | bool + - item.stdout_lines | default([]) | length > 0 + + - name: Set_fact _container_exec_cmd + ansible.builtin.set_fact: + _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_facts']['hostname'] }}" + when: + - containerized_deployment | bool + + # this task shouldn't run in a rolling_update situation + # because it blindly picks a mon, which may be down because + # of the rolling update + - name: Get current fsid if cluster is already running + ansible.builtin.command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ 
cluster }} fsid" + changed_when: false + failed_when: false + check_mode: false + register: current_fsid + run_once: true + delegate_to: "{{ groups[mon_group_name][0] if running_mon is undefined else running_mon }}" + when: + - not rolling_update | bool + +# set this as a default when performing a rolling_update +# so the rest of the tasks here will succeed +- name: Set_fact current_fsid rc 1 + ansible.builtin.set_fact: + current_fsid: + rc: 1 + when: rolling_update | bool or groups.get(mon_group_name, []) | length == 0 + +- name: Get current fsid + ansible.builtin.command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid" + register: rolling_update_fsid + delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}" + until: rolling_update_fsid is succeeded + changed_when: false + when: + - rolling_update | bool + - groups.get(mon_group_name, []) | length > 0 + +- name: Set_fact fsid + ansible.builtin.set_fact: + fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}" + when: + - rolling_update | bool + - groups.get(mon_group_name, []) | length > 0 + +- name: Set_fact fsid from current_fsid + ansible.builtin.set_fact: + fsid: "{{ current_fsid.stdout }}" + run_once: true + when: current_fsid.rc == 0 + +- name: Fsid related tasks + when: + - generate_fsid | bool + - current_fsid.rc != 0 + - not rolling_update | bool + block: + - name: Generate cluster fsid + ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['discovered_interpreter_python'] }} -c 'import uuid; print(str(uuid.uuid4()))'" + register: cluster_uuid + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + run_once: true + + - name: Set_fact fsid + ansible.builtin.set_fact: + fsid: "{{ cluster_uuid.stdout }}" + +- name: Import_tasks devices.yml + ansible.builtin.import_tasks: devices.yml + when: 
inventory_hostname in groups.get(osd_group_name, []) + +- name: Check if the ceph conf exists + ansible.builtin.stat: + path: '/etc/ceph/{{ cluster }}.conf' + register: ceph_conf + +- name: Set default osd_pool_default_crush_rule fact + ansible.builtin.set_fact: + osd_pool_default_crush_rule: "{{ ceph_osd_pool_default_crush_rule }}" + +- name: Get default crush rule value from ceph configuration + when: ceph_conf.stat.exists | bool + block: + - &read-osd-pool-default-crush-rule + name: Read osd pool default crush rule + ansible.builtin.command: grep 'osd_pool_default_crush_rule' /etc/ceph/{{ cluster }}.conf + register: crush_rule_variable + changed_when: false + check_mode: false + failed_when: crush_rule_variable.rc not in (0, 1) + - &set-osd-pool-default-crush-rule-fact + name: Set osd_pool_default_crush_rule fact + ansible.builtin.set_fact: + osd_pool_default_crush_rule: "{{ crush_rule_variable.stdout.split(' = ')[1] }}" + when: crush_rule_variable.rc == 0 + +- name: Get default crush rule value from running monitor ceph configuration + when: + - running_mon is defined + - not ceph_conf.stat.exists | bool + block: + - <<: *read-osd-pool-default-crush-rule # noqa: name[casing] + delegate_to: "{{ running_mon }}" + - *set-osd-pool-default-crush-rule-fact + +- name: Import_tasks set_monitor_address.yml + ansible.builtin.import_tasks: set_monitor_address.yml + when: groups.get(mon_group_name, []) | length > 0 + +- name: Import_tasks set_radosgw_address.yml + ansible.builtin.include_tasks: set_radosgw_address.yml + when: + - set_radosgw_address | default(true) + - inventory_hostname in groups.get(rgw_group_name, []) + +- name: Set_fact ceph_run_cmd + ansible.builtin.set_fact: + ceph_run_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 
'ceph' }}" + delegate_to: "{{ item }}" + delegate_facts: true + run_once: true + with_items: + - "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}" + - "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] }}" + - "{{ groups[client_group_name] if groups[client_group_name] | default([]) | length > 0 else [] }}" + +- name: Set_fact ceph_admin_command + ansible.builtin.set_fact: + ceph_admin_command: "{{ hostvars[item]['ceph_run_cmd'] }} -n client.admin -k /etc/ceph/{{ cluster }}.client.admin.keyring" + delegate_to: "{{ item }}" + delegate_facts: true + run_once: true + with_items: + - "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}" + - "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] }}" + - "{{ groups[client_group_name] if groups[client_group_name] | default([]) | length > 0 else [] }}" diff --git a/roles/ceph-facts/tasks/get_def_crush_rule_name.yml b/roles/ceph-facts/tasks/get_def_crush_rule_name.yml new file mode 100644 index 0000000..7d7406c --- /dev/null +++ b/roles/ceph-facts/tasks/get_def_crush_rule_name.yml @@ -0,0 +1,17 @@ +--- +- name: Get current default crush rule details + ceph_crush_rule_info: + cluster: "{{ cluster }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: default_crush_rule_details + delegate_to: "{{ delegated_node | default(groups[mon_group_name][0]) }}" + run_once: true + +- name: Get current default crush rule name + ansible.builtin.set_fact: + ceph_osd_pool_default_crush_rule_name: "{{ item.rule_name }}" + with_items: "{{ default_crush_rule_details.stdout | default('{}', True) | from_json }}" + run_once: true + when: item.rule_id | int == osd_pool_default_crush_rule | int diff --git 
a/roles/ceph-facts/tasks/grafana.yml b/roles/ceph-facts/tasks/grafana.yml new file mode 100644 index 0000000..df964de --- /dev/null +++ b/roles/ceph-facts/tasks/grafana.yml @@ -0,0 +1,35 @@ +- name: Set grafana_server_addr fact - ipv4 + ansible.builtin.set_fact: + grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(grafana_network.split(',')) | first }}" + when: + - groups.get(monitoring_group_name, []) | length > 0 + - ip_version == 'ipv4' + - dashboard_enabled | bool + - inventory_hostname in groups[monitoring_group_name] + +- name: Set grafana_server_addr fact - ipv6 + ansible.builtin.set_fact: + grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ansible.utils.ipwrap }}" + when: + - groups.get(monitoring_group_name, []) | length > 0 + - ip_version == 'ipv6' + - dashboard_enabled | bool + - inventory_hostname in groups[monitoring_group_name] + +- name: Set grafana_server_addrs fact - ipv4 + ansible.builtin.set_fact: + grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(grafana_network.split(',')) | first]) | unique }}" + with_items: "{{ groups.get(monitoring_group_name, []) }}" + when: + - groups.get(monitoring_group_name, []) | length > 0 + - ip_version == 'ipv4' + - dashboard_enabled | bool + +- name: Set grafana_server_addrs fact - ipv6 + ansible.builtin.set_fact: + grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ansible.utils.ipwrap]) | unique }}" + with_items: "{{ groups.get(monitoring_group_name, []) }}" + when: + - groups.get(monitoring_group_name, []) | length > 0 + - ip_version == 'ipv6' + - dashboard_enabled | bool diff --git a/roles/ceph-facts/tasks/main.yml b/roles/ceph-facts/tasks/main.yml new file 
mode 100644 index 0000000..392ded9 --- /dev/null +++ b/roles/ceph-facts/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: Include facts.yml + ansible.builtin.include_tasks: facts.yml diff --git a/roles/ceph-facts/tasks/set_monitor_address.yml b/roles/ceph-facts/tasks/set_monitor_address.yml new file mode 100644 index 0000000..142b56a --- /dev/null +++ b/roles/ceph-facts/tasks/set_monitor_address.yml @@ -0,0 +1,14 @@ +--- +- name: Set_fact _monitor_addresses - ipv4 + ansible.builtin.set_fact: + _monitor_addresses: "{{ _monitor_addresses | default({}) | combine({item: hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['public_network'].split(',')) | first}) }}" + with_items: "{{ groups.get(mon_group_name, []) }}" + when: + - ip_version == 'ipv4' + +- name: Set_fact _monitor_addresses - ipv6 + ansible.builtin.set_fact: + _monitor_addresses: "{{ _monitor_addresses | default({}) | combine({item: hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['public_network'].split(',')) | last | ansible.utils.ipwrap}) }}" + with_items: "{{ groups.get(mon_group_name, []) }}" + when: + - ip_version == 'ipv6' diff --git a/roles/ceph-facts/tasks/set_radosgw_address.yml b/roles/ceph-facts/tasks/set_radosgw_address.yml new file mode 100644 index 0000000..f862b9f --- /dev/null +++ b/roles/ceph-facts/tasks/set_radosgw_address.yml @@ -0,0 +1,80 @@ +--- +- name: Dashboard related tasks + when: ceph_dashboard_call_item is defined + block: + - name: Set current radosgw_address_block, radosgw_address, radosgw_interface from node "{{ ceph_dashboard_call_item }}" + ansible.builtin.set_fact: + radosgw_address_block: "{{ hostvars[ceph_dashboard_call_item]['radosgw_address_block'] | default(radosgw_address_block) }}" + radosgw_address: "{{ hostvars[ceph_dashboard_call_item]['radosgw_address'] | default(radosgw_address) }}" + radosgw_interface: "{{ hostvars[ceph_dashboard_call_item]['radosgw_interface'] | default(radosgw_interface) }}" + 
+- name: Set_fact _radosgw_address to radosgw_address_block ipv4 + ansible.builtin.set_fact: + _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}" + when: + - radosgw_address_block is defined + - radosgw_address_block != 'subnet' + - ip_version == 'ipv4' + +- name: Set_fact _radosgw_address to radosgw_address_block ipv6 + ansible.builtin.set_fact: + _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ansible.utils.ipwrap }}" + when: + - radosgw_address_block is defined + - radosgw_address_block != 'subnet' + - ip_version == 'ipv6' + +- name: Set_fact _radosgw_address to radosgw_address + ansible.builtin.set_fact: + _radosgw_address: "{{ radosgw_address | ansible.utils.ipwrap }}" + when: + - radosgw_address is defined + - radosgw_address != 'x.x.x.x' + +- name: Tasks for radosgw interface + when: + - radosgw_address_block == 'subnet' + - radosgw_address == 'x.x.x.x' + - radosgw_interface != 'interface' + block: + - name: Set_fact _interface + ansible.builtin.set_fact: + _interface: "{{ (hostvars[item]['radosgw_interface'] | replace('-', '_')) }}" + loop: "{{ groups.get(rgw_group_name, []) }}" + delegate_to: "{{ item }}" + delegate_facts: true + run_once: true + + - name: Set_fact _radosgw_address to radosgw_interface - ipv4 + ansible.builtin.set_fact: + _radosgw_address: "{{ hostvars[item]['ansible_facts'][hostvars[item]['_interface']][ip_version]['address'] }}" + loop: "{{ groups.get(rgw_group_name, []) }}" + delegate_to: "{{ item }}" + delegate_facts: true + run_once: true + when: ip_version == 'ipv4' + + - name: Set_fact _radosgw_address to radosgw_interface - ipv6 + ansible.builtin.set_fact: + _radosgw_address: "{{ hostvars[item]['ansible_facts'][hostvars[item]['_interface']][ip_version][0]['address'] | 
ansible.utils.ipwrap }}" + loop: "{{ groups.get(rgw_group_name, []) }}" + delegate_to: "{{ item }}" + delegate_facts: true + run_once: true + when: ip_version == 'ipv6' + +- name: Rgw_instances + when: + - ceph_dashboard_call_item is defined or + inventory_hostname in groups.get(rgw_group_name, []) + block: + - name: Reset rgw_instances (workaround) + ansible.builtin.set_fact: + rgw_instances: [] + + - name: Set_fact rgw_instances + ansible.builtin.set_fact: + rgw_instances: "{{ rgw_instances | default([]) | union([{'instance_name': 'rgw' + item | string, 'radosgw_address': hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['_radosgw_address'], 'radosgw_frontend_port': radosgw_frontend_port | int + item | int}]) }}" + with_sequence: start=0 end={{ radosgw_num_instances | int - 1 }} + delegate_to: "{{ ceph_dashboard_call_item if ceph_dashboard_call_item is defined else inventory_hostname }}" + delegate_facts: true diff --git a/roles/ceph-fetch-keys/LICENSE b/roles/ceph-fetch-keys/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/roles/ceph-fetch-keys/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-fetch-keys/README.md b/roles/ceph-fetch-keys/README.md new file mode 100644 index 0000000..1f9c22f --- /dev/null +++ b/roles/ceph-fetch-keys/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-fetch + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-fetch-keys/defaults/main.yml b/roles/ceph-fetch-keys/defaults/main.yml new file mode 100644 index 0000000..5931081 --- /dev/null +++ b/roles/ceph-fetch-keys/defaults/main.yml @@ -0,0 +1,10 @@ +--- +# Variables here are applicable to all host groups NOT roles + +# This sample file generated by generate_group_vars_sample.sh + +# Dummy variable to avoid error because ansible does not recognize the +# file as a good configuration file when no variable in it. +dummy: + +fetch_directory: fetch/ diff --git a/roles/ceph-fetch-keys/meta/main.yml b/roles/ceph-fetch-keys/meta/main.yml new file mode 100644 index 0000000..2cb63c8 --- /dev/null +++ b/roles/ceph-fetch-keys/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Andrew Schoen + description: Fetches ceph keys from monitors. 
+ license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-fetch-keys/tasks/main.yml b/roles/ceph-fetch-keys/tasks/main.yml new file mode 100644 index 0000000..dd19e50 --- /dev/null +++ b/roles/ceph-fetch-keys/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: Lookup keys in /etc/ceph + ansible.builtin.shell: ls -1 /etc/ceph/*.keyring + changed_when: false + register: ceph_keys + +- name: Create a local fetch directory if it does not exist + ansible.builtin.file: + path: "{{ fetch_directory }}" + state: directory + mode: "0755" + delegate_to: localhost + become: false + +- name: Copy ceph user and bootstrap keys to the ansible server + ansible.builtin.fetch: + src: "{{ item }}" + dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}" + flat: true + fail_on_missing: false + run_once: true + with_items: + - "{{ ceph_keys.stdout_lines }}" + - "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring" + - "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring" + - "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring" + - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring" + - "/var/lib/ceph/bootstrap-mgr/{{ cluster }}.keyring" diff --git a/roles/ceph-grafana/meta/main.yml b/roles/ceph-grafana/meta/main.yml new file mode 100644 index 0000000..1d346cd --- /dev/null +++ b/roles/ceph-grafana/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Boris Ranto + description: Configures Grafana for Ceph Dashboard + license: Apache + min_ansible_version: "2.4" + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-grafana/tasks/configure_grafana.yml b/roles/ceph-grafana/tasks/configure_grafana.yml new file mode 100644 index 0000000..dad9c77 --- /dev/null +++ b/roles/ceph-grafana/tasks/configure_grafana.yml @@ -0,0 +1,117 @@ +--- +- name: Install ceph-grafana-dashboards package on RedHat or SUSE + 
ansible.builtin.package: + name: ceph-grafana-dashboards + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + register: result + until: result is succeeded + when: + - not containerized_deployment | bool + - ansible_facts['os_family'] in ['RedHat', 'Suse'] + tags: package-install + +- name: Make sure grafana is down + ansible.builtin.service: + name: grafana-server + state: stopped + +- name: Wait for grafana to be stopped + ansible.builtin.wait_for: + host: '{{ grafana_server_addr if ip_version == "ipv4" else grafana_server_addr[1:-1] }}' + port: '{{ grafana_port }}' + state: stopped + +- name: Make sure grafana configuration directories exist + ansible.builtin.file: + path: "{{ item }}" + state: directory + recurse: true + owner: "{{ grafana_uid }}" + group: "{{ grafana_uid }}" + with_items: + - "/etc/grafana/dashboards/ceph-dashboard" + - "/etc/grafana/provisioning/datasources" + - "/etc/grafana/provisioning/dashboards" + - "/etc/grafana/provisioning/notifiers" + +- name: Download ceph grafana dashboards + ansible.builtin.get_url: + url: "https://raw.githubusercontent.com/ceph/ceph/{{ grafana_dashboard_version }}/monitoring/ceph-mixin/dashboards_out/{{ item }}" + dest: "/etc/grafana/dashboards/ceph-dashboard/{{ item }}" + mode: "0644" + with_items: "{{ grafana_dashboard_files }}" + when: + - not containerized_deployment | bool + - not ansible_facts['os_family'] in ['RedHat', 'Suse'] + +- name: Write grafana.ini + openstack.config_template.config_template: + src: grafana.ini.j2 + dest: /etc/grafana/grafana.ini + owner: "{{ grafana_uid }}" + group: "{{ grafana_uid }}" + mode: "0640" + config_type: ini + config_overrides: "{{ grafana_conf_overrides }}" + +- name: Write datasources provisioning config file + ansible.builtin.template: + src: datasources-ceph-dashboard.yml.j2 + dest: /etc/grafana/provisioning/datasources/ceph-dashboard.yml + owner: "{{ grafana_uid }}" + group: "{{ grafana_uid }}" + mode: "0640" + +- name: Write dashboards 
provisioning config file + ansible.builtin.template: + src: dashboards-ceph-dashboard.yml.j2 + dest: /etc/grafana/provisioning/dashboards/ceph-dashboard.yml + owner: "{{ grafana_uid }}" + group: "{{ grafana_uid }}" + mode: "0640" + when: not containerized_deployment | bool + +- name: Copy grafana SSL certificate file + ansible.builtin.copy: + src: "{{ grafana_crt }}" + dest: "/etc/grafana/ceph-dashboard.crt" + owner: "{{ grafana_uid }}" + group: "{{ grafana_uid }}" + mode: "0640" + remote_src: "{{ dashboard_tls_external | bool }}" + when: + - grafana_crt | length > 0 + - dashboard_protocol == "https" + +- name: Copy grafana SSL certificate key + ansible.builtin.copy: + src: "{{ grafana_key }}" + dest: "/etc/grafana/ceph-dashboard.key" + owner: "{{ grafana_uid }}" + group: "{{ grafana_uid }}" + mode: "0440" + remote_src: "{{ dashboard_tls_external | bool }}" + when: + - grafana_key | length > 0 + - dashboard_protocol == "https" + +- name: Generate a Self Signed OpenSSL certificate for dashboard + ansible.builtin.shell: | + test -f /etc/grafana/ceph-dashboard.key -a -f /etc/grafana/ceph-dashboard.crt || \ + (openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-grafana' -days 3650 -keyout /etc/grafana/ceph-dashboard.key -out /etc/grafana/ceph-dashboard.crt -extensions v3_ca && \ + chown {{ grafana_uid }}:{{ grafana_uid }} /etc/grafana/ceph-dashboard.key /etc/grafana/ceph-dashboard.crt) + changed_when: false + when: + - dashboard_protocol == "https" + - grafana_key | length == 0 or grafana_crt | length == 0 + +- name: Enable and start grafana + ansible.builtin.service: + name: grafana-server + state: restarted + enabled: true + +- name: Wait for grafana to start + ansible.builtin.wait_for: + host: '{{ grafana_server_addr if ip_version == "ipv4" else grafana_server_addr[1:-1] }}' + port: '{{ grafana_port }}' diff --git a/roles/ceph-grafana/tasks/main.yml b/roles/ceph-grafana/tasks/main.yml new file mode 100644 index 0000000..2e49036 --- /dev/null +++ 
b/roles/ceph-grafana/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: Include setup_container.yml + ansible.builtin.include_tasks: setup_container.yml + +- name: Include configure_grafana.yml + ansible.builtin.include_tasks: configure_grafana.yml diff --git a/roles/ceph-grafana/tasks/setup_container.yml b/roles/ceph-grafana/tasks/setup_container.yml new file mode 100644 index 0000000..f410651 --- /dev/null +++ b/roles/ceph-grafana/tasks/setup_container.yml @@ -0,0 +1,22 @@ +--- +- name: Create /etc/grafana and /var/lib/grafana + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ grafana_uid }}" + group: "{{ grafana_uid }}" + recurse: true + with_items: + - /etc/grafana + - /var/lib/grafana + +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + +- name: Start the grafana-server service + ansible.builtin.systemd: + name: grafana-server + state: started + enabled: true + daemon_reload: true + failed_when: false diff --git a/roles/ceph-grafana/tasks/systemd.yml b/roles/ceph-grafana/tasks/systemd.yml new file mode 100644 index 0000000..52c40b6 --- /dev/null +++ b/roles/ceph-grafana/tasks/systemd.yml @@ -0,0 +1,8 @@ +--- +- name: Ship systemd service + ansible.builtin.template: + src: grafana-server.service.j2 + dest: "/etc/systemd/system/grafana-server.service" + owner: root + group: root + mode: "0644" diff --git a/roles/ceph-grafana/templates/dashboards-ceph-dashboard.yml.j2 b/roles/ceph-grafana/templates/dashboards-ceph-dashboard.yml.j2 new file mode 100644 index 0000000..64dbf1d --- /dev/null +++ b/roles/ceph-grafana/templates/dashboards-ceph-dashboard.yml.j2 @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: +- name: 'Ceph Dashboard' + orgId: 1 + folder: 'ceph-dashboard' + type: file + disableDeletion: false + updateIntervalSeconds: 3 + editable: false + options: + path: '{{ grafana_dashboards_path }}' diff --git a/roles/ceph-grafana/templates/datasources-ceph-dashboard.yml.j2 
b/roles/ceph-grafana/templates/datasources-ceph-dashboard.yml.j2 new file mode 100644 index 0000000..0ff13ad --- /dev/null +++ b/roles/ceph-grafana/templates/datasources-ceph-dashboard.yml.j2 @@ -0,0 +1,26 @@ +apiVersion: 1 + +# list of datasources that should be deleted from the database +deleteDatasources: + - name: '{{ grafana_datasource }}' + orgId: 1 + +# list of datasources to insert/update depending +# what's available in the database +datasources: + # name of the datasource. Required +- name: '{{ grafana_datasource }}' + # datasource type. Required + type: 'prometheus' + # access mode. proxy or direct (Server or Browser in the UI). Required + access: 'proxy' + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: 'http://{{ grafana_server_addr }}:{{ prometheus_port }}' + # enable/disable basic auth + basicAuth: false + # mark as default datasource. Max one per org + isDefault: true + # allow users to edit datasources from the UI. + editable: false diff --git a/roles/ceph-grafana/templates/grafana-server.service.j2 b/roles/ceph-grafana/templates/grafana-server.service.j2 new file mode 100644 index 0000000..f9548ec --- /dev/null +++ b/roles/ceph-grafana/templates/grafana-server.service.j2 @@ -0,0 +1,61 @@ +# This file is managed by ansible, don't make changes here - they will be +# overwritten. 
+[Unit] +Description=grafana-server +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage grafana-server +{% else %} +ExecStartPre=-/usr/bin/{{ container_binary }} stop grafana-server +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm grafana-server +ExecStart=/usr/bin/{{ container_binary }} run --rm --name=grafana-server \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} + --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ + -v /etc/grafana:/etc/grafana:Z \ + -v /var/lib/grafana:/var/lib/grafana:Z \ + --net=host \ + --cpu-period={{ grafana_container_cpu_period }} \ + --cpu-quota={{ grafana_container_cpu_period * grafana_container_cpu_cores }} \ + --memory={{ grafana_container_memory }}GB \ + --memory-swap={{ grafana_container_memory * 2 }}GB \ + -e GF_INSTALL_PLUGINS={{ grafana_plugins|join(',') }} \ +{% if ceph_docker_http_proxy is defined %} + -e http_proxy={{ ceph_docker_http_proxy }} \ +{% endif %} +{% if ceph_docker_https_proxy is defined %} + -e https_proxy={{ ceph_docker_https_proxy }} \ +{% endif %} +{% if ceph_docker_no_proxy is defined %} + -e no_proxy={{ ceph_docker_no_proxy }} \ +{% endif %} + {{ grafana_container_image }} +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStop=-/usr/bin/{{ container_binary }} stop grafana-server +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if 
container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=multi-user.target diff --git a/roles/ceph-grafana/templates/grafana.ini.j2 b/roles/ceph-grafana/templates/grafana.ini.j2 new file mode 100644 index 0000000..0593eb4 --- /dev/null +++ b/roles/ceph-grafana/templates/grafana.ini.j2 @@ -0,0 +1,35 @@ +# [server] +# root_url = %(protocol)s://%(domain)s:%(http_port)s/api/grafana/proxy + +[users] +default_theme = light + +#################################### Anonymous Auth ########################## +[auth.anonymous] +# enable anonymous access +enabled = true + +# specify organization name that should be used for unauthenticated users +org_name = Main Org. + +# specify role for unauthenticated users +org_role = Viewer + +[server] +cert_file = /etc/grafana/ceph-dashboard.crt +cert_key = /etc/grafana/ceph-dashboard.key +domain = {{ ansible_facts['fqdn'] }} +protocol = {{ dashboard_protocol }} +http_port = {{ grafana_port }} +http_addr = {{ grafana_server_addr }} + +[security] +admin_user = {{ grafana_admin_user }} +admin_password = {{ grafana_admin_password }} +allow_embedding = {{ grafana_allow_embedding }} +{% if dashboard_protocol == 'https' %} +cookie_secure = true + +[session] +cookie_secure = true +{% endif %} \ No newline at end of file diff --git a/roles/ceph-handler/LICENSE b/roles/ceph-handler/LICENSE new file mode 100644 index 0000000..b0d1c9f --- /dev/null +++ b/roles/ceph-handler/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2014] [Guillaume Abrioux] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-handler/README.md b/roles/ceph-handler/README.md new file mode 100644 index 0000000..3145a7f --- /dev/null +++ b/roles/ceph-handler/README.md @@ -0,0 +1,2 @@ +# Ansible role: ceph-handler +Documentation is available at http://docs.ceph.com/ceph-ansible/. 
diff --git a/roles/ceph-handler/handlers/main.yml b/roles/ceph-handler/handlers/main.yml new file mode 100644 index 0000000..8f06894 --- /dev/null +++ b/roles/ceph-handler/handlers/main.yml @@ -0,0 +1,77 @@ +--- +- name: Handlers + when: + - not rolling_update | bool + - not docker2podman | default(False) | bool + block: + - name: Make tempdir for scripts + ansible.builtin.tempfile: + state: directory + prefix: ceph_ansible + listen: + - "Restart ceph mons" + - "Restart ceph osds" + - "Restart ceph mdss" + - "Restart ceph rgws" + - "Restart ceph nfss" + - "Restart ceph rbdmirrors" + - "Restart ceph mgrs" + register: tmpdirpath + when: tmpdirpath is not defined or tmpdirpath.path is not defined or tmpdirpath.state=="absent" + + - name: Mons handler + ansible.builtin.include_tasks: handler_mons.yml + when: mon_group_name in group_names + listen: "Restart ceph mons" + + - name: Osds handler + ansible.builtin.include_tasks: handler_osds.yml + when: osd_group_name in group_names + listen: "Restart ceph osds" + + - name: Mdss handler + ansible.builtin.include_tasks: handler_mdss.yml + when: mds_group_name in group_names + listen: "Restart ceph mdss" + + - name: Rgws handler + ansible.builtin.include_tasks: handler_rgws.yml + when: rgw_group_name in group_names + listen: "Restart ceph rgws" + + - name: Nfss handler + ansible.builtin.include_tasks: handler_nfss.yml + when: nfs_group_name in group_names + listen: "Restart ceph nfss" + + - name: Rbdmirrors handler + ansible.builtin.include_tasks: handler_rbdmirrors.yml + when: rbdmirror_group_name in group_names + listen: "Restart ceph rbdmirrors" + + - name: Mgrs handler + ansible.builtin.include_tasks: handler_mgrs.yml + when: mgr_group_name in group_names + listen: "Restart ceph mgrs" + + - name: Ceph crash handler + ansible.builtin.include_tasks: handler_crash.yml + listen: "Restart ceph crash" + when: + - inventory_hostname in groups.get(mon_group_name, []) + or inventory_hostname in groups.get(mgr_group_name, []) + or 
inventory_hostname in groups.get(osd_group_name, []) + or inventory_hostname in groups.get(mds_group_name, []) + or inventory_hostname in groups.get(rgw_group_name, []) + or inventory_hostname in groups.get(rbdmirror_group_name, []) + + - name: Ceph exporter handler + ansible.builtin.include_tasks: handler_exporter.yml + listen: "Restart ceph exporter" + when: + - inventory_hostname in groups.get(mon_group_name, []) + or inventory_hostname in groups.get(mgr_group_name, []) + or inventory_hostname in groups.get(osd_group_name, []) + or inventory_hostname in groups.get(mds_group_name, []) + or inventory_hostname in groups.get(rgw_group_name, []) + or inventory_hostname in groups.get(rbdmirror_group_name, []) diff --git a/roles/ceph-handler/meta/main.yml b/roles/ceph-handler/meta/main.yml new file mode 100644 index 0000000..fe745e0 --- /dev/null +++ b/roles/ceph-handler/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Contains handlers for Ceph services + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-handler/tasks/check_running_cluster.yml b/roles/ceph-handler/tasks/check_running_cluster.yml new file mode 100644 index 0000000..62af95a --- /dev/null +++ b/roles/ceph-handler/tasks/check_running_cluster.yml @@ -0,0 +1,8 @@ +--- +- name: Include check_running_containers.yml + ansible.builtin.include_tasks: check_running_containers.yml + when: containerized_deployment | bool + +- name: Include check_socket_non_container.yml + ansible.builtin.include_tasks: check_socket_non_container.yml + when: not containerized_deployment | bool diff --git a/roles/ceph-handler/tasks/check_running_containers.yml b/roles/ceph-handler/tasks/check_running_containers.yml new file mode 100644 index 0000000..e78b7be --- /dev/null +++ b/roles/ceph-handler/tasks/check_running_containers.yml @@ -0,0 +1,84 @@ +--- +- name: Check 
for a mon container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'" + register: ceph_mon_container_stat + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(mon_group_name, []) + +- name: Check for an osd container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-osd'" + register: ceph_osd_container_stat + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(osd_group_name, []) + +- name: Check for a mds container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'" + register: ceph_mds_container_stat + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(mds_group_name, []) + +- name: Check for a rgw container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}'" + register: ceph_rgw_container_stat + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(rgw_group_name, []) + +- name: Check for a mgr container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'" + register: ceph_mgr_container_stat + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(mgr_group_name, []) + +- name: Check for a rbd mirror container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'" + register: ceph_rbd_mirror_container_stat + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(rbdmirror_group_name, []) + +- name: Check for a nfs container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ 
ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'" + register: ceph_nfs_container_stat + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(nfs_group_name, []) + +- name: Check for a ceph-crash container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'" + register: ceph_crash_container_stat + changed_when: false + failed_when: false + check_mode: false + when: + - inventory_hostname in groups.get(mon_group_name, []) + or inventory_hostname in groups.get(mgr_group_name, []) + or inventory_hostname in groups.get(osd_group_name, []) + or inventory_hostname in groups.get(mds_group_name, []) + or inventory_hostname in groups.get(rgw_group_name, []) + or inventory_hostname in groups.get(rbdmirror_group_name, []) + +- name: Check for a ceph-exporter container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-exporter-{{ ansible_facts['hostname'] }}'" + register: ceph_exporter_container_stat + changed_when: false + failed_when: false + check_mode: false + when: + - inventory_hostname in groups.get(mon_group_name, []) + or inventory_hostname in groups.get(mgr_group_name, []) + or inventory_hostname in groups.get(osd_group_name, []) + or inventory_hostname in groups.get(mds_group_name, []) + or inventory_hostname in groups.get(rgw_group_name, []) + or inventory_hostname in groups.get(rbdmirror_group_name, []) diff --git a/roles/ceph-handler/tasks/check_socket_non_container.yml b/roles/ceph-handler/tasks/check_socket_non_container.yml new file mode 100644 index 0000000..96c492f --- /dev/null +++ b/roles/ceph-handler/tasks/check_socket_non_container.yml @@ -0,0 +1,234 @@ +--- +- name: Find ceph mon socket + ansible.builtin.find: + paths: ["{{ rbd_client_admin_socket_path }}"] + recurse: true + file_type: any + patterns: "{{ cluster }}-mon*.asok" + use_regex: false + register: mon_socket_stat + when: inventory_hostname 
in groups.get(mon_group_name, []) + +- name: Check if the ceph mon socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix + changed_when: false + failed_when: false + check_mode: false + register: mon_socket + with_items: "{{ mon_socket_stat.files }}" + when: + - inventory_hostname in groups.get(mon_group_name, []) + - mon_socket_stat.files | length > 0 + +- name: Remove ceph mon socket if exists and not used by a process + ansible.builtin.file: + name: "{{ item.0.path }}" + state: absent + with_together: + - "{{ mon_socket_stat.files }}" + - "{{ mon_socket.results }}" + when: + - inventory_hostname in groups.get(mon_group_name, []) + - mon_socket_stat.files | length > 0 + - item.1.rc == 1 + +- name: Find ceph osd socket + ansible.builtin.find: + paths: ["{{ rbd_client_admin_socket_path }}"] + recurse: true + file_type: any + patterns: "{{ cluster }}-osd.*.asok" + use_regex: false + register: osd_socket_stat + when: inventory_hostname in groups.get(osd_group_name, []) + +- name: Check if the ceph osd socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix + changed_when: false + failed_when: false + check_mode: false + register: osd_socket + with_items: "{{ osd_socket_stat.files }}" + when: + - inventory_hostname in groups.get(osd_group_name, []) + - osd_socket_stat.files | length > 0 + +- name: Remove ceph osd socket if exists and not used by a process + ansible.builtin.file: + name: "{{ item.0.path }}" + state: absent + with_together: + - "{{ osd_socket_stat.files }}" + - "{{ osd_socket.results }}" + when: + - inventory_hostname in groups.get(osd_group_name, []) + - osd_socket_stat.files | length > 0 + - item.1.rc == 1 + +- name: Find ceph osd socket + ansible.builtin.find: + paths: ["{{ rbd_client_admin_socket_path }}"] + recurse: true + file_type: any + patterns: "{{ cluster }}-mds*.asok" + use_regex: false + register: mds_socket_stat + when: inventory_hostname in groups.get(mds_group_name, []) + +- name: 
Check if the ceph mds socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix + changed_when: false + failed_when: false + check_mode: false + register: mds_socket + with_items: "{{ mds_socket_stat.files }}" + when: + - inventory_hostname in groups.get(mds_group_name, []) + - mds_socket_stat.files | length > 0 + +- name: Remove ceph mds socket if exists and not used by a process + ansible.builtin.file: + name: "{{ item.0.path }}" + state: absent + with_together: + - "{{ mds_socket_stat.files }}" + - "{{ mds_socket.results }}" + when: + - inventory_hostname in groups.get(mds_group_name, []) + - mds_socket_stat.files | length > 0 + - item.1.rc == 1 + +- name: Find ceph rgw socket + ansible.builtin.find: + paths: ["{{ rbd_client_admin_socket_path }}"] + recurse: true + file_type: any + patterns: "{{ cluster }}-client.rgw*.asok" + use_regex: false + register: rgw_socket_stat + when: inventory_hostname in groups.get(rgw_group_name, []) + +- name: Check if the ceph rgw socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix + changed_when: false + failed_when: false + check_mode: false + register: rgw_socket + with_items: "{{ rgw_socket_stat.files }}" + when: + - inventory_hostname in groups.get(rgw_group_name, []) + - rgw_socket_stat.files | length > 0 + +- name: Remove ceph rgw socket if exists and not used by a process + ansible.builtin.file: + name: "{{ item.0.path }}" + state: absent + with_together: + - "{{ rgw_socket_stat.files }}" + - "{{ rgw_socket.results }}" + when: + - inventory_hostname in groups.get(rgw_group_name, []) + - rgw_socket_stat.files | length > 0 + - item.1.rc == 1 + +- name: Find ceph mgr socket + ansible.builtin.find: + paths: ["{{ rbd_client_admin_socket_path }}"] + recurse: true + file_type: any + patterns: "{{ cluster }}-mgr*.asok" + use_regex: false + register: mgr_socket_stat + when: inventory_hostname in groups.get(mgr_group_name, []) + +- name: Check if the ceph mgr socket is in-use + 
ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix + changed_when: false + failed_when: false + check_mode: false + register: mgr_socket + with_items: "{{ mgr_socket_stat.files }}" + when: + - inventory_hostname in groups.get(mgr_group_name, []) + - mgr_socket_stat.files | length > 0 + +- name: Remove ceph mgr socket if exists and not used by a process + ansible.builtin.file: + name: "{{ item.0.path }}" + state: absent + with_together: + - "{{ mgr_socket_stat.files }}" + - "{{ mgr_socket.results }}" + when: + - inventory_hostname in groups.get(mgr_group_name, []) + - mgr_socket_stat.files | length > 0 + - item.1.rc == 1 + +- name: Find ceph rbd mirror socket + ansible.builtin.find: + paths: ["{{ rbd_client_admin_socket_path }}"] + recurse: true + file_type: any + patterns: "{{ cluster }}-client.rbd-mirror*.asok" + use_regex: false + register: rbd_mirror_socket_stat + when: inventory_hostname in groups.get(rbdmirror_group_name, []) + +- name: Check if the ceph rbd mirror socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix + changed_when: false + failed_when: false + check_mode: false + register: rbd_mirror_socket + with_items: "{{ rbd_mirror_socket_stat.files }}" + when: + - inventory_hostname in groups.get(rbdmirror_group_name, []) + - rbd_mirror_socket_stat.files | length > 0 + +- name: Remove ceph rbd mirror socket if exists and not used by a process + ansible.builtin.file: + name: "{{ item.0.path }}" + state: absent + with_together: + - "{{ rbd_mirror_socket_stat.files }}" + - "{{ rbd_mirror_socket.results }}" + when: + - inventory_hostname in groups.get(rbdmirror_group_name, []) + - rbd_mirror_socket_stat.files | length > 0 + - item.1.rc == 1 + +- name: Check for a nfs ganesha pid + ansible.builtin.command: "pgrep ganesha.nfsd" + register: nfs_process + changed_when: false + failed_when: false + check_mode: false + when: inventory_hostname in groups.get(nfs_group_name, []) + +- name: Check for a ceph-crash process + 
ansible.builtin.command: pgrep ceph-crash + changed_when: false + failed_when: false + check_mode: false + register: crash_process + when: + - inventory_hostname in groups.get(mon_group_name, []) + or inventory_hostname in groups.get(mgr_group_name, []) + or inventory_hostname in groups.get(osd_group_name, []) + or inventory_hostname in groups.get(mds_group_name, []) + or inventory_hostname in groups.get(rgw_group_name, []) + or inventory_hostname in groups.get(rbdmirror_group_name, []) + +- name: Check for a ceph-exporter process + ansible.builtin.command: pgrep ceph-exporter + changed_when: false + failed_when: false + check_mode: false + register: exporter_process + when: + - inventory_hostname in groups.get(mon_group_name, []) + or inventory_hostname in groups.get(mgr_group_name, []) + or inventory_hostname in groups.get(osd_group_name, []) + or inventory_hostname in groups.get(mds_group_name, []) + or inventory_hostname in groups.get(rgw_group_name, []) + or inventory_hostname in groups.get(rbdmirror_group_name, []) diff --git a/roles/ceph-handler/tasks/handler_crash.yml b/roles/ceph-handler/tasks/handler_crash.yml new file mode 100644 index 0000000..44b049b --- /dev/null +++ b/roles/ceph-handler/tasks/handler_crash.yml @@ -0,0 +1,18 @@ +--- +- name: Set _crash_handler_called before restart + ansible.builtin.set_fact: + _crash_handler_called: true + +- name: Restart the ceph-crash service # noqa: ignore-errors + ansible.builtin.systemd: + name: ceph-crash@{{ ansible_facts['hostname'] }} + state: restarted + enabled: true + masked: false + daemon_reload: true + ignore_errors: true + when: hostvars[inventory_hostname]['_crash_handler_called'] | default(False) | bool + +- name: Set _crash_handler_called after restart + ansible.builtin.set_fact: + _crash_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_exporter.yml b/roles/ceph-handler/tasks/handler_exporter.yml new file mode 100644 index 0000000..d846d69 --- /dev/null +++ 
b/roles/ceph-handler/tasks/handler_exporter.yml @@ -0,0 +1,18 @@ +--- +- name: Set _exporter_handler_called before restart + ansible.builtin.set_fact: + _exporter_handler_called: true + +- name: Restart the ceph-exporter service # noqa: ignore-errors + ansible.builtin.systemd: + name: ceph-exporter@{{ ansible_facts['hostname'] }} + state: restarted + enabled: true + masked: false + daemon_reload: true + ignore_errors: true + when: hostvars[inventory_hostname]['_exporter_handler_called'] | default(False) | bool + +- name: Set _exporter_handler_called after restart + ansible.builtin.set_fact: + _exporter_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_mdss.yml b/roles/ceph-handler/tasks/handler_mdss.yml new file mode 100644 index 0000000..9e82d54 --- /dev/null +++ b/roles/ceph-handler/tasks/handler_mdss.yml @@ -0,0 +1,28 @@ +--- +- name: Set _mds_handler_called before restart + ansible.builtin.set_fact: + _mds_handler_called: true + +- name: Copy mds restart script + ansible.builtin.template: + src: restart_mds_daemon.sh.j2 + dest: "{{ tmpdirpath.path }}/restart_mds_daemon.sh" + owner: root + group: root + mode: "0750" + when: tmpdirpath.path is defined + +- name: Restart ceph mds daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mds_daemon.sh + when: + - hostvars[item]['handler_mds_status'] | default(False) | bool + - hostvars[item]['_mds_handler_called'] | default(False) | bool + - hostvars[item].tmpdirpath.path is defined + with_items: "{{ groups[mds_group_name] }}" + delegate_to: "{{ item }}" + changed_when: false + run_once: true + +- name: Set _mds_handler_called after restart + ansible.builtin.set_fact: + _mds_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_mgrs.yml b/roles/ceph-handler/tasks/handler_mgrs.yml new file mode 100644 index 0000000..7984cc5 --- /dev/null +++ b/roles/ceph-handler/tasks/handler_mgrs.yml @@ -0,0 +1,28 @@ +--- +- name: Set 
_mgr_handler_called before restart + ansible.builtin.set_fact: + _mgr_handler_called: true + +- name: Copy mgr restart script + ansible.builtin.template: + src: restart_mgr_daemon.sh.j2 + dest: "{{ tmpdirpath.path }}/restart_mgr_daemon.sh" + owner: root + group: root + mode: "0750" + when: tmpdirpath.path is defined + +- name: Restart ceph mgr daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mgr_daemon.sh + when: + - hostvars[item]['handler_mgr_status'] | default(False) | bool + - hostvars[item]['_mgr_handler_called'] | default(False) | bool + - hostvars[item].tmpdirpath.path is defined + with_items: "{{ groups[mgr_group_name] }}" + delegate_to: "{{ item }}" + changed_when: false + run_once: true + +- name: Set _mgr_handler_called after restart + ansible.builtin.set_fact: + _mgr_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_mons.yml b/roles/ceph-handler/tasks/handler_mons.yml new file mode 100644 index 0000000..0c97052 --- /dev/null +++ b/roles/ceph-handler/tasks/handler_mons.yml @@ -0,0 +1,32 @@ +--- +# We only want to restart on hosts that have called the handler. +# This var is set when the handler is called, and unset after the +# restart to ensure only the correct hosts are restarted. 
+- name: Set _mon_handler_called before restart + ansible.builtin.set_fact: + _mon_handler_called: true + +- name: Copy mon restart script + ansible.builtin.template: + src: restart_mon_daemon.sh.j2 + dest: "{{ tmpdirpath.path }}/restart_mon_daemon.sh" + owner: root + group: root + mode: "0750" + when: tmpdirpath.path is defined + +- name: Restart ceph mon daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mon_daemon.sh + when: + # We do not want to run these checks on initial deployment (`socket.rc == 0`) + - hostvars[item]['handler_mon_status'] | default(False) | bool + - hostvars[item]['_mon_handler_called'] | default(False) | bool + - hostvars[item].tmpdirpath.path is defined + with_items: "{{ groups[mon_group_name] }}" + delegate_to: "{{ item }}" + changed_when: false + run_once: true + +- name: Set _mon_handler_called after restart + ansible.builtin.set_fact: + _mon_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_nfss.yml b/roles/ceph-handler/tasks/handler_nfss.yml new file mode 100644 index 0000000..dadfc1d --- /dev/null +++ b/roles/ceph-handler/tasks/handler_nfss.yml @@ -0,0 +1,28 @@ +--- +- name: Set _nfs_handler_called before restart + ansible.builtin.set_fact: + _nfs_handler_called: true + +- name: Copy nfs restart script + ansible.builtin.template: + src: restart_nfs_daemon.sh.j2 + dest: "{{ tmpdirpath.path }}/restart_nfs_daemon.sh" + owner: root + group: root + mode: "0750" + when: tmpdirpath.path is defined + +- name: Restart ceph nfs daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh + when: + - hostvars[item]['handler_nfs_status'] | default(False) | bool + - hostvars[item]['_nfs_handler_called'] | default(False) | bool + - hostvars[item].tmpdirpath.path is defined + with_items: "{{ groups[nfs_group_name] }}" + delegate_to: "{{ item }}" + changed_when: false + run_once: true + +- name: Set _nfs_handler_called 
after restart + ansible.builtin.set_fact: + _nfs_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_osds.yml b/roles/ceph-handler/tasks/handler_osds.yml new file mode 100644 index 0000000..6d2634f --- /dev/null +++ b/roles/ceph-handler/tasks/handler_osds.yml @@ -0,0 +1,122 @@ +--- +- name: Set_fact trigger_restart + ansible.builtin.set_fact: + trigger_restart: true + loop: "{{ groups[osd_group_name] }}" + when: hostvars[item]['handler_osd_status'] | default(False) | bool + run_once: true + +- name: Osd handler + when: trigger_restart | default(False) | bool + block: + - name: Set _osd_handler_called before restart + ansible.builtin.set_fact: + _osd_handler_called: true + + - name: Unset noup flag + ceph_osd_flag: + name: noup + cluster: "{{ cluster }}" + state: absent + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + + # This does not just restart OSDs but everything else too. Unfortunately + # at this time the ansible role does not have an OSD id list to use + # for restarting them specifically. 
+ # This does not need to run during a rolling update as the playbook will + # restart all OSDs using the tasks "start ceph osd" or + # "restart containerized ceph osd" + - name: Copy osd restart script + ansible.builtin.template: + src: restart_osd_daemon.sh.j2 + dest: "{{ tmpdirpath.path }}/restart_osd_daemon.sh" + owner: root + group: root + mode: "0750" + when: tmpdirpath.path is defined + + - name: Get pool list + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" + register: pool_list + delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" + run_once: true + changed_when: false + check_mode: false + + - name: Get balancer module status + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" + register: balancer_status + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + check_mode: false + + - name: Set_fact pools_pgautoscaler_mode + ansible.builtin.set_fact: + pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}" + run_once: true + with_items: "{{ pool_list.stdout | default('{}') | from_json }}" + + - name: Disable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: (balancer_status.stdout | from_json)['active'] | bool + + - name: Disable pg autoscale on pools + ceph_pool: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + pg_autoscale_mode: false + with_items: "{{ pools_pgautoscaler_mode }}" + delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" + run_once: true + when: + - pools_pgautoscaler_mode is defined + - item.mode == 'on' + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: 
"{{ container_binary }}" + + - name: Restart ceph osds daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_osd_daemon.sh + when: + - hostvars[item]['handler_osd_status'] | default(False) | bool + - handler_health_osd_check | bool + - hostvars[item]['_osd_handler_called'] | default(False) | bool + - hostvars[item].tmpdirpath.path is defined + with_items: "{{ groups[osd_group_name] | intersect(ansible_play_batch) }}" + delegate_to: "{{ item }}" + changed_when: false + run_once: true + + - name: Set _osd_handler_called after restart + ansible.builtin.set_fact: + _osd_handler_called: false + + - name: Re-enable pg autoscale on pools + ceph_pool: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + pg_autoscale_mode: true + with_items: "{{ pools_pgautoscaler_mode }}" + run_once: true + delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" + when: + - pools_pgautoscaler_mode is defined + - item.mode == 'on' + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Re-enable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false + when: (balancer_status.stdout | from_json)['active'] | bool diff --git a/roles/ceph-handler/tasks/handler_rbdmirrors.yml b/roles/ceph-handler/tasks/handler_rbdmirrors.yml new file mode 100644 index 0000000..0e1c893 --- /dev/null +++ b/roles/ceph-handler/tasks/handler_rbdmirrors.yml @@ -0,0 +1,28 @@ +--- +- name: Set _rbdmirror_handler_called before restart + ansible.builtin.set_fact: + _rbdmirror_handler_called: true + +- name: Copy rbd mirror restart script + ansible.builtin.template: + src: restart_rbd_mirror_daemon.sh.j2 + dest: "{{ tmpdirpath.path }}/restart_rbd_mirror_daemon.sh" + owner: root + 
group: root + mode: "0750" + when: tmpdirpath.path is defined + +- name: Restart ceph rbd mirror daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rbd_mirror_daemon.sh + when: + - hostvars[item]['handler_rbd_mirror_status'] | default(False) | bool + - hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool + - hostvars[item].tmpdirpath.path is defined + with_items: "{{ groups[rbdmirror_group_name] }}" + delegate_to: "{{ item }}" + changed_when: false + run_once: true + +- name: Set _rbdmirror_handler_called after restart + ansible.builtin.set_fact: + _rbdmirror_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_rgws.yml b/roles/ceph-handler/tasks/handler_rgws.yml new file mode 100644 index 0000000..2929c3d --- /dev/null +++ b/roles/ceph-handler/tasks/handler_rgws.yml @@ -0,0 +1,28 @@ +--- +- name: Set _rgw_handler_called before restart + ansible.builtin.set_fact: + _rgw_handler_called: true + +- name: Copy rgw restart script + ansible.builtin.template: + src: restart_rgw_daemon.sh.j2 + dest: "{{ tmpdirpath.path }}/restart_rgw_daemon.sh" + owner: root + group: root + mode: "0750" + when: tmpdirpath.path is defined + +- name: Restart ceph rgw daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rgw_daemon.sh + when: + - hostvars[item]['handler_rgw_status'] | default(False) | bool + - hostvars[item]['_rgw_handler_called'] | default(False) | bool + - hostvars[item].tmpdirpath.path is defined + with_items: "{{ groups[rgw_group_name] }}" + delegate_to: "{{ item }}" + changed_when: false + run_once: true + +- name: Set _rgw_handler_called after restart + ansible.builtin.set_fact: + _rgw_handler_called: false diff --git a/roles/ceph-handler/tasks/main.yml b/roles/ceph-handler/tasks/main.yml new file mode 100644 index 0000000..c963b01 --- /dev/null +++ b/roles/ceph-handler/tasks/main.yml @@ -0,0 +1,61 @@ +--- +- name: Include 
check_running_cluster.yml + ansible.builtin.include_tasks: check_running_cluster.yml + +# We do not want to run these checks on initial deployment (`socket.rc == 0`) +- name: Set_fact handler_mon_status + ansible.builtin.set_fact: + handler_mon_status: "{{ 0 in (mon_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mon_container_stat.get('rc') == 0 and ceph_mon_container_stat.get('stdout_lines', []) | length != 0) }}" + when: inventory_hostname in groups.get(mon_group_name, []) + +- name: Set_fact handler_osd_status + ansible.builtin.set_fact: + handler_osd_status: "{{ 0 in (osd_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_osd_container_stat.get('rc') == 0 and ceph_osd_container_stat.get('stdout_lines', []) | length != 0) }}" + when: inventory_hostname in groups.get(osd_group_name, []) + +- name: Set_fact handler_mds_status + ansible.builtin.set_fact: + handler_mds_status: "{{ 0 in (mds_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mds_container_stat.get('rc') == 0 and ceph_mds_container_stat.get('stdout_lines', []) | length != 0) }}" + when: inventory_hostname in groups.get(mds_group_name, []) + +- name: Set_fact handler_rgw_status + ansible.builtin.set_fact: + handler_rgw_status: "{{ 0 in (rgw_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rgw_container_stat.get('rc') == 0 and ceph_rgw_container_stat.get('stdout_lines', []) | length != 0) }}" + when: inventory_hostname in groups.get(rgw_group_name, []) + +- name: Set_fact handler_nfs_status + ansible.builtin.set_fact: + handler_nfs_status: "{{ (nfs_process.get('rc') == 0) if not containerized_deployment | bool else (ceph_nfs_container_stat.get('rc') == 0 and ceph_nfs_container_stat.get('stdout_lines', []) | length != 0) }}" + when: inventory_hostname in groups.get(nfs_group_name, []) + +- name: Set_fact handler_rbd_status + 
ansible.builtin.set_fact: + handler_rbd_mirror_status: "{{ 0 in (rbd_mirror_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rbd_mirror_container_stat.get('rc') == 0 and ceph_rbd_mirror_container_stat.get('stdout_lines', []) | length != 0) }}" + when: inventory_hostname in groups.get(rbdmirror_group_name, []) + +- name: Set_fact handler_mgr_status + ansible.builtin.set_fact: + handler_mgr_status: "{{ 0 in (mgr_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mgr_container_stat.get('rc') == 0 and ceph_mgr_container_stat.get('stdout_lines', []) | length != 0) }}" + when: inventory_hostname in groups.get(mgr_group_name, []) + +- name: Set_fact handler_crash_status + ansible.builtin.set_fact: + handler_crash_status: "{{ crash_process.get('rc') == 0 if not containerized_deployment | bool else (ceph_crash_container_stat.get('rc') == 0 and ceph_crash_container_stat.get('stdout_lines', []) | length != 0) }}" + when: + - inventory_hostname in groups.get(mon_group_name, []) + or inventory_hostname in groups.get(mgr_group_name, []) + or inventory_hostname in groups.get(osd_group_name, []) + or inventory_hostname in groups.get(mds_group_name, []) + or inventory_hostname in groups.get(rgw_group_name, []) + or inventory_hostname in groups.get(rbdmirror_group_name, []) + +- name: Set_fact handler_exporter_status + ansible.builtin.set_fact: + handler_exporter_status: "{{ exporter_process.get('rc') == 0 if not containerized_deployment | bool else (ceph_exporter_container_stat.get('rc') == 0 and ceph_exporter_container_stat.get('stdout_lines', []) | length != 0) }}" + when: + - inventory_hostname in groups.get(mon_group_name, []) + or inventory_hostname in groups.get(mgr_group_name, []) + or inventory_hostname in groups.get(osd_group_name, []) + or inventory_hostname in groups.get(mds_group_name, []) + or inventory_hostname in groups.get(rgw_group_name, []) + or inventory_hostname in 
groups.get(rbdmirror_group_name, []) diff --git a/roles/ceph-handler/templates/restart_mds_daemon.sh.j2 b/roles/ceph-handler/templates/restart_mds_daemon.sh.j2 new file mode 100644 index 0000000..5c6473d --- /dev/null +++ b/roles/ceph-handler/templates/restart_mds_daemon.sh.j2 @@ -0,0 +1,26 @@ +#!/bin/bash + +RETRIES="{{ handler_health_mds_check_retries }}" +DELAY="{{ handler_health_mds_check_delay }}" +MDS_NAME="{{ ansible_facts['hostname'] }}" +{% if containerized_deployment | bool %} +DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }}" +{% endif %} + +# Backward compatibility +$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok +$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok + +# First, restart the daemon +systemctl restart ceph-mds@${MDS_NAME} + +# Wait and ensure the socket exists after restarting the daemon +while [ $RETRIES -ne 0 ]; do + $DOCKER_EXEC test -S $SOCKET && exit 0 + sleep $DELAY + let RETRIES=RETRIES-1 +done +# If we reach this point, it means the socket is not present. +echo "Socket file ${SOCKET} could not be found, which means the Metadata Server is not running. 
Showing ceph-mds unit logs now:" +journalctl -u ceph-mds@${MDS_NAME} +exit 1 diff --git a/roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 b/roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 new file mode 100644 index 0000000..e20c619 --- /dev/null +++ b/roles/ceph-handler/templates/restart_mgr_daemon.sh.j2 @@ -0,0 +1,27 @@ +#!/bin/bash + +RETRIES="{{ handler_health_mgr_check_retries }}" +DELAY="{{ handler_health_mgr_check_delay }}" +MGR_NAME="{{ ansible_facts['hostname'] }}" +{% if containerized_deployment | bool %} +DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_facts['hostname'] }}" +{% endif %} + +# Backward compatibility +$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok +$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok + +systemctl reset-failed ceph-mgr@${MGR_NAME} +# First, restart the daemon +systemctl restart ceph-mgr@${MGR_NAME} + +# Wait and ensure the socket exists after restarting the daemon +while [ $RETRIES -ne 0 ]; do + $DOCKER_EXEC test -S $SOCKET && exit 0 + sleep $DELAY + let RETRIES=RETRIES-1 +done +# If we reach this point, it means the socket is not present. +echo "Socket file ${SOCKET} could not be found, which means ceph manager is not running. 
Showing ceph-mgr unit logs now:" +journalctl -u ceph-mgr@${MGR_NAME} +exit 1 diff --git a/roles/ceph-handler/templates/restart_mon_daemon.sh.j2 b/roles/ceph-handler/templates/restart_mon_daemon.sh.j2 new file mode 100644 index 0000000..2beade1 --- /dev/null +++ b/roles/ceph-handler/templates/restart_mon_daemon.sh.j2 @@ -0,0 +1,49 @@ +#!/bin/bash + +RETRIES="{{ handler_health_mon_check_retries }}" +DELAY="{{ handler_health_mon_check_delay }}" +MONITOR_NAME="{{ monitor_name }}" +{% if containerized_deployment | bool %} +DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" +{% endif %} + +# if daemon is uninstalled, no restarting is needed; so exit with success +systemctl status ceph-mon@{{ ansible_facts['hostname'] }} > /dev/null +if [[ $? -ne 0 ]]; then + exit 0 +fi + +# Backward compatibility +$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok +$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok + +check_quorum() { +while [ $RETRIES -ne 0 ]; do + $DOCKER_EXEC ceph --cluster {{ cluster }} quorum_status --format json | "{{ discovered_interpreter_python }}" -c 'import sys, json; exit(0) if "{{ monitor_name }}" in json.load(sys.stdin)["quorum_names"] else exit(1)' && exit 0 + sleep $DELAY + let RETRIES=RETRIES-1 +done +# If we reach this point, it means there is a problem with the quorum +echo "Error with quorum." 
+echo "cluster status:" +$DOCKER_EXEC ceph --cluster {{ cluster }} -s +echo "quorum status:" +$DOCKER_EXEC ceph --cluster {{ cluster }} daemon mon.${MONITOR_NAME} mon_status +$DOCKER_EXEC ceph --cluster {{ cluster }} daemon mon.${MONITOR_NAME} quorum_status +exit 1 +} + +# First, restart the daemon +systemctl restart ceph-mon@{{ ansible_facts['hostname'] }} + +COUNT=10 +# Wait and ensure the socket exists after restarting the daemon +while [ $COUNT -ne 0 ]; do + $DOCKER_EXEC test -S $SOCKET && check_quorum + sleep $DELAY + let COUNT=COUNT-1 +done +# If we reach this point, it means the socket is not present. +echo "Socket file ${SOCKET} could not be found, which means the monitor is not running. Showing ceph-mon unit logs now:" +journalctl -u ceph-mon@{{ ansible_facts['hostname'] }} +exit 1 diff --git a/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 b/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 new file mode 100644 index 0000000..c1571ba --- /dev/null +++ b/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 @@ -0,0 +1,26 @@ +#!/bin/bash + +RETRIES="{{ handler_health_nfs_check_retries }}" +DELAY="{{ handler_health_nfs_check_delay }}" +NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}" +PID=/var/run/ganesha/ganesha.pid +{% if containerized_deployment | bool %} +DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}" +{% endif %} + +# First, restart the daemon +{% if containerized_deployment | bool -%} +systemctl restart $NFS_NAME +# Wait and ensure the pid exists after restarting the daemon +while [ $RETRIES -ne 0 ]; do + $DOCKER_EXEC test -f $PID && exit 0 + sleep $DELAY + let RETRIES=RETRIES-1 +done +# If we reach this point, it means the pid is not present. +echo "PID file ${PID} could not be found, which means Ganesha is not running. 
Showing $NFS_NAME unit logs now:" +journalctl -u $NFS_NAME +exit 1 +{% else %} +systemctl restart nfs-ganesha +{% endif %} diff --git a/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 b/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 new file mode 100644 index 0000000..51d2d10 --- /dev/null +++ b/roles/ceph-handler/templates/restart_osd_daemon.sh.j2 @@ -0,0 +1,82 @@ +#!/bin/bash + +DELAY="{{ handler_health_osd_check_delay }}" +CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}" + +check_pgs() { + num_pgs=$($container_exec ceph $CEPH_CLI -s -f json | "{{ discovered_interpreter_python }}" -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])') + if [[ "$num_pgs" == "0" ]]; then + return 0 + fi + while [ $RETRIES -ne 0 ]; do + test "$($container_exec ceph $CEPH_CLI -s -f json | "{{ discovered_interpreter_python }}" -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')" -eq "$($container_exec ceph $CEPH_CLI -s -f json | "{{ discovered_interpreter_python }}" -c 'import sys, json; print(sum ( [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if "active+clean" in i["state_name"]]))')" + RET=$? + test $RET -eq 0 && return 0 + sleep $DELAY + let RETRIES=RETRIES-1 + done + # PGs not clean, exiting with return code 1 + echo "Error while running 'ceph $CEPH_CLI -s', PGs were not reported as active+clean" + echo "It is possible that the cluster has less OSDs than the replica configuration" + echo "Will refuse to continue" + $container_exec ceph $CEPH_CLI -s + $container_exec ceph $CEPH_CLI osd dump + $container_exec ceph $CEPH_CLI osd tree + $container_exec ceph $CEPH_CLI osd crush rule dump + exit 1 +} + +wait_for_socket_in_container() { + osd_mount_point=$({{ container_binary }} exec "$1" df --output=target | grep '/var/lib/ceph/osd/') + whoami=$({{ container_binary }} exec "$1" cat $osd_mount_point/whoami) + if ! 
{{ container_binary }} exec "$1" timeout 10 bash -c "while [ ! -e /var/run/ceph/{{ cluster }}-osd.${whoami}.asok ]; do sleep 1 ; done"; then + echo "Timed out while trying to look for a Ceph OSD socket." + echo "Abort mission!" + exit 1 + fi +} + +get_dev_name() { + echo $1 | sed -r 's/ceph-osd@([a-z]{1,4})\.service/\1/' +} + +get_container_id_from_dev_name() { + local id + local count + count=10 + while [ $count -ne 0 ]; do + id=$({{ container_binary }} ps | grep -E "${1}$" | cut -d' ' -f1) + test "$id" != "" && break + sleep $DELAY + let count=count-1 + done + echo "$id" +} + +# For containerized deployments, the unit file looks like: ceph-osd@sda.service +# For non-containerized deployments, the unit file looks like: ceph-osd@NNN.service where NNN is OSD ID +for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([0-9]+).service"); do + # First, restart daemon(s) + systemctl restart "${unit}" + # We need to wait because it may take some time for the socket to actually exists + COUNT=10 + # Wait and ensure the socket exists after restarting the daemon + {% if containerized_deployment | bool %} + osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+') + container_id=$(get_container_id_from_dev_name "ceph-osd-${osd_id}") + container_exec="{{ container_binary }} exec $container_id" + {% else %} + osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+') + {% endif %} + SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok + while [ $COUNT -ne 0 ]; do + RETRIES="{{ handler_health_osd_check_retries }}" + $container_exec test -S "$SOCKET" && check_pgs && continue 2 + sleep $DELAY + let COUNT=COUNT-1 + done + # If we reach this point, it means the socket is not present. + echo "Socket file ${SOCKET} could not be found, which means the osd daemon is not running. 
Showing ceph-osd unit logs now:" + journalctl -u "${unit}" + exit 1 +done diff --git a/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 new file mode 100644 index 0000000..3a59841 --- /dev/null +++ b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 @@ -0,0 +1,26 @@ +#!/bin/bash + +RETRIES="{{ handler_health_rbd_mirror_check_retries }}" +DELAY="{{ handler_health_rbd_mirror_check_delay }}" +RBD_MIRROR_NAME="{{ ansible_facts['hostname'] }}" +{% if containerized_deployment | bool %} +DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_facts['hostname'] }}" +{% endif %} + +# Backward compatibility +$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok +$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok + +# First, restart the daemon +systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME} + +# Wait and ensure the socket exists after restarting the daemon +while [ $RETRIES -ne 0 ]; do + $DOCKER_EXEC test -S $SOCKET && exit 0 + sleep $DELAY + let RETRIES=RETRIES-1 +done +# If we reach this point, it means the socket is not present. +echo "Socket file ${SOCKET} could not be found, which means rbd mirror is not running. 
Showing ceph-rbd-mirror unit logs now:" +journalctl -u ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME} +exit 1 diff --git a/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 new file mode 100644 index 0000000..d7eb36a --- /dev/null +++ b/roles/ceph-handler/templates/restart_rgw_daemon.sh.j2 @@ -0,0 +1,116 @@ +#!/bin/bash + +RETRIES="{{ handler_health_rgw_check_retries }}" +DELAY="{{ handler_health_rgw_check_delay }}" +HOST_NAME="{{ ansible_facts['hostname'] }}" +RGW_NUMS={{ rgw_instances | length | int }} +RGW_FRONTEND_SSL_CERT={{ radosgw_frontend_ssl_certificate }} +if [ -n "$RGW_FRONTEND_SSL_CERT" ]; then + RGW_PROTOCOL=https +else + RGW_PROTOCOL=http +fi +INSTANCES_NAME=({% for i in rgw_instances %}{{ i.instance_name }} {% endfor %}) +HAPROXY_BACKEND=({% for i in rgw_instances %}{{ i.haproxy_backend | default('rgw-backend') }} {% endfor %}) +RGW_IPS=({% for i in rgw_instances %}{{ i.radosgw_address }} {% endfor %}) +RGW_PORTS=({% for i in rgw_instances %}{{ i.radosgw_frontend_port }} {% endfor %}) +RGW_ZONE="{{ rgw_zone }}" +declare -a DOCKER_EXECS +declare -a SOCKET_PREFIX +for ((i=0; i<${RGW_NUMS}; i++)); do + SOCKET_PREFIX[i]="/var/run/ceph/{{ cluster }}-client.rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}" + DOCKER_EXECS[i]="" +{% if containerized_deployment | bool %} + DOCKER_EXECS[i]="{{ container_binary }} exec ceph-rgw-${RGW_ZONE}-${HOST_NAME}-${INSTANCES_NAME[i]}" +{% endif %} +done + +check_socket() { + local i=$1 + local succ=0 + local count=10 + # Wait and ensure the socket exists after restarting the daemon + while [ $count -ne 0 ]; do + SOCKET=$(grep ${SOCKET_PREFIX[i]} /proc/net/unix | awk '{ print $8 }') + if [ -n "${SOCKET}" ]; then + ${DOCKER_EXECS[i]} test -S ${SOCKET} && succ=$((succ+1)) && break + fi + sleep $DELAY + let count=count-1 + done + if [ $succ -ne 1 ]; then + echo "Socket file ${SOCKET} could not be found, which means Rados Gateway is not running. 
Showing ceph-rgw unit logs now:" + journalctl -u ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]} + exit 1 + fi +} + +check_for_curl_or_wget() { + local i=$1 + url="$RGW_PROTOCOL://${RGW_IPS[i]}:${RGW_PORTS[i]}" + if ${DOCKER_EXECS[i]} command -v wget &>/dev/null; then + rgw_test_result=$(${DOCKER_EXECS[i]} wget --no-check-certificate --tries 1 --quiet --server-response --spider -O /dev/null 2>&1 $url | awk 'NR==1{print $2}') + elif ${DOCKER_EXECS[i]} command -v curl &>/dev/null; then + rgw_test_result=$(${DOCKER_EXECS[i]} curl {{ '-g' if ip_version == 'ipv6' else '' }} -k -w "%{http_code}" --silent --output /dev/null $url) + else + echo "It seems that neither curl nor wget are available on your system." + echo "Cannot test rgw connection." + rgw_test_result=0 + fi +} + +check_rest() { + local i=$1 + local succ=0 + while [ $RETRIES -ne 0 ]; do + check_for_curl_or_wget ${i} + if [ $rgw_test_result -eq 200 ] || [ $rgw_test_result -eq 404 ] || [ $rgw_test_result -eq 405 ]; then + succ=$((succ+1)) + break + fi + sleep $DELAY + let RETRIES=RETRIES-1 + done + if [ $succ -ne 1 ]; then + # If we reach this point, it means there is a problem with the connection to rgw + echo "Error connecting locally to Rados Gateway service: $RGW_PROTOCOL://${RGW_IPS[i]}:${RGW_PORTS[i]}" + exit 1 + fi +} + +for ((i=0; i<${RGW_NUMS}; i++)); do + # Check if systemd unit exists + # This is needed for new instances as the restart might trigger before the deployment + if ! systemctl list-units --full --all | grep -q "ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]}"; then + echo "Systemd unit ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]} does not exist." 
+ continue + fi + +{% if handler_rgw_use_haproxy_maintenance %} + # set server weight to 0 on haproxy + echo "set weight ${HAPROXY_BACKEND[i]}/${INSTANCES_NAME[i]} 0" | socat stdio {{ haproxy_socket_path }} + + # wait for the connections to drop + retries={{ handler_rgw_haproxy_maintenance_retries | default(60) }} + while [ $retries -gt 0 ]; do + if [ "$(echo "show servers conn ${HAPROXY_BACKEND[i]}" | socat stdio {{ haproxy_socket_path }} | grep "${HAPROXY_BACKEND[i]}/${INSTANCES_NAME[i]} " | awk '{ print $7 }')" -eq 0 ]; then + break + fi + sleep 1 + let retries=retries-1 + done +{% endif %} + + # Restart the daemon + systemctl restart ceph-radosgw@rgw.${RGW_ZONE}.${HOST_NAME}.${INSTANCES_NAME[i]} + + # Check socket files + check_socket ${i} + # Check rest + check_rest ${i} + +{% if handler_rgw_use_haproxy_maintenance %} + # set server weight to 100 on haproxy + echo "set weight ${HAPROXY_BACKEND[i]}/${INSTANCES_NAME[i]} 100" | socat stdio {{ haproxy_socket_path }} +{% endif %} +done diff --git a/roles/ceph-infra/handlers/main.yml b/roles/ceph-infra/handlers/main.yml new file mode 100644 index 0000000..a2a06d4 --- /dev/null +++ b/roles/ceph-infra/handlers/main.yml @@ -0,0 +1,21 @@ +--- +- name: Disable ntpd + failed_when: false + ansible.builtin.service: + name: '{{ ntp_service_name }}' + state: stopped + enabled: false + +- name: Disable chronyd + failed_when: false + ansible.builtin.service: + name: '{{ chrony_daemon_name }}' + enabled: false + state: stopped + +- name: Disable timesyncd + failed_when: false + ansible.builtin.service: + name: timesyncd + enabled: false + state: stopped diff --git a/roles/ceph-infra/meta/main.yml b/roles/ceph-infra/meta/main.yml new file mode 100644 index 0000000..67bd7ce --- /dev/null +++ b/roles/ceph-infra/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Guillaume Abrioux + description: Handles ceph infra requirements (ntp, firewall, ...) 
+ license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-infra/tasks/configure_firewall.yml b/roles/ceph-infra/tasks/configure_firewall.yml new file mode 100644 index 0000000..a47986b --- /dev/null +++ b/roles/ceph-infra/tasks/configure_firewall.yml @@ -0,0 +1,257 @@ +--- +- name: Check firewalld installation on redhat or SUSE/openSUSE + ansible.builtin.command: rpm -q firewalld # noqa command-instead-of-module + register: firewalld_pkg_query + ignore_errors: true + check_mode: false + changed_when: false + tags: firewall + +- name: Configuring firewalld + when: (firewalld_pkg_query.get('rc', 1) == 0 + or is_atomic | bool) + tags: firewall + block: + - name: Install firewalld python binding + ansible.builtin.package: + name: "python{{ ansible_facts['python']['version']['major'] }}-firewall" + tags: with_pkg + when: not is_atomic | bool + + - name: Start firewalld + ansible.builtin.service: + name: firewalld + state: started + enabled: true + register: result + retries: 5 + delay: 3 + until: result is succeeded + + - name: Open ceph networks on monitor + ansible.posix.firewalld: + zone: "{{ ceph_mon_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - mon_group_name is defined + - mon_group_name in group_names + + - name: Open ceph networks on manager when collocated + ansible.posix.firewalld: + zone: "{{ ceph_mgr_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - mon_group_name is defined + - mon_group_name in group_names + - groups.get(mgr_group_name, []) | length == 0 + + - name: Open monitor and manager ports + ansible.posix.firewalld: + service: "{{ item.service }}" + zone: "{{ item.zone }}" + permanent: true + immediate: true + state: enabled + with_items: + - { 
'service': 'ceph-mon', 'zone': "{{ ceph_mon_firewall_zone }}" } + - { 'service': 'ceph', 'zone': "{{ ceph_mgr_firewall_zone }}" } + when: + - mon_group_name is defined + - mon_group_name in group_names + + - name: Open ceph networks on manager when dedicated + ansible.posix.firewalld: + zone: "{{ ceph_mgr_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - mgr_group_name is defined + - mgr_group_name in group_names + - mgr_group_name | length > 0 + + - name: Open manager ports + ansible.posix.firewalld: + service: ceph + zone: "{{ ceph_mgr_firewall_zone }}" + permanent: true + immediate: true + state: enabled + when: + - mgr_group_name is defined + - mgr_group_name in group_names + + - name: Open ceph networks on osd + ansible.posix.firewalld: + zone: "{{ ceph_osd_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') | union(cluster_network.split(',')) }}" + when: + - osd_group_name is defined + - osd_group_name in group_names + + - name: Open osd ports + ansible.posix.firewalld: + service: ceph + zone: "{{ ceph_osd_firewall_zone }}" + permanent: true + immediate: true + state: enabled + when: + - osd_group_name is defined + - osd_group_name in group_names + + - name: Open ceph networks on rgw + ansible.posix.firewalld: + zone: "{{ ceph_rgw_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - rgw_group_name is defined + - rgw_group_name in group_names + + - name: Open rgw ports + ansible.posix.firewalld: + port: "{{ item.radosgw_frontend_port }}/tcp" + zone: "{{ ceph_rgw_firewall_zone }}" + permanent: true + immediate: true + state: enabled + loop: "{{ rgw_instances }}" + when: + - rgw_group_name is defined + - rgw_group_name in group_names + + - name: Open ceph networks on mds + 
ansible.posix.firewalld: + zone: "{{ ceph_mds_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - mds_group_name is defined + - mds_group_name in group_names + + - name: Open mds ports + ansible.posix.firewalld: + service: ceph + zone: "{{ ceph_mds_firewall_zone }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - mds_group_name is defined + - mds_group_name in group_names + + - name: Open ceph networks on nfs + ansible.posix.firewalld: + zone: "{{ ceph_nfs_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - nfs_group_name is defined + - nfs_group_name in group_names + + - name: Open nfs ports + ansible.posix.firewalld: + service: nfs + zone: "{{ ceph_nfs_firewall_zone }}" + permanent: true + immediate: true + state: enabled + when: + - nfs_group_name is defined + - nfs_group_name in group_names + + - name: Open nfs ports (portmapper) + ansible.posix.firewalld: + port: "111/tcp" + zone: "{{ ceph_nfs_firewall_zone }}" + permanent: true + immediate: true + state: enabled + when: + - nfs_group_name is defined + - nfs_group_name in group_names + + - name: Open ceph networks on rbdmirror + ansible.posix.firewalld: + zone: "{{ ceph_rbdmirror_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - rbdmirror_group_name is defined + - rbdmirror_group_name in group_names + + - name: Open rbdmirror ports + ansible.posix.firewalld: + service: ceph + zone: "{{ ceph_rbdmirror_firewall_zone }}" + permanent: true + immediate: true + state: enabled + when: + - rbdmirror_group_name is defined + - rbdmirror_group_name in group_names + + - name: Open dashboard ports + ansible.builtin.include_tasks: dashboard_firewall.yml + 
when: dashboard_enabled | bool + + - name: Open ceph networks on haproxy + ansible.posix.firewalld: + zone: "{{ ceph_rgwloadbalancer_firewall_zone }}" + source: "{{ item }}" + permanent: true + immediate: true + state: enabled + with_items: "{{ public_network.split(',') }}" + when: + - rgwloadbalancer_group_name is defined + - rgwloadbalancer_group_name in group_names + + - name: Open haproxy ports + ansible.posix.firewalld: + port: "{{ haproxy_frontend_port | default(80) }}/tcp" + zone: "{{ ceph_rgwloadbalancer_firewall_zone }}" + permanent: true + immediate: true + state: enabled + when: + - rgwloadbalancer_group_name is defined + - rgwloadbalancer_group_name in group_names + + - name: Add rich rule for keepalived vrrp + ansible.posix.firewalld: + rich_rule: 'rule protocol value="vrrp" accept' + permanent: true + immediate: true + state: enabled + when: + - rgwloadbalancer_group_name is defined + - rgwloadbalancer_group_name in group_names diff --git a/roles/ceph-infra/tasks/dashboard_firewall.yml b/roles/ceph-infra/tasks/dashboard_firewall.yml new file mode 100644 index 0000000..69639a7 --- /dev/null +++ b/roles/ceph-infra/tasks/dashboard_firewall.yml @@ -0,0 +1,70 @@ +--- +- name: Open node_exporter port + ansible.posix.firewalld: + port: "{{ node_exporter_port }}/tcp" + zone: "{{ ceph_dashboard_firewall_zone }}" + permanent: true + immediate: true + state: enabled + +- name: Open dashboard port in firewalld + when: + - mgr_group_name is defined + - (groups.get(mgr_group_name,[]) | length > 0 and mgr_group_name in group_names) or + (groups.get(mgr_group_name,[]) | length == 0 and mon_group_name in group_names) + block: + - name: Open dashboard port + ansible.posix.firewalld: + port: "{{ dashboard_port }}/tcp" + zone: "{{ ceph_dashboard_firewall_zone }}" + permanent: true + immediate: true + state: enabled + + - name: Open mgr/prometheus port + ansible.posix.firewalld: + port: "9283/tcp" + zone: "{{ ceph_dashboard_firewall_zone }}" + permanent: true + immediate: 
true + state: enabled + +- name: Open monitoring stack tcp ports in firewalld + when: + - monitoring_group_name is defined + - monitoring_group_name in group_names + block: + - name: Open grafana port + ansible.posix.firewalld: + port: "{{ grafana_port }}/tcp" + zone: "{{ ceph_dashboard_firewall_zone }}" + permanent: true + immediate: true + state: enabled + + - name: Open prometheus port + ansible.posix.firewalld: + port: "{{ prometheus_port }}/tcp" + zone: "{{ ceph_dashboard_firewall_zone }}" + permanent: true + immediate: true + state: enabled + + - name: Open alertmanager port + ansible.posix.firewalld: + port: "{{ alertmanager_port }}/tcp" + zone: "{{ ceph_dashboard_firewall_zone }}" + permanent: true + immediate: true + state: enabled + + - name: Open alertmanager cluster port + ansible.posix.firewalld: + port: "{{ alertmanager_cluster_port }}/{{ item }}" + zone: "{{ ceph_dashboard_firewall_zone }}" + permanent: true + immediate: true + state: enabled + with_items: + - "tcp" + - "udp" diff --git a/roles/ceph-infra/tasks/main.yml b/roles/ceph-infra/tasks/main.yml new file mode 100644 index 0000000..407beec --- /dev/null +++ b/roles/ceph-infra/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Update cache for Debian based OSs + ansible.builtin.apt: + update_cache: true + when: ansible_facts['os_family'] == "Debian" + register: result + until: result is succeeded + tags: package-install + +- name: Include_tasks configure_firewall.yml + ansible.builtin.include_tasks: configure_firewall.yml + when: + - configure_firewall | bool + - ansible_facts['os_family'] in ['RedHat', 'Suse'] + tags: configure_firewall + +- name: Include_tasks setup_ntp.yml + ansible.builtin.include_tasks: setup_ntp.yml + when: ntp_service_enabled | bool + tags: configure_ntp + +- name: Ensure logrotate is installed + ansible.builtin.package: + name: logrotate + state: present + register: result + until: result is succeeded + tags: with_pkg + when: + - not is_atomic | bool + - 
containerized_deployment | bool + - inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) + +- name: Add logrotate configuration + ansible.builtin.template: + src: logrotate.conf.j2 + dest: /etc/logrotate.d/ceph + mode: "0644" + owner: root + group: root + when: + - containerized_deployment | bool + - inventory_hostname in groups.get(mon_group_name, []) or + inventory_hostname in groups.get(osd_group_name, []) or + inventory_hostname in groups.get(mds_group_name, []) or + inventory_hostname in groups.get(rgw_group_name, []) or + inventory_hostname in groups.get(mgr_group_name, []) or + inventory_hostname in groups.get(rbdmirror_group_name, []) diff --git a/roles/ceph-infra/tasks/setup_ntp.yml b/roles/ceph-infra/tasks/setup_ntp.yml new file mode 100644 index 0000000..92cd79f --- /dev/null +++ b/roles/ceph-infra/tasks/setup_ntp.yml @@ -0,0 +1,68 @@ +--- +- name: Set ntp service and chrony daemon name for Debian family + ansible.builtin.set_fact: + chrony_daemon_name: chrony + ntp_service_name: ntp + when: ansible_facts['os_family'] == 'Debian' + +- name: Set ntp service and chrony daemon name for RedHat and Suse family + ansible.builtin.set_fact: + chrony_daemon_name: chronyd + ntp_service_name: ntpd + when: ansible_facts['os_family'] in ['RedHat', 'Suse'] + +# Installation of NTP daemons needs to be a separate task since installations +# can't happen on Atomic +- name: Install the ntp daemon + when: not is_atomic | bool + block: + - name: Install ntpd + ansible.builtin.package: + name: ntp + state: present + register: result + until: result is succeeded + when: ntp_daemon_type == "ntpd" + + - name: Install chrony + ansible.builtin.package: + name: chrony + state: present + 
register: result + until: result is succeeded + when: ntp_daemon_type == "chronyd" + +- name: Enable the ntp daemon and disable the rest + block: + - name: Enable timesyncing on timesyncd + ansible.builtin.command: timedatectl set-ntp on + notify: + - Disable ntpd + - Disable chronyd + changed_when: false + when: ntp_daemon_type == "timesyncd" + + - name: Disable time sync using timesyncd if we are not using it + ansible.builtin.command: timedatectl set-ntp no + changed_when: false + when: ntp_daemon_type != "timesyncd" + + - name: Enable ntpd + ansible.builtin.service: + name: "{{ ntp_service_name }}" + enabled: true + state: started + notify: + - Disable chronyd + - Disable timesyncd + when: ntp_daemon_type == "ntpd" + + - name: Enable chronyd + ansible.builtin.service: + name: "{{ chrony_daemon_name }}" + enabled: true + state: started + notify: + - Disable ntpd + - Disable timesyncd + when: ntp_daemon_type == "chronyd" diff --git a/roles/ceph-infra/templates/logrotate.conf.j2 b/roles/ceph-infra/templates/logrotate.conf.j2 new file mode 100644 index 0000000..fa8ce46 --- /dev/null +++ b/roles/ceph-infra/templates/logrotate.conf.j2 @@ -0,0 +1,13 @@ +/var/log/ceph/*.log { + rotate {{ ceph_logrotate_rotate | default(7) }} + {{ ceph_logrotate_frequency | default('daily') }} + compress + copytruncate + sharedscripts + postrotate + killall -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || pkill -1 -x "ceph-mon|ceph-mgr|ceph-mds|ceph-osd|ceph-fuse|radosgw|rbd-mirror" || true + endscript + missingok + notifempty + su root root +} diff --git a/roles/ceph-mds/LICENSE b/roles/ceph-mds/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/roles/ceph-mds/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-mds/README.md b/roles/ceph-mds/README.md new file mode 100644 index 0000000..3d0b8c8 --- /dev/null +++ b/roles/ceph-mds/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-mds + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-mds/defaults/main.yml b/roles/ceph-mds/defaults/main.yml new file mode 100644 index 0000000..e3f90b5 --- /dev/null +++ b/roles/ceph-mds/defaults/main.yml @@ -0,0 +1,43 @@ +--- +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +# Even though MDS nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on MDS nodes. 
Setting 'copy_admin_key' to 'true' +# will copy the admin key to the /etc/ceph/ directory +copy_admin_key: false + +########## +# DOCKER # +########## + +# Resource limitation +# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints +# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations +ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m" +ceph_mds_docker_cpu_limit: 4 + +ceph_config_keys: [] # DON'T TOUCH ME +# If you want to add parameters, you should retain the existing ones and include the new ones. +ceph_mds_container_params: + volumes: + - /var/lib/ceph/bootstrap-mds:/var/lib/ceph/bootstrap-mds:z + - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}:/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}:z + args: + - -f + - -i={{ ansible_facts['hostname'] }} + + +########### +# SYSTEMD # +########### +# ceph_mds_systemd_overrides will override the systemd settings +# for the ceph-mds services. 
+# For example,to set "PrivateDevices=false" you can specify: +# ceph_mds_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-mds/files/ceph-mds.target b/roles/ceph-mds/files/ceph-mds.target new file mode 100644 index 0000000..ae15387 --- /dev/null +++ b/roles/ceph-mds/files/ceph-mds.target @@ -0,0 +1,9 @@ +[Unit] +Description=ceph target allowing to start/stop all ceph-mds@.service instances at once +PartOf=ceph.target +After=ceph-mon.target +Before=ceph.target +Wants=ceph.target ceph-mon.target + +[Install] +WantedBy=multi-user.target ceph.target \ No newline at end of file diff --git a/roles/ceph-mds/meta/main.yml b/roles/ceph-mds/meta/main.yml new file mode 100644 index 0000000..24fd45d --- /dev/null +++ b/roles/ceph-mds/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Installs Ceph Metadata + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-mds/tasks/common.yml b/roles/ceph-mds/tasks/common.yml new file mode 100644 index 0000000..0ffbb4f --- /dev/null +++ b/roles/ceph-mds/tasks/common.yml @@ -0,0 +1,65 @@ +--- +- name: Create bootstrap-mds and mds directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_directories_mode }}" + with_items: + - /var/lib/ceph/bootstrap-mds/ + - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }} + +- name: Get keys from monitors + ceph_key_info: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary 
}}" + register: _mds_keys + with_items: + - { name: "client.bootstrap-mds", path: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring", copy_key: true } + - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" } + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + when: + - cephx | bool + - item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ item.item.path }}" + content: "{{ item.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + with_items: "{{ _mds_keys.results }}" + when: + - cephx | bool + - item.item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Create mds keyring + ceph_key: + name: "mds.{{ ansible_facts['hostname'] }}" + cluster: "{{ cluster }}" + user: client.bootstrap-mds + user_key: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring" + caps: + mon: "allow profile mds" + mds: "allow" + osd: "allow rwx" + dest: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring" + import_key: false + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + when: cephx | bool diff --git a/roles/ceph-mds/tasks/containerized.yml b/roles/ceph-mds/tasks/containerized.yml new file mode 100644 index 0000000..cb49455 --- /dev/null +++ b/roles/ceph-mds/tasks/containerized.yml @@ -0,0 +1,26 @@ +--- +- name: Include_tasks systemd.yml + 
ansible.builtin.include_tasks: systemd.yml + +- name: Enable ceph-mds.target + ansible.builtin.service: + name: ceph-mds.target + enabled: true + daemon_reload: true + when: containerized_deployment | bool + +- name: Systemd start mds container + ansible.builtin.systemd: + name: ceph-mds@{{ ansible_facts['hostname'] }} + state: started + enabled: true + masked: false + daemon_reload: true + +- name: Wait for mds socket to exist + ansible.builtin.command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'" + changed_when: false + register: multi_mds_socket + retries: 5 + delay: 15 + until: multi_mds_socket.rc == 0 diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml new file mode 100644 index 0000000..5bdb80b --- /dev/null +++ b/roles/ceph-mds/tasks/create_mds_filesystems.yml @@ -0,0 +1,36 @@ +--- +- name: Import ceph-facts role + ansible.builtin.import_role: + name: ceph-facts + tasks_from: get_def_crush_rule_name.yml + +- name: Create filesystem pools + ceph_pool: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + pg_num: "{{ item.pg_num | default(omit) }}" + pgp_num: "{{ item.pgp_num | default(omit) }}" + size: "{{ item.size | default(omit) }}" + min_size: "{{ item.min_size | default(omit) }}" + pool_type: "{{ item.type | default('replicated') }}" + rule_name: "{{ item.rule_name | default(omit) }}" + erasure_profile: "{{ item.erasure_profile | default(omit) }}" + pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}" + target_size_ratio: "{{ item.target_size_ratio | default(omit) }}" + with_items: "{{ cephfs_pools }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool 
else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + +- name: Create ceph filesystem + ceph_fs: + name: "{{ cephfs }}" + cluster: "{{ cluster }}" + data: "{{ cephfs_data_pool.name }}" + metadata: "{{ cephfs_metadata_pool.name }}" + max_mds: "{{ mds_max_mds if not rolling_update | bool else omit }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" diff --git a/roles/ceph-mds/tasks/main.yml b/roles/ceph-mds/tasks/main.yml new file mode 100644 index 0000000..3879409 --- /dev/null +++ b/roles/ceph-mds/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Include create_mds_filesystems.yml + ansible.builtin.include_tasks: create_mds_filesystems.yml + when: + - inventory_hostname == groups[mds_group_name] | first + - not rolling_update | bool + +- name: Include common.yml + ansible.builtin.include_tasks: common.yml + +- name: Non_containerized.yml + ansible.builtin.include_tasks: non_containerized.yml + when: not containerized_deployment | bool + +- name: Containerized.yml + ansible.builtin.include_tasks: containerized.yml + when: containerized_deployment | bool diff --git a/roles/ceph-mds/tasks/non_containerized.yml b/roles/ceph-mds/tasks/non_containerized.yml new file mode 100644 index 0000000..c9d69f9 --- /dev/null +++ b/roles/ceph-mds/tasks/non_containerized.yml @@ -0,0 +1,48 @@ +--- +- name: Install ceph mds for debian + ansible.builtin.apt: + name: ceph-mds + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}" + when: + - mds_group_name in group_names + - 
ansible_facts['os_family'] == 'Debian' + register: result + until: result is succeeded + +- name: Install ceph-mds package on redhat or SUSE/openSUSE + ansible.builtin.package: + name: "ceph-mds" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + register: result + until: result is succeeded + when: + - mds_group_name in group_names + - ansible_facts['os_family'] in ['Suse', 'RedHat'] + +- name: Ensure systemd service override directory exists + ansible.builtin.file: + state: directory + path: "/etc/systemd/system/ceph-mds@.service.d/" + mode: "0755" + when: + - ceph_mds_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + +- name: Add ceph-mds systemd service overrides + openstack.config_template.config_template: + src: "ceph-mds.service.d-overrides.j2" + dest: "/etc/systemd/system/ceph-mds@.service.d/ceph-mds-systemd-overrides.conf" + config_overrides: "{{ ceph_mds_systemd_overrides | default({}) }}" + config_type: "ini" + when: + - ceph_mds_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + +- name: Start and add that the metadata service to the init sequence + ansible.builtin.systemd: + name: ceph-mds@{{ ansible_facts['hostname'] }} + state: started + enabled: true + masked: false + changed_when: false diff --git a/roles/ceph-mds/tasks/systemd.yml b/roles/ceph-mds/tasks/systemd.yml new file mode 100644 index 0000000..4c6296b --- /dev/null +++ b/roles/ceph-mds/tasks/systemd.yml @@ -0,0 +1,16 @@ +--- +- name: Generate systemd unit file + ansible.builtin.template: + src: "{{ role_path }}/templates/ceph-mds.service.j2" + dest: /etc/systemd/system/ceph-mds@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph mdss + +- name: Generate systemd ceph-mds target file + ansible.builtin.copy: + src: ceph-mds.target + dest: /etc/systemd/system/ceph-mds.target + mode: "0644" + when: containerized_deployment | bool diff --git 
a/roles/ceph-mds/templates/ceph-mds.service.d-overrides.j2 b/roles/ceph-mds/templates/ceph-mds.service.d-overrides.j2 new file mode 100644 index 0000000..e2bb153 --- /dev/null +++ b/roles/ceph-mds/templates/ceph-mds.service.d-overrides.j2 @@ -0,0 +1 @@ +# {{ ansible_managed }} diff --git a/roles/ceph-mds/templates/ceph-mds.service.j2 b/roles/ceph-mds/templates/ceph-mds.service.j2 new file mode 100644 index 0000000..91661e1 --- /dev/null +++ b/roles/ceph-mds/templates/ceph-mds.service.j2 @@ -0,0 +1,57 @@ +[Unit] +Description=Ceph MDS +PartOf=ceph-mds.target +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target +{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_mds_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_mds_docker_cpu_limit|int %} + +[Service] +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_facts['hostname'] }} +ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph +{% else %} +ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }} +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_facts['hostname'] }} +ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} + --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ + --security-opt label=disable \ + --memory={{ ceph_mds_docker_memory_limit }} \ + --cpus={{ cpu_limit }} \ +{% for v in ceph_common_container_params['volumes'] + ceph_mds_container_params['volumes'] | default([]) %} + -v 
{{ v }} \ +{% endfor %} +{% for k, v in (ceph_common_container_params['envs'] | combine(ceph_mds_container_params['envs'] | default({}))).items() %} + -e {{ k }}={{ v }} \ +{% endfor %} + --name=ceph-mds-{{ ansible_facts['hostname'] }} \ + --entrypoint=/usr/bin/ceph-mds \ + {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ + {{ (ceph_common_container_params['args'] + ceph_mds_container_params['args'] | default([])) | join(' ') }} +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }} +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=ceph.target diff --git a/roles/ceph-mgr/LICENSE b/roles/ceph-mgr/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/roles/ceph-mgr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-mgr/README.md b/roles/ceph-mgr/README.md new file mode 100644 index 0000000..453b870 --- /dev/null +++ b/roles/ceph-mgr/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-mgr + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-mgr/defaults/main.yml b/roles/ceph-mgr/defaults/main.yml new file mode 100644 index 0000000..731b7e0 --- /dev/null +++ b/roles/ceph-mgr/defaults/main.yml @@ -0,0 +1,57 @@ +--- +########## +# GLOBAL # +########## +# Even though MGR nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on MGR nodes. Setting 'copy_admin_key' to 'true' +# will copy the admin key to the /etc/ceph/ directory +copy_admin_key: false +mgr_secret: 'mgr_secret' + + +########### +# MODULES # +########### +# Ceph mgr modules to enable, to view the list of available modules see: http://docs.ceph.com/docs/CEPH_VERSION/mgr/ +# and replace CEPH_VERSION with your current Ceph version, e,g: 'mimic' +ceph_mgr_modules: [] + +############ +# PACKAGES # +############ +# Ceph mgr packages to install, ceph-mgr + extra module packages. 
+ceph_mgr_packages: + - ceph-mgr + + +########## +# DOCKER # +########## + +# Resource limitation +# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints +# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations +ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m" +ceph_mgr_docker_cpu_limit: 1 + +ceph_config_keys: [] # DON'T TOUCH ME +# If you want to add parameters, you should retain the existing ones and include the new ones. +ceph_mgr_container_params: + volumes: + - /var/lib/ceph/mgr:/var/lib/ceph/mgr:z,rshared + - /var/lib/ceph/bootstrap-mgr:/var/lib/ceph/bootstrap-mgr:z + args: + - -f + - -i={{ ansible_facts['hostname'] }} + + +########### +# SYSTEMD # +########### +# ceph_mgr_systemd_overrides will override the systemd settings +# for the ceph-mgr services. +# For example,to set "PrivateDevices=false" you can specify: +# ceph_mgr_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-mgr/files/ceph-mgr.target b/roles/ceph-mgr/files/ceph-mgr.target new file mode 100644 index 0000000..abb2598 --- /dev/null +++ b/roles/ceph-mgr/files/ceph-mgr.target @@ -0,0 +1,9 @@ +[Unit] +Description=ceph target allowing to start/stop all ceph-mgr@.service instances at once +PartOf=ceph.target +After=ceph-mon.target +Before=ceph.target +Wants=ceph.target ceph-mon.target + +[Install] +WantedBy=multi-user.target ceph.target \ No newline at end of file diff --git a/roles/ceph-mgr/meta/main.yml b/roles/ceph-mgr/meta/main.yml new file mode 100644 index 0000000..95b8f79 --- /dev/null +++ b/roles/ceph-mgr/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Installs Ceph Manager + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git 
a/roles/ceph-mgr/tasks/common.yml b/roles/ceph-mgr/tasks/common.yml new file mode 100644 index 0000000..b0f7fa8 --- /dev/null +++ b/roles/ceph-mgr/tasks/common.yml @@ -0,0 +1,100 @@ +--- +- name: Create mgr directory + ansible.builtin.file: + path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }} + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_directories_mode }}" + +- name: Fetch ceph mgr keyring + ceph_key: + name: "mgr.{{ ansible_facts['hostname'] }}" + caps: + mon: allow profile mgr + osd: allow * + mds: allow * + cluster: "{{ cluster }}" + secret: "{{ (mgr_secret != 'mgr_secret') | ternary(mgr_secret, omit) }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0400" + dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + when: groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)" + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Create and copy keyrings + when: groups.get(mgr_group_name, []) | length > 0 + block: + - name: Create ceph mgr keyring(s) on a mon node + ceph_key: + name: "mgr.{{ hostvars[item]['ansible_facts']['hostname'] }}" + caps: + mon: allow profile mgr + osd: allow * + mds: allow * + cluster: "{{ cluster }}" + secret: "{{ (mgr_secret != 'mgr_secret') | ternary(mgr_secret, omit) }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0400" + environment: + 
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ groups.get(mgr_group_name, []) }}" + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Set_fact _mgr_keys + ansible.builtin.set_fact: + _mgr_keys: + - { 'name': 'client.admin', 'path': "/etc/ceph/{{ cluster }}.client.admin.keyring", 'copy_key': "{{ copy_admin_key }}" } + - { 'name': "mgr.{{ ansible_facts['hostname'] }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring", 'copy_key': true } + + - name: Get keys from monitors + ceph_key_info: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _mgr_keys + with_items: "{{ _mgr_keys }}" + delegate_to: "{{ groups[mon_group_name][0] if running_mon is undefined else running_mon }}" + when: + - cephx | bool + - item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ item.item.path }}" + content: "{{ item.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + with_items: "{{ _mgr_keys.results }}" + when: + - cephx | bool + - item is not skipped + - item.item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Set mgr key permissions + ansible.builtin.file: + path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring + owner: "{{ ceph_uid if containerized_deployment | bool else 
'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + when: cephx | bool + +- name: Append dashboard modules to ceph_mgr_modules + ansible.builtin.set_fact: + ceph_mgr_modules: "{{ ceph_mgr_modules | union(['dashboard', 'prometheus']) }}" + when: dashboard_enabled | bool diff --git a/roles/ceph-mgr/tasks/main.yml b/roles/ceph-mgr/tasks/main.yml new file mode 100644 index 0000000..a4caaa3 --- /dev/null +++ b/roles/ceph-mgr/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- name: Set_fact container_exec_cmd + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" + with_items: "{{ groups.get(mon_group_name, []) }}" + delegate_to: "{{ item }}" + delegate_facts: true + run_once: true + when: containerized_deployment | bool + +- name: Include common.yml + ansible.builtin.include_tasks: common.yml + +- name: Include pre_requisite.yml + ansible.builtin.include_tasks: pre_requisite.yml + when: not containerized_deployment | bool + +- name: Include start_mgr.yml + ansible.builtin.include_tasks: start_mgr.yml + +- name: Include mgr_modules.yml + ansible.builtin.include_tasks: mgr_modules.yml + when: + - ceph_mgr_modules | length > 0 + - ((groups[mgr_group_name] | default([]) | length == 0 and inventory_hostname == groups[mon_group_name] | last) or + (groups[mgr_group_name] | default([]) | length > 0 and inventory_hostname == groups[mgr_group_name] | last)) diff --git a/roles/ceph-mgr/tasks/mgr_modules.yml b/roles/ceph-mgr/tasks/mgr_modules.yml new file mode 100644 index 0000000..6e26d27 --- /dev/null +++ b/roles/ceph-mgr/tasks/mgr_modules.yml @@ -0,0 +1,51 @@ +--- +- name: Wait for all mgr to be up + ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" + register: mgr_dump + retries: 30 + delay: 5 + delegate_to: "{{ 
groups[mon_group_name][0] }}" + changed_when: false + until: + - mgr_dump.rc == 0 + - (mgr_dump.stdout | from_json).available | bool + when: not ansible_check_mode + +- name: Get enabled modules from ceph-mgr + ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls" + check_mode: false + changed_when: false + register: _ceph_mgr_modules + delegate_to: "{{ groups[mon_group_name][0] }}" + +- name: Set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict) + ansible.builtin.set_fact: + _ceph_mgr_modules: "{{ _ceph_mgr_modules.get('stdout', '{}') | from_json }}" + +- name: Set _disabled_ceph_mgr_modules fact + ansible.builtin.set_fact: + _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}" + +- name: Disable ceph mgr enabled modules + ceph_mgr_module: + name: "{{ item }}" + cluster: "{{ cluster }}" + state: disable + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ _ceph_mgr_modules.get('enabled_modules', []) }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + when: item not in ceph_mgr_modules + +- name: Add modules to ceph-mgr + ceph_mgr_module: + name: "{{ item }}" + cluster: "{{ cluster }}" + state: enable + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ ceph_mgr_modules }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + when: (item 
in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == []) diff --git a/roles/ceph-mgr/tasks/pre_requisite.yml b/roles/ceph-mgr/tasks/pre_requisite.yml new file mode 100644 index 0000000..0f48394 --- /dev/null +++ b/roles/ceph-mgr/tasks/pre_requisite.yml @@ -0,0 +1,45 @@ +--- +- name: Set_fact ceph_mgr_packages for sso + ansible.builtin.set_fact: + ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_facts['distribution_major_version'] | int == 8 else 'python-saml']) }}" + when: + - dashboard_enabled | bool + - ansible_facts['distribution'] == 'RedHat' + +- name: Set_fact ceph_mgr_packages for dashboard + ansible.builtin.set_fact: + ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-dashboard']) }}" + when: dashboard_enabled | bool + +- name: Set_fact ceph_mgr_packages for non el7 distribution + ansible.builtin.set_fact: + ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-diskprediction-local']) }}" + when: + - ansible_facts['os_family'] != 'RedHat' + - ansible_facts['distribution_major_version'] | int != 7 + +- name: Enable crb repository + community.general.dnf_config_manager: + name: crb + state: enabled + when: + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['distribution_major_version'] | int == 9 + +- name: Install ceph-mgr packages on RedHat or SUSE + ansible.builtin.package: + name: '{{ ceph_mgr_packages }}' + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + register: result + until: result is succeeded + when: ansible_facts['os_family'] in ['RedHat', 'Suse'] + +- name: Install ceph-mgr packages for debian + ansible.builtin.apt: + name: '{{ ceph_mgr_packages }}' + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and 
ceph_use_distro_backports | bool else '' }}" + register: result + until: result is succeeded + when: ansible_facts['os_family'] == 'Debian' + tags: package-install diff --git a/roles/ceph-mgr/tasks/start_mgr.yml b/roles/ceph-mgr/tasks/start_mgr.yml new file mode 100644 index 0000000..ffb1bfd --- /dev/null +++ b/roles/ceph-mgr/tasks/start_mgr.yml @@ -0,0 +1,38 @@ +--- +- name: Ensure systemd service override directory exists + ansible.builtin.file: + state: directory + path: "/etc/systemd/system/ceph-mgr@.service.d/" + mode: "0755" + when: + - ceph_mgr_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + +- name: Add ceph-mgr systemd service overrides + openstack.config_template.config_template: + src: "ceph-mgr.service.d-overrides.j2" + dest: "/etc/systemd/system/ceph-mgr@.service.d/ceph-mgr-systemd-overrides.conf" + config_overrides: "{{ ceph_mgr_systemd_overrides | default({}) }}" + config_type: "ini" + when: + - ceph_mgr_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + when: containerized_deployment | bool + +- name: Enable ceph-mgr.target + ansible.builtin.service: + name: ceph-mgr.target + enabled: true + daemon_reload: true + when: containerized_deployment | bool + +- name: Systemd start mgr + ansible.builtin.systemd: + name: ceph-mgr@{{ ansible_facts['hostname'] }} + state: started + enabled: true + masked: false + daemon_reload: true diff --git a/roles/ceph-mgr/tasks/systemd.yml b/roles/ceph-mgr/tasks/systemd.yml new file mode 100644 index 0000000..7ee9f5a --- /dev/null +++ b/roles/ceph-mgr/tasks/systemd.yml @@ -0,0 +1,16 @@ +--- +- name: Generate systemd unit file + ansible.builtin.template: + src: "{{ role_path }}/templates/ceph-mgr.service.j2" + dest: /etc/systemd/system/ceph-mgr@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph mgrs + +- name: Generate systemd ceph-mgr target file + 
ansible.builtin.copy: + src: ceph-mgr.target + dest: /etc/systemd/system/ceph-mgr.target + mode: "0644" + when: containerized_deployment | bool diff --git a/roles/ceph-mgr/templates/ceph-mgr.service.d-overrides.j2 b/roles/ceph-mgr/templates/ceph-mgr.service.d-overrides.j2 new file mode 100644 index 0000000..e2bb153 --- /dev/null +++ b/roles/ceph-mgr/templates/ceph-mgr.service.d-overrides.j2 @@ -0,0 +1 @@ +# {{ ansible_managed }} diff --git a/roles/ceph-mgr/templates/ceph-mgr.service.j2 b/roles/ceph-mgr/templates/ceph-mgr.service.j2 new file mode 100644 index 0000000..f8fac56 --- /dev/null +++ b/roles/ceph-mgr/templates/ceph-mgr.service.j2 @@ -0,0 +1,56 @@ +[Unit] +Description=Ceph Manager +PartOf=ceph-mgr.target +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_facts['hostname'] }} +ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph +{% else %} +ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }} +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_facts['hostname'] }} +ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} + --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ + --security-opt label=disable \ + --memory={{ ceph_mgr_docker_memory_limit }} \ + --cpus={{ ceph_mgr_docker_cpu_limit }} \ +{% for v in ceph_common_container_params['volumes'] + ceph_mgr_container_params['volumes'] | default([]) 
%} + -v {{ v }} \ +{% endfor %} +{% for k, v in (ceph_common_container_params['envs'] | combine(ceph_mgr_container_params['envs'] | default({}))).items() %} + -e {{ k }}={{ v }} \ +{% endfor %} + --name=ceph-mgr-{{ ansible_facts['hostname'] }} \ + --entrypoint=/usr/bin/ceph-mgr \ + {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ + {{ (ceph_common_container_params['args'] + ceph_mgr_container_params['args'] | default([])) | join(' ') }} +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }} +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=ceph.target diff --git a/roles/ceph-mon/LICENSE b/roles/ceph-mon/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/roles/ceph-mon/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-mon/README.md b/roles/ceph-mon/README.md new file mode 100644 index 0000000..f89083b --- /dev/null +++ b/roles/ceph-mon/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-mon + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-mon/defaults/main.yml b/roles/ceph-mon/defaults/main.yml new file mode 100644 index 0000000..ec33645 --- /dev/null +++ b/roles/ceph-mon/defaults/main.yml @@ -0,0 +1,69 @@ +--- +# You can override vars by using host or group vars + +########### +# GENERAL # +########### +mon_group_name: mons + +# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT +monitor_secret: "{{ monitor_keyring.stdout }}" +admin_secret: 'admin_secret' + +# Secure your cluster +# This will set the following flags on all the pools: +# * nosizechange +# * nopgchange +# * nodelete + +secure_cluster: false +secure_cluster_flags: + - nopgchange + - nodelete + - nosizechange + +client_admin_ceph_authtool_cap: + mon: allow * + osd: allow * + mds: allow * + mgr: allow * + + +########## +# DOCKER # +########## + +# Resource limitation +# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints +# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations +ceph_mon_docker_memory_limit: 
"{{ ansible_facts['memtotal_mb'] }}m" +ceph_mon_docker_cpu_limit: 1 +ceph_mon_container_listen_port: 3300 + +# Use this variable to modify the configuration to run your mon container. +mon_docker_privileged: false +mon_docker_net_host: true +ceph_config_keys: [] # DON'T TOUCH ME +# If you want to add parameters, you should retain the existing ones and include the new ones. +ceph_mon_container_params: + volumes: + - /var/lib/ceph/mon:/var/lib/ceph/mon:z,rshared + args: + - -f + - --default-mon-cluster-log-to-file=false + - --default-mon-cluster-log-to-stderr=true + - -i={{ monitor_name }} + - --mon-data=/var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }} + - --public-addr={{ _monitor_addresses[inventory_hostname] }} + - --mon-initial-members={{ groups[mon_group_name] | map('extract', hostvars, 'ansible_facts') | map(attribute='hostname') | join(',') }} + + +########### +# SYSTEMD # +########### +# ceph_mon_systemd_overrides will override the systemd settings +# for the ceph-mon services. +# For example,to set "PrivateDevices=false" you can specify: +# ceph_mon_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-mon/files/ceph-mon.target b/roles/ceph-mon/files/ceph-mon.target new file mode 100644 index 0000000..b27d34b --- /dev/null +++ b/roles/ceph-mon/files/ceph-mon.target @@ -0,0 +1,8 @@ +[Unit] +Description=ceph target allowing to start/stop all ceph-mon@.service instances at once +PartOf=ceph.target +Before=ceph.target +Wants=ceph.target + +[Install] +WantedBy=multi-user.target ceph.target \ No newline at end of file diff --git a/roles/ceph-mon/meta/main.yml b/roles/ceph-mon/meta/main.yml new file mode 100644 index 0000000..6ade76f --- /dev/null +++ b/roles/ceph-mon/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Installs Ceph Monitor + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff 
--git a/roles/ceph-mon/tasks/ceph_keys.yml b/roles/ceph-mon/tasks/ceph_keys.yml new file mode 100644 index 0000000..02d002b --- /dev/null +++ b/roles/ceph-mon/tasks/ceph_keys.yml @@ -0,0 +1,32 @@ +--- +- name: Waiting for the monitor(s) to form the quorum... + ansible.builtin.command: > + {{ container_exec_cmd }} + ceph + --cluster {{ cluster }} + daemon mon.{{ ansible_facts['hostname'] }} + mon_status + --format json + register: ceph_health_raw + run_once: true + until: > + (ceph_health_raw.stdout | length > 0) and (ceph_health_raw.stdout | default('{}') | from_json)['state'] in ['leader', 'peon'] + retries: "{{ handler_health_mon_check_retries }}" + delay: "{{ handler_health_mon_check_delay }}" + changed_when: false + when: not ansible_check_mode + +- name: Fetch ceph initial keys + ceph_key: + state: fetch_initial_keys + cluster: "{{ cluster }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0400" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + CEPH_ROLLING_UPDATE: "{{ rolling_update }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + when: + - cephx | bool diff --git a/roles/ceph-mon/tasks/deploy_monitors.yml b/roles/ceph-mon/tasks/deploy_monitors.yml new file mode 100644 index 0000000..d9ff54d --- /dev/null +++ b/roles/ceph-mon/tasks/deploy_monitors.yml @@ -0,0 +1,201 @@ +--- +- name: Cephx related tasks + when: cephx | bool + block: + - name: Check if monitor initial keyring already exists + ceph_key_info: + name: mon. + cluster: "{{ cluster }}" + user: mon. 
+ user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_facts']['hostname'] }}/keyring" + output_format: json + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: initial_mon_key + run_once: true + delegate_to: "{{ running_mon }}" + failed_when: initial_mon_key.rc not in [0, 2] + no_log: "{{ no_log_on_ceph_key_tasks }}" + when: running_mon is defined + + - name: Generate monitor initial keyring + ceph_key: + state: generate_secret + register: monitor_keyring + delegate_to: localhost + become: false + run_once: true + no_log: "{{ no_log_on_ceph_key_tasks }}" + when: + - initial_mon_key is skipped + or + initial_mon_key is not succeeded + + - name: Set_fact _initial_mon_key_success + ansible.builtin.set_fact: # when initial_mon_key is registered above, `rc: 2` is considered success. + _initial_mon_key_success: "{{ initial_mon_key is not skipped and initial_mon_key.rc == 0 }}" + + - name: Get initial keyring when it already exists + ansible.builtin.set_fact: + monitor_keyring: "{{ (initial_mon_key.stdout | from_json)[0]['key'] if _initial_mon_key_success | bool else monitor_keyring.stdout }}" + when: initial_mon_key.stdout|default('')|length > 0 or monitor_keyring is not skipped + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Create monitor initial keyring + ceph_key: + name: mon. 
+ dest: "/var/lib/ceph/tmp/" + secret: "{{ monitor_keyring }}" + cluster: "{{ cluster }}" + caps: + mon: allow * + import_key: false + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0400" + no_log: "{{ no_log_on_ceph_key_tasks }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + + - name: Copy the initial key in /etc/ceph (for containers) + ansible.builtin.copy: + src: /var/lib/ceph/tmp/{{ cluster }}.mon..keyring + dest: /etc/ceph/{{ cluster }}.mon.keyring + remote_src: true + mode: "0640" + when: containerized_deployment | bool + +- name: Create monitor directory + ansible.builtin.file: + path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }} + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_directories_mode }}" + +# We don't do the recursion in the task above to avoid setting `mode` (which +# defaults to 0755) on files. +# +# This is only needed when upgrading from older versions of Ceph that used to +# run as `root` (https://github.com/ceph/ceph-ansible/issues/1635). 
+- name: Recursively fix ownership of monitor directory + ansible.builtin.file: + path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }} + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + recurse: true + +- name: Create admin keyring + ceph_authtool: + name: client.admin + path: "/etc/ceph/{{ cluster }}.client.admin.keyring" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0400" + caps: + mon: allow * + mgr: allow * + osd: allow * + mds: allow * + create_keyring: true + gen_key: "{{ True if admin_secret == 'admin_secret' else omit }}" + add_key: "{{ admin_secret if admin_secret != 'admin_secret' else omit }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + when: + - cephx | bool + + +- name: Slurp admin keyring + ansible.builtin.slurp: + src: "/etc/ceph/{{ cluster }}.client.admin.keyring" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + register: admin_keyring + +- name: Copy admin keyring over to mons + ansible.builtin.copy: + dest: "{{ admin_keyring.source }}" + content: "{{ admin_keyring.content | b64decode }}" + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + mode: "0600" + delegate_to: "{{ item }}" + loop: "{{ groups[mon_group_name] }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Import admin keyring into mon keyring + ceph_authtool: + path: "/var/lib/ceph/tmp/{{ cluster }}.mon..keyring" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" 
+ mode: "0400" + import_keyring: "/etc/ceph/{{ cluster }}.client.admin.keyring" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + when: + - cephx | bool + +- name: Set_fact ceph-mon container command + ansible.builtin.set_fact: + ceph_mon_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=ceph-mon ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' + ceph_client_docker_image_tag if containerized_deployment | bool else 'ceph-mon' }}" + +- name: Set_fact monmaptool container command + ansible.builtin.set_fact: + ceph_monmaptool_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=monmaptool ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' + ceph_client_docker_image_tag if containerized_deployment | bool else 'monmaptool' }}" + +- name: Generate initial monmap + ansible.builtin.command: > + {{ ceph_monmaptool_cmd }} + --create + {% for name, addr in _monitor_addresses.items() -%} + --addv + {{ hostvars[name]['ansible_facts']['hostname'] }} + {% if mon_host_v1.enabled | bool %} + {% set _v1 = ',v1:' + addr + mon_host_v1.suffix %} + {% endif %} + [{{ "v2:" + addr + mon_host_v2.suffix }}{{ _v1 | default('') }}] + {# {%- if not loop.last -%},{%- endif %} #} + {%- endfor %} + --enable-all-features + --clobber /etc/ceph/monmap + args: + creates: /etc/ceph/monmap + +- name: Ceph monitor mkfs with keyring + ansible.builtin.command: > + {{ ceph_mon_cmd }} + --cluster {{ cluster }} + --setuser "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + --setgroup "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + --mkfs + -i {{ monitor_name }} + --fsid {{ fsid }} + 
--keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring + --monmap /etc/ceph/monmap + args: + creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring + when: cephx | bool + +- name: Ceph monitor mkfs without keyring + ansible.builtin.command: > + {{ ceph_mon_cmd }} + --cluster {{ cluster }} + --setuser "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + --setgroup "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + --mkfs + -i {{ monitor_name }} + --fsid {{ fsid }} + args: + creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db + when: not cephx | bool diff --git a/roles/ceph-mon/tasks/main.yml b/roles/ceph-mon/tasks/main.yml new file mode 100644 index 0000000..e1d7c9b --- /dev/null +++ b/roles/ceph-mon/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: Set_fact container_exec_cmd + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" + when: containerized_deployment | bool + +- name: Include deploy_monitors.yml + ansible.builtin.include_tasks: deploy_monitors.yml + when: + # we test for both container and non-container + - (mon_socket is defined and mon_socket.get('rc') != 0) or (ceph_mon_container_stat is defined and ceph_mon_container_stat.get('stdout_lines', [])|length == 0) + - not switch_to_containers | default(False) | bool + +- name: Include start_monitor.yml + ansible.builtin.include_tasks: start_monitor.yml + +- name: Include_tasks ceph_keys.yml + ansible.builtin.include_tasks: ceph_keys.yml + when: not switch_to_containers | default(False) | bool + +- name: Include secure_cluster.yml + ansible.builtin.include_tasks: secure_cluster.yml + when: + - secure_cluster | bool + - inventory_hostname == groups[mon_group_name] | first + +- name: Set cluster configs + ceph_config: + action: set + who: "{{ item.0.key }}" + option: "{{ item.1.key }}" + value: "{{ item.1.value }}" + run_once: true + when: + - item.1.value != omit + loop: "{{ 
ceph_cluster_conf | dict2dict }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" diff --git a/roles/ceph-mon/tasks/secure_cluster.yml b/roles/ceph-mon/tasks/secure_cluster.yml new file mode 100644 index 0000000..19c3621 --- /dev/null +++ b/roles/ceph-mon/tasks/secure_cluster.yml @@ -0,0 +1,15 @@ +--- +- name: Collect all the pools + ansible.builtin.command: > + {{ container_exec_cmd }} rados --cluster {{ cluster }} lspools + changed_when: false + register: ceph_pools + check_mode: false + +- name: Secure the cluster + ansible.builtin.command: > + {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true + changed_when: false + with_nested: + - "{{ ceph_pools.stdout_lines | default([]) }}" + - "{{ secure_cluster_flags }}" diff --git a/roles/ceph-mon/tasks/start_monitor.yml b/roles/ceph-mon/tasks/start_monitor.yml new file mode 100644 index 0000000..98b6061 --- /dev/null +++ b/roles/ceph-mon/tasks/start_monitor.yml @@ -0,0 +1,33 @@ +--- +- name: Ensure systemd service override directory exists + ansible.builtin.file: + state: directory + path: "/etc/systemd/system/ceph-mon@.service.d/" + mode: "0755" + when: + - not containerized_deployment | bool + - ceph_mon_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + +- name: Add ceph-mon systemd service overrides + openstack.config_template.config_template: + src: "ceph-mon.service.d-overrides.j2" + dest: "/etc/systemd/system/ceph-mon@.service.d/ceph-mon-systemd-overrides.conf" + config_overrides: "{{ ceph_mon_systemd_overrides | default({}) }}" + config_type: "ini" + when: + - not containerized_deployment | bool + - ceph_mon_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + when: 
containerized_deployment | bool + +- name: Start the monitor service + ansible.builtin.systemd: + name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_facts['hostname'] }} + state: started + enabled: true + masked: false + daemon_reload: true diff --git a/roles/ceph-mon/tasks/systemd.yml b/roles/ceph-mon/tasks/systemd.yml new file mode 100644 index 0000000..8ae4ccf --- /dev/null +++ b/roles/ceph-mon/tasks/systemd.yml @@ -0,0 +1,23 @@ +--- +- name: Generate systemd unit file for mon container + ansible.builtin.template: + src: "{{ role_path }}/templates/ceph-mon.service.j2" + dest: /etc/systemd/system/ceph-mon@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph mons + +- name: Generate systemd ceph-mon target file + ansible.builtin.copy: + src: ceph-mon.target + dest: /etc/systemd/system/ceph-mon.target + mode: "0644" + when: containerized_deployment | bool + +- name: Enable ceph-mon.target + ansible.builtin.service: + name: ceph-mon.target + enabled: true + daemon_reload: true + when: containerized_deployment | bool diff --git a/roles/ceph-mon/templates/ceph-mon.service.d-overrides.j2 b/roles/ceph-mon/templates/ceph-mon.service.d-overrides.j2 new file mode 100644 index 0000000..e2bb153 --- /dev/null +++ b/roles/ceph-mon/templates/ceph-mon.service.d-overrides.j2 @@ -0,0 +1 @@ +# {{ ansible_managed }} diff --git a/roles/ceph-mon/templates/ceph-mon.service.j2 b/roles/ceph-mon/templates/ceph-mon.service.j2 new file mode 100644 index 0000000..0f29470 --- /dev/null +++ b/roles/ceph-mon/templates/ceph-mon.service.j2 @@ -0,0 +1,66 @@ +[Unit] +Description=Ceph Monitor +PartOf=ceph-mon.target +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] 
+EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mon-%i +ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph +{% else %} +ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mon-%i +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mon-%i +ExecStartPre=/bin/sh -c '"$(command -v mkdir)" -p /etc/ceph /var/lib/ceph/mon' +ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-mon-%i \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} + --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ + --memory={{ ceph_mon_docker_memory_limit }} \ + --cpus={{ ceph_mon_docker_cpu_limit }} \ + --security-opt label=disable \ +{% for v in ceph_common_container_params['volumes'] + ceph_mon_container_params['volumes'] | default([]) %} + -v {{ v }} \ +{% endfor %} +{% if ansible_facts['os_family'] == 'RedHat' -%} + -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted \ +{% endif -%} +{% if mon_docker_privileged | bool -%} + --privileged \ +{% endif -%} +{% if mon_docker_net_host | bool -%} + --net=host \ +{% endif -%} +{% for k, v in (ceph_common_container_params['envs'] | combine(ceph_mon_container_params['envs'] | default({}))).items() %} + -e {{ k }}={{ v }} \ +{% endfor %} + --entrypoint=/usr/bin/ceph-mon \ + {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ + {{ (ceph_common_container_params['args'] + ceph_mon_container_params['args'] | default([])) | join(' ') }} +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStop=-/usr/bin/{{ container_binary }} stop ceph-mon-%i +{% endif %} +ExecStopPost=-/bin/rm -f /var/run/ceph/{{ cluster }}-mon.{{ monitor_name }}.asok +KillMode=none +Restart=always +RestartSec=10s 
+TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=ceph.target diff --git a/roles/ceph-nfs/LICENSE b/roles/ceph-nfs/LICENSE new file mode 100644 index 0000000..4953f91 --- /dev/null +++ b/roles/ceph-nfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2016] [Red Hat, Inc.] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-nfs/README.md b/roles/ceph-nfs/README.md new file mode 100644 index 0000000..b58db56 --- /dev/null +++ b/roles/ceph-nfs/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-nfs + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-nfs/defaults/main.yml b/roles/ceph-nfs/defaults/main.yml new file mode 100644 index 0000000..5cfbe22 --- /dev/null +++ b/roles/ceph-nfs/defaults/main.yml @@ -0,0 +1,122 @@ +--- +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +# Even though NFS nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on RGW nodes. 
Setting 'copy_admin_key' to 'true' +# will copy the admin key to the /etc/ceph/ directory +copy_admin_key: false + +# Whether docker container or systemd service should be enabled +# and started, it's useful to set it to false if nfs-ganesha +# service is managed by pacemaker +ceph_nfs_enable_service: true + +# ceph-nfs systemd service uses ansible's hostname as an instance id, +# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not +# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in +# such case it's better to have constant instance id instead which +# can be set by 'ceph_nfs_service_suffix' +# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}" + +###################### +# NFS Ganesha Config # +###################### +ceph_nfs_log_file: "/var/log/ganesha/ganesha.log" +ceph_nfs_dynamic_exports: false +# If set to true then rados is used to store ganesha exports +# and client sessions information, this is useful if you +# run multiple nfs-ganesha servers in active/passive mode and +# want to do failover +ceph_nfs_rados_backend: false +# Name of the rados object used to store a list of the export rados +# object URLS +ceph_nfs_rados_export_index: "ganesha-export-index" +# Address ganesha service should listen on, by default ganesha listens on all +# addresses. (Note: ganesha ignores this parameter in current version due to +# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217) +# ceph_nfs_bind_addr: 0.0.0.0 + +# If set to true, then ganesha's attribute and directory caching is disabled +# as much as possible. Currently, ganesha caches by default. +# When using ganesha as CephFS's gateway, it is recommended to turn off +# ganesha's caching as the libcephfs clients also cache the same information. +# Note: Irrespective of this option's setting, ganesha's caching is disabled +# when setting 'nfs_file_gw' option as true. 
+ceph_nfs_disable_caching: false + +# This is the file ganesha will use to control NFSv4 ID mapping +ceph_nfs_idmap_conf: "/etc/ganesha/idmap.conf" + +# idmap configuration file override. +# This allows you to specify more configuration options +# using an INI style format. +# Example: +# idmap_conf_overrides: +# General: +# Domain: foo.domain.net +idmap_conf_overrides: {} + +#################### +# FSAL Ceph Config # +#################### +ceph_nfs_ceph_export_id: 20133 +ceph_nfs_ceph_pseudo_path: "/cephfile" +ceph_nfs_ceph_protocols: "3,4" +ceph_nfs_ceph_access_type: "RW" +ceph_nfs_ceph_user: "admin" +ceph_nfs_ceph_squash: "Root_Squash" +ceph_nfs_ceph_sectype: "sys,krb5,krb5i,krb5p" + +################### +# FSAL RGW Config # +################### +ceph_nfs_rgw_export_id: 20134 +ceph_nfs_rgw_pseudo_path: "/cephobject" +ceph_nfs_rgw_protocols: "3,4" +ceph_nfs_rgw_access_type: "RW" +ceph_nfs_rgw_user: "cephnfs" +ceph_nfs_rgw_squash: "Root_Squash" +ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p" +# Note: keys are optional and can be generated, but not on containerized, where +# they must be configured. +# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY" +# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C" +rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }} + +################### +# CONFIG OVERRIDE # +################### + +# Ganesha configuration file override. 
+# These multiline strings will be appended to the contents of the blocks in ganesha.conf and +# must be in the correct ganesha.conf format seen here: +# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example +# +# Example: +# CACHEINODE { + # Entries_HWMark = 100000; +# } +# +# ganesha_core_param_overrides: +# ganesha_ceph_export_overrides: +# ganesha_rgw_export_overrides: +# ganesha_rgw_section_overrides: +# ganesha_log_overrides: +# ganesha_conf_overrides: | +# CACHEINODE { + # Entries_HWMark = 100000; +# } + +########## +# DOCKER # +########## + +ceph_docker_image: "ceph/daemon" +ceph_docker_image_tag: latest +ceph_nfs_docker_extra_env: +ceph_config_keys: [] # DON'T TOUCH ME diff --git a/roles/ceph-nfs/meta/main.yml b/roles/ceph-nfs/meta/main.yml new file mode 100644 index 0000000..53a6746 --- /dev/null +++ b/roles/ceph-nfs/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Daniel Gryniewicz + description: Installs Ceph NFS Gateway + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml new file mode 100644 index 0000000..587e3b2 --- /dev/null +++ b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml @@ -0,0 +1,23 @@ +--- +- name: Create rgw nfs user "{{ ceph_nfs_rgw_user }}" + radosgw_user: + name: "{{ ceph_nfs_rgw_user }}" + cluster: "{{ cluster }}" + display_name: "RGW NFS User" + access_key: "{{ ceph_nfs_rgw_access_key | default(omit) }}" + secret_key: "{{ ceph_nfs_rgw_secret_key | default(omit) }}" + run_once: true + register: rgw_nfs_user + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + when: nfs_obj_gw | bool + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + 
CEPH_CONTAINER_BINARY: "{{ container_binary }}" + +- name: Set_fact ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key + ansible.builtin.set_fact: + ceph_nfs_rgw_access_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['access_key'] }}" + ceph_nfs_rgw_secret_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['secret_key'] }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + when: nfs_obj_gw | bool diff --git a/roles/ceph-nfs/tasks/main.yml b/roles/ceph-nfs/tasks/main.yml new file mode 100644 index 0000000..acec885 --- /dev/null +++ b/roles/ceph-nfs/tasks/main.yml @@ -0,0 +1,96 @@ +--- +# global/common requirement +- name: Stop nfs server service + ansible.builtin.systemd: + name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}" + state: stopped + enabled: false + failed_when: false + +- name: Include pre_requisite_non_container.yml + ansible.builtin.include_tasks: pre_requisite_non_container.yml + when: not containerized_deployment | bool + +- name: Include pre_requisite_container.yml + ansible.builtin.include_tasks: pre_requisite_container.yml + when: containerized_deployment | bool + +- name: Set_fact _rgw_hostname + ansible.builtin.set_fact: + _rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}" + +- name: Set rgw parameter (log file) + ceph_config: + action: set + who: "client.rgw.{{ _rgw_hostname }}" + option: "log file" + value: "/var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}.log" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + loop: "{{ groups.get('nfss', []) }}" + +- name: Include create_rgw_nfs_user.yml + ansible.builtin.import_tasks: create_rgw_nfs_user.yml + when: 
groups.get(mon_group_name, []) | length > 0 + +- name: Install nfs-ganesha-selinux on RHEL 8 + ansible.builtin.package: + name: nfs-ganesha-selinux + state: present + register: result + until: result is succeeded + when: + - not containerized_deployment | bool + - inventory_hostname in groups.get(nfs_group_name, []) + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['distribution_major_version'] == '8' + +# NOTE (leseb): workaround for issues with ganesha and librgw +- name: Add ganesha_t to permissive domain + community.general.selinux_permissive: + name: ganesha_t + permissive: true + failed_when: false + when: + - not containerized_deployment | bool + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['selinux']['status'] == 'enabled' + +- name: Nfs with external ceph cluster task related + when: + - groups.get(mon_group_name, []) | length == 0 + - ceph_nfs_ceph_user is defined + block: + - name: Create keyring directory + ansible.builtin.file: + path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ item }}" + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0755" + with_items: + - "{{ ceph_nfs_ceph_user }}" + - "{{ ansible_facts['hostname'] }}" + + - name: Set_fact rgw_client_name + ansible.builtin.set_fact: + rgw_client_name: "client.rgw.{{ ceph_nfs_ceph_user }}" + + - name: Get client cephx keys + ansible.builtin.copy: + dest: "{{ item.1 }}" + content: "{{ item.0.content | b64decode }}" + mode: "{{ item.0.item.get('mode', '0600') }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + with_nested: + - "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] | default([]) }}" + - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ 
ansible_facts['hostname'] }}/keyring"] + when: + - not item.0.get('skipped', False) + - item.0.item.name == 'client.' + ceph_nfs_ceph_user or item.0.item.name == rgw_client_name + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Include start_nfs.yml + ansible.builtin.import_tasks: start_nfs.yml diff --git a/roles/ceph-nfs/tasks/pre_requisite_container.yml b/roles/ceph-nfs/tasks/pre_requisite_container.yml new file mode 100644 index 0000000..747e09d --- /dev/null +++ b/roles/ceph-nfs/tasks/pre_requisite_container.yml @@ -0,0 +1,108 @@ +--- +- name: Keyring related tasks + when: groups.get(mon_group_name, []) | length > 0 + block: + - name: Set_fact container_exec_cmd + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" + with_items: "{{ groups.get(mon_group_name, []) }}" + delegate_to: "{{ item }}" + delegate_facts: true + run_once: true + + - name: Create directories + ansible.builtin.file: + path: "{{ item.0 }}" + state: "directory" + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + mode: "0755" + delegate_to: "{{ item.1 }}" + with_nested: + - ["/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", + "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}"] + - ["{{ groups.get(mon_group_name)[0] }}", "{{ inventory_hostname }}"] + + - name: Set_fact keyrings_list + ansible.builtin.set_fact: + keyrings_list: + - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" } + - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" } + - { name: "client.rgw.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "allow r", "osd": "allow rwx tag rgw *=*"} } + - { name: "client.nfs.{{ ansible_facts['hostname'] }}", create: true, 
path: "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "r", "osd": "allow rw pool=.nfs"} } + + - name: Create keyrings from a monitor + ceph_key: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + dest: "{{ item.path }}" + caps: "{{ item.caps }}" + import_key: true + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + mode: "0600" + no_log: "{{ no_log_on_ceph_key_tasks }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + loop: "{{ keyrings_list }}" + when: + - cephx | bool + - item.create | default(False) | bool + + - name: Get keys from monitors + ceph_key_info: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _rgw_keys + loop: "{{ keyrings_list }}" + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + when: + - cephx | bool + - item.copy_key | default(True) | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Debug + ansible.builtin.debug: + msg: "{{ _rgw_keys }}" + + - name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ item.item.path }}" + content: "{{ item.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + with_items: "{{ _rgw_keys.results }}" + when: + - cephx | bool + - item.item.copy_key | default(True) | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Dbus related tasks + when: ceph_nfs_dynamic_exports | bool + 
block: + - name: Get file + ansible.builtin.command: "{{ container_binary }} run --rm --entrypoint=cat {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} /etc/dbus-1/system.d/org.ganesha.nfsd.conf" + register: dbus_ganesha_file + run_once: true + changed_when: false + + - name: Create dbus service file + ansible.builtin.copy: + content: "{{ dbus_ganesha_file.stdout }}" + dest: /etc/dbus-1/system.d/org.ganesha.nfsd.conf + owner: "root" + group: "root" + mode: "0644" + + - name: Reload dbus configuration + ansible.builtin.command: "killall -SIGHUP dbus-daemon" + changed_when: false diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml new file mode 100644 index 0000000..95385b7 --- /dev/null +++ b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml @@ -0,0 +1,96 @@ +--- +- name: Include red hat based system related tasks + ansible.builtin.include_tasks: pre_requisite_non_container_red_hat.yml + when: ansible_facts['os_family'] == 'RedHat' + +- name: Include debian based system related tasks + ansible.builtin.include_tasks: pre_requisite_non_container_debian.yml + when: ansible_facts['os_family'] == 'Debian' + +- name: Install nfs rgw/cephfs gateway - SUSE/openSUSE + community.general.zypper: + name: "{{ item.name }}" + disable_gpg_check: true + with_items: + - { name: 'nfs-ganesha-rgw', install: "{{ nfs_obj_gw }}" } + - { name: 'radosgw', install: "{{ nfs_obj_gw }}" } + - { name: 'nfs-ganesha-ceph', install: "{{ nfs_file_gw }}" } + when: + - (ceph_origin == 'repository' or ceph_origin == 'distro') + - ansible_facts['os_family'] == 'Suse' + - item.install | bool + register: result + until: result is succeeded + +# NOTE (leseb): we use root:ceph for permissions since ganesha +# does not have the right selinux context to read ceph directories. 
+- name: Create rados gateway and ganesha directories + ansible.builtin.file: + path: "{{ item.name }}" + state: directory + owner: "{{ item.owner | default('ceph') }}" + group: "{{ item.group | default('ceph') }}" + mode: "{{ ceph_directories_mode }}" + with_items: + - { name: "/var/lib/ceph/bootstrap-rgw", create: "{{ nfs_obj_gw }}" } + - { name: "/var/lib/ceph/radosgw", create: "{{ nfs_obj_gw }}" } + - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", create: "{{ nfs_obj_gw }}" } + - { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" } + - { name: "/var/log/ceph", create: true } + - { name: "/var/log/ganesha", create: true, owner: root, group: root } + - { name: "/var/run/ceph", create: true } + when: item.create | bool + +- name: Cephx related tasks + when: + - cephx | bool + - groups.get(mon_group_name, []) | length > 0 + block: + - name: Get keys from monitors + ceph_key_info: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + register: _rgw_keys + with_items: + - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" } + - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" } + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + when: + - cephx | bool + - item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ item.item.path }}" + content: "{{ item.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + with_items: "{{ _rgw_keys.results }}" + when: + - cephx | bool + - item.item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Nfs object gateway related tasks + when: 
nfs_obj_gw | bool + block: + - name: Create rados gateway keyring + ceph_key: + name: "client.rgw.{{ ansible_facts['hostname'] }}" + cluster: "{{ cluster }}" + user: client.bootstrap-rgw + user_key: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring" + caps: + mon: "allow rw" + osd: "allow rwx" + dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring" + import_key: false + owner: ceph + group: ceph + mode: "{{ ceph_keyring_permissions }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml new file mode 100644 index 0000000..b0848f8 --- /dev/null +++ b/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml @@ -0,0 +1,80 @@ +--- +- name: Debian based systems - repo handling + when: ceph_origin == 'repository' + block: + - name: Stable repos specific tasks + when: + - nfs_ganesha_stable | bool + - ceph_repository == 'community' + block: + - name: Add nfs-ganesha stable repository + ansible.builtin.apt_repository: + repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main" + state: present + update_cache: false + register: add_ganesha_apt_repo + + - name: Add libntirpc stable repository + ansible.builtin.apt_repository: + repo: "deb {{ libntirpc_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main" + state: present + update_cache: false + register: add_libntirpc_apt_repo + when: libntirpc_stable_deb_repo is defined + + - name: Add nfs-ganesha ppa apt key + ansible.builtin.apt_key: + keyserver: "{{ nfs_ganesha_apt_keyserver }}" + id: "{{ nfs_ganesha_apt_key_id }}" + when: + - nfs_ganesha_apt_key_id is defined + - nfs_ganesha_apt_keyserver is defined + + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + register: update_ganesha_apt_cache + retries: 5 + delay: 2 + 
until: update_ganesha_apt_cache is success + when: add_ganesha_apt_repo is changed or add_libntirpc_apt_repo is changed + +- name: Debian based systems - dev repos specific tasks + when: + - nfs_ganesha_dev | bool + - ceph_repository == 'dev' + block: + - name: Fetch nfs-ganesha development repository + ansible.builtin.uri: + url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}" + return_content: true + register: nfs_ganesha_dev_apt_repo + + - name: Add nfs-ganesha development repository + ansible.builtin.copy: + content: "{{ nfs_ganesha_dev_apt_repo.content }}" + dest: /etc/apt/sources.list.d/nfs-ganesha-dev.list + owner: root + group: root + backup: true + mode: "0644" + +- name: Debian based systems - install required packages + block: + - name: Debian based systems + when: ceph_origin == 'repository' or ceph_origin == 'distro' + block: + - name: Install nfs rgw/cephfs gateway - debian + ansible.builtin.apt: + name: ['nfs-ganesha-rgw', 'radosgw'] + allow_unauthenticated: true + register: result + until: result is succeeded + when: nfs_obj_gw | bool + - name: Install nfs rgw/cephfs gateway - debian + ansible.builtin.apt: + name: nfs-ganesha-ceph + allow_unauthenticated: true + register: result + until: result is succeeded + when: nfs_file_gw | bool diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml new file mode 100644 index 0000000..92a4448 --- /dev/null +++ b/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml @@ -0,0 +1,43 @@ +--- +- name: Red hat based systems - repo handling + when: ceph_origin == 'repository' + block: + - name: Red hat based systems - stable repo related tasks + when: + - nfs_ganesha_stable | bool + - ceph_repository == 'community' + block: + - name: Add nfs-ganesha 
stable repository + ansible.builtin.package: + name: "{{ centos_release_nfs }}" + state: present + + - name: Red hat based systems - dev repo related tasks + when: + - nfs_ganesha_dev | bool + - ceph_repository == 'dev' + block: + - name: Add nfs-ganesha dev repo + ansible.builtin.get_url: + url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}" + dest: /etc/yum.repos.d/nfs-ganesha-dev.repo + mode: "0644" + force: true + +- name: Red hat based systems - install nfs packages + block: + - name: Install nfs cephfs gateway + ansible.builtin.package: + name: ['nfs-ganesha-ceph', 'nfs-ganesha-rados-grace'] + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + register: result + until: result is succeeded + when: nfs_file_gw | bool + + - name: Install redhat nfs-ganesha-rgw and ceph-radosgw packages + ansible.builtin.package: + name: ['nfs-ganesha-rgw', 'nfs-ganesha-rados-grace', 'nfs-ganesha-rados-urls', 'ceph-radosgw'] + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" + register: result + until: result is succeeded + when: nfs_obj_gw | bool diff --git a/roles/ceph-nfs/tasks/start_nfs.yml b/roles/ceph-nfs/tasks/start_nfs.yml new file mode 100644 index 0000000..45e7a26 --- /dev/null +++ b/roles/ceph-nfs/tasks/start_nfs.yml @@ -0,0 +1,105 @@ +--- +- name: Nfs various pre-requisites tasks + block: + - name: Set_fact exec_cmd_nfs - external + ansible.builtin.set_fact: + exec_cmd_nfs: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }} -n client.{{ ceph_nfs_ceph_user }} -k /var/lib/ceph/radosgw/{{ cluster 
}}-rgw.{{ ceph_nfs_ceph_user }}/keyring" + delegate_node: "{{ inventory_hostname }}" + when: groups.get(mon_group_name, []) | length == 0 + + - name: Set_fact exec_cmd_nfs - internal + ansible.builtin.set_fact: + exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados" + delegate_node: "{{ groups[mon_group_name][0] }}" + when: groups.get(mon_group_name, []) | length > 0 + + - name: Check if rados index object exists + ansible.builtin.shell: "set -o pipefail && {{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls | grep {{ ceph_nfs_rados_export_index }}" + changed_when: false + failed_when: false + register: rados_index_exists + check_mode: false + when: ceph_nfs_rados_backend | bool + delegate_to: "{{ delegate_node }}" + run_once: true + + - name: Create an empty rados index object + ansible.builtin.command: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null" + when: + - ceph_nfs_rados_backend | bool + - rados_index_exists.rc != 0 + delegate_to: "{{ delegate_node }}" + changed_when: false + run_once: true + +- name: Create /etc/ganesha + ansible.builtin.file: + path: /etc/ganesha + state: directory + owner: root + group: root + mode: "0755" + +- name: Generate ganesha configuration file + ansible.builtin.template: + src: "ganesha.conf.j2" + dest: /etc/ganesha/ganesha.conf + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph nfss + +- name: Generate ganesha idmap.conf file + openstack.config_template.config_template: + src: "idmap.conf.j2" + dest: "{{ ceph_nfs_idmap_conf }}" + owner: "root" + group: "root" + mode: "0644" + config_overrides: "{{ idmap_conf_overrides }}" + config_type: ini + notify: Restart ceph nfss + +- name: Create exports directory + ansible.builtin.file: + path: /etc/ganesha/export.d + state: 
directory + owner: "root" + group: "root" + mode: "0755" + when: ceph_nfs_dynamic_exports | bool + +- name: Create exports dir index file + ansible.builtin.copy: + content: "" + force: false + dest: /etc/ganesha/export.d/INDEX.conf + owner: "root" + group: "root" + mode: "0644" + when: ceph_nfs_dynamic_exports | bool + +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + when: containerized_deployment | bool + +- name: Systemd start nfs container + ansible.builtin.systemd: + name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} + state: started + enabled: true + masked: false + daemon_reload: true + when: + - containerized_deployment | bool + - ceph_nfs_enable_service | bool + +- name: Start nfs gateway service + ansible.builtin.systemd: + name: nfs-ganesha + state: started + enabled: true + masked: false + when: + - not containerized_deployment | bool + - ceph_nfs_enable_service | bool diff --git a/roles/ceph-nfs/tasks/systemd.yml b/roles/ceph-nfs/tasks/systemd.yml new file mode 100644 index 0000000..1534cf4 --- /dev/null +++ b/roles/ceph-nfs/tasks/systemd.yml @@ -0,0 +1,9 @@ +--- +- name: Generate systemd unit file + ansible.builtin.template: + src: "{{ role_path }}/templates/ceph-nfs.service.j2" + dest: /etc/systemd/system/ceph-nfs@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph nfss diff --git a/roles/ceph-nfs/templates/ceph-nfs.service.j2 b/roles/ceph-nfs/templates/ceph-nfs.service.j2 new file mode 100644 index 0000000..663faed --- /dev/null +++ b/roles/ceph-nfs/templates/ceph-nfs.service.j2 @@ -0,0 +1,56 @@ +[Unit] +Description=NFS-Ganesha file server +Documentation=http://github.com/nfs-ganesha/nfs-ganesha/wiki +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target 
local-fs.target time-sync.target + +[Service] +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-nfs-%i +ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph /var/log/ganesha +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i +ExecStartPre={{ '/bin/mkdir' if ansible_facts['os_family'] == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha +ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} +--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ +-v /etc/ceph:/etc/ceph:z \ +-v /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring:z \ +-v /var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring:/etc/ceph/keyring:z \ +-v /etc/ganesha:/etc/ganesha:z \ +-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \ +-v /var/run/ceph:/var/run/ceph:z \ +-v /var/log/ceph:/var/log/ceph:z \ +-v /var/log/ganesha:/var/log/ganesha:z \ +-v /etc/localtime:/etc/localtime:ro \ +{{ ceph_nfs_docker_extra_env }} \ +--entrypoint=/usr/bin/ganesha.nfsd \ +--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \ +{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ +-F -L STDOUT +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-nfs-%i +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif 
%} + +[Install] +WantedBy=multi-user.target diff --git a/roles/ceph-nfs/templates/ganesha.conf.j2 b/roles/ceph-nfs/templates/ganesha.conf.j2 new file mode 100644 index 0000000..7e6fab6 --- /dev/null +++ b/roles/ceph-nfs/templates/ganesha.conf.j2 @@ -0,0 +1,124 @@ +#jinja2: trim_blocks: "true", lstrip_blocks: "true" +# {{ ansible_managed }} + +{% if ceph_nfs_dynamic_exports | bool and not ceph_nfs_rados_backend | bool %} +%include /etc/ganesha/export.d/INDEX.conf +{% endif %} + +NFS_Core_Param +{ +{% if ceph_nfs_bind_addr is defined %} + Bind_Addr={{ ceph_nfs_bind_addr }}; +{% endif %} +{{ ganesha_core_param_overrides | default(None) }} +} + +{% if ceph_nfs_disable_caching | bool or nfs_file_gw | bool %} +EXPORT_DEFAULTS { + Attr_Expiration_Time = 0; +} + +CACHEINODE { + Dir_Chunk = 0; + + NParts = 1; + Cache_Size = 1; +} +{% endif %} + +{% if ceph_nfs_rados_backend | bool %} +RADOS_URLS { + ceph_conf = '/etc/ceph/{{ cluster }}.conf'; + userid = "{{ ceph_nfs_ceph_user }}"; +} +%url rados://{{ cephfs_data_pool.name }}/{{ ceph_nfs_rados_export_index }} + +NFSv4 { + RecoveryBackend = 'rados_kv'; + IdmapConf = "{{ ceph_nfs_idmap_conf }}"; +} +RADOS_KV { + ceph_conf = '/etc/ceph/{{ cluster }}.conf'; + userid = "{{ ceph_nfs_ceph_user }}"; + pool = "{{ cephfs_data_pool.name }}"; +} +{% endif %} + +{% if nfs_file_gw | bool %} +EXPORT +{ + Export_id={{ ceph_nfs_ceph_export_id }}; + + Path = "/"; + + Pseudo = {{ ceph_nfs_ceph_pseudo_path }}; + + Access_Type = {{ ceph_nfs_ceph_access_type }}; + + Protocols = {{ ceph_nfs_ceph_protocols }}; + + Transports = TCP; + + SecType = {{ ceph_nfs_ceph_sectype }}; + + Squash = {{ ceph_nfs_ceph_squash }}; + + Attr_Expiration_Time = 0; + + FSAL { + Name = CEPH; + User_Id = "{{ ceph_nfs_ceph_user }}"; + } + + {{ ganesha_ceph_export_overrides | default(None) }} +} +{% endif %} +{% if nfs_obj_gw | bool %} +EXPORT +{ + Export_id={{ ceph_nfs_rgw_export_id }}; + + Path = "/"; + + Pseudo = {{ ceph_nfs_rgw_pseudo_path }}; + + Access_Type = {{ 
ceph_nfs_rgw_access_type }}; + + Protocols = {{ ceph_nfs_rgw_protocols }}; + + Transports = TCP; + + SecType = {{ ceph_nfs_rgw_sectype }}; + + Squash = {{ ceph_nfs_rgw_squash }}; + + FSAL { + Name = RGW; + User_Id = "{{ ceph_nfs_rgw_user }}"; + Access_Key_Id ="{{ ceph_nfs_rgw_access_key }}"; + Secret_Access_Key = "{{ ceph_nfs_rgw_secret_key }}"; + } + + {{ ganesha_rgw_export_overrides | default(None) }} + +} + +RGW { + ceph_conf = "/etc/ceph/{{ cluster }}.conf"; + cluster = "{{ cluster }}"; + name = "{{ rgw_client_name }}"; + {{ ganesha_rgw_section_overrides | default(None) }} +} +{% endif %} + +LOG { + Facility { + name = FILE; + destination = "{{ ceph_nfs_log_file }}"; + enable = active; + } + + {{ ganesha_log_overrides | default(None) }} +} + +{{ ganesha_conf_overrides | default(None) }} diff --git a/roles/ceph-nfs/templates/idmap.conf.j2 b/roles/ceph-nfs/templates/idmap.conf.j2 new file mode 100644 index 0000000..d052232 --- /dev/null +++ b/roles/ceph-nfs/templates/idmap.conf.j2 @@ -0,0 +1,137 @@ +[General] +#Verbosity = 0 +# The following should be set to the local NFSv4 domain name +# The default is the host's DNS domain name. +#Domain = local.domain.edu + +# In multi-domain environments, some NFS servers will append the identity +# management domain to the owner and owner_group in lieu of a true NFSv4 +# domain. This option can facilitate lookups in such environments. If +# set to a value other than "none", the nsswitch plugin will first pass +# the name to the password/group lookup function without stripping the +# domain off. If that mapping fails then the plugin will try again using +# the old method (comparing the domain in the string to the Domain value, +# stripping it if it matches, and passing the resulting short name to the +# lookup function). Valid values are "user", "group", "both", and +# "none". The default is "none". +#No-Strip = none + +# Winbind has a quirk whereby doing a group lookup in UPN format +# (e.g. 
staff@americas.example.com) will cause the group to be +# displayed prefixed with the full domain in uppercase +# (e.g. AMERICAS.EXAMPLE.COM\staff) instead of in the familiar netbios +# name format (e.g. AMERICAS\staff). Setting this option to true +# causes the name to be reformatted before passing it to the group +# lookup function in order to work around this. This setting is +# ignored unless No-Strip is set to either "both" or "group". +# The default is "false". +#Reformat-Group = false + +# The following is a comma-separated list of Kerberos realm +# names that should be considered to be equivalent to the +# local realm, such that @REALM.A can be assumed to +# be the same user as @REALM.B +# If not specified, the default local realm is the domain name, +# which defaults to the host's DNS domain name, +# translated to upper-case. +# Note that if this value is specified, the local realm name +# must be included in the list! +#Local-Realms = + +[Mapping] + +#Nobody-User = nobody +#Nobody-Group = nobody + +[Translation] + +# Translation Method is an comma-separated, ordered list of +# translation methods that can be used. Distributed methods +# include "nsswitch", "umich_ldap", and "static". Each method +# is a dynamically loadable plugin library. +# New methods may be defined and inserted in the list. +# The default is "nsswitch". +#Method = nsswitch + +# Optional. This is a comma-separated, ordered list of +# translation methods to be used for translating GSS +# authenticated names to ids. +# If this option is omitted, the same methods as those +# specified in "Method" are used. +#GSS-Methods = + +#-------------------------------------------------------------------# +# The following are used only for the "static" Translation Method. 
+#-------------------------------------------------------------------# +[Static] + +# A "static" list of GSS-Authenticated names to +# local user name mappings + +#someuser@REALM = localuser + + +#-------------------------------------------------------------------# +# The following are used only for the "umich_ldap" Translation Method. +#-------------------------------------------------------------------# + +[UMICH_SCHEMA] + +# server information (REQUIRED) +LDAP_server = ldap-server.local.domain.edu + +# the default search base (REQUIRED) +LDAP_base = dc=local,dc=domain,dc=edu + +#-----------------------------------------------------------# +# The remaining options have defaults (as shown) +# and are therefore not required. +#-----------------------------------------------------------# + +# whether or not to perform canonicalization on the +# name given as LDAP_server +#LDAP_canonicalize_name = true + +# absolute search base for (people) accounts +#LDAP_people_base = + +# absolute search base for groups +#LDAP_group_base = + +# Set to true to enable SSL - anything else is not enabled +#LDAP_use_ssl = false + +# You must specify a CA certificate location if you enable SSL +#LDAP_ca_cert = /etc/ldapca.cert + +# Objectclass mapping information + +# Mapping for the person (account) object class +#NFSv4_person_objectclass = NFSv4RemotePerson + +# Mapping for the nfsv4name attribute the person object +#NFSv4_name_attr = NFSv4Name + +# Mapping for the UID number +#NFSv4_uid_attr = UIDNumber + +# Mapping for the GSSAPI Principal name +#GSS_principal_attr = GSSAuthName + +# Mapping for the account name attribute (usually uid) +# The value for this attribute must match the value of +# the group member attribute - NFSv4_member_attr +#NFSv4_acctname_attr = uid + +# Mapping for the group object class +#NFSv4_group_objectclass = NFSv4RemoteGroup + +# Mapping for the GID attribute +#NFSv4_gid_attr = GIDNumber + +# Mapping for the Group NFSv4 name +#NFSv4_group_attr = NFSv4Name + 
+# Mapping for the Group member attribute (usually memberUID) +# The value of this attribute must match the value of NFSv4_acctname_attr +#NFSv4_member_attr = memberUID \ No newline at end of file diff --git a/roles/ceph-nfs/templates/systemd-run.j2 b/roles/ceph-nfs/templates/systemd-run.j2 new file mode 100644 index 0000000..868cd19 --- /dev/null +++ b/roles/ceph-nfs/templates/systemd-run.j2 @@ -0,0 +1,27 @@ +#!/bin/sh +T=$1 +N=$2 + +# start nfs-ganesha +/usr/bin/{{ container_binary }} run --rm --net=host \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} +--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ +-v /var/lib/ceph:/var/lib/ceph:z \ +-v /etc/ceph:/etc/ceph:z \ +-v /var/lib/nfs/ganesha:/var/lib/nfs/ganesha:z \ +-v /etc/ganesha:/etc/ganesha:z \ +-v /var/run/ceph:/var/run/ceph:z \ +-v /var/log/ceph:/var/log/ceph:z \ +-v /var/log/ganesha:/var/log/ganesha:z \ +{% if ceph_nfs_dynamic_exports | bool %} +--privileged \ +-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \ +{% endif -%} +-v /etc/localtime:/etc/localtime:ro \ +{{ ceph_nfs_docker_extra_env }} \ +--entrypoint=/usr/bin/ganesha.nfsd \ +--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \ +{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} +-F -L STDOUT "${GANESHA_EPOCH}" diff --git a/roles/ceph-node-exporter/meta/main.yml b/roles/ceph-node-exporter/meta/main.yml new file mode 100644 index 0000000..8357e97 --- /dev/null +++ b/roles/ceph-node-exporter/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Boris Ranto + description: Configures Prometheus Node Exporter + license: Apache + min_ansible_version: '2.4' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-node-exporter/tasks/main.yml b/roles/ceph-node-exporter/tasks/main.yml new file mode 
100644 index 0000000..251ffe7 --- /dev/null +++ b/roles/ceph-node-exporter/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: Include setup_container.yml + ansible.builtin.include_tasks: setup_container.yml diff --git a/roles/ceph-node-exporter/tasks/setup_container.yml b/roles/ceph-node-exporter/tasks/setup_container.yml new file mode 100644 index 0000000..7ab311d --- /dev/null +++ b/roles/ceph-node-exporter/tasks/setup_container.yml @@ -0,0 +1,11 @@ +--- +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + +- name: Start the node_exporter service + ansible.builtin.systemd: + name: node_exporter + state: started + enabled: true + daemon_reload: true + failed_when: false diff --git a/roles/ceph-node-exporter/tasks/systemd.yml b/roles/ceph-node-exporter/tasks/systemd.yml new file mode 100644 index 0000000..0581b54 --- /dev/null +++ b/roles/ceph-node-exporter/tasks/systemd.yml @@ -0,0 +1,8 @@ +--- +- name: Ship systemd service + ansible.builtin.template: + src: node_exporter.service.j2 + dest: "/etc/systemd/system/node_exporter.service" + owner: root + group: root + mode: "0644" diff --git a/roles/ceph-node-exporter/templates/node_exporter.service.j2 b/roles/ceph-node-exporter/templates/node_exporter.service.j2 new file mode 100644 index 0000000..8641c57 --- /dev/null +++ b/roles/ceph-node-exporter/templates/node_exporter.service.j2 @@ -0,0 +1,51 @@ +# This file is managed by ansible, don't make changes here - they will be +# overwritten. 
+[Unit] +Description=Node Exporter +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage node-exporter +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm -f node-exporter +ExecStart=/usr/bin/{{ container_binary }} run --rm --name=node-exporter \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} + --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ + --privileged \ + --security-opt label=disable \ + -v /:/rootfs:ro \ + --net=host \ + {{ node_exporter_container_image }} \ + --path.procfs=/rootfs/proc \ + --path.sysfs=/rootfs/sys \ + --path.rootfs=/rootfs \ + --no-collector.timex \ + --web.listen-address=:{{ node_exporter_port }} +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStop=-/usr/bin/{{ container_binary }} stop node-exporter +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=multi-user.target diff --git a/roles/ceph-osd/LICENSE b/roles/ceph-osd/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/roles/ceph-osd/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-osd/README.md b/roles/ceph-osd/README.md new file mode 100644 index 0000000..3ec1670 --- /dev/null +++ b/roles/ceph-osd/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-osd + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml new file mode 100644 index 0000000..629400f --- /dev/null +++ b/roles/ceph-osd/defaults/main.yml @@ -0,0 +1,218 @@ +--- +########### +# GENERAL # +########### + +# Even though OSD nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on OSD nodes. 
Setting 'copy_admin_key' to 'true' +# will copy the admin key to the /etc/ceph/ directory +copy_admin_key: false + + +############## +# CEPH OPTIONS +############## + +# Devices to be used as OSDs +# You can pre-provision disks that are not present yet. +# Ansible will just skip them. Newly added disk will be +# automatically configured during the next run. +# + + +# Declare devices to be used as OSDs +# All scenario(except 3rd) inherit from the following device declaration +# Note: This scenario uses the ceph-volume lvm batch method to provision OSDs + +# devices: +# - /dev/sdb +# - /dev/sdc +# - /dev/sdd +# - /dev/sde + +devices: [] + +# Declare devices to be used as block.db devices + +# dedicated_devices: +# - /dev/sdx +# - /dev/sdy + +dedicated_devices: [] + +# Declare devices to be used as block.wal devices + +# bluestore_wal_devices: +# - /dev/nvme0n1 +# - /dev/nvme0n2 + +bluestore_wal_devices: [] + +# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above. +# Device discovery is based on the Ansible fact 'ansible_facts["devices"]' +# which reports all the devices on a system. If chosen, all the disks +# found will be passed to ceph-volume lvm batch. You should not be worried on using +# this option since ceph-volume has a built-in check which looks for empty devices. +# Thus devices with existing partition tables will not be used. +# +osd_auto_discovery: false + +# Encrypt your OSD device using dmcrypt +# If set to True, no matter which osd_objecstore you use the data will be encrypted +dmcrypt: false + +# Use ceph-volume to create OSDs from logical volumes. +# lvm_volumes is a list of dictionaries. +# +# Filestore: Each dictionary must contain a data, journal and vg_name key. Any +# logical volume or logical group used must be a name and not a path. data +# can be a logical volume, device or partition. journal can be either a lv or partition. +# You can not use the same journal for many data lvs. 
+# data_vg must be the volume group name of the data lv, only applicable when data is an lv. +# journal_vg is optional and must be the volume group name of the journal lv, if applicable. +# For example: +# lvm_volumes: +# - data: data-lv1 +# data_vg: vg1 +# journal: journal-lv1 +# journal_vg: vg2 +# crush_device_class: foo +# - data: data-lv2 +# journal: /dev/sda1 +# data_vg: vg1 +# - data: data-lv3 +# journal: /dev/sdb1 +# data_vg: vg2 +# - data: /dev/sda +# journal: /dev/sdb1 +# - data: /dev/sda1 +# journal: /dev/sdb1 +# +# Bluestore: Each dictionary must contain at least data. When defining wal or +# db, it must have both the lv name and vg group (db and wal are not required). +# This allows for four combinations: just data, data and wal, data and wal and +# db, data and db. +# For example: +# lvm_volumes: +# - data: data-lv1 +# data_vg: vg1 +# wal: wal-lv1 +# wal_vg: vg1 +# crush_device_class: foo +# - data: data-lv2 +# db: db-lv2 +# db_vg: vg2 +# - data: data-lv3 +# wal: wal-lv1 +# wal_vg: vg3 +# db: db-lv3 +# db_vg: vg3 +# - data: data-lv4 +# data_vg: vg4 +# - data: /dev/sda +# - data: /dev/sdb1 + +lvm_volumes: [] +crush_device_class: "" +osds_per_device: 1 + +############### +# CRUSH RULES # +############### +crush_rule_config: false + +crush_rule_hdd: + name: HDD + root: default + type: host + class: hdd + default: false + +crush_rule_ssd: + name: SSD + root: default + type: host + class: ssd + default: false + +crush_rules: + - "{{ crush_rule_hdd }}" + - "{{ crush_rule_ssd }}" + +ceph_ec_profiles: {} + +# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }} +# and will move hosts into them which might lead to significant data movement in the cluster! 
+# +# In order for the playbook to create CRUSH hierarchy, you have to set up your Ansible inventory file like so: +# +# [osds] +# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }" +# +# Note that 'host' is mandatory and that you need to submit at least two bucket types (including the host) +create_crush_tree: false + +########## +# DOCKER # +########## + +ceph_config_keys: [] # DON'T TOUCH ME + +# Resource limitation +# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints +# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations +ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m" +ceph_osd_docker_cpu_limit: 4 + +# The next two variables are undefined, and thus, unused by default. +# If `lscpu | grep NUMA` returned the following: +# NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16 +# NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17 +# then, the following would run the OSD on the first NUMA node only. +# ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" +# ceph_osd_docker_cpuset_mems: "0" + +# PREPARE DEVICE +# +# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above +# +ceph_osd_docker_devices: "{{ devices }}" +ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} + +# ACTIVATE DEVICE +# +ceph_osd_numactl_opts: "" + +# If you want to add parameters, you should retain the existing ones and include the new ones.
+ceph_osd_container_params: + volumes: + - /dev:/dev + - /var/lib/ceph/bootstrap-osd/ceph.keyring:/var/lib/ceph/bootstrap-osd/ceph.keyring:z + - /var/lib/ceph/osd/{{ cluster }}-"${OSD_ID}":/var/lib/ceph/osd/{{ cluster }}-"${OSD_ID}":z + - /var/run/udev/:/var/run/udev/ + - /run/lvm/:/run/lvm/ + envs: + OSD_ID: ${OSD_ID} + args: + - -f + - -i=${OSD_ID} + +########### +# SYSTEMD # +########### + +# ceph_osd_systemd_overrides will override the systemd settings +# for the ceph-osd services. +# For example,to set "PrivateDevices=false" you can specify: +# ceph_osd_systemd_overrides: +# Service: +# PrivateDevices: false + + +########### +# CHECK # +########### + +nb_retry_wait_osd_up: 60 +delay_wait_osd_up: 10 diff --git a/roles/ceph-osd/files/ceph-osd.target b/roles/ceph-osd/files/ceph-osd.target new file mode 100644 index 0000000..bb06303 --- /dev/null +++ b/roles/ceph-osd/files/ceph-osd.target @@ -0,0 +1,9 @@ +[Unit] +Description=ceph target allowing to start/stop all ceph-osd@.service instances at once +PartOf=ceph.target +After=ceph-mon.target +Before=ceph.target +Wants=ceph.target ceph-mon.target + +[Install] +WantedBy=multi-user.target ceph.target \ No newline at end of file diff --git a/roles/ceph-osd/meta/main.yml b/roles/ceph-osd/meta/main.yml new file mode 100644 index 0000000..a5eb83b --- /dev/null +++ b/roles/ceph-osd/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Installs Ceph Object Storage Daemon + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-osd/tasks/common.yml b/roles/ceph-osd/tasks/common.yml new file mode 100644 index 0000000..554e524 --- /dev/null +++ b/roles/ceph-osd/tasks/common.yml @@ -0,0 +1,46 @@ +--- +- name: Create bootstrap-osd and osd directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool 
else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_directories_mode }}" + when: cephx | bool + with_items: + - /var/lib/ceph/bootstrap-osd/ + - /var/lib/ceph/osd/ + +- name: Get keys from monitors + ceph_key_info: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _osd_keys + with_items: + - { name: "client.bootstrap-osd", path: "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring", copy_key: true } + - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" } + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + no_log: "{{ no_log_on_ceph_key_tasks }}" + when: + - cephx | bool + - item.copy_key | bool + +- name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ item.item.path }}" + content: "{{ item.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + with_items: "{{ _osd_keys.results }}" + when: + - cephx | bool + - item is not skipped + - item.item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" diff --git a/roles/ceph-osd/tasks/crush_rules.yml b/roles/ceph-osd/tasks/crush_rules.yml new file mode 100644 index 0000000..8f27146 --- /dev/null +++ b/roles/ceph-osd/tasks/crush_rules.yml @@ -0,0 +1,98 @@ +--- +- name: Configure crush hierarchy + ceph_crush: + cluster: "{{ cluster }}" + location: "{{ osd_crush_location }}" + containerized: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }}" + register: config_crush_hierarchy + delegate_to: '{{ groups[mon_group_name][0] }}' + 
when: + - hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(create_crush_tree) | bool + - osd_crush_location is defined + +- name: Create configured ec profiles + ceph_ec_profile: + name: "{{ item.key }}" + cluster: "{{ cluster }}" + state: "{{ item.value.state | default('present') }}" + stripe_unit: "{{ item.value.stripe_unit | default(omit) }}" + plugin: "{{ item.value.plugin | default(omit) }}" + k: "{{ item.value.k }}" + m: "{{ item.value.m }}" + d: "{{ item.value.d | default(omit) }}" + l: "{{ item.value.l | default(omit) }}" + c: "{{ item.value.c | default(omit) }}" + scalar_mds: "{{ item.value.scalar_mds | default(omit) }}" + technique: "{{ item.value.technique | default(omit) }}" + crush_root: "{{ item.value.crush_root | default(omit) }}" + crush_failure_domain: "{{ item.value.crush_failure_domain | default(omit) }}" + crush_device_class: "{{ item.value.crush_device_class | default(omit) }}" + force: "{{ item.value.force | default(false) }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + loop: "{{ ceph_ec_profiles | dict2items }}" + delegate_to: '{{ groups[mon_group_name][0] }}' + run_once: true + +- name: Create configured crush rules + ceph_crush_rule: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + rule_type: "{{ item.rule_type | default('replicated') }}" + profile: "{{ item.ec_profile | default(omit) }}" + bucket_root: "{{ item.root | default(omit) }}" + bucket_type: "{{ item.type | default(omit) }}" + device_class: "{{ item.class | default(omit) }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | default(crush_rules) | 
unique }}" + delegate_to: '{{ groups[mon_group_name][0] }}' + run_once: true + +- name: Get id for new default crush rule + ceph_crush_rule_info: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: info_ceph_default_crush_rule + with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | default(crush_rules) | unique }}" + delegate_to: '{{ groups[mon_group_name][0] }}' + run_once: true + when: item.default | default(False) | bool + +# If multiple rules are set as default (should not be) then the last one is taken as actual default. +# the with_items statement overrides each iteration with the new one. +# NOTE(leseb): we should actually fail if multiple rules are set as default +- name: Set_fact info_ceph_default_crush_rule_yaml, ceph_osd_pool_default_crush_rule_name + ansible.builtin.set_fact: + info_ceph_default_crush_rule_yaml: "{{ item.stdout | default('{}', True) | from_json() }}" + ceph_osd_pool_default_crush_rule_name: "{{ (item.stdout | default('{}', True) | from_json).get('rule_name') }}" + with_items: "{{ info_ceph_default_crush_rule.results }}" + run_once: true + when: not item.get('skipped', false) + +- name: Insert new default crush rule into daemon to prevent restart + ansible.builtin.command: "{{ hostvars[item]['container_exec_cmd'] | default('') }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[item]['monitor_name'] }}.asok config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}" + changed_when: false + delegate_to: "{{ item }}" + with_items: "{{ groups[mon_group_name] }}" + run_once: true + when: + - info_ceph_default_crush_rule_yaml | default('') | length > 0 + +- name: Add new default crush rule to ceph config file + community.general.ini_file: + dest: 
"/etc/ceph/{{ cluster }}.conf" + section: "global" + option: "osd_pool_default_crush_rule" + value: "{{ info_ceph_default_crush_rule_yaml.rule_id }}" + mode: "0644" + delegate_to: "{{ item }}" + with_items: "{{ groups[mon_group_name] }}" + run_once: true + when: + - info_ceph_default_crush_rule_yaml | default('') | length > 0 diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml new file mode 100644 index 0000000..8987f57 --- /dev/null +++ b/roles/ceph-osd/tasks/main.yml @@ -0,0 +1,101 @@ +--- +- name: Set_fact add_osd + ansible.builtin.set_fact: + add_osd: "{{ groups[osd_group_name] | length != ansible_play_hosts_all | length }}" + +- name: Set_fact container_exec_cmd + ansible.builtin.set_fact: + container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" + with_items: "{{ groups.get(mon_group_name, []) }}" + delegate_to: "{{ item }}" + delegate_facts: true + run_once: true + when: containerized_deployment | bool + +- name: Include_tasks system_tuning.yml + ansible.builtin.include_tasks: system_tuning.yml + +- name: Install dependencies + ansible.builtin.package: + name: parted + state: present + register: result + until: result is succeeded + when: + - not containerized_deployment | bool + - ansible_facts['os_family'] != 'ClearLinux' + +- name: Install numactl when needed + ansible.builtin.package: + name: numactl + register: result + until: result is succeeded + when: + - containerized_deployment | bool + - ceph_osd_numactl_opts | length > 0 + tags: with_pkg + +- name: Include_tasks common.yml + ansible.builtin.include_tasks: common.yml + +- name: Set noup flag + ceph_osd_flag: + name: noup + cluster: "{{ cluster }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: 
true + when: + - not rolling_update | default(False) | bool + - not switch_to_containers | default(False) | bool + +- name: Include_tasks scenarios/lvm.yml + ansible.builtin.include_tasks: scenarios/lvm.yml + when: + - lvm_volumes|length > 0 + - not rolling_update|default(False) | bool + +- name: Include_tasks scenarios/lvm-batch.yml + ansible.builtin.include_tasks: scenarios/lvm-batch.yml + when: + - devices|length > 0 + - not rolling_update|default(False) | bool + +- name: Include_tasks start_osds.yml + ansible.builtin.include_tasks: start_osds.yml + +- name: Unset noup flag + ceph_osd_flag: + name: noup + cluster: "{{ cluster }}" + state: absent + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + when: + - not rolling_update | default(False) | bool + - not switch_to_containers | default(False) | bool + - inventory_hostname == ansible_play_hosts_all | last + +- name: Wait for all osd to be up + ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd stat -f json" + register: wait_for_all_osds_up + retries: "{{ nb_retry_wait_osd_up }}" + delay: "{{ delay_wait_osd_up }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + until: + - (wait_for_all_osds_up.stdout | from_json)["num_osds"] | int > 0 + - (wait_for_all_osds_up.stdout | from_json)["num_osds"] == (wait_for_all_osds_up.stdout | from_json)["num_up_osds"] + when: + - not ansible_check_mode + - inventory_hostname == ansible_play_hosts_all | last + tags: wait_all_osds_up + +- name: Include crush_rules.yml + ansible.builtin.include_tasks: crush_rules.yml + when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool + tags: wait_all_osds_up diff --git 
a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml new file mode 100644 index 0000000..a105312 --- /dev/null +++ b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml @@ -0,0 +1,21 @@ +--- + +- name: Use ceph-volume lvm batch to create osds + ceph_volume: + cluster: "{{ cluster }}" + objectstore: "{{ osd_objectstore }}" + batch_devices: "{{ _devices }}" + dmcrypt: "{{ dmcrypt | default(omit) }}" + crush_device_class: "{{ crush_device_class | default(omit) }}" + osds_per_device: "{{ osds_per_device }}" + block_db_size: "{{ block_db_size }}" + block_db_devices: "{{ dedicated_devices | unique if dedicated_devices | length > 0 else omit }}" + wal_devices: "{{ bluestore_wal_devices | unique if bluestore_wal_devices | length > 0 else omit }}" + action: "batch" + environment: + CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + PYTHONIOENCODING: utf-8 + when: _devices | default([]) | length > 0 + tags: prepare_osd diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml new file mode 100644 index 0000000..68ade36 --- /dev/null +++ b/roles/ceph-osd/tasks/scenarios/lvm.yml @@ -0,0 +1,21 @@ +--- +- name: Use ceph-volume to create osds + ceph_volume: + cluster: "{{ cluster }}" + objectstore: "{{ osd_objectstore }}" + data: "{{ item.data }}" + data_vg: "{{ item.data_vg | default(omit) }}" + db: "{{ item.db | default(omit) }}" + db_vg: "{{ item.db_vg | default(omit) }}" + wal: "{{ item.wal | default(omit) }}" + wal_vg: "{{ item.wal_vg | default(omit) }}" + crush_device_class: "{{ item.crush_device_class | default(crush_device_class) | default(omit) }}" + dmcrypt: "{{ dmcrypt | default(omit) }}" + action: "{{ 'prepare' if containerized_deployment | bool else 'create' }}" + environment: + CEPH_VOLUME_DEBUG: "{{ 
ceph_volume_debug }}" + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + PYTHONIOENCODING: utf-8 + with_items: "{{ lvm_volumes }}" + tags: prepare_osd diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml new file mode 100644 index 0000000..8dfb1d4 --- /dev/null +++ b/roles/ceph-osd/tasks/start_osds.yml @@ -0,0 +1,69 @@ +--- +# this is for ceph-disk, the ceph-disk command is gone so we have to list /var/lib/ceph +- name: Get osd ids + ansible.builtin.shell: ls /var/lib/ceph/osd/ | sed 's/.*-//' # noqa risky-shell-pipe + args: + executable: /bin/bash + changed_when: false + failed_when: false + register: osd_ids_non_container + +- name: Collect osd ids + ceph_volume: + cluster: "{{ cluster }}" + action: list + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: ceph_osd_ids + +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + when: containerized_deployment | bool + +- name: Ensure systemd service override directory exists + ansible.builtin.file: + state: directory + path: "/etc/systemd/system/ceph-osd@.service.d/" + mode: "0755" + when: + - ceph_osd_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + +- name: Add ceph-osd systemd service overrides + openstack.config_template.config_template: + src: "ceph-osd.service.d-overrides.j2" + dest: "/etc/systemd/system/ceph-osd@.service.d/ceph-osd-systemd-overrides.conf" + config_overrides: "{{ ceph_osd_systemd_overrides | default({}) }}" + config_type: "ini" + when: + - ceph_osd_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + +- name: Ensure /var/lib/ceph/osd/- is present + 
ansible.builtin.file: + state: directory + path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" + mode: "{{ ceph_directories_mode }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}" + +- name: Write run file in /var/lib/ceph/osd/xxxx/run + ansible.builtin.template: + src: systemd-run.j2 + dest: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}/run" + mode: "0700" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}" + when: containerized_deployment | bool + +- name: Systemd start osd + ansible.builtin.systemd: + name: ceph-osd@{{ item }} + state: started + enabled: true + masked: false + daemon_reload: true + with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}" diff --git a/roles/ceph-osd/tasks/system_tuning.yml b/roles/ceph-osd/tasks/system_tuning.yml new file mode 100644 index 0000000..4dae81b --- /dev/null +++ b/roles/ceph-osd/tasks/system_tuning.yml @@ -0,0 +1,43 @@ +--- +- name: Create tmpfiles.d directory + ansible.builtin.file: + path: "/etc/tmpfiles.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + register: "tmpfiles_d" + when: disable_transparent_hugepage | bool + +- name: Disable transparent hugepage + ansible.builtin.template: + src: "tmpfiles_hugepage.j2" + dest: "/etc/tmpfiles.d/ceph_transparent_hugepage.conf" + group: "root" + owner: "root" + mode: "0644" + force: "yes" + validate: "systemd-tmpfiles --create %s" + when: disable_transparent_hugepage | bool 
+ +- name: Get default vm.min_free_kbytes + ansible.builtin.slurp: + src: /proc/sys/vm/min_free_kbytes + register: default_vm_min_free_kbytes + +- name: Set_fact vm_min_free_kbytes + ansible.builtin.set_fact: + vm_min_free_kbytes: "{{ 4194303 if ansible_facts['memtotal_mb'] >= 49152 else default_vm_min_free_kbytes.content | b64decode | trim }}" + +- name: Apply operating system tuning + ansible.posix.sysctl: + name: "{{ item.name }}" + value: "{{ item.value }}" + state: present + sysctl_file: /etc/sysctl.d/ceph-tuning.conf + sysctl_set: true + ignoreerrors: true + with_items: + - { name: "fs.aio-max-nr", value: "1048576", enable: "{{ osd_objectstore == 'bluestore' }}" } + - "{{ os_tuning_params }}" + when: item.enable | default(true) | bool diff --git a/roles/ceph-osd/tasks/systemd.yml b/roles/ceph-osd/tasks/systemd.yml new file mode 100644 index 0000000..3531cfc --- /dev/null +++ b/roles/ceph-osd/tasks/systemd.yml @@ -0,0 +1,23 @@ +--- +- name: Generate systemd unit file + ansible.builtin.template: + src: "{{ role_path }}/templates/ceph-osd.service.j2" + dest: /etc/systemd/system/ceph-osd@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph osds + +- name: Generate systemd ceph-osd target file + ansible.builtin.copy: + src: ceph-osd.target + dest: /etc/systemd/system/ceph-osd.target + mode: "0644" + when: containerized_deployment | bool + +- name: Enable ceph-osd.target + ansible.builtin.service: + name: ceph-osd.target + enabled: true + daemon_reload: true + when: containerized_deployment | bool diff --git a/roles/ceph-osd/templates/ceph-osd.service.d-overrides.j2 b/roles/ceph-osd/templates/ceph-osd.service.d-overrides.j2 new file mode 100644 index 0000000..e2bb153 --- /dev/null +++ b/roles/ceph-osd/templates/ceph-osd.service.d-overrides.j2 @@ -0,0 +1 @@ +# {{ ansible_managed }} diff --git a/roles/ceph-osd/templates/ceph-osd.service.j2 b/roles/ceph-osd/templates/ceph-osd.service.j2 new file mode 100644 index 0000000..2556ec4 --- 
/dev/null +++ b/roles/ceph-osd/templates/ceph-osd.service.j2 @@ -0,0 +1,42 @@ +# {{ ansible_managed }} +[Unit] +Description=Ceph OSD +PartOf=ceph-osd.target +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target +{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_osd_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_osd_docker_cpu_limit|int %} + +[Service] +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-osd-%i +ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph +ExecStartPre=-/usr/bin/mkdir -p /var/lib/ceph/osd/{{ cluster }}-%i +{% else %} +ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i +ExecStart=/bin/bash /var/lib/ceph/osd/{{ cluster }}-%i/run %t %n +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStop=-/usr/bin/{{ container_binary }} stop --timeout 120 ceph-osd-%i +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=120 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=ceph.target diff --git a/roles/ceph-osd/templates/systemd-run.j2 b/roles/ceph-osd/templates/systemd-run.j2 new file mode 100644 index 0000000..0dcaf79 --- /dev/null +++ b/roles/ceph-osd/templates/systemd-run.j2 @@ -0,0 +1,58 @@ +#!/bin/sh +{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_osd_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_osd_docker_cpu_limit|int %} +OSD_ID={{ item 
}} +T=$1 +N=$2 +CEPH_VOLUME_CMD="/usr/bin/{{ container_binary }} run --rm --net=host --privileged=true --pid=host --ipc=host -v /dev:/dev -v /etc/localtime:/etc/localtime:ro -v /var/lib/ceph/osd/{{ cluster }}-${OSD_ID}:/var/lib/ceph/osd/{{ cluster }}-${OSD_ID}:z -v /etc/ceph:/etc/ceph:z -v /var/run/ceph:/var/run/ceph:z -v /var/run/udev/:/var/run/udev/ -v /var/log/ceph:/var/log/ceph:z -v /run/lvm/:/run/lvm/ --entrypoint=ceph-volume {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" +PYTHON=python3 +CEPH_VOLUME_LIST_JSON="$($CEPH_VOLUME_CMD lvm list --format json)" + +# Find the OSD FSID from the OSD ID +OSD_FSID="$(echo "$CEPH_VOLUME_LIST_JSON" | $PYTHON -c "import sys, json; print(json.load(sys.stdin)['$OSD_ID'][0]['tags']['ceph.osd_fsid'])")" + +# Find the OSD type +OSD_TYPE="$(echo "$CEPH_VOLUME_LIST_JSON" | $PYTHON -c "import sys, json; print(json.load(sys.stdin)['$OSD_ID'][0]['type'])")" + +# Discover the objectstore +if [[ "block wal db" =~ $OSD_TYPE ]]; then + OSD_OBJECTSTORE=(--bluestore) +else + log "Unable to discover osd objectstore for OSD type: $OSD_TYPE" + exit 1 +fi + +# activate +$CEPH_VOLUME_CMD lvm activate --no-systemd ${OSD_OBJECTSTORE[@]} ${OSD_ID} ${OSD_FSID} + +# start ceph-osd +{% if ceph_osd_numactl_opts != "" %} +numactl \ +{{ ceph_osd_numactl_opts }} \ +{% endif %} +/usr/bin/{{ container_binary }} run \ +{% if container_binary == 'podman' %} +-d --log-driver journald --conmon-pidfile /${T}/${N}-pid --cidfile /${T}/${N}-cid \ +{% endif %} +--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ +--rm --net=host --privileged=true --pid=host \ +--ipc=host \ +--cpus={{ cpu_limit }} \ +{% if ceph_osd_docker_cpuset_cpus is defined -%} +--cpuset-cpus='{{ ceph_osd_docker_cpuset_cpus }}' \ +{% endif -%} +{% if ceph_osd_docker_cpuset_mems is defined -%} +--cpuset-mems='{{ ceph_osd_docker_cpuset_mems }}' \ +{% endif -%} +{% for v in ceph_common_container_params['volumes'] + ceph_osd_container_params['volumes'] | 
default([]) %} + -v {{ v }} \ +{% endfor %} +{% if ansible_facts['distribution'] == 'Ubuntu' -%} +--security-opt apparmor=unconfined \ +{% endif -%} +{% for k, v in (ceph_common_container_params['envs'] | combine(ceph_osd_container_params['envs'] | default({}))).items() %} +-e {{ k }}={{ v }} \ +{% endfor %} +--name=ceph-osd-${OSD_ID} \ +--entrypoint=/usr/bin/ceph-osd \ +{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ +{{ (ceph_common_container_params['args'] + ceph_osd_container_params['args'] | default([])) | join(' ') }} diff --git a/roles/ceph-osd/templates/tmpfiles_hugepage.j2 b/roles/ceph-osd/templates/tmpfiles_hugepage.j2 new file mode 100644 index 0000000..7dff2c3 --- /dev/null +++ b/roles/ceph-osd/templates/tmpfiles_hugepage.j2 @@ -0,0 +1,3 @@ +{{ '# ' + ansible_managed }} + +{{ 'w /sys/kernel/mm/transparent_hugepage/enabled - - - - never' }} diff --git a/roles/ceph-osd/vars/main.yml b/roles/ceph-osd/vars/main.yml new file mode 100644 index 0000000..d26a09f --- /dev/null +++ b/roles/ceph-osd/vars/main.yml @@ -0,0 +1,2 @@ +--- +container_bin_path: /opt/ceph-container/bin diff --git a/roles/ceph-prometheus/files/ceph_dashboard.yml b/roles/ceph-prometheus/files/ceph_dashboard.yml new file mode 100644 index 0000000..8a7b68e --- /dev/null +++ b/roles/ceph-prometheus/files/ceph_dashboard.yml @@ -0,0 +1,115 @@ +groups: + - name: Dashboard + rules: + - alert: Ceph Health Warning + expr: ceph_health_status == 1 + for: 1m + labels: + severity: page + annotations: + summary: "Ceph Health Warning" + description: "Overall Ceph Health" + - alert: Ceph Health Error + expr: ceph_health_status > 1 + for: 1m + labels: + severity: page + annotations: + summary: "Ceph Health Error" + description: "The Ceph cluster health is in an error state" + - alert: Disk(s) Near Full + expr: (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes) * 100 > 85 + for: 1m + labels: + severity: page + annotations: + summary: "Disk(s) Near Full" + description: "This 
shows how many disks are at or above 85% full. Performance may degrade beyond this threshold on filestore (XFS) backed OSD's." + - alert: OSD(s) Down + expr: ceph_osd_up < 0.5 + for: 1m + labels: + severity: page + annotations: + summary: "OSD(s) Down" + description: "This indicates that one or more OSDs is currently marked down in the cluster." + - alert: OSD Host(s) Down + expr: count by(instance) (ceph_disk_occupation * on(ceph_daemon) group_right(instance) ceph_osd_up == 0) - count by(instance) (ceph_disk_occupation) == 0 + for: 1m + labels: + severity: page + annotations: + summary: "OSD Host(s) Down" + description: "This indicates that one or more OSD hosts is currently down in the cluster." + - alert: PG(s) Stuck + expr: max(ceph_osd_numpg) > scalar(ceph_pg_active) + for: 1m + labels: + severity: page + annotations: + summary: "PG(s) Stuck" + description: "This indicates there are pg's in a stuck state, manual intervention needed to resolve." + - alert: OSD Host Loss Check + expr: max(sum(ceph_osd_stat_bytes - ceph_osd_stat_bytes_used)) * 0.9 < scalar(max(sum by (instance) (ceph_osd_stat_bytes + on (ceph_daemon) group_left (instance) (ceph_disk_occupation*0)))) + for: 1m + labels: + severity: page + annotations: + summary: "OSD Host Loss Check" + description: "This indicates that the cluster @ 90% full is not enough to support the loss of the largest OSD host." + - alert: Slow OSD Responses + expr: ((irate(node_disk_read_time_seconds_total[5m]) / clamp_min(irate(node_disk_reads_completed_total[5m]), 1) + irate(node_disk_write_time_seconds_total[5m]) / clamp_min(irate(node_disk_writes_completed_total[5m]), 1)) and on (instance, device) ceph_disk_occupation) > 1 + for: 1m + labels: + severity: page + annotations: + summary: "Slow OSD Responses" + description: "This indicates that some OSD Latencies are above 1s." 
+ - alert: Network Errors + expr: sum by (instance, device) (irate(node_network_receive_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_receive_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m])) > 10 + for: 1m + labels: + severity: page + annotations: + summary: "Network Errors" + description: "This indicates that more than 10 dropped/error packets are seen in a 5m interval" + - alert: Pool Capacity Low + expr: (ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail) * 100 + on (pool_id) group_left (name) (ceph_pool_metadata*0)) > 85 + for: 1m + labels: + severity: page + annotations: + summary: "Pool Capacity Low" + description: "This indicates a low capacity in a pool." + - alert: MON(s) Down + expr: ceph_mon_quorum_status != 1 + for: 1m + labels: + severity: page + annotations: + summary: "MON(s) down" + description: "This indicates that one or more MON(s) is down." + - alert: Cluster Capacity Low + expr: sum(ceph_osd_stat_bytes_used) / sum(ceph_osd_stat_bytes) > 0.85 + for: 1m + labels: + severity: page + annotations: + summary: "Cluster Capacity Low" + description: "This indicates raw used space crosses the 85% capacity threshold of the ceph cluster." + - alert: OSD(s) with High PG Count + expr: ceph_osd_numpg > 275 + for: 1m + labels: + severity: page + annotations: + summary: "OSD(s) with High PG Count" + description: "This indicates there are some OSDs with high PG count (275+)." 
+ - alert: Slow OSD Ops + expr: ceph_healthcheck_slow_ops > 0 + for: 1m + labels: + severity: page + annotations: + summary: "Slow OSD Ops" + description: "OSD requests are taking too long to process (osd_op_complaint_time exceeded)" diff --git a/roles/ceph-prometheus/handlers/main.yml b/roles/ceph-prometheus/handlers/main.yml new file mode 100644 index 0000000..8aa7e06 --- /dev/null +++ b/roles/ceph-prometheus/handlers/main.yml @@ -0,0 +1,13 @@ +--- +- name: Service handler + # We use the systemd module here so we can use the daemon_reload feature, + # since we're shipping the .service file ourselves + ansible.builtin.systemd: + name: "{{ item }}" + daemon_reload: true + enabled: true + state: restarted + with_items: + - 'alertmanager' + - 'prometheus' + when: not docker2podman | default(False) | bool diff --git a/roles/ceph-prometheus/meta/main.yml b/roles/ceph-prometheus/meta/main.yml new file mode 100644 index 0000000..b88a5db --- /dev/null +++ b/roles/ceph-prometheus/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Boris Ranto + description: Configures Prometheus for Ceph Dashboard + license: Apache + min_ansible_version: '2.4' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-prometheus/tasks/main.yml b/roles/ceph-prometheus/tasks/main.yml new file mode 100644 index 0000000..7206da2 --- /dev/null +++ b/roles/ceph-prometheus/tasks/main.yml @@ -0,0 +1,63 @@ +--- +- name: Create prometheus directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ prometheus_user_id }}" + group: "{{ prometheus_user_id }}" + mode: "0755" + with_items: + - "{{ prometheus_conf_dir }}" + - "{{ prometheus_data_dir }}" + +- name: Write prometheus config file + openstack.config_template.config_template: + src: prometheus.yml.j2 + dest: "{{ prometheus_conf_dir }}/prometheus.yml" + owner: "{{ prometheus_user_id }}" + group: "{{ prometheus_user_id }}" + mode: 
"0640" + config_type: yaml + config_overrides: "{{ prometheus_conf_overrides }}" + notify: Service handler + +- name: Make sure the alerting rules directory exists + ansible.builtin.file: + path: "/etc/prometheus/alerting/" + state: directory + owner: "{{ prometheus_user_id }}" + group: "{{ prometheus_user_id }}" + mode: "0755" + +- name: Copy alerting rules + ansible.builtin.copy: + src: "ceph_dashboard.yml" + dest: "/etc/prometheus/alerting/ceph_dashboard.yml" + owner: "{{ prometheus_user_id }}" + group: "{{ prometheus_user_id }}" + mode: "0644" + +- name: Create alertmanager directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ prometheus_user_id }}" + group: "{{ prometheus_user_id }}" + mode: "0755" + with_items: + - "{{ alertmanager_conf_dir }}" + - "{{ alertmanager_data_dir }}" + +- name: Write alertmanager config file + openstack.config_template.config_template: + src: alertmanager.yml.j2 + dest: "{{ alertmanager_conf_dir }}/alertmanager.yml" + owner: "{{ prometheus_user_id }}" + group: "{{ prometheus_user_id }}" + mode: "0640" + config_type: yaml + config_overrides: "{{ alertmanager_conf_overrides }}" + notify: Service handler + +- name: Include setup_container.yml + ansible.builtin.include_tasks: setup_container.yml diff --git a/roles/ceph-prometheus/tasks/setup_container.yml b/roles/ceph-prometheus/tasks/setup_container.yml new file mode 100644 index 0000000..b2034ad --- /dev/null +++ b/roles/ceph-prometheus/tasks/setup_container.yml @@ -0,0 +1,13 @@ +--- +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + +- name: Start prometheus services + ansible.builtin.systemd: + name: "{{ item }}" + daemon_reload: true + enabled: true + state: started + with_items: + - prometheus + - alertmanager diff --git a/roles/ceph-prometheus/tasks/systemd.yml b/roles/ceph-prometheus/tasks/systemd.yml new file mode 100644 index 0000000..b35cad3 --- /dev/null +++ b/roles/ceph-prometheus/tasks/systemd.yml @@ -0,0 
+1,12 @@ +--- +- name: Ship systemd services + ansible.builtin.template: + src: "{{ item }}.j2" + dest: "/etc/systemd/system/{{ item }}" + owner: root + group: root + mode: "0644" + with_items: + - 'alertmanager.service' + - 'prometheus.service' + notify: Service handler diff --git a/roles/ceph-prometheus/templates/alertmanager.service.j2 b/roles/ceph-prometheus/templates/alertmanager.service.j2 new file mode 100644 index 0000000..bad5fbd --- /dev/null +++ b/roles/ceph-prometheus/templates/alertmanager.service.j2 @@ -0,0 +1,58 @@ +# This file is managed by ansible, don't make changes here - they will be +# overwritten. +[Unit] +Description=alertmanager +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] +WorkingDirectory={{ alertmanager_data_dir }} +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage alertmanager +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm -f alertmanager +ExecStart=/usr/bin/{{ container_binary }} run --rm --name=alertmanager \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} + --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ + -v "{{ alertmanager_conf_dir }}:/etc/alertmanager:Z" \ + -v "{{ alertmanager_data_dir }}:/alertmanager:Z" \ + --net=host \ + --cpu-period={{ alertmanager_container_cpu_period }} \ + --cpu-quota={{ alertmanager_container_cpu_period * alertmanager_container_cpu_cores }} \ + --memory={{ alertmanager_container_memory }}GB \ + --memory-swap={{ alertmanager_container_memory * 2 }}GB \ + {{ alertmanager_container_image }} \ + 
--config.file=/etc/alertmanager/alertmanager.yml \ + --cluster.listen-address={{ grafana_server_addr }}:{{ alertmanager_cluster_port }} \ +{% for peer in grafana_server_addrs|difference(grafana_server_addr) %} + --cluster.peer={{ peer }}:{{ alertmanager_cluster_port }} \ +{% endfor %} + --storage.path=/alertmanager \ + --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ alertmanager_port }}/ \ + --web.listen-address={{ grafana_server_addr }}:{{ alertmanager_port }} +{% if container_binary == 'podman' %} +ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStop=/usr/bin/{{ container_binary }} stop alertmanager +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=multi-user.target diff --git a/roles/ceph-prometheus/templates/alertmanager.yml.j2 b/roles/ceph-prometheus/templates/alertmanager.yml.j2 new file mode 100644 index 0000000..63dfbf7 --- /dev/null +++ b/roles/ceph-prometheus/templates/alertmanager.yml.j2 @@ -0,0 +1,20 @@ +global: + resolve_timeout: 5m + +route: + group_by: ['alertname'] + group_wait: 10s + group_interval: 10s + repeat_interval: 1h + receiver: 'ceph-dashboard' +receivers: +- name: 'ceph-dashboard' + webhook_configs: +{% for host in groups['mgrs'] | default(groups['mons']) %} + - url: '{{ dashboard_protocol }}://{{ hostvars[host]['ansible_facts']['fqdn'] }}:{{ dashboard_port }}/api/prometheus_receiver' +{% if dashboard_protocol == 'https' and alertmanager_dashboard_api_no_ssl_verify | bool %} + http_config: + tls_config: + insecure_skip_verify: true +{% endif %} +{% endfor %} diff --git a/roles/ceph-prometheus/templates/prometheus.service.j2 b/roles/ceph-prometheus/templates/prometheus.service.j2 new file mode 100644 index 0000000..e8a92c0 --- /dev/null +++ b/roles/ceph-prometheus/templates/prometheus.service.j2 @@ -0,0 +1,57 @@ +# This file 
is managed by ansible, don't make changes here - they will be +# overwritten. +[Unit] +Description=prometheus +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage prometheus +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm -f prometheus +ExecStart=/usr/bin/{{ container_binary }} run --rm --name=prometheus \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} + --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ + -v "{{ prometheus_conf_dir }}:/etc/prometheus:Z" \ + -v "{{ prometheus_data_dir }}:/prometheus:Z" \ + --net=host \ + --user={{ prometheus_user_id }} \ + --cpu-period={{ prometheus_container_cpu_period }} \ + --cpu-quota={{ prometheus_container_cpu_period * prometheus_container_cpu_cores }} \ + --memory={{ prometheus_container_memory }}GB \ + --memory-swap={{ prometheus_container_memory * 2 }}GB \ + {{ prometheus_container_image }} \ + --config.file=/etc/prometheus/prometheus.yml \ + --storage.tsdb.path=/prometheus \ +{% if prometheus_storage_tsdb_retention_time is defined %} + --storage.tsdb.retention.time={{ prometheus_storage_tsdb_retention_time }} \ +{% endif %} + --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ prometheus_port }}/ \ + --web.listen-address={{ grafana_server_addr }}:{{ prometheus_port }} +{% if container_binary == 'podman' %} +ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStop=/usr/bin/{{ container_binary }} stop prometheus +{% endif %} 
+KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=multi-user.target diff --git a/roles/ceph-prometheus/templates/prometheus.yml.j2 b/roles/ceph-prometheus/templates/prometheus.yml.j2 new file mode 100644 index 0000000..2476495 --- /dev/null +++ b/roles/ceph-prometheus/templates/prometheus.yml.j2 @@ -0,0 +1,38 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + +rule_files: + - '/etc/prometheus/alerting/*' + +scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['{{ grafana_server_addr }}:{{ prometheus_port }}'] + - job_name: 'ceph' + honor_labels: true + static_configs: +{% for host in groups[mgr_group_name] | default(groups[mon_group_name]) %} + - targets: ['{{ host }}:9283'] + labels: + instance: 'ceph_cluster' +{% endfor %} + - job_name: 'node' + static_configs: +{% for host in (groups['all'] | difference(groups[monitoring_group_name] | union(groups.get(client_group_name, []))) | union(groups.get(osd_group_name, []))) %} + - targets: ['{{ host }}:{{ node_exporter_port }}'] + labels: + instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}" +{% endfor %} + - job_name: 'grafana' + static_configs: +{% for host in groups[monitoring_group_name] %} + - targets: ['{{ host }}:{{ node_exporter_port }}'] + labels: + instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}" +{% endfor %} +alerting: + alertmanagers: + - scheme: http + static_configs: + - targets: ['{{ grafana_server_addr }}:{{ alertmanager_port }}'] diff --git a/roles/ceph-rbd-mirror/LICENSE b/roles/ceph-rbd-mirror/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/roles/ceph-rbd-mirror/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-rbd-mirror/README.md b/roles/ceph-rbd-mirror/README.md new file mode 100644 index 0000000..b42fa0e --- /dev/null +++ b/roles/ceph-rbd-mirror/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-rbd + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-rbd-mirror/defaults/main.yml b/roles/ceph-rbd-mirror/defaults/main.yml new file mode 100644 index 0000000..0b5c885 --- /dev/null +++ b/roles/ceph-rbd-mirror/defaults/main.yml @@ -0,0 +1,46 @@ +--- +######### +# SETUP # +######### + +# Even though rbd-mirror nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on rbd-mirror nodes. Setting 'copy_admin_key' to 'true' +# will copy the admin key to the /etc/ceph/ directory. Only +# valid for Luminous and later releases. 
+copy_admin_key: false + + +################# +# CONFIGURATION # +################# + +ceph_rbd_mirror_local_user: client.rbd-mirror-peer +ceph_rbd_mirror_configure: false +ceph_rbd_mirror_mode: pool +ceph_rbd_mirror_remote_cluster: remote + +########## +# DOCKER # +########## + +# Resource limitation +# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints +# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations +# These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable. +ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m" +ceph_rbd_mirror_docker_cpu_limit: 1 + +ceph_rbd_mirror_docker_extra_env: +ceph_config_keys: [] # DON'T TOUCH ME + + +########### +# SYSTEMD # +########### +# ceph_rbd_mirror_systemd_overrides will override the systemd settings +# for the ceph-rbd-mirror services. +# For example,to set "PrivateDevices=false" you can specify: +# ceph_rbd_mirror_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-rbd-mirror/files/ceph-rbd-mirror.target b/roles/ceph-rbd-mirror/files/ceph-rbd-mirror.target new file mode 100644 index 0000000..7c67ce9 --- /dev/null +++ b/roles/ceph-rbd-mirror/files/ceph-rbd-mirror.target @@ -0,0 +1,7 @@ +[Unit] +Description=ceph target allowing to start/stop all ceph-rbd-mirror@.service instances at once +PartOf=ceph.target +Before=ceph.target + +[Install] +WantedBy=multi-user.target ceph.target \ No newline at end of file diff --git a/roles/ceph-rbd-mirror/meta/main.yml b/roles/ceph-rbd-mirror/meta/main.yml new file mode 100644 index 0000000..a5b1f23 --- /dev/null +++ b/roles/ceph-rbd-mirror/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Installs Ceph Mirror Agent + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + 
versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-rbd-mirror/tasks/configure_mirroring.yml b/roles/ceph-rbd-mirror/tasks/configure_mirroring.yml new file mode 100644 index 0000000..f4bb33d --- /dev/null +++ b/roles/ceph-rbd-mirror/tasks/configure_mirroring.yml @@ -0,0 +1,170 @@ +--- +- name: Cephx tasks + when: + - cephx | bool + block: + - name: Get client.bootstrap-rbd-mirror from ceph monitor + ceph_key_info: + name: client.bootstrap-rbd-mirror + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _bootstrap_rbd_mirror_key + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Ensure /var/lib/ceph/bootstrap-rbd-mirror exists + ansible.builtin.file: + path: /var/lib/ceph/bootstrap-rbd-mirror + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0750" + + - name: Copy ceph key(s) + ansible.builtin.copy: + dest: "/var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring" + content: "{{ _bootstrap_rbd_mirror_key.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Create rbd-mirror keyrings + ceph_key: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + user: client.admin + user_key: "/etc/ceph/{{ cluster }}.client.admin.keyring" + caps: + mon: "profile rbd-mirror" + osd: "profile rbd" + dest: "{{ item.dest }}" + secret: "{{ item.secret | default(omit) }}" + import_key: true + owner: "{{ ceph_uid if 
containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + loop: + - { name: "client.rbd-mirror.{{ ansible_facts['hostname'] }}", + dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring" } + - { name: "{{ ceph_rbd_mirror_local_user }}", + dest: "/etc/ceph/{{ cluster }}.{{ ceph_rbd_mirror_local_user }}.keyring", + secret: "{{ ceph_rbd_mirror_local_user_secret | default('') }}" } + + - name: Get client.rbd-mirror keyring from ceph monitor + ceph_key_info: + name: "client.rbd-mirror.{{ ansible_facts['hostname'] }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _rbd_mirror_key + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + no_log: "{{ no_log_on_ceph_key_tasks }}" + + - name: Copy ceph key + ansible.builtin.copy: + dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring" + content: "{{ _rbd_mirror_key.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + no_log: false + +- name: Start and add the rbd-mirror service instance + ansible.builtin.service: + name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" + state: started + enabled: true + masked: false + changed_when: false + 
when: + - not containerized_deployment | bool + - ceph_rbd_mirror_remote_user is defined + +- name: Set_fact ceph_rbd_mirror_pools + ansible.builtin.set_fact: + ceph_rbd_mirror_pools: + - name: "{{ ceph_rbd_mirror_pool }}" + when: ceph_rbd_mirror_pools is undefined + +- name: Create pool if it doesn't exist + ceph_pool: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + pg_num: "{{ item.pg_num | default(omit) }}" + pgp_num: "{{ item.pgp_num | default(omit) }}" + size: "{{ item.size | default(omit) }}" + min_size: "{{ item.min_size | default(omit) }}" + pool_type: "{{ item.type | default('replicated') }}" + rule_name: "{{ item.rule_name | default(omit) }}" + erasure_profile: "{{ item.erasure_profile | default(omit) }}" + pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}" + target_size_ratio: "{{ item.target_size_ratio | default(omit) }}" + application: "{{ item.application | default('rbd') }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + loop: "{{ ceph_rbd_mirror_pools }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + +- name: Enable mirroring on the pool + ansible.builtin.command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool enable {{ item.name }} {{ ceph_rbd_mirror_mode }}" + register: result + changed_when: false + retries: 60 + delay: 1 + until: result is succeeded + loop: "{{ ceph_rbd_mirror_pools }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + +- name: Add mirroring peer + when: ceph_rbd_mirror_remote_user is defined + block: + - name: List mirroring peer + ansible.builtin.command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool info {{ item.name }}" + changed_when: false + register: mirror_peer + loop: "{{ ceph_rbd_mirror_pools }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Create a temporary file + ansible.builtin.tempfile: + 
path: /etc/ceph + state: file + suffix: _ceph-ansible + register: tmp_file + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Write secret to temporary file + ansible.builtin.copy: + dest: "{{ tmp_file.path }}" + content: "{{ ceph_rbd_mirror_remote_key }}" + mode: "0644" + delegate_to: "{{ groups[mon_group_name][0] }}" + + - name: Add a mirroring peer + ansible.builtin.command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool peer add {{ item.item.name }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ceph_rbd_mirror_remote_mon_hosts }} --remote-key-file {{ tmp_file.path }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + loop: "{{ mirror_peer.results }}" + run_once: true + when: ceph_rbd_mirror_remote_user not in item.stdout + + - name: Rm temporary file + ansible.builtin.file: + path: "{{ tmp_file.path }}" + state: absent + delegate_to: "{{ groups[mon_group_name][0] }}" diff --git a/roles/ceph-rbd-mirror/tasks/main.yml b/roles/ceph-rbd-mirror/tasks/main.yml new file mode 100644 index 0000000..9a51b0c --- /dev/null +++ b/roles/ceph-rbd-mirror/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Non-containerized related tasks + when: + - not containerized_deployment | bool + - ceph_rbd_mirror_remote_user is defined + block: + - name: Install dependencies + ansible.builtin.package: + name: rbd-mirror + state: present + register: result + until: result is succeeded + tags: package-install + + - name: Ensure systemd service override directory exists + ansible.builtin.file: + state: directory + path: "/etc/systemd/system/ceph-rbd-mirror@.service.d/" + mode: "0755" + when: + - ceph_rbd_mirror_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + + - name: Add ceph-rbd-mirror systemd service overrides + openstack.config_template.config_template: + src: "ceph-rbd-mirror.service.d-overrides.j2" + dest: 
"/etc/systemd/system/ceph-rbd-mirror@.service.d/ceph-rbd-mirror-systemd-overrides.conf" + config_overrides: "{{ ceph_rbd_mirror_systemd_overrides | default({}) }}" + config_type: "ini" + when: + - ceph_rbd_mirror_systemd_overrides is defined + - ansible_facts['service_mgr'] == 'systemd' + + - name: Enable ceph-rbd-mirror.target + ansible.builtin.systemd: + name: "ceph-rbd-mirror.target" + state: started + enabled: true + masked: false + changed_when: false + +- name: Set_fact rbd_cmd + ansible.builtin.set_fact: + rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rbd' }}" + +- name: Include configure_mirroring.yml + ansible.builtin.include_tasks: configure_mirroring.yml + +- name: Include start_container_rbd_mirror.yml + ansible.builtin.include_tasks: start_container_rbd_mirror.yml + when: + - containerized_deployment | bool + - ceph_rbd_mirror_remote_user is defined diff --git a/roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml b/roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml new file mode 100644 index 0000000..51d80e1 --- /dev/null +++ b/roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml @@ -0,0 +1,12 @@ +--- +# Use systemd to manage container on Atomic host +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml + +- name: Systemd start rbd mirror container + ansible.builtin.systemd: + name: ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }} + state: started + enabled: true + masked: false + daemon_reload: true diff --git a/roles/ceph-rbd-mirror/tasks/systemd.yml b/roles/ceph-rbd-mirror/tasks/systemd.yml new file mode 100644 index 0000000..6e9d987 --- /dev/null +++ b/roles/ceph-rbd-mirror/tasks/systemd.yml @@ -0,0 +1,23 @@ +--- +- name: Generate systemd unit file + 
ansible.builtin.template: + src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2" + dest: /etc/systemd/system/ceph-rbd-mirror@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph rbdmirrors + +- name: Generate systemd ceph-rbd-mirror target file + ansible.builtin.copy: + src: ceph-rbd-mirror.target + dest: /etc/systemd/system/ceph-rbd-mirror.target + mode: "0644" + when: containerized_deployment | bool + +- name: Enable ceph-rbd-mirror.target + ansible.builtin.service: + name: ceph-rbd-mirror.target + enabled: true + daemon_reload: true + when: containerized_deployment | bool diff --git a/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.d-overrides.j2 b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.d-overrides.j2 new file mode 100644 index 0000000..e2bb153 --- /dev/null +++ b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.d-overrides.j2 @@ -0,0 +1 @@ +# {{ ansible_managed }} diff --git a/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 new file mode 100644 index 0000000..259a2f7 --- /dev/null +++ b/roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2 @@ -0,0 +1,57 @@ +[Unit] +Description=Ceph RBD mirror +PartOf=ceph-rbd-mirror.target +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target + +[Service] +EnvironmentFile=-/etc/environment +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rbd-mirror-{{ ansible_facts['hostname'] }} +ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph +{% else %} +ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }} +{% endif %} 
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_facts['hostname'] }} +ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} + --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ + --memory={{ ceph_rbd_mirror_docker_memory_limit }} \ + --cpus={{ ceph_rbd_mirror_docker_cpu_limit }} \ + --security-opt label=disable \ + -v /var/lib/ceph/bootstrap-rbd-mirror:/var/lib/ceph/bootstrap-rbd-mirror:Z \ + -v /etc/ceph:/etc/ceph:z \ + -v /var/run/ceph:/var/run/ceph:z \ + -v /etc/localtime:/etc/localtime:ro \ + -v /var/log/ceph:/var/log/ceph:z \ + -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \ + --name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }} \ + {{ ceph_rbd_mirror_docker_extra_env }} \ + --entrypoint=/usr/bin/rbd-mirror \ + {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ + -f -n client.rbd-mirror.{{ ansible_facts['hostname'] }} +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }} +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=ceph.target diff --git a/roles/ceph-rgw-loadbalancer/defaults/main.yml b/roles/ceph-rgw-loadbalancer/defaults/main.yml new file mode 100644 index 0000000..256a4f1 --- /dev/null +++ b/roles/ceph-rgw-loadbalancer/defaults/main.yml @@ -0,0 +1,26 @@ +--- +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +haproxy_frontend_port: 80 +haproxy_frontend_ssl_port: 443 +haproxy_frontend_ssl_certificate: +haproxy_ssl_dh_param: 4096 
+haproxy_ssl_ciphers: + - EECDH+AESGCM + - EDH+AESGCM +haproxy_ssl_options: + - no-sslv3 + - no-tlsv10 + - no-tlsv11 + - no-tls-tickets +# +# virtual_ips: +# - 192.168.238.250 +# - 192.168.238.251 +# +# virtual_ip_netmask: 24 +# virtual_ip_interface: ens33 diff --git a/roles/ceph-rgw-loadbalancer/handlers/main.yml b/roles/ceph-rgw-loadbalancer/handlers/main.yml new file mode 100644 index 0000000..a68c0e2 --- /dev/null +++ b/roles/ceph-rgw-loadbalancer/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: Restart haproxy + ansible.builtin.service: + name: haproxy + state: restarted + +- name: Restart keepalived + ansible.builtin.service: + name: keepalived + state: restarted diff --git a/roles/ceph-rgw-loadbalancer/meta/main.yml b/roles/ceph-rgw-loadbalancer/meta/main.yml new file mode 100644 index 0000000..624af34 --- /dev/null +++ b/roles/ceph-rgw-loadbalancer/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: Gui Hecheng + description: Config HAProxy & Keepalived + license: Apache + min_ansible_version: '2.8' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-rgw-loadbalancer/tasks/main.yml b/roles/ceph-rgw-loadbalancer/tasks/main.yml new file mode 100644 index 0000000..53eb108 --- /dev/null +++ b/roles/ceph-rgw-loadbalancer/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: Include_tasks pre_requisite.yml + ansible.builtin.include_tasks: pre_requisite.yml + +- name: Include_tasks start_rgw_loadbalancer.yml + ansible.builtin.include_tasks: start_rgw_loadbalancer.yml diff --git a/roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml b/roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml new file mode 100644 index 0000000..37bced4 --- /dev/null +++ b/roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml @@ -0,0 +1,50 @@ +--- +- name: Install haproxy and keepalived + ansible.builtin.package: + name: ['haproxy', 'keepalived'] + state: present + register: result + until: result is succeeded + +- name: Generate 
haproxy configuration file haproxy.cfg + ansible.builtin.template: + src: haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg + owner: "root" + group: "root" + mode: "0644" + validate: "haproxy -f %s -c" + notify: Restart haproxy + +- name: Set_fact vip to vrrp_instance + ansible.builtin.set_fact: + vrrp_instances: "{{ vrrp_instances | default([]) | union([{'name': 'VI_' + index | string, 'vip': item, 'master': groups[rgwloadbalancer_group_name][index]}]) }}" + loop: "{{ virtual_ips | flatten(levels=1) }}" + loop_control: + index_var: index + +- name: Generate keepalived configuration file keepalived.conf + ansible.builtin.template: + src: keepalived.conf.j2 + dest: /etc/keepalived/keepalived.conf + owner: "root" + group: "root" + mode: "0644" + notify: Restart keepalived + +- name: Selinux related tasks + when: + - ansible_facts['os_family'] == 'RedHat' + - ansible_facts['selinux']['status'] == 'enabled' + block: + - name: Set_fact rgw_ports + ansible.builtin.set_fact: + rgw_ports: "{{ rgw_ports | default([]) | union(hostvars[item]['rgw_instances'] | map(attribute='radosgw_frontend_port') | map('string') | list) }}" + with_items: "{{ groups.get(rgw_group_name, []) }}" + + - name: Add selinux rules + community.general.seport: + ports: "{{ rgw_ports }}" + proto: tcp + setype: http_port_t + state: present diff --git a/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml b/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml new file mode 100644 index 0000000..218fc3c --- /dev/null +++ b/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml @@ -0,0 +1,12 @@ +--- +- name: Start haproxy + ansible.builtin.service: + name: haproxy + state: started + enabled: true + +- name: Start keepalived + ansible.builtin.service: + name: keepalived + state: started + enabled: true diff --git a/roles/ceph-rgw-loadbalancer/templates/haproxy.cfg.j2 b/roles/ceph-rgw-loadbalancer/templates/haproxy.cfg.j2 new file mode 100644 index 0000000..47dac09 --- /dev/null +++ 
b/roles/ceph-rgw-loadbalancer/templates/haproxy.cfg.j2 @@ -0,0 +1,63 @@ +# {{ ansible_managed }} +global + log 127.0.0.1 local2 + + chroot /var/lib/haproxy + pidfile /var/run/haproxy.pid + maxconn 8000 + user haproxy + group haproxy + daemon + stats socket /var/lib/haproxy/stats +{% if haproxy_frontend_ssl_certificate %} + tune.ssl.default-dh-param {{ haproxy_ssl_dh_param }} + ssl-default-bind-ciphers {{ haproxy_ssl_ciphers | join(':') }} + ssl-default-bind-options {{ haproxy_ssl_options | join(' ') }} +{% endif %} +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option forwardfor except 127.0.0.0/8 + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 8000 + +frontend rgw-frontend +{% if haproxy_frontend_ssl_certificate %} + bind *:{{ haproxy_frontend_ssl_port }} ssl crt {{ haproxy_frontend_ssl_certificate }} +{% else %} + bind *:{{ haproxy_frontend_port }} +{% endif %} + default_backend rgw-backend + +# when running in an selinux environment, selinux restricts the ports that haproxy can +# connect to to: +# * 80, 81, 443, 488, 8008, 8009, 8443, 9000 (http_port_t) and, +# * 8080, 8118, 8123, 10001-10010 (http_cache_port_t) +# +# Practically speaking, it would be preferable (and perhaps easier) to configure the +# rgw daemons to listen on ports 10001-10010 and configure haproxy here to match. +# +# Alternatively you can add other unused ports to http_port_t or http_cache_port_t +# with, e.g.: `semanage port -a -t http_cache_port_t -p tcp 8085` +# (Note that ports 8081-8084 are already taken and can't be used for haproxy.) 
+# +backend rgw-backend + option forwardfor + balance static-rr + option httpchk HEAD / +{% for host in groups[rgw_group_name] %} +{% for instance in hostvars[host]['rgw_instances'] %} + server {{ 'server-' + hostvars[host]['ansible_facts']['hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100 check +{% endfor %} +{% endfor %} diff --git a/roles/ceph-rgw-loadbalancer/templates/keepalived.conf.j2 b/roles/ceph-rgw-loadbalancer/templates/keepalived.conf.j2 new file mode 100644 index 0000000..0c9378d --- /dev/null +++ b/roles/ceph-rgw-loadbalancer/templates/keepalived.conf.j2 @@ -0,0 +1,35 @@ +# {{ ansible_managed }} +! Configuration File for keepalived + +global_defs { + router_id CEPH_RGW +} + +vrrp_script check_haproxy { + script "killall -0 haproxy" + weight -20 + interval 2 + rise 2 + fall 2 +} + +{% for instance in vrrp_instances %} +vrrp_instance {{ instance['name'] }} { + state {{ 'MASTER' if inventory_hostname == instance['master'] else 'BACKUP' }} + priority {{ '100' if inventory_hostname == instance['master'] else '90' }} + interface {{ virtual_ip_interface }} + virtual_router_id {{ 50 + loop.index }} + advert_int 1 + authentication { + auth_type PASS + auth_pass 1234 + } + virtual_ipaddress { + {{ instance['vip'] }}/{{ virtual_ip_netmask }} dev {{ virtual_ip_interface }} + } + track_script { + check_haproxy + } +} + +{% endfor %} diff --git a/roles/ceph-rgw/LICENSE b/roles/ceph-rgw/LICENSE new file mode 100644 index 0000000..acee72b --- /dev/null +++ b/roles/ceph-rgw/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2014] [Sébastien Han] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/ceph-rgw/README.md b/roles/ceph-rgw/README.md new file mode 100644 index 0000000..d8410a4 --- /dev/null +++ b/roles/ceph-rgw/README.md @@ -0,0 +1,3 @@ +# Ansible role: ceph-rgw + +Documentation is available at http://docs.ceph.com/ceph-ansible/. diff --git a/roles/ceph-rgw/defaults/main.yml b/roles/ceph-rgw/defaults/main.yml new file mode 100644 index 0000000..4e16074 --- /dev/null +++ b/roles/ceph-rgw/defaults/main.yml @@ -0,0 +1,97 @@ +--- +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +# Even though RGW nodes should not have the admin key +# at their disposal, some people might want to have it +# distributed on RGW nodes. 
Setting 'copy_admin_key' to 'true' +# will copy the admin key to the /etc/ceph/ directory +copy_admin_key: false + +########## +# TUNING # +########## + +# Declaring rgw_create_pools will create pools with the given number of pgs, +# size, and type. The following are some important notes on this automatic +# pool creation: +# - The pools and associated pg_num's below are merely examples of pools that +# could be automatically created when rgws are deployed. +# - The default pg_num is 8 (from osd_pool_default_pg_num) for pool created +# if rgw_create_pools isn't declared and configured. +# - A pgcalc tool should be used to determine the optimal sizes for +# the rgw.buckets.data, rgw.buckets.index pools as well as any other +# pools declared in this dictionary. +# https://ceph.io/pgcalc is the upstream pgcalc tool +# https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by +# Red Hat if you are using RHCS. +# - The default value of {{ rgw_zone }} is 'default'. +# - The type must be set as either 'replicated' or 'ec' for +# each pool. +# - If a pool's type is 'ec', k and m values must be set via +# the ec_k, and ec_m variables. +# - The rule_name key can be used with a specific crush rule value (must exist). +# If the key doesn't exist it falls back to the default replicated_rule. +# This only works for replicated pool type not erasure. 
+ +# rgw_create_pools: +# "{{ rgw_zone }}.rgw.buckets.data": +# pg_num: 64 +# type: ec +# ec_profile: myecprofile +# ec_k: 5 +# ec_m: 3 +# "{{ rgw_zone }}.rgw.buckets.index": +# pg_num: 16 +# size: 3 +# type: replicated +# "{{ rgw_zone }}.rgw.meta": +# pg_num: 8 +# size: 3 +# type: replicated +# "{{ rgw_zone }}.rgw.log": +# pg_num: 8 +# size: 3 +# type: replicated +# "{{ rgw_zone }}.rgw.control": +# pg_num: 8 +# size: 3 +# type: replicated +# rule_name: foo + + +########## +# DOCKER # +########## + +# Resource limitation +# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints +# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations +ceph_rgw_docker_memory_limit: "4096m" +ceph_rgw_docker_cpu_limit: 8 +# ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" +# ceph_rgw_docker_cpuset_mems: "0" + +ceph_config_keys: [] # DON'T TOUCH ME +rgw_config_keys: "/" # DON'T TOUCH ME +# If you want to add parameters, you should retain the existing ones and include the new ones. +ceph_rgw_container_params: + volumes: + - /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}:z + args: + - -f + - -n=client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME} + - -k=/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.${INST_NAME}/keyring + +########### +# SYSTEMD # +########### +# ceph_rgw_systemd_overrides will override the systemd settings +# for the ceph-rgw services. 
+# For example,to set "PrivateDevices=false" you can specify: +# ceph_rgw_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-rgw/files/ceph-radosgw.target b/roles/ceph-rgw/files/ceph-radosgw.target new file mode 100644 index 0000000..3f1b1c8 --- /dev/null +++ b/roles/ceph-rgw/files/ceph-radosgw.target @@ -0,0 +1,9 @@ +[Unit] +Description=ceph target allowing to start/stop all ceph-radosgw@.service instances at once +PartOf=ceph.target +After=ceph-mon.target +Before=ceph.target +Wants=ceph.target ceph-mon.target + +[Install] +WantedBy=multi-user.target ceph.target \ No newline at end of file diff --git a/roles/ceph-rgw/handlers/main.yml b/roles/ceph-rgw/handlers/main.yml new file mode 100644 index 0000000..54d24b7 --- /dev/null +++ b/roles/ceph-rgw/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart rgw + ansible.builtin.service: + name: "ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" + state: restarted + with_items: "{{ rgw_instances }}" diff --git a/roles/ceph-rgw/meta/main.yml b/roles/ceph-rgw/meta/main.yml new file mode 100644 index 0000000..096ccda --- /dev/null +++ b/roles/ceph-rgw/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Sébastien Han + description: Installs Ceph Rados Gateway + license: Apache + min_ansible_version: '2.7' + platforms: + - name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-rgw/tasks/common.yml b/roles/ceph-rgw/tasks/common.yml new file mode 100644 index 0000000..17cf221 --- /dev/null +++ b/roles/ceph-rgw/tasks/common.yml @@ -0,0 +1,44 @@ +--- +- name: Get keys from monitors + ceph_key_info: + name: "{{ item.name }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ 
container_binary }}" + register: _rgw_keys + with_items: + - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true } + - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" } + delegate_to: "{{ groups.get(mon_group_name)[0] }}" + run_once: true + when: + - cephx | bool + - item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "{{ item.item.path }}" + content: "{{ item.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + with_items: "{{ _rgw_keys.results }}" + when: + - cephx | bool + - item is not skipped + - item.item.copy_key | bool + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Copy SSL certificate & key data to certificate path + ansible.builtin.copy: + content: "{{ radosgw_frontend_ssl_certificate_data }}" + dest: "{{ radosgw_frontend_ssl_certificate }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0440" + when: radosgw_frontend_ssl_certificate | length > 0 and radosgw_frontend_ssl_certificate_data | length > 0 + notify: Restart ceph rgws diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml new file mode 100644 index 0000000..e5214e6 --- /dev/null +++ b/roles/ceph-rgw/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Include common.yml + ansible.builtin.include_tasks: common.yml + +- name: Include_tasks pre_requisite.yml + ansible.builtin.include_tasks: pre_requisite.yml + +- name: Rgw pool creation tasks + ansible.builtin.include_tasks: rgw_create_pools.yml + run_once: true + when: rgw_create_pools is defined + +- name: Include_tasks openstack-keystone.yml + ansible.builtin.include_tasks: 
openstack-keystone.yml + when: radosgw_keystone_ssl | bool + +- name: Include_tasks start_radosgw.yml + ansible.builtin.include_tasks: start_radosgw.yml + when: + - not containerized_deployment | bool + +- name: Include start_docker_rgw.yml + ansible.builtin.include_tasks: start_docker_rgw.yml + when: + - containerized_deployment | bool diff --git a/roles/ceph-rgw/tasks/openstack-keystone.yml b/roles/ceph-rgw/tasks/openstack-keystone.yml new file mode 100644 index 0000000..61fbf88 --- /dev/null +++ b/roles/ceph-rgw/tasks/openstack-keystone.yml @@ -0,0 +1,32 @@ +--- +- name: Install nss-tools on redhat + ansible.builtin.package: + name: nss-tools + state: present + register: result + until: result is succeeded + when: ansible_facts['pkg_mgr'] == 'yum' or ansible_facts['pkg_mgr'] == 'dnf' + +- name: Install libnss3-tools on debian + ansible.builtin.package: + name: libnss3-tools + state: present + register: result + until: result is succeeded + when: ansible_facts['pkg_mgr'] == 'apt' + +- name: Create nss directory for keystone certificates + ansible.builtin.file: + path: "{{ radosgw_nss_db_path }}" + state: directory + owner: root + group: root + mode: "0644" + +- name: Create nss entries for keystone certificates + ansible.builtin.shell: "{{ item }}" + changed_when: false + with_items: + - "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | certutil -d {{ radosgw_nss_db_path }} -A -n ca -t 'TCu,Cu,Tuw'" + - "openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | certutil -A -d {{ radosgw_nss_db_path }} -n signing_cert -t 'P,P,P'" + tags: skip_ansible_lint diff --git a/roles/ceph-rgw/tasks/pre_requisite.yml b/roles/ceph-rgw/tasks/pre_requisite.yml new file mode 100644 index 0000000..1df2b65 --- /dev/null +++ b/roles/ceph-rgw/tasks/pre_requisite.yml @@ -0,0 +1,64 @@ +--- +- name: Create rados gateway directories + ansible.builtin.file: + path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ 
item.instance_name }}" + state: directory + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_directories_mode }}" + delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" + loop: "{{ rgw_instances }}" + when: groups.get(mon_group_name, []) | length > 0 + +- name: Create rgw keyrings + ceph_key: + name: "client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" + cluster: "{{ cluster }}" + user: "client.bootstrap-rgw" + user_key: /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring + dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/keyring" + caps: + osd: 'allow rwx' + mon: 'allow rw' + import_key: "{{ True if groups.get(mon_group_name, []) | length > 0 else False }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0600" + no_log: "{{ no_log_on_ceph_key_tasks }}" + delegate_to: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else 'localhost' }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + with_items: "{{ rgw_instances }}" + when: cephx | bool + +- name: Get keys from monitors + ceph_key_info: + name: "client.rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" + cluster: "{{ cluster }}" + output_format: plain + state: info + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + register: _rgw_keys + loop: "{{ rgw_instances }}" + delegate_to: "{{ groups.get(mon_group_name)[0] }}" 
+ when: + - cephx | bool + - groups.get(mon_group_name, []) | length > 0 + no_log: "{{ no_log_on_ceph_key_tasks }}" + +- name: Copy ceph key(s) if needed + ansible.builtin.copy: + dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.item.instance_name }}/keyring" + content: "{{ item.stdout + '\n' }}" + owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "{{ ceph_keyring_permissions }}" + with_items: "{{ _rgw_keys.results }}" + when: + - cephx | bool + - item is not skipped + - groups.get(mon_group_name, []) | length > 0 + no_log: "{{ no_log_on_ceph_key_tasks }}" diff --git a/roles/ceph-rgw/tasks/rgw_create_pools.yml b/roles/ceph-rgw/tasks/rgw_create_pools.yml new file mode 100644 index 0000000..b529a0a --- /dev/null +++ b/roles/ceph-rgw/tasks/rgw_create_pools.yml @@ -0,0 +1,52 @@ +--- +- name: Create ec profile + ceph_ec_profile: + name: "{{ item.value.ec_profile }}" + cluster: "{{ cluster }}" + k: "{{ item.value.ec_k | default(omit) }}" + m: "{{ item.value.ec_m | default(omit) }}" + crush_device_class: "{{ item.value.ec_crush_device_class | default(omit) }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + loop: "{{ rgw_create_pools | dict2items }}" + when: + - item.value.create_profile | default(true) + - item.value.type | default('') == 'ec' + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" + +- name: Set crush rule + ceph_crush_rule: + name: "{{ item.key }}" + cluster: "{{ cluster }}" + rule_type: erasure + profile: "{{ item.value.ec_profile }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ 
container_binary }}" + loop: "{{ rgw_create_pools | dict2items }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + when: + - item.value.create_profile | default(true) + - item.value.type | default('') == 'ec' + +- name: Create rgw pools + ceph_pool: + name: "{{ item.key }}" + state: present + cluster: "{{ cluster }}" + pg_num: "{{ item.value.pg_num | default(omit) }}" + pgp_num: "{{ item.value.pgp_num | default(omit) }}" + size: "{{ item.value.size | default(omit) }}" + min_size: "{{ item.value.min_size | default(omit) }}" + pg_autoscale_mode: "{{ item.value.pg_autoscale_mode | default(omit) }}" + target_size_ratio: "{{ item.value.target_size_ratio | default(omit) }}" + pool_type: "{{ 'erasure' if item.value.type | default('') == 'ec' else 'replicated' }}" + erasure_profile: "{{ item.value.ec_profile | default(omit) }}" + rule_name: "{{ item.value.rule_name if item.value.rule_name is defined else item.key if item.value.type | default('') == 'ec' else ceph_osd_pool_default_crush_rule_name }}" + application: rgw + loop: "{{ rgw_create_pools | dict2items }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + environment: + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" diff --git a/roles/ceph-rgw/tasks/start_docker_rgw.yml b/roles/ceph-rgw/tasks/start_docker_rgw.yml new file mode 100644 index 0000000..c9df6c7 --- /dev/null +++ b/roles/ceph-rgw/tasks/start_docker_rgw.yml @@ -0,0 +1,12 @@ +--- +- name: Include_task systemd.yml + ansible.builtin.include_tasks: systemd.yml + +- name: Systemd start rgw container + ansible.builtin.systemd: + name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} + state: started + enabled: true + masked: false + daemon_reload: true + with_items: "{{ rgw_instances }}" diff --git a/roles/ceph-rgw/tasks/start_radosgw.yml 
b/roles/ceph-rgw/tasks/start_radosgw.yml new file mode 100644 index 0000000..5de184f --- /dev/null +++ b/roles/ceph-rgw/tasks/start_radosgw.yml @@ -0,0 +1,29 @@ +--- +- name: Ensure systemd service override directory exists + ansible.builtin.file: + state: directory + path: "/etc/systemd/system/ceph-radosgw@.service.d/" + mode: "0750" + when: ceph_rgw_systemd_overrides is defined + +- name: Add ceph-rgw systemd service overrides + openstack.config_template.config_template: + src: "ceph-rgw.service.d-overrides.j2" + dest: "/etc/systemd/system/ceph-radosgw@.service.d/ceph-radosgw-systemd-overrides.conf" + config_overrides: "{{ ceph_rgw_systemd_overrides | default({}) }}" + config_type: "ini" + when: ceph_rgw_systemd_overrides is defined + +- name: Start rgw instance + ansible.builtin.systemd: + name: ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} + state: started + enabled: true + masked: false + with_items: "{{ rgw_instances }}" + +- name: Enable the ceph-radosgw.target service + ansible.builtin.systemd: + name: ceph-radosgw.target + enabled: true + masked: false diff --git a/roles/ceph-rgw/tasks/systemd.yml b/roles/ceph-rgw/tasks/systemd.yml new file mode 100644 index 0000000..baca5af --- /dev/null +++ b/roles/ceph-rgw/tasks/systemd.yml @@ -0,0 +1,23 @@ +--- +- name: Generate systemd unit file + ansible.builtin.template: + src: "{{ role_path }}/templates/ceph-radosgw.service.j2" + dest: /etc/systemd/system/ceph-radosgw@.service + owner: "root" + group: "root" + mode: "0644" + notify: Restart ceph rgws + +- name: Generate systemd ceph-radosgw target file + ansible.builtin.copy: + src: ceph-radosgw.target + dest: /etc/systemd/system/ceph-radosgw.target + mode: "0644" + when: containerized_deployment | bool + +- name: Enable ceph-radosgw.target + ansible.builtin.service: + name: ceph-radosgw.target + enabled: true + daemon_reload: true + when: containerized_deployment | bool diff --git 
a/roles/ceph-rgw/templates/ceph-radosgw.service.j2 b/roles/ceph-rgw/templates/ceph-radosgw.service.j2 new file mode 100644 index 0000000..38fd968 --- /dev/null +++ b/roles/ceph-rgw/templates/ceph-radosgw.service.j2 @@ -0,0 +1,69 @@ +[Unit] +Description=Ceph RGW +PartOf=ceph-radosgw.target +{% if container_binary == 'docker' %} +After=docker.service network-online.target local-fs.target time-sync.target +Requires=docker.service +{% else %} +After=network-online.target local-fs.target time-sync.target +{% endif %} +Wants=network-online.target local-fs.target time-sync.target +{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_rgw_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_rgw_docker_cpu_limit|int %} + +[Service] +EnvironmentFile=/var/lib/ceph/radosgw/{{ cluster }}-%i/EnvironmentFile +{% if container_binary == 'podman' %} +ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid +ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME} +ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph +{% else %} +ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME} +{% endif %} +ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME} +ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} + --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ + --memory={{ ceph_rgw_docker_memory_limit }} \ + --cpus={{ cpu_limit }} \ + --security-opt label=disable \ + {% if ceph_rgw_docker_cpuset_cpus is defined -%} + --cpuset-cpus="{{ ceph_rgw_docker_cpuset_cpus }}" \ + {% endif -%} + {% if ceph_rgw_docker_cpuset_mems is defined -%} + --cpuset-mems="{{ ceph_rgw_docker_cpuset_mems }}" \ + {% endif -%} +{% for v in 
ceph_common_container_params['volumes'] + ceph_rgw_container_params['volumes'] | default([]) %} + -v {{ v }} \ +{% endfor %} +{% for k, v in (ceph_common_container_params['envs'] | combine(ceph_rgw_container_params['envs'] | default({}))).items() %} + -e {{ k }}={{ v }} \ +{% endfor %} +{% if ansible_facts['os_family'] == 'RedHat' -%} + -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted \ +{% endif -%} +{% if radosgw_frontend_ssl_certificate -%} + -v {{ radosgw_frontend_ssl_certificate }}:{{ radosgw_frontend_ssl_certificate }} \ +{% endif -%} + --name=ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME} \ + --entrypoint=/usr/bin/radosgw \ + {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ + {{ (ceph_common_container_params['args'] + ceph_rgw_container_params['args'] | default([])) | join(' ') }} +{% if container_binary == 'podman' %} +ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`" +{% else %} +ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ rgw_zone }}-{{ ansible_facts['hostname'] }}-${INST_NAME} +{% endif %} +KillMode=none +Restart=always +RestartSec=10s +TimeoutStartSec=120 +TimeoutStopSec=15 +{% if container_binary == 'podman' %} +Type=forking +PIDFile=/%t/%n-pid +{% endif %} + +[Install] +WantedBy=ceph.target diff --git a/roles/ceph-rgw/templates/ceph-rgw.service.d-overrides.j2 b/roles/ceph-rgw/templates/ceph-rgw.service.d-overrides.j2 new file mode 100644 index 0000000..e2bb153 --- /dev/null +++ b/roles/ceph-rgw/templates/ceph-rgw.service.d-overrides.j2 @@ -0,0 +1 @@ +# {{ ansible_managed }} diff --git a/roles/ceph-validate/meta/main.yml b/roles/ceph-validate/meta/main.yml new file mode 100644 index 0000000..01254db --- /dev/null +++ b/roles/ceph-validate/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + company: Red Hat + author: Andrew Schoen + description: Validates Ceph config options + license: Apache + min_ansible_version: '2.7' + platforms: + - 
name: EL + versions: + - 'all' + galaxy_tags: + - system +dependencies: [] diff --git a/roles/ceph-validate/tasks/check_devices.yml b/roles/ceph-validate/tasks/check_devices.yml new file mode 100644 index 0000000..a774add --- /dev/null +++ b/roles/ceph-validate/tasks/check_devices.yml @@ -0,0 +1,115 @@ +--- +- name: Set_fact root_device + ansible.builtin.set_fact: + root_device: "{{ ansible_facts['mounts'] | selectattr('mount', 'match', '^/$') | map(attribute='device') | first }}" + +- name: Lvm_volumes variable's tasks related + when: + - lvm_volumes is defined + - lvm_volumes | length > 0 + block: + - name: Resolve devices in lvm_volumes + ansible.builtin.command: "readlink -f {{ item.data }}" + changed_when: false + register: _lvm_volumes_data_devices + with_items: "{{ lvm_volumes }}" + when: item.data_vg is undefined + + - name: Set_fact lvm_volumes_data_devices + ansible.builtin.set_fact: + lvm_volumes_data_devices: "{{ lvm_volumes_data_devices | default([]) + [item.stdout] }}" + with_items: "{{ _lvm_volumes_data_devices.results }}" + when: item.skipped is undefined + +- name: Fail if root_device is passed in lvm_volumes or devices + ansible.builtin.fail: + msg: "{{ root_device }} found in either lvm_volumes or devices variable" + when: root_device in lvm_volumes_data_devices | default([]) or root_device in devices | default([]) + +- name: Check devices are block devices + block: + - name: Get devices information + community.general.parted: + device: "{{ item }}" + unit: MiB + register: devices_parted + failed_when: false + with_items: + - "{{ devices | default([]) }}" + - "{{ dedicated_devices | default([]) }}" + - "{{ bluestore_wal_devices | default([]) }}" + - "{{ lvm_volumes_data_devices | default([]) }}" + + - name: Fail if one of the devices is not a device + ansible.builtin.fail: + msg: "{{ item.item }} is not a block special file!" 
+ when: item.rc is defined + with_items: "{{ devices_parted.results }}" + + - name: Fail when gpt header found on osd devices + ansible.builtin.fail: + msg: "{{ item.disk.dev }} has gpt header, please remove it." + with_items: "{{ devices_parted.results }}" + when: + - item.skipped is undefined + - item.disk.table == 'gpt' + - item.partitions | length == 0 + +- name: Check logical volume in lvm_volumes + when: lvm_volumes is defined + block: + - name: Check data logical volume + ansible.builtin.stat: + path: "/dev/{{ item.data_vg }}/{{ item.data }}" + follow: true + register: lvm_volumes_data + loop: "{{ lvm_volumes }}" + when: + - item.data is defined + - item.data_vg is defined + + - name: Fail if one of the data logical volume is not a device or doesn't exist + ansible.builtin.fail: + msg: "{{ item.item.data_vg }}/{{ item.item.data }} doesn't exist or isn't a block" + loop: "{{ lvm_volumes_data.results }}" + when: + - item.skipped is undefined + - not item.stat.exists | bool or not item.stat.isblk | bool + + - name: Check bluestore db logical volume + ansible.builtin.stat: + path: "/dev/{{ item.db_vg }}/{{ item.db }}" + follow: true + register: lvm_volumes_db + loop: "{{ lvm_volumes }}" + when: + - osd_objectstore == 'bluestore' + - item.db is defined + - item.db_vg is defined + + - name: Fail if one of the bluestore db logical volume is not a device or doesn't exist + ansible.builtin.fail: + msg: "{{ item.item.db_vg }}/{{ item.item.db }} doesn't exist or isn't a block" + loop: "{{ lvm_volumes_db.results }}" + when: + - item.skipped is undefined + - not item.stat.exists | bool or not item.stat.isblk | bool + + - name: Check bluestore wal logical volume + ansible.builtin.stat: + path: "/dev/{{ item.wal_vg }}/{{ item.wal }}" + follow: true + register: lvm_volumes_wal + loop: "{{ lvm_volumes }}" + when: + - osd_objectstore == 'bluestore' + - item.wal is defined + - item.wal_vg is defined + + - name: Fail if one of the bluestore wal logical volume is not a device or 
doesn't exist + ansible.builtin.fail: + msg: "{{ item.item.wal_vg }}/{{ item.item.wal }} doesn't exist or isn't a block" + loop: "{{ lvm_volumes_wal.results }}" + when: + - item.skipped is undefined + - not item.stat.exists | bool or not item.stat.isblk | bool diff --git a/roles/ceph-validate/tasks/check_eth_rgw.yml b/roles/ceph-validate/tasks/check_eth_rgw.yml new file mode 100644 index 0000000..8ad3374 --- /dev/null +++ b/roles/ceph-validate/tasks/check_eth_rgw.yml @@ -0,0 +1,24 @@ +--- +- name: Check if network interface exists + ansible.builtin.fail: + msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}" + when: radosgw_interface not in ansible_facts['interfaces'] + +- name: Check if network interface is active + ansible.builtin.fail: + msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}" + when: hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['active'] == "false" + +- name: Check if network interface has an IPv4 address + ansible.builtin.fail: + msg: "{{ radosgw_interface }} does not have any IPv4 address on {{ inventory_hostname }}" + when: + - ip_version == "ipv4" + - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv4'] is not defined + +- name: Check if network interface has an IPv6 address + ansible.builtin.fail: + msg: "{{ radosgw_interface }} does not have any IPv6 address on {{ inventory_hostname }}" + when: + - ip_version == "ipv6" + - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv6'] is not defined diff --git a/roles/ceph-validate/tasks/check_ipaddr_mon.yml b/roles/ceph-validate/tasks/check_ipaddr_mon.yml new file mode 100644 index 0000000..cf4cc71 --- /dev/null +++ b/roles/ceph-validate/tasks/check_ipaddr_mon.yml @@ -0,0 +1,5 @@ +--- +- name: Check if network interface has an IP address in public_network + ansible.builtin.fail: + msg: "{{ inventory_hostname }} does not have any {{ 
ip_version }} address on {{ public_network }}" + when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['public_network'].split(',')) | length == 0 diff --git a/roles/ceph-validate/tasks/check_nfs.yml b/roles/ceph-validate/tasks/check_nfs.yml new file mode 100644 index 0000000..2c26aa4 --- /dev/null +++ b/roles/ceph-validate/tasks/check_nfs.yml @@ -0,0 +1,15 @@ +--- +- name: Fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone) + ansible.builtin.fail: + msg: "ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key must be set if nfs_obj_gw is True" + when: + - nfs_obj_gw | bool + - groups.get(mon_group_name, []) | length == 0 + - (ceph_nfs_rgw_access_key is undefined or ceph_nfs_rgw_secret_key is undefined) + +- name: Fail on openSUSE Leap 15.x using distro packages + ansible.builtin.fail: + msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')" + when: + - ceph_origin == 'distro' + - ansible_facts['distribution'] == 'openSUSE Leap' diff --git a/roles/ceph-validate/tasks/check_pools.yml b/roles/ceph-validate/tasks/check_pools.yml new file mode 100644 index 0000000..0acf7c9 --- /dev/null +++ b/roles/ceph-validate/tasks/check_pools.yml @@ -0,0 +1,10 @@ +--- +- name: Fail if target_size_ratio is not set when pg_autoscale_mode is True + ansible.builtin.fail: + msg: "You must set a target_size_ratio value on following pool: {{ item.name }}." 
+ with_items: + - "{{ cephfs_pools | default([]) }}" + - "{{ pools | default([]) }}" + when: + - item.pg_autoscale_mode | default(False) | bool + - item.target_size_ratio is undefined diff --git a/roles/ceph-validate/tasks/check_rbdmirror.yml b/roles/ceph-validate/tasks/check_rbdmirror.yml new file mode 100644 index 0000000..59ac8ea --- /dev/null +++ b/roles/ceph-validate/tasks/check_rbdmirror.yml @@ -0,0 +1,12 @@ +--- +- name: Ensure ceph_rbd_mirror_pool is set + ansible.builtin.fail: + msg: "ceph_rbd_mirror_pool needs to be provided" + when: ceph_rbd_mirror_pool | default("") | length == 0 + +- name: Ensure ceph_rbd_mirror_remote_cluster is set + ansible.builtin.fail: + msg: "ceph_rbd_mirror_remote_cluster needs to be provided" + when: + - ceph_rbd_mirror_remote_cluster | default("") | length == 0 + - ceph_rbd_mirror_remote_user | default("") | length > 0 diff --git a/roles/ceph-validate/tasks/check_repository.yml b/roles/ceph-validate/tasks/check_repository.yml new file mode 100644 index 0000000..01067dc --- /dev/null +++ b/roles/ceph-validate/tasks/check_repository.yml @@ -0,0 +1,19 @@ +- name: Validate ceph_origin + ansible.builtin.fail: + msg: "ceph_origin must be either 'repository', 'distro' or 'local'" + when: ceph_origin not in ['repository', 'distro', 'local'] + +- name: Validate ceph_repository + ansible.builtin.fail: + msg: "ceph_repository must be either 'community', 'obs', 'dev', 'custom' or 'uca'" + when: + - ceph_origin == 'repository' + - ceph_repository not in ['community', 'obs', 'dev', 'custom', 'uca'] + +- name: Validate ceph_repository_community + ansible.builtin.fail: + msg: "ceph_stable_release must be 'squid'" + when: + - ceph_origin == 'repository' + - ceph_repository == 'community' + - ceph_stable_release not in ['squid'] diff --git a/roles/ceph-validate/tasks/check_rgw_pools.yml b/roles/ceph-validate/tasks/check_rgw_pools.yml new file mode 100644 index 0000000..73fbeb5 --- /dev/null +++ b/roles/ceph-validate/tasks/check_rgw_pools.yml @@ 
-0,0 +1,29 @@ +--- +- name: Fail if ec_profile is not set for ec pools + ansible.builtin.fail: + msg: "ec_profile must be set for ec pools" + loop: "{{ rgw_create_pools | dict2items }}" + when: + - item.value.type is defined + - item.value.type == 'ec' + - item.value.ec_profile is undefined + +- name: Fail if ec_k is not set for ec pools + ansible.builtin.fail: + msg: "ec_k must be set for ec pools" + loop: "{{ rgw_create_pools | dict2items }}" + when: + - item.value.type is defined + - item.value.type == 'ec' + - item.value.create_profile | default(true) + - item.value.ec_k is undefined + +- name: Fail if ec_m is not set for ec pools + ansible.builtin.fail: + msg: "ec_m must be set for ec pools" + loop: "{{ rgw_create_pools | dict2items }}" + when: + - item.value.type is defined + - item.value.type == 'ec' + - item.value.create_profile | default(true) + - item.value.ec_m is undefined diff --git a/roles/ceph-validate/tasks/check_system.yml b/roles/ceph-validate/tasks/check_system.yml new file mode 100644 index 0000000..da6cf1f --- /dev/null +++ b/roles/ceph-validate/tasks/check_system.yml @@ -0,0 +1,53 @@ +--- +- name: Fail on unsupported ansible version (1.X) + ansible.builtin.fail: + msg: "Ansible version must be >= 2.x, please update!" + when: ansible_version.major|int < 2 + +- name: Fail on unsupported ansible version + ansible.builtin.fail: + msg: "Ansible version must be either 2.15, 2.16 or 2.17!" 
+ when: ansible_version.minor|int not in [15, 16, 17] + +- name: Fail on unsupported system + ansible.builtin.fail: + msg: "System not supported {{ ansible_facts['system'] }}" + when: ansible_facts['system'] not in ['Linux'] + +- name: Fail on unsupported architecture + ansible.builtin.fail: + msg: "Architecture not supported {{ ansible_facts['architecture'] }}" + when: ansible_facts['architecture'] not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64'] + +- name: Fail on unsupported distribution + ansible.builtin.fail: + msg: "Distribution not supported {{ ansible_facts['os_family'] }}" + when: ansible_facts['os_family'] not in ['Debian', 'RedHat', 'ClearLinux', 'Suse'] + +- name: Fail on unsupported CentOS release + ansible.builtin.fail: + msg: "CentOS release {{ ansible_facts['distribution_major_version'] }} not supported with dashboard" + when: + - ansible_facts['distribution'] == 'CentOS' + - ansible_facts['distribution_major_version'] | int == 7 + - not containerized_deployment | bool + - dashboard_enabled | bool + +- name: Fail on unsupported distribution for ubuntu cloud archive + ansible.builtin.fail: + msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_facts['distribution'] }}" + when: + - ceph_repository == 'uca' + - ansible_facts['distribution'] != 'Ubuntu' + +- name: Fail on unsupported SUSE/openSUSE distribution (only 15.x supported) + ansible.builtin.fail: + msg: "Distribution not supported: {{ ansible_facts['distribution'] }} {{ ansible_facts['distribution_major_version'] }}" + when: + - ansible_facts['distribution'] == 'openSUSE Leap' or ansible_facts['distribution'] == 'SUSE' + - ansible_facts['distribution_major_version'] != '15' + +- name: Fail if systemd is not present + ansible.builtin.fail: + msg: "Systemd must be present" + when: ansible_facts['service_mgr'] != 'systemd' diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml new file mode 100644 index 0000000..885ffb3 --- /dev/null +++ 
b/roles/ceph-validate/tasks/main.yml @@ -0,0 +1,215 @@ +--- +- name: Include check_system.yml + ansible.builtin.include_tasks: check_system.yml + +- name: Validate repository variables in non-containerized scenario + ansible.builtin.include_tasks: check_repository.yml + when: not containerized_deployment | bool + +- name: Validate osd_objectstore + ansible.builtin.fail: + msg: "osd_objectstore must be 'bluestore''" + when: osd_objectstore not in ['bluestore'] + +- name: Validate radosgw network configuration + ansible.builtin.fail: + msg: "Either radosgw_address, radosgw_address_block or radosgw_interface must be provided" + when: + - rgw_group_name in group_names + - radosgw_address == 'x.x.x.x' + - radosgw_address_block == 'subnet' + - radosgw_interface == 'interface' + +- name: Validate osd nodes + when: osd_group_name in group_names + block: + - name: Validate lvm osd scenario + ansible.builtin.fail: + msg: 'devices or lvm_volumes must be defined for lvm osd scenario' + when: + - not osd_auto_discovery | default(false) | bool + - devices is undefined + - lvm_volumes is undefined + + - name: Validate bluestore lvm osd scenario + ansible.builtin.fail: + msg: 'data key must be defined in lvm_volumes' + when: + - osd_objectstore == 'bluestore' + - not osd_auto_discovery | default(false) | bool + - lvm_volumes is defined + - lvm_volumes | length > 0 + - item.data is undefined + with_items: '{{ lvm_volumes }}' + +- name: Debian based systems tasks + when: ansible_facts['os_family'] == 'Debian' + block: + - name: Fail if local scenario is enabled on debian + ansible.builtin.fail: + msg: "'local' installation scenario not supported on Debian systems" + when: ceph_origin == 'local' + + - name: Fail if rhcs repository is enabled on debian + ansible.builtin.fail: + msg: "RHCS isn't supported anymore on Debian distribution" + when: + - ceph_origin == 'repository' + - ceph_repository == 'rhcs' + +# SUSE/openSUSE Leap only supports the following: +# - ceph_origin == 'distro' 
+# - ceph_origin == 'repository' and ceph_repository == 'obs' +- name: SUSE/openSUSE Leap based system tasks + when: ansible_facts['os_family'] == 'Suse' + block: + - name: Check ceph_origin definition on SUSE/openSUSE Leap + ansible.builtin.fail: + msg: "Unsupported installation method origin:{{ ceph_origin }}" + when: ceph_origin not in ['distro', 'repository'] + + - name: Check ceph_repository definition on SUSE/openSUSE Leap + ansible.builtin.fail: + msg: "Unsupported installation method origin:{{ ceph_origin }} repo:{{ ceph_repository }}' + only valid combination is ceph_origin == 'repository' and ceph_repository == 'obs'" + when: + - ceph_origin == 'repository' + - ceph_repository != 'obs' + +- name: Validate ntp daemon type + ansible.builtin.fail: + msg: "ntp_daemon_type must be one of chronyd, ntpd, or timesyncd" + when: + - ntp_service_enabled | bool + - ntp_daemon_type not in ['chronyd', 'ntpd', 'timesyncd'] + +# Since NTPd can not be installed on Atomic... +- name: Abort if ntp_daemon_type is ntpd on Atomic + ansible.builtin.fail: + msg: installation can't happen on Atomic and ntpd needs to be installed + when: + - is_atomic | default(False) | bool + - ansible_facts['os_family'] == 'RedHat' + - ntp_daemon_type == 'ntpd' + +- name: Include check_devices.yml + ansible.builtin.include_tasks: check_devices.yml + when: + - osd_group_name in group_names + - not osd_auto_discovery | default(False) | bool + +- name: Include check_eth_rgw.yml + ansible.builtin.include_tasks: check_eth_rgw.yml + when: + - rgw_group_name in group_names + - radosgw_interface != "dummy" + - radosgw_address == "x.x.x.x" + - radosgw_address_block == "subnet" + +- name: Include check_rgw_pools.yml + ansible.builtin.include_tasks: check_rgw_pools.yml + when: + - inventory_hostname in groups.get(rgw_group_name, []) + - rgw_create_pools is defined + +- name: Include check_nfs.yml + ansible.builtin.include_tasks: check_nfs.yml + when: inventory_hostname in groups.get(nfs_group_name, []) + 
+- name: Include check_rbdmirror.yml + ansible.builtin.include_tasks: check_rbdmirror.yml + when: + - rbdmirror_group_name in group_names + - ceph_rbd_mirror_configure | default(false) | bool + +- name: Monitoring related tasks + when: dashboard_enabled | bool + block: + - name: Fail if monitoring group doesn't exist + ansible.builtin.fail: + msg: "you must add a monitoring group and add at least one node." + when: groups[monitoring_group_name] is undefined + + - name: Fail when monitoring doesn't contain at least one node. + ansible.builtin.fail: + msg: "you must add at least one node in the monitoring hosts group" + when: groups[monitoring_group_name] | length < 1 + + - name: Fail when dashboard_admin_password and/or grafana_admin_password are not set + ansible.builtin.fail: + msg: "you must set dashboard_admin_password and grafana_admin_password." + when: + - dashboard_admin_password is undefined + or grafana_admin_password is undefined + +- name: Validate container registry credentials + ansible.builtin.fail: + msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set' + when: + - ceph_docker_registry_auth | bool + - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or + (ceph_docker_registry_username | string | length == 0 or ceph_docker_registry_password | string | length == 0) + +- name: Validate container service and container package + ansible.builtin.fail: + msg: 'both container_package_name and container_service_name should be defined' + when: + - (container_package_name is undefined and container_service_name is defined) or + (container_package_name is defined and container_service_name is undefined) + +- name: Validate openstack_keys key format + ansible.builtin.fail: + msg: '{{ item.name }} key format invalid' + with_items: '{{ openstack_keys }}' + when: + - osd_group_name in group_names + - openstack_keys is defined + - openstack_keys | length > 0 + - item.key is 
defined + - item.key is not match("^[a-zA-Z0-9+/]{38}==$") + +- name: Validate clients keys key format + ansible.builtin.fail: + msg: '{{ item.name }} key format invalid' + with_items: '{{ keys }}' + when: + - client_group_name in group_names + - keys is defined + - keys | length > 0 + - item.key is defined + - item.key is not match("^[a-zA-Z0-9+/]{38}==$") + +- name: Validate openstack_keys caps + ansible.builtin.fail: + msg: '{{ item.name }} key has no caps defined' + with_items: '{{ openstack_keys }}' + when: + - osd_group_name in group_names + - openstack_keys is defined + - openstack_keys | length > 0 + - item.caps is not defined + +- name: Validate clients keys caps + ansible.builtin.fail: + msg: '{{ item.name }} key has no caps defined' + with_items: '{{ keys }}' + when: + - client_group_name in group_names + - keys is defined + - keys | length > 0 + - item.caps is not defined + +- name: Check virtual_ips is defined + ansible.builtin.fail: + msg: "virtual_ips is not defined." + when: + - rgwloadbalancer_group_name in group_names + - groups[rgwloadbalancer_group_name] | length > 0 + - virtual_ips is not defined + +- name: Validate virtual_ips length + ansible.builtin.fail: + msg: "There are more virual_ips defined than rgwloadbalancer nodes" + when: + - rgwloadbalancer_group_name in group_names + - (virtual_ips | length) > (groups[rgwloadbalancer_group_name] | length) diff --git a/site-container.yml.sample b/site-container.yml.sample new file mode 100644 index 0000000..298709d --- /dev/null +++ b/site-container.yml.sample @@ -0,0 +1,545 @@ +--- +# Defines deployment design and assigns role to server groups +- hosts: localhost + connection: local + tasks: + - name: Warn about ceph-ansible current status + fail: + msg: "cephadm is the new official installer. Please, consider migrating. + See https://docs.ceph.com/en/latest/cephadm/install for new deployments + or https://docs.ceph.com/en/latest/cephadm/adoption for migrating existing deployments." 
+ when: not yes_i_know | default(false) | bool + +- hosts: + - mons + - osds + - mdss + - rgws + - nfss + - rbdmirrors + - clients + - mgrs + - monitoring + + gather_facts: false + become: True + any_errors_fatal: true + + vars: + delegate_facts_host: True + + pre_tasks: + - import_tasks: raw_install_python.yml + + tasks: + # pre-tasks for following import - + - import_role: + name: ceph-defaults + - name: gather facts + setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) + tags: always + + - name: gather and delegate facts + setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: True + with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}" + run_once: true + when: delegate_facts_host | bool + tags: always + + + # dummy container setup is only supported on x86_64 + # when running with containerized_deployment: true this task + # creates a group that contains only x86_64 hosts. + # when running with containerized_deployment: false this task + # will add all client hosts to the group (and not filter). 
+ - name: create filtered clients group + add_host: + name: "{{ item }}" + groups: _filtered_clients + with_items: "{{ groups.get(client_group_name, []) | intersect(ansible_play_batch) }}" + when: (hostvars[item]['ansible_facts']['architecture'] == 'x86_64') or (not containerized_deployment | bool) + + tags: [with_pkg, fetch_container_image] + - import_role: + name: ceph-facts + - import_role: + name: ceph-validate + - import_role: + name: ceph-infra + - import_role: + name: ceph-handler + - import_role: + name: ceph-container-engine + tags: with_pkg + when: (group_names != ['clients'] and group_names != ['clients', '_filtered_clients'] and group_names != ['_filtered_clients', 'clients']) or (inventory_hostname == groups.get('_filtered_clients', [''])|first) + - import_role: + name: ceph-container-common + tags: fetch_container_image + when: (group_names != ['clients'] and group_names != ['clients', '_filtered_clients'] and group_names != ['_filtered_clients', 'clients']) or (inventory_hostname == groups.get('_filtered_clients', [''])|first) + +- hosts: mons + gather_facts: false + any_errors_fatal: true + tasks: + - name: set ceph monitor install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_mon: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: mons + become: True + gather_facts: false + any_errors_fatal: true + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-mon + - import_role: + name: ceph-mgr + when: groups.get(mgr_group_name, []) | length == 0 or mgr_group_name in group_names + +- hosts: mons + gather_facts: false + any_errors_fatal: true + tasks: + - name: set ceph monitor install 'Complete' + run_once: true + set_stats: + data: + 
installer_phase_ceph_mon: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: mgrs + become: True + gather_facts: false + any_errors_fatal: true + tasks: + # pre-tasks for following imports - + - name: set ceph manager install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_mgr: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-mgr + + # post-tasks for upcoming imports - + - name: set ceph manager install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_mgr: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: osds + become: True + gather_facts: false + any_errors_fatal: true + tasks: + # pre-tasks for upcoming imports - + - name: set ceph osd install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_osd: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-osd + + # post-tasks for preceding imports - + - name: set ceph osd install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_osd: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: mdss + become: True + gather_facts: false + any_errors_fatal: true + tasks: + # pre-tasks for following imports - + - name: set ceph mds install 'In Progress' + run_once: true + 
set_stats: + data: + installer_phase_ceph_mds: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-mds + + # post-tasks for preceding imports - + - name: set ceph mds install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_mds: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: rgws + become: True + gather_facts: false + any_errors_fatal: true + tasks: + # pre-tasks for following imports - + - name: set ceph rgw install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_rgw: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-rgw + + # post-tasks for preceding imports - + - name: set ceph rgw install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_rgw: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: clients + become: True + gather_facts: false + any_errors_fatal: true + tags: 'ceph_client' + tasks: + # pre-tasks for following imports - + - name: set ceph client install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_client: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - 
import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-client + + # post-tasks for preceding imports - + - name: set ceph client install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_client: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: nfss + become: True + gather_facts: false + any_errors_fatal: true + tasks: + # pre-tasks for following imports - + - name: set ceph nfs install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-nfs + + # post-tasks for following imports - + - name: set ceph nfs install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: rbdmirrors + become: True + gather_facts: false + any_errors_fatal: true + tasks: + # pre-tasks for following imports - + - name: set ceph rbd mirror install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_rbdmirror: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-rbd-mirror + + # post-tasks for preceding imports - + - name: set ceph rbd mirror install 'Complete' + 
run_once: true + set_stats: + data: + installer_phase_ceph_rbdmirror: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- import_playbook: dashboard.yml + when: + - dashboard_enabled | bool + - groups.get(monitoring_group_name, []) | length > 0 + +- hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - mgrs + + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph crash install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_crash: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + - import_role: + name: ceph-facts + tasks_from: container_binary.yml + - import_role: + name: ceph-handler + - import_role: + name: ceph-crash + + post_tasks: + - name: set ceph crash install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_crash: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - mgrs + + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph exporter install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_exporter: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + - import_role: + name: ceph-facts + tasks_from: container_binary.yml + - import_role: + name: ceph-handler + - import_role: + name: ceph-exporter + + post_tasks: + - name: set ceph exporter install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_exporter: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - clients + - mgrs + - monitoring + gather_facts: false + become: True + any_errors_fatal: true + tasks: + - name: Remove tempdir for scripts + ansible.builtin.file: + 
path: "{{ tmpdirpath.path }}" + state: absent + when: + - tmpdirpath.path is defined + - not _exporter_handler_called | default(false) | bool + - not _crash_handler_called | default(false) | bool + - not _mds_handler_called | default(false) | bool + - not _mgr_handler_called | default(false) | bool + - not _mon_handler_called | default(false) | bool + - not _osd_handler_called | default(false) | bool + - not _rbdmirror_handler_called | default(false) | bool + - not _rgw_handler_called | default(false) | bool + + +- hosts: mons[0] + gather_facts: false + become: True + any_errors_fatal: true + tasks: + - import_role: + name: ceph-defaults + + - name: check if podman binary is present + stat: + path: /usr/bin/podman + register: podman_binary + + - name: set_fact container_binary + set_fact: + container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] in ['8', '9']) else 'docker' }}" + + - name: get ceph status from the first monitor + command: > + {{ container_binary }} exec ceph-mon-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} -s + register: ceph_status + changed_when: false + + - name: "show ceph status for cluster {{ cluster }}" + debug: + msg: "{{ ceph_status.stdout_lines }}" + when: not ceph_status.failed diff --git a/site.yml b/site.yml new file mode 100644 index 0000000..88b8468 --- /dev/null +++ b/site.yml @@ -0,0 +1,520 @@ +--- +# Defines deployment design and assigns role to server groups + +# - hosts: localhost +# connection: local +# tasks: +# - name: Warn about ceph-ansible current status +# fail: +# msg: "cephadm is the new official installer. Please, consider migrating. +# See https://docs.ceph.com/en/latest/cephadm/install for new deployments +# or https://docs.ceph.com/en/latest/cephadm/adoption for migrating existing deployments." 
+# when: not yes_i_know | default(false) | bool + +- hosts: + - mons + - osds + - mgrs + + gather_facts: false + any_errors_fatal: true + become: true + + tags: always + + vars: + delegate_facts_host: True + + pre_tasks: + - import_role: + name: ceph-defaults + # If we can't get python2 installed before any module is used we will fail + # so just try what we can to get it installed + + - import_tasks: raw_install_python.yml + + - name: gather facts + setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + when: + - not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) + + - name: gather and delegate facts + setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: True + with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}" + run_once: true + when: delegate_facts_host | bool + + tasks: + - name: Import ceph-validate role + ansible.builtin.import_role: + name: ceph-validate + + # dummy container setup is only supported on x86_64 + # when running with containerized_deployment: true this task + # creates a group that contains only x86_64 hosts. + # when running with containerized_deployment: false this task + # will add all client hosts to the group (and not filter). 
+ - name: create filtered clients group + add_host: + name: "{{ item }}" + groups: _filtered_clients + with_items: "{{ groups.get(client_group_name, []) | intersect(ansible_play_batch) }}" + when: (hostvars[item]['ansible_facts']['architecture'] == 'x86_64') or (not containerized_deployment | bool) + + - import_role: + name: ceph-facts + - import_role: + name: ceph-handler + - import_role: + name: ceph-validate + - import_role: + name: ceph-infra + - import_role: + name: ceph-common + +- hosts: mons + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph monitor install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_mon: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-mon + - import_role: + name: ceph-mgr + when: groups.get(mgr_group_name, []) | length == 0 or mgr_group_name in group_names + + post_tasks: + - name: set ceph monitor install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_mon: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: mgrs + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph manager install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_mgr: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - 
import_role: + name: ceph-mgr + + post_tasks: + - name: set ceph manager install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_mgr: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: osds + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph osd install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_osd: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-osd + + post_tasks: + - name: set ceph osd install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_osd: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: mdss + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph mds install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_mds: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-mds + + post_tasks: + - name: set ceph mds install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_mds: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: rgws + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph rgw install 'In Progress' + run_once: true + set_stats: + 
data: + installer_phase_ceph_rgw: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-rgw + + post_tasks: + - name: set ceph rgw install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_rgw: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: clients + gather_facts: false + become: True + any_errors_fatal: true + tags: 'ceph_client' + pre_tasks: + - name: set ceph client install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_client: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-client + + post_tasks: + - name: set ceph client install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_client: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: nfss + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph nfs install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: 
ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-nfs + + post_tasks: + - name: set ceph nfs install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: rbdmirrors + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph rbd mirror install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_rbdmirror: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-rbd-mirror + + post_tasks: + - name: set ceph rbd mirror install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_rbdmirror: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: + - rgwloadbalancers + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph rgw loadbalancer install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_rgw_loadbalancer: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-rgw-loadbalancer + + post_tasks: + - name: set ceph rgw loadbalancer install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_rgw_loadbalancer: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- import_playbook: dashboard.yml + when: + - dashboard_enabled | bool + - groups.get(monitoring_group_name, []) | length > 0 + +- hosts: + - 
mons + - osds + - mdss + - rgws + - rbdmirrors + - mgrs + + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph crash install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_crash: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + - import_role: + name: ceph-facts + tasks_from: container_binary.yml + - import_role: + name: ceph-handler + - import_role: + name: ceph-crash + + post_tasks: + - name: set ceph crash install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_crash: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - clients + - mgrs + - monitoring + gather_facts: false + become: True + any_errors_fatal: true + tasks: + - name: Remove tempdir for scripts + ansible.builtin.file: + path: "{{ tmpdirpath.path }}" + state: absent + when: + - tmpdirpath.path is defined + - not _exporter_handler_called | default(false) | bool + - not _crash_handler_called | default(false) | bool + - not _mds_handler_called | default(false) | bool + - not _mgr_handler_called | default(false) | bool + - not _mon_handler_called | default(false) | bool + - not _osd_handler_called | default(false) | bool + - not _rbdmirror_handler_called | default(false) | bool + - not _rgw_handler_called | default(false) | bool + +- hosts: mons + gather_facts: false + become: True + any_errors_fatal: true + tasks: + - import_role: + name: ceph-defaults + - name: get ceph status from the first monitor + command: ceph --cluster {{ cluster }} -s + register: ceph_status + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + + - name: "show ceph status for cluster {{ cluster }}" + debug: + msg: "{{ ceph_status.stdout_lines }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + when: + - ceph_status 
is not skipped + - ceph_status is successful diff --git a/site.yml copy.sample b/site.yml copy.sample new file mode 100644 index 0000000..8811d3c --- /dev/null +++ b/site.yml copy.sample @@ -0,0 +1,524 @@ +--- +# Defines deployment design and assigns role to server groups + +- hosts: localhost + connection: local + tasks: + - name: Warn about ceph-ansible current status + fail: + msg: "cephadm is the new official installer. Please, consider migrating. + See https://docs.ceph.com/en/latest/cephadm/install for new deployments + or https://docs.ceph.com/en/latest/cephadm/adoption for migrating existing deployments." + when: not yes_i_know | default(false) | bool + +- hosts: + - mons + - osds + - mdss + - rgws + - nfss + - rbdmirrors + - clients + - mgrs + - monitoring + - rgwloadbalancers + + gather_facts: false + any_errors_fatal: true + become: true + + tags: always + + vars: + delegate_facts_host: True + + pre_tasks: + - import_role: + name: ceph-defaults + # If we can't get python2 installed before any module is used we will fail + # so just try what we can to get it installed + + - import_tasks: raw_install_python.yml + + - name: gather facts + setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + when: + - not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) + + - name: gather and delegate facts + setup: + gather_subset: + - 'all' + - '!facter' + - '!ohai' + delegate_to: "{{ item }}" + delegate_facts: True + with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}" + run_once: true + when: delegate_facts_host | bool + + tasks: + + # dummy container setup is only supported on x86_64 + # when running with containerized_deployment: true this task + # creates a group that contains only x86_64 hosts. + # when running with containerized_deployment: false this task + # will add all client hosts to the group (and not filter). 
+ - name: create filtered clients group + add_host: + name: "{{ item }}" + groups: _filtered_clients + with_items: "{{ groups.get(client_group_name, []) | intersect(ansible_play_batch) }}" + when: (hostvars[item]['ansible_facts']['architecture'] == 'x86_64') or (not containerized_deployment | bool) + + - import_role: + name: ceph-facts + - import_role: + name: ceph-handler + - import_role: + name: ceph-validate + - import_role: + name: ceph-infra + - import_role: + name: ceph-common + +- hosts: mons + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph monitor install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_mon: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-mon + - import_role: + name: ceph-mgr + when: groups.get(mgr_group_name, []) | length == 0 or mgr_group_name in group_names + + post_tasks: + - name: set ceph monitor install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_mon: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: mgrs + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph manager install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_mgr: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - 
import_role: + name: ceph-mgr + + post_tasks: + - name: set ceph manager install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_mgr: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: osds + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph osd install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_osd: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-osd + + post_tasks: + - name: set ceph osd install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_osd: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: mdss + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph mds install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_mds: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-mds + + post_tasks: + - name: set ceph mds install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_mds: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: rgws + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph rgw install 'In Progress' + run_once: true + set_stats: + 
data: + installer_phase_ceph_rgw: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-rgw + + post_tasks: + - name: set ceph rgw install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_rgw: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: clients + gather_facts: false + become: True + any_errors_fatal: true + tags: 'ceph_client' + pre_tasks: + - name: set ceph client install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_client: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-client + + post_tasks: + - name: set ceph client install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_client: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: nfss + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph nfs install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: 
ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-nfs + + post_tasks: + - name: set ceph nfs install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_nfs: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: rbdmirrors + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph rbd mirror install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_rbdmirror: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-handler + tags: ['ceph_update_config'] + - import_role: + name: ceph-config + tags: ['ceph_update_config'] + - import_role: + name: ceph-rbd-mirror + + post_tasks: + - name: set ceph rbd mirror install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_rbdmirror: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: + - rgwloadbalancers + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph rgw loadbalancer install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_rgw_loadbalancer: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + tags: ['ceph_update_config'] + - import_role: + name: ceph-facts + tags: ['ceph_update_config'] + - import_role: + name: ceph-rgw-loadbalancer + + post_tasks: + - name: set ceph rgw loadbalancer install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_rgw_loadbalancer: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- import_playbook: dashboard.yml + when: + - dashboard_enabled | bool + - groups.get(monitoring_group_name, []) | length > 0 + +- hosts: + - 
mons + - osds + - mdss + - rgws + - rbdmirrors + - mgrs + + gather_facts: false + become: True + any_errors_fatal: true + pre_tasks: + - name: set ceph crash install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_ceph_crash: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + + tasks: + - import_role: + name: ceph-defaults + - import_role: + name: ceph-facts + tasks_from: container_binary.yml + - import_role: + name: ceph-handler + - import_role: + name: ceph-crash + + post_tasks: + - name: set ceph crash install 'Complete' + run_once: true + set_stats: + data: + installer_phase_ceph_crash: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- hosts: + - mons + - osds + - mdss + - rgws + - rbdmirrors + - clients + - mgrs + - monitoring + gather_facts: false + become: True + any_errors_fatal: true + tasks: + - name: Remove tempdir for scripts + ansible.builtin.file: + path: "{{ tmpdirpath.path }}" + state: absent + when: + - tmpdirpath.path is defined + - not _exporter_handler_called | default(false) | bool + - not _crash_handler_called | default(false) | bool + - not _mds_handler_called | default(false) | bool + - not _mgr_handler_called | default(false) | bool + - not _mon_handler_called | default(false) | bool + - not _osd_handler_called | default(false) | bool + - not _rbdmirror_handler_called | default(false) | bool + - not _rgw_handler_called | default(false) | bool + +- hosts: mons + gather_facts: false + become: True + any_errors_fatal: true + tasks: + - import_role: + name: ceph-defaults + - name: get ceph status from the first monitor + command: ceph --cluster {{ cluster }} -s + register: ceph_status + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + + - name: "show ceph status for cluster {{ cluster }}" + debug: + msg: "{{ ceph_status.stdout_lines }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true + when: + - ceph_status 
is not skipped + - ceph_status is successful diff --git a/test.yml b/test.yml new file mode 100644 index 0000000..f04a322 --- /dev/null +++ b/test.yml @@ -0,0 +1,16 @@ +--- +- hosts: localhost + become: true + tasks: + - import_role: + name: ceph-common + - import_role: + name: ceph-mon + - import_role: + name: ceph-osd + - import_role: + name: ceph-mds + - import_role: + name: ceph-rgw + - import_role: + name: ceph-fetch-keys diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..a660d65 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,6 @@ +Functional tests +================ + +These playbooks aim to individually validate each Ceph component. +Some of them require packages to be installed. +Ideally you will run these tests from a client machine or from the Ansible server. diff --git a/tests/README.rst b/tests/README.rst new file mode 100644 index 0000000..544adb9 --- /dev/null +++ b/tests/README.rst @@ -0,0 +1,50 @@ +Functional Testing +================== +The directory structure, files, and tests found in this directory all work +together to provide: + +* a set of machines (or even a single one) so that ceph-ansible can run against +* a "scenario" configuration file in Python, that defines what nodes are + configured to what roles and what 'components' they will test +* tests (in functional/tests/) that will all run unless skipped explicitly when + testing a distinct feature dependant on the ansible run. + + +Example run +----------- +The following is the easiest way to try this out locally. Both Vagrant and +VirtualBox are required. Ensure that ``py.test`` and ``pytest-xdist`` are +installed (with pip on a virtualenv) by using the ``requirements.txt`` file in +the ``tests`` directory:: + + pip install -r requirements.txt + +Choose a directory in ``tests/functional`` that has 3 files: + +* ``Vagrantfile`` +* ``vagrant_variables.yml`` +* A Python ("scenario") file. 
+ +For example in: ``tests/functional/ubuntu/16.04/mon/initial_members``:: + + tree . + . + ├── Vagrantfile -> ../../../../../../Vagrantfile + ├── scenario.py + └── vagrant_variables.yml + + 0 directories, 3 files + +It is *required* to be in that directory. It is what triggers all the +preprocessing of complex arguments based on the cluster setup. + +Run vagrant first to setup the environment:: + + vagrant up --no-provision --provider=virtualbox + +Then run ceph-ansible against the hosts with the distinct role (in this case we +are deploying a monitor using ``initial_members``). + +And finally run ``py.test``:: + + py.test -v diff --git a/tests/__pycache__/conftest.cpython-314-pytest-8.4.2.pyc b/tests/__pycache__/conftest.cpython-314-pytest-8.4.2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8da73d874d489cea419f921e51d6bfc7a8a2fe7e GIT binary patch literal 10225 zcmc&)TWlLwdLG{IcZn25N*YVDWm=}>NM2uL*;!vCU*wB2mM5{}h#8S1X)KB}IWv?k z#ldp33#`0Z!17D&6fNSH0^X;(FNIMQSTE3r1^O~HrEoGe5IqD~>_Y*iHn7`1wEfSJ z9MKA$ICg;^NN3LFzn%N{pTl0S$4Nl&|NbAipOFNS#+h0$R};@3L!v-T5|rUIAsMu< z@sufV>>y5=B}+%8#wuC08k=PMz&h@)3EBKo#t5;unw1=sDMCoj#+2I#%1l}I8!E(< zwT{pAGLD*L%2r41rtF}_Lm9e=gyT{S_vXf zP$t^oCI+B{GSgN!L0M=5Vk>Qg*fwAjY_z$jDk}q;fWuCkpw6MuIknhDTcE^E+dz_2 zu%EL~9xZJY>=&Km)$XfVguk?XZ?#l#&<<#pS8Yl2If>zNEaH~fOe(_)S$=Vz%}J_H zuSs$9%nYj%OQvLwTxv=tp(e+srKH5ovwU76e?l&q$>bclLyVh@<}vLCKt$lR!aA$Sz&faPvK(KFtpS>v;<^e2U-+(E+-wvWKcsc zS+rEbpt{quY>}cFGpaeq%(JSkYKZFJY{(RsW6&bPLy0U9sjhUE z10^Yq!+|BI7&IfQru2M9wN^x{=2>2pREx+;c|mjo+YSh%777J*Z~rVm&-Tx-^IVSW zA4mK(=o(RLR+7ny+*FqBmsk-O@wsW7>Jt`Ka|${_jcOW2iIMVR-*A>sGgZCF|daTS}dg$bfY z`~CK`5ogbE#354~!5I=3)hwYrJE2;pVQ#RaOOeyL1t!a7NG=D{o`iIs9U`}j3A1X+ z^4D3UdXGvH;->NvJAx38RFfzn)k$MQtpJve3D`v3X*;TY4kRgwPfhWB7Ii~8?sxGD zL@@h|CQFCn4cz8#ai7gCn>MV3-L*XZ%TK=Yg%pQ-#rB)_uY674cw6M?z{87c(E}y# zLD_!rFVB61yY)F?uyj2223ER0o4uX8l`DDMe{I?@LE(RiJ`n$@e;@IW-ACI@zmFRr zR;}}VCZA=&M2nc_1@?uJ4FH2SvO(xm*s2M^&_@VH_&32AL*>>mj;XfrNBMyTu)l=Ds#7@dzE0U_JH&R{0sFm0 
zaK41gp>w%jLgAzc-G28=Xk5A!4{fI0FPvmv8cv$}63Sbj6MVFf@@ea*a@P3;KNS!H z=gd?PQXwHgHI3`*X|rEJPjkJTamQv|Ek!le>;kaSZO~Vg5WMQD(SV%r3vwEbVWdy* zgPKGkM0@lz7j}_2F=-N-&c*5_Hyp{92C|yyrkbo7Ko@c12c<+CN@+ZIS|<(q2sYDA zbo1`IHVREI8Pgcuq;tKDCQh~KG-0ZJ+*9k-%`Y9+dn`hA(B6YkmH7y_5mppl)LQ}_ zsL?lqg_AGhCIiU82g8;?ku@1Y3*9n~PlGLCva*LKH9K0y!&|B$J0AUR8MUn@x8ZEl zYXvy}v|XlA`~12L|93TPXs%yWuLM61)t$^mXBYZAZ&Q+Md#HiY&ouwH*J;$OHVrMwm=NHvL zeQ5>THGiilB{4Iqp&!{mkZP@nKzKH&L0-(Hm^>U884fn-1r|iQW)PnjG#DkSo+|7~ zO*4Rm)TUY%$SVgE$LCb*all;{E~;i$nte-)2O!Rg`?Pq!YUoqFLVhaCrBgXpy3V6H z)v75GRr5Tb6IEM=UEtEJsJgNX^Qi?so1ce_4~!F20yMW$t=e<>`4lch7`dSqh?W96 z-0c7OlRgZ~R5K=7+NGh?tVs4RFsMJvO=&Q$AL{zao21BQ$WF1pZ&$Lvzf(0h5uQ+h zq1{!asWrb2Tw2Ba6xgoXfIr1$#ELE0cyt(dq>agB5G#u42+ldM9Vvm4W>tT+;edNt z1f9sjkX2B1YO}~!E2>GsgY&2VfdD4is(r3{W>_f&jo}!;p@3^4tD3I?qJ{%j$a0cu z%CayR(>$7IB-ODg-cc2cnJ`o}qO@uir3_$V)r#;y3aUB7rKLoy0_RSFdUOm&T65gB zQ3vHB>c{1HPNJPSO5kW0j_|&PcH;<}k6`%{s1HYba5RdelQ=qxqvJTjU5AEo^iv#- z;0UifbOItUiIEr)ub(PHui*3-V-56F)(PHguu0-y!HHfV*267@3yRlYIH|Zig%j(c z$lc*P!{t!27)q8x{e`niQ_J16ch26Al$sKSbL$L~YE5Y`ApR5Iwg|YS6&IcW9v4ir^_*(28uxH&CyLV&N)}zF> z$@Uf{-dVIq*K27R(jNcq_inwnl9js#*TQ?t;h|!9=+R&){PttR<4!p|Qu2%zMxXkE zE5rBgYoSD?dmc{qHqfQAb-5<>nQp5Vxa2=jIIY+`E3s8uTxo6pqVMxQ zIWbgfJydQTDYlM0X4YCyfg9xw-k!ZRD@Tr(yeF1TipzU@->rQs@BHSF5{}-zcIR3- z+*1tqJTRBS$#QtG7#@6hq7**3d{POuE}vKjZa}tjTe9TsD|-(Vy$2oz*1U%bC;w>k zKx33x7xeNsE}zo8P1$x(Y3Tv)NbyIMKotK$!V^$}F(sN%T6=MAH1WL6>+UU_*dXg% z9No2m$%+p*I4sQ1uE2(0z{f5WYz^NFevMc0V${MoJ~Xz3hKHenGV0L4^cwso%B=I7 zX(MH+2WbGdjSaPKf?&8DX-HwC?7CdrOK2Q*fXV(68Ykt_3)~soUjNkDondHFa+)`9PF-f9g=Q#FQtR0 z4uXmZZrU)8&!sJ)5sa!iE`DMZwok!Bdm6zh+JY^Z0e}P+G^C)y7@AkW#u#7^$}YG& zh;v2<(OwTmV0Q)yhH>nwZwW(ibw#z%7W^GB3ErzZ9BTx-wh^?q29P?i#76CH1kIu~ zt{OnfbZ9m?YWW%P{Q+42fOe|2Cg(?KkJY$p+A`gmb{7@zGf-{t-`-~!G->1gYIbzg zB>pHnbm05{Y=;hobvr7fVb$}m)(f4l)eHJApljQr{T1gz*K29-u5oSAZmZ>AQTz6q zoF6pL!{FoBC1(Z_n;-4)dW|0ojRB$b#95rJXi)8Ai}K?H_fFr zfuxvb;mVie$tf0Ff|y*N(|MWClk;#(4K{Dx{KV0T5!Et^ 
zZv-o786#*lkYU8xDV{-@6f_#}er7^#MofkmXRfEP8^q<{S~Z7#rkLjMp{Y!2p2L@f z;2h5L5}SfXAkB+ZE!WuVY*w|-A!ZId1UtA_T#nC+s%4&==hEQIfD6chR|4)$TH*zq z5E!lJRWoAQX>c&W@Qi8#M@%)rD2^_54w2`130hvc!Nem@g6C1`LkT)`h*U#hRMjSt zh0e^th5gdrhHSV8b4i_w1kPlIRg?~sc>M!C-R>9O856EH1I)8Mwg0 zK*6d|-5hYG<{1PR7b3zqO0`etb7@JtIuI7|)KSgi95|st?BS-fya+{A4)iWCB>b9R zs@f3yQ6B0LpdJD(;OGX9G%pi33Q0Kn5JwD-u0y0YiwsN%ftS|NljEJ2%1v_?0hun$PV-tnG_k zpYN)8Ek_>PORcA6bL44rsdw{5qFCfoYIXr(Y6!Iwz*~h_Ep<<#oku7cNgv5_b;v5d*LAg zp0co~8oj&w&hB!sw;1gGGMI$tBUj+|p<9RU^{%?MKM4PQ?6|M5gWB1;6{o1uMx@wQD+q}QJ`SY7)+m50Q zJj&s+?eG)Z;qoZ+v_d2|omsVyy+CyzsP{gx?UfInmnW{s7vGmBXXFoN<+Br$)9b?Ss!fHn$a=RDYe53QnmWu!D*7 z&-{=AC)MaDMhv6>4X%rcHZ^isx7?JPDFbcR;595#a79dw9NHgnL+!dXdSVj+6AzII<#?~>~^ZR$Ap@9+zSH`Rdm z#4Vvt!#~5XAKp|$1822P!)Ne<_NE$iXLGY5bsDa~>)e}aXz+4Try=oL>j2++ZwZ?+ z;Ws6k%`>o0Sud~|fe*aQawy9;rI0n81zGR@B;99s4o10$L*wNnt^}oWu zI1HUqAfEc0m(BR1`P`jz_qVMzbuT*VMDzZm;`ta63?r8}v3 zf-Bsrr{n*)b`498-n+c&*|A0PKrueB<{tbWy&CUJt|Pld&lo{Od` z1JTEb;OO9S=tJyv1f!rj^dgKHD`ie#*2a`U2u~!m2$5>5y29FzQ?y@8RGtzvV26hi z;f0`?QW<^waOGDGN6_Cv1)ldJo?`%542G`>;+MoPJpV>G;7~T&f7Sl;_HT_rga4U{ zF#0!e>OXcl4MQ8PTIHwhE3>PHj&F@84HXh{LrZQ<7_0RqL*QHEM71`!p(UT^j1j|O H&7l7U@6V9p literal 0 HcmV?d00001 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..07e35e2 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,226 @@ +import pytest +import os + + +@pytest.fixture +def ceph_status(host, setup): + def _run(keyring, + name=None, + cluster='ceph', + container_binary='podman'): + containerized_deployment = setup["containerized_deployment"] + container_image = setup["container_image"] + client_name = '' + if name is not None: + client_name = f'-n {name}' + ceph_args = f"--connect-timeout 5 {client_name} -k {keyring} --cluster {cluster} -f json -s" + + if containerized_deployment: + cmd = f"sudo {container_binary} run --rm -v /etc/ceph:/etc/ceph -v {keyring}:{keyring}:z --entrypoint=ceph {container_image} {ceph_args}" + else: + cmd = f"ceph {ceph_args}" + output = 
host.check_output(cmd) + return output + return _run + + +def str_to_bool(val): + try: + val = val.lower() + except AttributeError: + val = str(val).lower() + if val == 'true': + return True + elif val == 'false': + return False + else: + raise ValueError("Invalid input value: %s" % val) + + +@pytest.fixture(scope="module") +def setup(host): + cluster_address = "" + osd_ids = [] + osds = [] + + ansible_vars = host.ansible.get_variables() + ansible_facts = host.ansible("setup") + + containerized_deployment = ansible_vars.get("containerized_deployment", False) + ceph_docker_registry = ansible_vars.get("ceph_docker_registry") + ceph_docker_image = ansible_vars.get("ceph_docker_image") + ceph_docker_image_tag = ansible_vars.get("ceph_docker_image_tag") + container_image = f"{ceph_docker_registry}/{ceph_docker_image}:{ceph_docker_image_tag}" + docker = ansible_vars.get("docker") + container_binary = ansible_vars.get("container_binary", "") + osd_auto_discovery = ansible_vars.get("osd_auto_discovery") + group_names = ansible_vars["group_names"] + + ansible_distribution = ansible_facts["ansible_facts"]["ansible_distribution"] + + if ansible_distribution == "CentOS": + public_interface = "eth1" + cluster_interface = "eth2" + else: + public_interface = "ens6" + cluster_interface = "ens7" + + subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1]) + num_mons = len(ansible_vars["groups"].get('mons', [])) + if osd_auto_discovery: + num_osds = 3 + else: + num_osds = len(ansible_vars.get("devices", [])) + if not num_osds: + num_osds = len(ansible_vars.get("lvm_volumes", [])) + osds_per_device = ansible_vars.get("osds_per_device", 1) + num_osds = num_osds * osds_per_device + + # If number of devices doesn't map to number of OSDs, allow tests to define + # that custom number, defaulting it to ``num_devices`` + num_osds = ansible_vars.get('num_osds', num_osds) + cluster_name = ansible_vars.get("cluster", "ceph") + conf_path = "/etc/ceph/{}.conf".format(cluster_name) + if 
"osds" in group_names: + cluster_address = host.interface(cluster_interface).addresses[0] + cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"') + if cmd.rc == 0: + osd_ids = cmd.stdout.rstrip("\n").split("\n") + osds = osd_ids + + address = host.interface(public_interface).addresses[0] + + if docker and not container_binary: + container_binary = "podman" + + data = dict( + cluster_name=cluster_name, + containerized_deployment=containerized_deployment, + container_image=container_image, + subnet=subnet, + osd_ids=osd_ids, + num_mons=num_mons, + num_osds=num_osds, + address=address, + osds=osds, + conf_path=conf_path, + public_interface=public_interface, + cluster_interface=cluster_interface, + cluster_address=cluster_address, + container_binary=container_binary) + + return data + + +@pytest.fixture() +def node(host, request): + """ + This fixture represents a single node in the ceph cluster. Using the + host.ansible fixture provided by testinfra it can access all the ansible + variables provided to it by the specific test scenario being ran. + + You must include this fixture on any tests that operate on specific type + of node because it contains the logic to manage which tests a node + should run. + """ + ansible_vars = host.ansible.get_variables() + # tox will pass in this environment variable. 
we need to do it this way + # because testinfra does not collect and provide ansible config passed in + # from using --extra-vars + ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "quincy") + rolling_update = os.environ.get("ROLLING_UPDATE", "False") + group_names = ansible_vars["group_names"] + docker = ansible_vars.get("docker") + dashboard = ansible_vars.get("dashboard_enabled", True) + radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1) + ceph_rbd_mirror_remote_user = ansible_vars.get('ceph_rbd_mirror_remote_user', '') + ceph_release_num = { + 'jewel': 10, + 'kraken': 11, + 'luminous': 12, + 'mimic': 13, + 'nautilus': 14, + 'octopus': 15, + 'pacific': 16, + 'quincy': 17, + 'reef': 18, + 'squid': 19, + 'dev': 99 + } + + sanitized_group_names = group_names + if 'all' in sanitized_group_names: + sanitized_group_names.remove('all') + + # capture the initial/default state + test_is_applicable = False + for marker in request.node.iter_markers(): + if marker.name in group_names or marker.name == 'all': + test_is_applicable = True + break + # Check if any markers on the test method exist in the nodes group_names. + # If they do not, this test is not valid for the node being tested. 
+ if not test_is_applicable: + reason = "%s: Not a valid test for node type: %s" % ( + request.function, group_names) + pytest.skip(reason) + + if request.node.get_closest_marker('rbdmirror_secondary') and not ceph_rbd_mirror_remote_user: # noqa E501 + pytest.skip('Not a valid test for a non-secondary rbd-mirror node') + + if request.node.get_closest_marker('ceph_crash') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]: + pytest.skip('Not a valid test for nfs or client nodes') + + if request.node.get_closest_marker('ceph_exporter') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]: + pytest.skip('Not a valid test for nfs or client nodes') + + if request.node.get_closest_marker("no_docker") and docker: + pytest.skip( + "Not a valid test for containerized deployments or atomic hosts") + + if request.node.get_closest_marker("docker") and not docker: + pytest.skip( + "Not a valid test for non-containerized deployments or atomic hosts") # noqa E501 + + if request.node.get_closest_marker("dashboard") and not dashboard: + pytest.skip( + "Not a valid test with dashboard disabled") + + if request.node.get_closest_marker("dashboard") and sanitized_group_names == ['clients']: + pytest.skip('Not a valid test for client node') + + data = dict( + vars=ansible_vars, + docker=docker, + ceph_stable_release=ceph_stable_release, + ceph_release_num=ceph_release_num, + rolling_update=rolling_update, + radosgw_num_instances=radosgw_num_instances, + ) + return data + + +def pytest_collection_modifyitems(session, config, items): + for item in items: + test_path = item.location[0] + if "mon" in test_path: + item.add_marker(pytest.mark.mons) + elif "osd" in test_path: + item.add_marker(pytest.mark.osds) + elif "mds" in test_path: + item.add_marker(pytest.mark.mdss) + elif "mgr" in test_path: + item.add_marker(pytest.mark.mgrs) + elif "rbd-mirror" in test_path: + item.add_marker(pytest.mark.rbdmirrors) + elif "rgw" in test_path: + 
item.add_marker(pytest.mark.rgws) + elif "nfs" in test_path: + item.add_marker(pytest.mark.nfss) + elif "grafana" in test_path: + item.add_marker(pytest.mark.grafanas) + else: + item.add_marker(pytest.mark.all) + + if "journal_collocation" in test_path: + item.add_marker(pytest.mark.journal_collocation) diff --git a/tests/functional/.gitignore b/tests/functional/.gitignore new file mode 100644 index 0000000..39190dc --- /dev/null +++ b/tests/functional/.gitignore @@ -0,0 +1,3 @@ +ubuntu-key/ +fetch/ +vagrant_ssh_config diff --git a/tests/functional/add-mdss/Vagrantfile b/tests/functional/add-mdss/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/add-mdss/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-mdss/ceph-override.json b/tests/functional/add-mdss/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/add-mdss/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-mdss/container/Vagrantfile b/tests/functional/add-mdss/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/add-mdss/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-mdss/container/ceph-override.json b/tests/functional/add-mdss/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/add-mdss/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-mdss/container/group_vars/all b/tests/functional/add-mdss/container/group_vars/all new file mode 100644 index 0000000..c8dc44c --- /dev/null +++ b/tests/functional/add-mdss/container/group_vars/all @@ -0,0 +1,31 @@ +--- +docker: True +ceph_origin: repository +ceph_repository: 
community +containerized_deployment: true +cluster: ceph +public_network: "192.168.63.0/24" +cluster_network: "192.168.64.0/24" +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/add-mdss/container/hosts b/tests/functional/add-mdss/container/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-mdss/container/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-mdss/container/hosts-2 b/tests/functional/add-mdss/container/hosts-2 new file mode 100644 index 0000000..b192b10 --- /dev/null +++ b/tests/functional/add-mdss/container/hosts-2 @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mdss] +mds0 diff --git a/tests/functional/add-mdss/container/vagrant_variables.yml b/tests/functional/add-mdss/container/vagrant_variables.yml new file mode 100644 index 0000000..bc14e70 --- /dev/null +++ b/tests/functional/add-mdss/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 1 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.63 +cluster_subnet: 192.168.64 + +# MEMORY +# set 1024 for CentOS 
+memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-mdss/group_vars/all b/tests/functional/add-mdss/group_vars/all new file mode 100644 index 0000000..5bc3323 --- /dev/null +++ b/tests/functional/add-mdss/group_vars/all @@ -0,0 +1,26 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.61.0/24" +cluster_network: "192.168.62.0/24" +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False \ No newline at end of file diff --git a/tests/functional/add-mdss/hosts b/tests/functional/add-mdss/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-mdss/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-mdss/hosts-2 b/tests/functional/add-mdss/hosts-2 new file mode 100644 index 0000000..b192b10 --- /dev/null +++ b/tests/functional/add-mdss/hosts-2 @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mdss] +mds0 diff --git 
a/tests/functional/add-mdss/vagrant_variables.yml b/tests/functional/add-mdss/vagrant_variables.yml new file mode 100644 index 0000000..702d228 --- /dev/null +++ b/tests/functional/add-mdss/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 1 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.61 +cluster_subnet: 192.168.62 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. 
Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-mgrs/Vagrantfile b/tests/functional/add-mgrs/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/add-mgrs/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-mgrs/ceph-override.json b/tests/functional/add-mgrs/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/add-mgrs/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-mgrs/container/Vagrantfile b/tests/functional/add-mgrs/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/add-mgrs/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-mgrs/container/ceph-override.json b/tests/functional/add-mgrs/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/add-mgrs/container/ceph-override.json @@ -0,0 +1 @@ 
+../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-mgrs/container/group_vars/all b/tests/functional/add-mgrs/container/group_vars/all new file mode 100644 index 0000000..8530e28 --- /dev/null +++ b/tests/functional/add-mgrs/container/group_vars/all @@ -0,0 +1,31 @@ +--- +docker: True +ceph_origin: repository +ceph_repository: community +containerized_deployment: true +cluster: ceph +public_network: "192.168.75.0/24" +cluster_network: "192.168.76.0/24" +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/add-mgrs/container/hosts b/tests/functional/add-mgrs/container/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-mgrs/container/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-mgrs/container/hosts-2 b/tests/functional/add-mgrs/container/hosts-2 new file mode 100644 index 0000000..6404712 --- /dev/null +++ b/tests/functional/add-mgrs/container/hosts-2 @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mgr0 diff --git a/tests/functional/add-mgrs/container/vagrant_variables.yml b/tests/functional/add-mgrs/container/vagrant_variables.yml new file mode 100644 index 0000000..1184440 --- /dev/null +++ b/tests/functional/add-mgrs/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE 
THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 1 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.75 +cluster_subnet: 192.168.76 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-mgrs/group_vars/all b/tests/functional/add-mgrs/group_vars/all new file mode 100644 index 0000000..b48fc6b --- /dev/null +++ b/tests/functional/add-mgrs/group_vars/all @@ -0,0 +1,26 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.73.0/24" +cluster_network: "192.168.74.0/24" +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False diff --git a/tests/functional/add-mgrs/hosts b/tests/functional/add-mgrs/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-mgrs/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-mgrs/hosts-2 b/tests/functional/add-mgrs/hosts-2 new file mode 100644 index 0000000..6404712 --- /dev/null +++ b/tests/functional/add-mgrs/hosts-2 @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mgr0 diff --git a/tests/functional/add-mgrs/vagrant_variables.yml 
b/tests/functional/add-mgrs/vagrant_variables.yml new file mode 100644 index 0000000..08792e3 --- /dev/null +++ b/tests/functional/add-mgrs/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 1 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.73 +cluster_subnet: 192.168.74 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. 
Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-mons/Vagrantfile b/tests/functional/add-mons/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/add-mons/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-mons/ceph-override.json b/tests/functional/add-mons/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/add-mons/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-mons/container/Vagrantfile b/tests/functional/add-mons/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/add-mons/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-mons/container/ceph-override.json b/tests/functional/add-mons/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/add-mons/container/ceph-override.json @@ -0,0 +1 @@ 
+../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-mons/container/group_vars/all b/tests/functional/add-mons/container/group_vars/all new file mode 100644 index 0000000..0c0a629 --- /dev/null +++ b/tests/functional/add-mons/container/group_vars/all @@ -0,0 +1,31 @@ +--- +docker: True +ceph_origin: repository +ceph_repository: community +containerized_deployment: true +cluster: ceph +public_network: "192.168.55.0/24" +cluster_network: "192.168.56.0/24" +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/add-mons/container/hosts b/tests/functional/add-mons/container/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-mons/container/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-mons/container/hosts-2 b/tests/functional/add-mons/container/hosts-2 new file mode 100644 index 0000000..6e9489f --- /dev/null +++ b/tests/functional/add-mons/container/hosts-2 @@ -0,0 +1,6 @@ +[mons] +mon0 +mon1 + +[osds] +osd0 diff --git a/tests/functional/add-mons/container/vagrant_variables.yml b/tests/functional/add-mons/container/vagrant_variables.yml new file mode 100644 index 0000000..abfce55 --- /dev/null +++ b/tests/functional/add-mons/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE
NUMBER OF VMS TO RUN +mon_vms: 2 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.55 +cluster_subnet: 192.168.56 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-mons/group_vars/all b/tests/functional/add-mons/group_vars/all new file mode 100644 index 0000000..77e5d0a --- /dev/null +++ b/tests/functional/add-mons/group_vars/all @@ -0,0 +1,26 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.53.0/24" +cluster_network: "192.168.54.0/24" +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False diff --git a/tests/functional/add-mons/hosts b/tests/functional/add-mons/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-mons/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-mons/hosts-2 b/tests/functional/add-mons/hosts-2 new file mode 100644 index 0000000..6e9489f --- /dev/null +++ b/tests/functional/add-mons/hosts-2 @@ -0,0 +1,6 @@ +[mons] +mon0 +mon1 + +[osds] +osd0 diff --git a/tests/functional/add-mons/vagrant_variables.yml 
b/tests/functional/add-mons/vagrant_variables.yml new file mode 100644 index 0000000..02e7f44 --- /dev/null +++ b/tests/functional/add-mons/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 2 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.53 +cluster_subnet: 192.168.54 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. 
Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-osds/Vagrantfile b/tests/functional/add-osds/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/add-osds/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-osds/ceph-override.json b/tests/functional/add-osds/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/add-osds/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-osds/container/Vagrantfile b/tests/functional/add-osds/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/add-osds/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-osds/container/ceph-override.json b/tests/functional/add-osds/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/add-osds/container/ceph-override.json @@ -0,0 +1 @@ 
+../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-osds/container/group_vars/all b/tests/functional/add-osds/container/group_vars/all new file mode 100644 index 0000000..30d9631 --- /dev/null +++ b/tests/functional/add-osds/container/group_vars/all @@ -0,0 +1,31 @@ +--- +docker: True +ceph_origin: repository +ceph_repository: community +containerized_deployment: true +cluster: ceph +public_network: "192.168.55.0/24" +cluster_network: "192.168.56.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/add-osds/container/hosts b/tests/functional/add-osds/container/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-osds/container/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-osds/container/hosts-2 b/tests/functional/add-osds/container/hosts-2 new file mode 100644 index 0000000..073437d --- /dev/null +++ b/tests/functional/add-osds/container/hosts-2 @@ -0,0 +1,6 @@ +[mons] +mon0 + +[osds] +osd0 +osd1 diff --git a/tests/functional/add-osds/container/vagrant_variables.yml b/tests/functional/add-osds/container/vagrant_variables.yml new file mode 100644 index 0000000..9001b97 --- /dev/null +++ b/tests/functional/add-osds/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + 
+# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.55 +cluster_subnet: 192.168.56 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. 
vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-osds/group_vars/all b/tests/functional/add-osds/group_vars/all new file mode 100644 index 0000000..2b6e2b8 --- /dev/null +++ b/tests/functional/add-osds/group_vars/all @@ -0,0 +1,26 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.53.0/24" +cluster_network: "192.168.54.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False \ No newline at end of file diff --git a/tests/functional/add-osds/hosts b/tests/functional/add-osds/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-osds/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-osds/hosts-2 b/tests/functional/add-osds/hosts-2 new file mode 100644 index 0000000..073437d --- /dev/null +++ 
b/tests/functional/add-osds/hosts-2 @@ -0,0 +1,6 @@ +[mons] +mon0 + +[osds] +osd0 +osd1 diff --git a/tests/functional/add-osds/vagrant_variables.yml b/tests/functional/add-osds/vagrant_variables.yml new file mode 100644 index 0000000..f18c4db --- /dev/null +++ b/tests/functional/add-osds/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.53 +cluster_subnet: 192.168.54 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-rbdmirrors/Vagrantfile b/tests/functional/add-rbdmirrors/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/add-rbdmirrors/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-rbdmirrors/ceph-override.json b/tests/functional/add-rbdmirrors/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/add-rbdmirrors/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-rbdmirrors/container/Vagrantfile b/tests/functional/add-rbdmirrors/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/add-rbdmirrors/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-rbdmirrors/container/ceph-override.json b/tests/functional/add-rbdmirrors/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/add-rbdmirrors/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-rbdmirrors/container/group_vars/all b/tests/functional/add-rbdmirrors/container/group_vars/all new file mode 100644 index 0000000..3326207 --- /dev/null 
+++ b/tests/functional/add-rbdmirrors/container/group_vars/all @@ -0,0 +1,31 @@ +--- +docker: True +ceph_origin: repository +ceph_repository: community +containerized_deployment: true +cluster: ceph +public_network: "192.168.67.0/24" +cluster_network: "192.168.68.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/add-rbdmirrors/container/hosts b/tests/functional/add-rbdmirrors/container/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-rbdmirrors/container/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-rbdmirrors/container/hosts-2 b/tests/functional/add-rbdmirrors/container/hosts-2 new file mode 100644 index 0000000..b87d739 --- /dev/null +++ b/tests/functional/add-rbdmirrors/container/hosts-2 @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[rbdmirrors] +rbd-mirror0 diff --git a/tests/functional/add-rbdmirrors/container/vagrant_variables.yml b/tests/functional/add-rbdmirrors/container/vagrant_variables.yml new file mode 100644 index 0000000..55b4e13 --- /dev/null +++ b/tests/functional/add-rbdmirrors/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 
+grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.67 +cluster_subnet: 192.168.68 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-rbdmirrors/group_vars/all b/tests/functional/add-rbdmirrors/group_vars/all new file mode 100644 index 0000000..6b6d1dc --- /dev/null +++ b/tests/functional/add-rbdmirrors/group_vars/all @@ -0,0 +1,26 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.65.0/24" +cluster_network: "192.168.66.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False \ No newline at end of file diff --git a/tests/functional/add-rbdmirrors/hosts b/tests/functional/add-rbdmirrors/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-rbdmirrors/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-rbdmirrors/hosts-2 b/tests/functional/add-rbdmirrors/hosts-2 new file mode 100644 index 0000000..b87d739 --- /dev/null +++ 
b/tests/functional/add-rbdmirrors/hosts-2 @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[rbdmirrors] +rbd-mirror0 diff --git a/tests/functional/add-rbdmirrors/vagrant_variables.yml b/tests/functional/add-rbdmirrors/vagrant_variables.yml new file mode 100644 index 0000000..b06b181 --- /dev/null +++ b/tests/functional/add-rbdmirrors/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.65 +cluster_subnet: 192.168.66 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-rgws/Vagrantfile b/tests/functional/add-rgws/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/add-rgws/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-rgws/ceph-override.json b/tests/functional/add-rgws/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/add-rgws/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-rgws/container/Vagrantfile b/tests/functional/add-rgws/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/add-rgws/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/add-rgws/container/ceph-override.json b/tests/functional/add-rgws/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/add-rgws/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/add-rgws/container/group_vars/all b/tests/functional/add-rgws/container/group_vars/all new file mode 100644 index 0000000..3f3e476 --- /dev/null +++ b/tests/functional/add-rgws/container/group_vars/all @@ -0,0 +1,33 @@ +--- 
+docker: True +ceph_origin: repository +ceph_repository: community +containerized_deployment: true +cluster: ceph +public_network: "192.168.71.0/24" +cluster_network: "192.168.72.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/add-rgws/container/hosts b/tests/functional/add-rgws/container/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-rgws/container/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-rgws/container/hosts-2 b/tests/functional/add-rgws/container/hosts-2 new file mode 100644 index 0000000..f05a2ad --- /dev/null +++ b/tests/functional/add-rgws/container/hosts-2 @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[rgws] +rgw0 diff --git a/tests/functional/add-rgws/container/vagrant_variables.yml b/tests/functional/add-rgws/container/vagrant_variables.yml new file mode 100644 index 0000000..d3ba42f --- /dev/null +++ b/tests/functional/add-rgws/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# 
INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.71 +cluster_subnet: 192.168.72 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/add-rgws/group_vars/all b/tests/functional/add-rgws/group_vars/all new file mode 100644 index 0000000..2aae860 --- /dev/null +++ b/tests/functional/add-rgws/group_vars/all @@ -0,0 +1,26 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.69.0/24" +cluster_network: "192.168.70.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False \ No newline at end of file diff --git a/tests/functional/add-rgws/group_vars/rgws b/tests/functional/add-rgws/group_vars/rgws new file mode 100644 index 0000000..d9c09f8 --- /dev/null +++ b/tests/functional/add-rgws/group_vars/rgws @@ -0,0 +1,9 @@ +copy_admin_key: true +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 diff --git 
a/tests/functional/add-rgws/hosts b/tests/functional/add-rgws/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/add-rgws/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/add-rgws/hosts-2 b/tests/functional/add-rgws/hosts-2 new file mode 100644 index 0000000..f05a2ad --- /dev/null +++ b/tests/functional/add-rgws/hosts-2 @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[rgws] +rgw0 diff --git a/tests/functional/add-rgws/vagrant_variables.yml b/tests/functional/add-rgws/vagrant_variables.yml new file mode 100644 index 0000000..e717ba1 --- /dev/null +++ b/tests/functional/add-rgws/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.69 +cluster_subnet: 192.168.70 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/all-in-one/Vagrantfile b/tests/functional/all-in-one/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/all-in-one/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/all-in-one/ceph-override.json b/tests/functional/all-in-one/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/all-in-one/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/all-in-one/container/Vagrantfile b/tests/functional/all-in-one/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/all-in-one/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/all-in-one/container/ceph-override.json b/tests/functional/all-in-one/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/all-in-one/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/all-in-one/container/group_vars/all b/tests/functional/all-in-one/container/group_vars/all new file mode 100644 index 0000000..851a2a1 --- /dev/null +++ 
b/tests/functional/all-in-one/container/group_vars/all @@ -0,0 +1,46 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +radosgw_num_instances: 2 +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.19.0/24" +cluster_network: "192.168.20.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +dashboard_enabled: false +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 + ec: + pg_num: 16 + type: ec + ec_profile: myecprofile + ec_k: 2 + ec_m: 1 +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/all-in-one/container/hosts b/tests/functional/all-in-one/container/hosts new file mode 100644 index 0000000..26b9e2f --- /dev/null +++ b/tests/functional/all-in-one/container/hosts @@ -0,0 +1,20 @@ +[mons] +osd0 +osd1 +osd2 + +[mgrs] +osd0 +osd1 +osd2 + +[osds] +osd0 +osd1 +osd2 + +[mdss] +osd0 + +[rgws] +osd0 diff --git a/tests/functional/all-in-one/container/vagrant_variables.yml b/tests/functional/all-in-one/container/vagrant_variables.yml new file mode 100644 index 0000000..af0ad49 --- /dev/null +++ b/tests/functional/all-in-one/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 0 +osd_vms: 3 +mds_vms: 0 
+rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.19 +cluster_subnet: 192.168.20 + +# MEMORY +# set 1024 for CentOS +memory: 4096 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +# client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/all-in-one/group_vars/all b/tests/functional/all-in-one/group_vars/all new file mode 100644 index 0000000..e4967a8 --- /dev/null +++ b/tests/functional/all-in-one/group_vars/all @@ -0,0 +1,40 @@ +--- +containerized_deployment: False +ceph_origin: repository +ceph_repository: community +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_mon_docker_subnet: "{{ public_network }}" +dashboard_enabled: False +public_network: "192.168.17.0/24" +cluster_network: "192.168.18.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 + ec: + pg_num: 16 + type: ec + ec_profile: myecprofile + ec_k: 2 + ec_m: 1 \ No newline at end of file diff --git a/tests/functional/all-in-one/hosts b/tests/functional/all-in-one/hosts new file mode 100644 index 0000000..26b9e2f --- /dev/null +++ b/tests/functional/all-in-one/hosts @@ -0,0 +1,20 @@ +[mons] +osd0 +osd1 +osd2 + +[mgrs] +osd0 
+osd1 +osd2 + +[osds] +osd0 +osd1 +osd2 + +[mdss] +osd0 + +[rgws] +osd0 diff --git a/tests/functional/all-in-one/vagrant_variables.yml b/tests/functional/all-in-one/vagrant_variables.yml new file mode 100644 index 0000000..dafb2cd --- /dev/null +++ b/tests/functional/all-in-one/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 0 +osd_vms: 3 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.17 +cluster_subnet: 192.168.18 + +# MEMORY +# set 1024 for CentOS +memory: 8192 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +# client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. 
+vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/all_daemons/Vagrantfile b/tests/functional/all_daemons/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/all_daemons/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/all_daemons/ceph-override.json b/tests/functional/all_daemons/ceph-override.json new file mode 100644 index 0000000..0a87e1a --- /dev/null +++ b/tests/functional/all_daemons/ceph-override.json @@ -0,0 +1,40 @@ +{ + "ceph_conf_overrides": { + "global": { + "auth_allow_insecure_global_id_reclaim": false, + "osd_pool_default_pg_num": 12, + "osd_pool_default_size": 1, + "mon_allow_pool_size_one": true, + "mon_warn_on_pool_no_redundancy": false, + "mon_max_pg_per_osd": 300 + } + }, + "cephfs_pools": [ + { + "name": "cephfs_data", + "pg_num": 8, + "pgp_num": 8, + "rule_name": "replicated_rule", + "type": 1, + "erasure_profile": "", + "expected_num_objects": "", + "application": "cephfs", + "size": 2, + "min_size": 0 + }, + { + "name": "cephfs_metadata", + "pg_num": 8, + "pgp_num": 8, + "rule_name": "replicated_rule", + "type": 1, + "erasure_profile": "", + "expected_num_objects": "", + "application": "cephfs", + "size": 2, + "min_size": 0 + } + ], + "ceph_mon_docker_memory_limit": "2g", + 
"radosgw_num_instances": 2 +} diff --git a/tests/functional/all_daemons/container/Vagrantfile b/tests/functional/all_daemons/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/all_daemons/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/all_daemons/container/ceph-override.json b/tests/functional/all_daemons/container/ceph-override.json new file mode 120000 index 0000000..772bdc5 --- /dev/null +++ b/tests/functional/all_daemons/container/ceph-override.json @@ -0,0 +1 @@ +../ceph-override.json \ No newline at end of file diff --git a/tests/functional/all_daemons/container/group_vars/all b/tests/functional/all_daemons/container/group_vars/all new file mode 100644 index 0000000..84b83fe --- /dev/null +++ b/tests/functional/all_daemons/container/group_vars/all @@ -0,0 +1,32 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.17.0/24" +cluster_network: "192.168.18.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +ceph_conf_overrides: + global: + auth_allow_insecure_global_id_reclaim: false + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +docker_pull_timeout: 600s +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +mds_max_mds: 2 +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" diff --git a/tests/functional/all_daemons/container/group_vars/clients b/tests/functional/all_daemons/container/group_vars/clients new file mode 100644 index 0000000..33add85 --- /dev/null +++ b/tests/functional/all_daemons/container/group_vars/clients @@ -0,0 +1,15 @@ +--- +user_config: True +copy_admin_key: True +test: + name: "test" + rule_name: "HDD" + size: 1 + application: "rbd" +test2: + name: "test2" + size: 1 + application: "rbd" +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/all_daemons/container/group_vars/iscsigws b/tests/functional/all_daemons/container/group_vars/iscsigws new file mode 100644 index 0000000..8d0932a --- /dev/null +++ b/tests/functional/all_daemons/container/group_vars/iscsigws @@ -0,0 +1,2 @@ +--- +generate_crt: True diff --git a/tests/functional/all_daemons/container/group_vars/mons b/tests/functional/all_daemons/container/group_vars/mons new file mode 100644 index 0000000..7b31aa9 --- /dev/null +++ b/tests/functional/all_daemons/container/group_vars/mons @@ -0,0 +1,11 @@ +--- +create_crush_tree: True +crush_rule_config: True +crush_rule_hdd: + name: HDD + root: default + type: host + class: hdd + default: true +crush_rules: + - "{{ crush_rule_hdd }}" diff --git a/tests/functional/all_daemons/container/group_vars/osds b/tests/functional/all_daemons/container/group_vars/osds new file mode 100644 index 0000000..ad25b58 --- /dev/null +++ b/tests/functional/all_daemons/container/group_vars/osds @@ -0,0 +1,8 @@ +--- +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + 
db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/all_daemons/container/group_vars/rgws b/tests/functional/all_daemons/container/group_vars/rgws new file mode 100644 index 0000000..639ade9 --- /dev/null +++ b/tests/functional/all_daemons/container/group_vars/rgws @@ -0,0 +1,8 @@ +--- +copy_admin_key: True +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 diff --git a/tests/functional/all_daemons/container/hosts b/tests/functional/all_daemons/container/hosts new file mode 100644 index 0000000..51d488c --- /dev/null +++ b/tests/functional/all_daemons/container/hosts @@ -0,0 +1,33 @@ +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mgr0 + +[osds] +osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }" +osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }" +osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]" + +[mdss] +mds0 +mds1 +mds2 + +[rgws] +rgw0 + +#[nfss] +#nfs0 + +[clients] +client0 +client1 + +[rbdmirrors] +rbd-mirror0 + +[monitoring] +mon0 diff --git a/tests/functional/all_daemons/container/vagrant_variables.yml b/tests/functional/all_daemons/container/vagrant_variables.yml new file mode 100644 index 0000000..ba682d2 --- /dev/null +++ b/tests/functional/all_daemons/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 3 +mds_vms: 3 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 2 +mgr_vms: 1 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.17 +cluster_subnet: 192.168.18 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are 
*strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/all_daemons/group_vars/all b/tests/functional/all_daemons/group_vars/all new file mode 100644 index 0000000..ea16dac --- /dev/null +++ b/tests/functional/all_daemons/group_vars/all @@ -0,0 +1,25 @@ +--- +ceph_origin: repository +ceph_repository: community +public_network: "192.168.110.0/24" +cluster_network: "192.168.220.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_conf_overrides: + global: + auth_allow_insecure_global_id_reclaim: false + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +mds_max_mds: 2 +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" +grafana_server_group_name: ceph_monitoring +dashboard_enabled: false diff --git a/tests/functional/all_daemons/group_vars/clients b/tests/functional/all_daemons/group_vars/clients new file mode 100644 index 0000000..4c37898 --- /dev/null +++ b/tests/functional/all_daemons/group_vars/clients @@ -0,0 +1,13 @@ +--- +copy_admin_key: True +user_config: True +test: + name: "test" + rule_name: "HDD" + size: 1 +test2: + name: "test2" + size: 1 +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/all_daemons/group_vars/iscsigws b/tests/functional/all_daemons/group_vars/iscsigws new file mode 100644 index 0000000..8d0932a --- /dev/null +++ b/tests/functional/all_daemons/group_vars/iscsigws @@ -0,0 +1,2 @@ +--- +generate_crt: True diff --git a/tests/functional/all_daemons/group_vars/mons b/tests/functional/all_daemons/group_vars/mons new file mode 100644 index 0000000..bc6941b --- /dev/null +++ b/tests/functional/all_daemons/group_vars/mons @@ -0,0 +1,11 @@ +--- +create_crush_tree: true +crush_rule_config: true +crush_rule_hdd: + name: HDD + root: default + type: host + class: hdd + default: true +crush_rules: + - "{{ crush_rule_hdd }}" diff --git a/tests/functional/all_daemons/group_vars/nfss b/tests/functional/all_daemons/group_vars/nfss new file mode 100644 index 0000000..826bdfe --- /dev/null +++ b/tests/functional/all_daemons/group_vars/nfss @@ -0,0 +1,10 @@ +copy_admin_key: true +nfs_file_gw: false +nfs_obj_gw: true +ganesha_conf_overrides: | + CACHEINODE { + Entries_HWMark = 100000; + } +nfs_ganesha_stable: false +nfs_ganesha_dev: true +nfs_ganesha_flavor: "ceph_main" diff --git 
a/tests/functional/all_daemons/group_vars/osds b/tests/functional/all_daemons/group_vars/osds new file mode 100644 index 0000000..99c065e --- /dev/null +++ b/tests/functional/all_daemons/group_vars/osds @@ -0,0 +1,10 @@ +--- +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/all_daemons/group_vars/rgws b/tests/functional/all_daemons/group_vars/rgws new file mode 100644 index 0000000..d9c09f8 --- /dev/null +++ b/tests/functional/all_daemons/group_vars/rgws @@ -0,0 +1,9 @@ +copy_admin_key: true +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 diff --git a/tests/functional/all_daemons/hosts b/tests/functional/all_daemons/hosts new file mode 100644 index 0000000..8e20197 --- /dev/null +++ b/tests/functional/all_daemons/hosts @@ -0,0 +1,33 @@ +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mgr0 + +[osds] +osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }" +osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }" +osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]" + +[mdss] +mds0 +mds1 +mds2 + +[rgws] +rgw0 + +[clients] +client0 +client1 + +#[nfss] +#nfs0 + +[rbdmirrors] +rbd-mirror0 + +[ceph_monitoring] +mon0 diff --git a/tests/functional/all_daemons/hosts-switch-to-containers b/tests/functional/all_daemons/hosts-switch-to-containers new file mode 100644 index 0000000..ceae369 --- /dev/null +++ b/tests/functional/all_daemons/hosts-switch-to-containers @@ -0,0 +1,27 @@ +[all:vars] +docker=True + +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mgr0 + +[osds] +osd0 + +[mdss] +mds0 +mds1 +mds2 + +[rgws] +rgw0 + +[clients] +client0 + 
+[monitoring] +mon0 \ No newline at end of file diff --git a/tests/functional/all_daemons/vagrant_variables.yml b/tests/functional/all_daemons/vagrant_variables.yml new file mode 100644 index 0000000..0f07c24 --- /dev/null +++ b/tests/functional/all_daemons/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 3 +mds_vms: 3 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 2 +mgr_vms: 1 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.110 +cluster_subnet: 192.168.220 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# VM prefix name, need to match the hostname +# label_prefix: ceph diff --git a/tests/functional/all_daemons_ipv6/Vagrantfile b/tests/functional/all_daemons_ipv6/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/all_daemons_ipv6/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/all_daemons_ipv6/ceph-override.json b/tests/functional/all_daemons_ipv6/ceph-override.json new file mode 100644 index 0000000..0a87e1a --- /dev/null +++ b/tests/functional/all_daemons_ipv6/ceph-override.json @@ -0,0 +1,40 @@ +{ + "ceph_conf_overrides": { + "global": { + "auth_allow_insecure_global_id_reclaim": false, + "osd_pool_default_pg_num": 12, + "osd_pool_default_size": 1, + "mon_allow_pool_size_one": true, + "mon_warn_on_pool_no_redundancy": false, + "mon_max_pg_per_osd": 300 + } + }, + "cephfs_pools": [ + { + "name": "cephfs_data", + "pg_num": 8, + "pgp_num": 8, + "rule_name": "replicated_rule", + "type": 1, + "erasure_profile": "", + "expected_num_objects": "", + "application": "cephfs", + "size": 2, + "min_size": 0 + }, + { + "name": "cephfs_metadata", + "pg_num": 8, + "pgp_num": 8, + "rule_name": "replicated_rule", + "type": 1, + "erasure_profile": "", + "expected_num_objects": "", + "application": "cephfs", + "size": 2, + "min_size": 0 + } + ], + "ceph_mon_docker_memory_limit": "2g", + "radosgw_num_instances": 2 
+} diff --git a/tests/functional/all_daemons_ipv6/container/Vagrantfile b/tests/functional/all_daemons_ipv6/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/all_daemons_ipv6/container/ceph-override.json b/tests/functional/all_daemons_ipv6/container/ceph-override.json new file mode 120000 index 0000000..772bdc5 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/ceph-override.json @@ -0,0 +1 @@ +../ceph-override.json \ No newline at end of file diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/all b/tests/functional/all_daemons_ipv6/container/group_vars/all new file mode 100644 index 0000000..e660b3a --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/group_vars/all @@ -0,0 +1,33 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_mon_docker_subnet: "{{ public_network }}" +ip_version: ipv6 +public_network: "fdec:f1fb:29cd:6940::/64" +cluster_network: "fdec:f1fb:29cd:7120::/64" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +ceph_conf_overrides: + global: + auth_allow_insecure_global_id_reclaim: false + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +docker_pull_timeout: 600s +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +mds_max_mds: 2 +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/clients b/tests/functional/all_daemons_ipv6/container/group_vars/clients new file mode 100644 index 0000000..ec0bb3e --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/group_vars/clients @@ -0,0 +1,13 @@ +--- +user_config: True +copy_admin_key: True +test: + name: "test" + rule_name: "HDD" + size: 1 +test2: + name: "test2" + size: 1 +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/iscsigws b/tests/functional/all_daemons_ipv6/container/group_vars/iscsigws new file mode 100644 index 0000000..8d0932a --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/group_vars/iscsigws @@ -0,0 +1,2 @@ +--- +generate_crt: True diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/mons b/tests/functional/all_daemons_ipv6/container/group_vars/mons new file mode 100644 index 0000000..7b31aa9 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/group_vars/mons @@ -0,0 +1,11 @@ +--- +create_crush_tree: True +crush_rule_config: True +crush_rule_hdd: + name: HDD + root: default + type: host + class: hdd + default: true +crush_rules: + - "{{ crush_rule_hdd }}" diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/osds b/tests/functional/all_daemons_ipv6/container/group_vars/osds new file mode 100644 index 0000000..ad25b58 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/group_vars/osds @@ -0,0 +1,8 @@ +--- +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + 
data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/rgws b/tests/functional/all_daemons_ipv6/container/group_vars/rgws new file mode 100644 index 0000000..639ade9 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/group_vars/rgws @@ -0,0 +1,8 @@ +--- +copy_admin_key: True +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 diff --git a/tests/functional/all_daemons_ipv6/container/hosts b/tests/functional/all_daemons_ipv6/container/hosts new file mode 100644 index 0000000..51d488c --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/hosts @@ -0,0 +1,33 @@ +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mgr0 + +[osds] +osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }" +osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }" +osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]" + +[mdss] +mds0 +mds1 +mds2 + +[rgws] +rgw0 + +#[nfss] +#nfs0 + +[clients] +client0 +client1 + +[rbdmirrors] +rbd-mirror0 + +[monitoring] +mon0 diff --git a/tests/functional/all_daemons_ipv6/container/vagrant_variables.yml b/tests/functional/all_daemons_ipv6/container/vagrant_variables.yml new file mode 100644 index 0000000..b1ea278 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 3 +mds_vms: 3 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 2 +mgr_vms: 1 + +# SUBNETS TO USE FOR THE VMS +public_subnet: "fdec:f1fb:29cd:6940::" +cluster_subnet: "fdec:f1fb:29cd:7120::" + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ 
'/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/all_daemons_ipv6/group_vars/all b/tests/functional/all_daemons_ipv6/group_vars/all new file mode 100644 index 0000000..1748266 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/group_vars/all @@ -0,0 +1,25 @@ +--- +ceph_origin: repository +ceph_repository: community +ip_version: ipv6 +public_network: "fdec:f1fb:29cd:6940::/64" +cluster_network: "fdec:f1fb:29cd:7120::/64" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_conf_overrides: + global: + auth_allow_insecure_global_id_reclaim: false + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +mds_max_mds: 2 +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" +grafana_server_group_name: ceph_monitoring diff --git a/tests/functional/all_daemons_ipv6/group_vars/clients b/tests/functional/all_daemons_ipv6/group_vars/clients new file mode 100644 index 0000000..4c37898 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/group_vars/clients @@ -0,0 +1,13 @@ +--- +copy_admin_key: True +user_config: True +test: + name: "test" + rule_name: "HDD" + size: 1 +test2: + name: "test2" + size: 1 +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/all_daemons_ipv6/group_vars/iscsigws b/tests/functional/all_daemons_ipv6/group_vars/iscsigws new file mode 100644 index 0000000..8d0932a --- /dev/null +++ b/tests/functional/all_daemons_ipv6/group_vars/iscsigws @@ -0,0 +1,2 @@ +--- +generate_crt: True diff --git a/tests/functional/all_daemons_ipv6/group_vars/mons b/tests/functional/all_daemons_ipv6/group_vars/mons new file mode 100644 index 0000000..f6ab9a5 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/group_vars/mons @@ -0,0 +1,11 @@ +--- +create_crush_tree: True +crush_rule_config: True +crush_rule_hdd: + name: HDD + root: default + type: host + class: hdd + default: true +crush_rules: + - "{{ crush_rule_hdd }}" \ No newline at end of file diff --git a/tests/functional/all_daemons_ipv6/group_vars/nfss b/tests/functional/all_daemons_ipv6/group_vars/nfss new file mode 100644 index 0000000..fc280e2 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/group_vars/nfss @@ -0,0 +1,10 @@ +copy_admin_key: true +nfs_file_gw: false +nfs_obj_gw: true +ganesha_conf_overrides: | + CACHEINODE { + Entries_HWMark = 100000; + } +nfs_ganesha_stable: true +nfs_ganesha_dev: false 
+nfs_ganesha_flavor: "ceph_main" diff --git a/tests/functional/all_daemons_ipv6/group_vars/osds b/tests/functional/all_daemons_ipv6/group_vars/osds new file mode 100644 index 0000000..99c065e --- /dev/null +++ b/tests/functional/all_daemons_ipv6/group_vars/osds @@ -0,0 +1,10 @@ +--- +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/all_daemons_ipv6/group_vars/rgws b/tests/functional/all_daemons_ipv6/group_vars/rgws new file mode 100644 index 0000000..d9c09f8 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/group_vars/rgws @@ -0,0 +1,9 @@ +copy_admin_key: true +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 diff --git a/tests/functional/all_daemons_ipv6/hosts b/tests/functional/all_daemons_ipv6/hosts new file mode 100644 index 0000000..8e20197 --- /dev/null +++ b/tests/functional/all_daemons_ipv6/hosts @@ -0,0 +1,33 @@ +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mgr0 + +[osds] +osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }" +osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }" +osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]" + +[mdss] +mds0 +mds1 +mds2 + +[rgws] +rgw0 + +[clients] +client0 +client1 + +#[nfss] +#nfs0 + +[rbdmirrors] +rbd-mirror0 + +[ceph_monitoring] +mon0 diff --git a/tests/functional/all_daemons_ipv6/vagrant_variables.yml b/tests/functional/all_daemons_ipv6/vagrant_variables.yml new file mode 100644 index 0000000..c6e168e --- /dev/null +++ b/tests/functional/all_daemons_ipv6/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE 
THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 3 +mds_vms: 3 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 2 +mgr_vms: 1 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: "fdec:f1fb:29cd:6940::" +cluster_subnet: "fdec:f1fb:29cd:7120::" + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. 
vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# VM prefix name, need to match the hostname +# label_prefix: ceph diff --git a/tests/functional/cephadm/Vagrantfile b/tests/functional/cephadm/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/cephadm/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/cephadm/group_vars/all b/tests/functional/cephadm/group_vars/all new file mode 100644 index 0000000..5c957f5 --- /dev/null +++ b/tests/functional/cephadm/group_vars/all @@ -0,0 +1,8 @@ +--- +public_network: "192.168.30.0/24" +cluster_network: "192.168.31.0/24" +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 +containerized_deployment: true diff --git a/tests/functional/cephadm/hosts b/tests/functional/cephadm/hosts new file mode 100644 index 0000000..28a105b --- /dev/null +++ b/tests/functional/cephadm/hosts @@ -0,0 +1,28 @@ +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mon0 +mon1 +mon2 + +[osds] +osd0 +osd1 + +[mdss] +mds0 + +[rgws] +rgw0 + +[nfss] +nfs0 + +[rbdmirrors] +rbd-mirror0 + +[monitoring] +mon0 diff --git a/tests/functional/cephadm/vagrant_variables.yml b/tests/functional/cephadm/vagrant_variables.yml new file mode 100644 index 0000000..c9ef656 --- /dev/null +++ b/tests/functional/cephadm/vagrant_variables.yml @@ -0,0 +1,32 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 2 +mds_vms: 1 +rgw_vms: 1 +nfs_vms: 1 +grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.30 +cluster_subnet: 192.168.31 + +# MEMORY +# set 1024 for CentOS +memory: 2048 + +vagrant_box: centos/stream9 +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. 
+vagrant_disable_synced_folder: true diff --git a/tests/functional/collect-logs.yml b/tests/functional/collect-logs.yml new file mode 100644 index 0000000..72591be --- /dev/null +++ b/tests/functional/collect-logs.yml @@ -0,0 +1,91 @@ +--- +- name: Collect logs + hosts: all + become: true + tasks: + - name: Import_role ceph-defaults + ansible.builtin.import_role: + name: ceph-defaults + + - name: Import_role ceph-facts + ansible.builtin.import_role: + name: ceph-facts + tasks_from: container_binary.yml + + - name: Import set_radosgw_address.yml + ansible.builtin.import_role: + name: ceph-facts + tasks_from: set_radosgw_address.yml + + - name: Set_fact ceph_cmd + ansible.builtin.set_fact: + ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}" + + - name: Get some ceph status outputs + ansible.builtin.command: "{{ ceph_cmd }} --connect-timeout 10 --cluster {{ cluster }} {{ item }}" + register: ceph_status + run_once: true + delegate_to: mon0 + failed_when: false + changed_when: false + with_items: + - "-s -f json-pretty" + - "osd tree" + - "osd dump" + - "pg dump" + - "versions" + - "health detail -f json-pretty" + + - name: Save ceph status to file + ansible.builtin.copy: + content: "{{ item.stdout }}" + dest: "{{ archive_path }}/{{ item.item | regex_replace(' ', '_') }}.log" + delegate_to: localhost + run_once: true + with_items: "{{ ceph_status.results }}" + + - name: Get ceph-exporter logs # noqa: ignore-errors + ansible.builtin.shell: journalctl -l -u ceph-exporter@{{ ansible_facts['hostname'] }} | tee -a /var/log/ceph/ceph-exporter.{{ ansible_facts['hostname'] }}.log + changed_when: false + ignore_errors: true + + - name: Get mon log + ansible.builtin.shell: journalctl -l -u ceph-mon@{{ ansible_facts['hostname'] }} > 
/var/log/ceph/ceph-mon.{{ ansible_facts['hostname'] }}.log + changed_when: false + when: inventory_hostname in groups.get(mon_group_name, []) + + - name: Get mds log + ansible.builtin.shell: journalctl -l -u ceph-mds@{{ ansible_facts['hostname'] }} > /var/log/ceph/ceph-mds.{{ ansible_facts['hostname'] }}.log + changed_when: false + when: inventory_hostname in groups.get(mds_group_name, []) + + - name: Get mgr log + ansible.builtin.shell: journalctl -l -u ceph-mgr@{{ ansible_facts['hostname'] }} > /var/log/ceph/ceph-mgr.{{ ansible_facts['hostname'] }}.log + changed_when: false + when: + - inventory_hostname in groups.get(mgr_group_name, []) + or + (groups.get(mgr_group_name, []) | length == 0 and inventory_hostname in groups.get(mon_group_name, [])) + + - name: Get rgw log + ansible.builtin.shell: journalctl -l -u ceph-radosgw@rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} > /var/log/ceph/ceph-radosgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}.log + changed_when: false + with_items: "{{ rgw_instances | default([]) }}" + when: inventory_hostname in groups.get(rgw_group_name, []) + + - name: Find ceph config file and logs + ansible.builtin.find: + paths: + - /etc/ceph + - /var/log/ceph + patterns: + - "*.conf" + - "*.log" + register: results + + - name: Collect ceph config file and logs + ansible.builtin.fetch: + src: "{{ item.path }}" + dest: "{{ archive_path }}/{{ inventory_hostname }}/" + flat: true + with_items: "{{ results.files }}" diff --git a/tests/functional/collocation/Vagrantfile b/tests/functional/collocation/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/collocation/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/collocation/ceph-override.json b/tests/functional/collocation/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/collocation/ceph-override.json @@ -0,0
+1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/collocation/container/Vagrantfile b/tests/functional/collocation/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/collocation/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/collocation/container/ceph-override.json b/tests/functional/collocation/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/collocation/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/collocation/container/group_vars/all b/tests/functional/collocation/container/group_vars/all new file mode 100644 index 0000000..d2ad046 --- /dev/null +++ b/tests/functional/collocation/container/group_vars/all @@ -0,0 +1,33 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +radosgw_num_instances: 2 +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.15.0/24" +cluster_network: "192.168.16.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +ceph_conf_overrides: + global: + auth_allow_insecure_global_id_reclaim: false + osd_pool_default_pg_num: 8 + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+dashboard_admin_user_ro: true +grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" \ No newline at end of file diff --git a/tests/functional/collocation/container/group_vars/clients b/tests/functional/collocation/container/group_vars/clients new file mode 100644 index 0000000..393d396 --- /dev/null +++ b/tests/functional/collocation/container/group_vars/clients @@ -0,0 +1,11 @@ +--- +user_config: True +test: + name: "test" + rule_name: "HDD" +test2: + name: "test2" + rule_name: "HDD" +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/collocation/container/group_vars/osds b/tests/functional/collocation/container/group_vars/osds new file mode 100644 index 0000000..27268d4 --- /dev/null +++ b/tests/functional/collocation/container/group_vars/osds @@ -0,0 +1,9 @@ +--- +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/collocation/container/group_vars/rgws b/tests/functional/collocation/container/group_vars/rgws new file mode 100644 index 0000000..bcd5cc3 --- /dev/null +++ b/tests/functional/collocation/container/group_vars/rgws @@ -0,0 +1,7 @@ +--- +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 diff --git a/tests/functional/collocation/container/hosts b/tests/functional/collocation/container/hosts new file mode 100644 index 0000000..a699db8 --- /dev/null +++ b/tests/functional/collocation/container/hosts @@ -0,0 +1,27 @@ +[mons] +mon0 +mon1 +mon2 + +[osds] +osd0 +osd1 + +[mdss] +mds0 +rgw0 + +[rgws] +rgw0 
+mds0 + +[rbdmirrors] +rgw0 +mds0 + +#[nfss] +#rgw0 +#mds0 + +[monitoring] +mon0 \ No newline at end of file diff --git a/tests/functional/collocation/container/vagrant_variables.yml b/tests/functional/collocation/container/vagrant_variables.yml new file mode 100644 index 0000000..4746c40 --- /dev/null +++ b/tests/functional/collocation/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 2 +mds_vms: 1 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.15 +cluster_subnet: 192.168.16 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +# client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. 
Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/collocation/group_vars/all b/tests/functional/collocation/group_vars/all new file mode 100644 index 0000000..8e4061e --- /dev/null +++ b/tests/functional/collocation/group_vars/all @@ -0,0 +1,28 @@ +--- +containerized_deployment: False +ceph_origin: repository +ceph_repository: community +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.15.0/24" +cluster_network: "192.168.16.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +ceph_conf_overrides: + global: + auth_allow_insecure_global_id_reclaim: false + osd_pool_default_pg_num: 8 + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+dashboard_admin_user_ro: true +grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" \ No newline at end of file diff --git a/tests/functional/collocation/group_vars/clients b/tests/functional/collocation/group_vars/clients new file mode 100644 index 0000000..393d396 --- /dev/null +++ b/tests/functional/collocation/group_vars/clients @@ -0,0 +1,11 @@ +--- +user_config: True +test: + name: "test" + rule_name: "HDD" +test2: + name: "test2" + rule_name: "HDD" +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/collocation/group_vars/osds b/tests/functional/collocation/group_vars/osds new file mode 100644 index 0000000..27268d4 --- /dev/null +++ b/tests/functional/collocation/group_vars/osds @@ -0,0 +1,9 @@ +--- +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/collocation/group_vars/rgws b/tests/functional/collocation/group_vars/rgws new file mode 100644 index 0000000..bcd5cc3 --- /dev/null +++ b/tests/functional/collocation/group_vars/rgws @@ -0,0 +1,7 @@ +--- +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 diff --git a/tests/functional/collocation/hosts b/tests/functional/collocation/hosts new file mode 100644 index 0000000..95b228e --- /dev/null +++ b/tests/functional/collocation/hosts @@ -0,0 +1,28 @@ +[mons] +mon0 +mon1 +mon2 + +[osds] +osd0 +osd1 + +[mdss] +mds0 +rgw0 + +[rgws] +osd0 +rgw0 +mds0 + +[rbdmirrors] +rgw0 +mds0 + +#[nfss] +#rgw0 +#mds0 + +[monitoring] +mon0 \ No newline at end of file diff --git a/tests/functional/collocation/vagrant_variables.yml 
b/tests/functional/collocation/vagrant_variables.yml new file mode 100644 index 0000000..d62442d --- /dev/null +++ b/tests/functional/collocation/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 2 +mds_vms: 1 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.15 +cluster_subnet: 192.168.16 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +# client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. 
vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/dev_setup.yml b/tests/functional/dev_setup.yml new file mode 100644 index 0000000..fa85231 --- /dev/null +++ b/tests/functional/dev_setup.yml @@ -0,0 +1,38 @@ +--- +- hosts: localhost + gather_facts: false + become: no + tags: vagrant_setup + tasks: + + + - block: + - name: set_fact group_vars_path + set_fact: + group_vars_path: "{{ change_dir + '/inventory/group_vars' if 'external_clients' in change_dir.split('/') else change_dir + '/group_vars' }}" + + - block: + - name: change ceph_repository to 'dev' + replace: + regexp: "ceph_repository:.*" + replace: "ceph_repository: dev" + dest: "{{ group_vars_path }}/all" + + - block: + - name: ensure nfs_ganesha_stable is set to False + replace: + regexp: "nfs_ganesha_stable:.*" + replace: "nfs_ganesha_stable: false" + dest: "{{ group_vars_path }}/nfss" + + - name: ensure nfs_ganesha_dev is set to True + replace: + regexp: "nfs_ganesha_dev:.*" + replace: "nfs_ganesha_dev: true" + dest: "{{ group_vars_path }}/nfss" + when: "'all_daemons' in group_vars_path.split('/')" + when: change_dir is defined + + - name: print contents of {{ group_vars_path }}/all + command: "cat {{ group_vars_path }}/all" + when: dev_setup diff --git a/tests/functional/docker2podman/Vagrantfile b/tests/functional/docker2podman/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/docker2podman/Vagrantfile @@ -0,0 
+1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/docker2podman/ceph-override.json b/tests/functional/docker2podman/ceph-override.json new file mode 100644 index 0000000..45b050d --- /dev/null +++ b/tests/functional/docker2podman/ceph-override.json @@ -0,0 +1,37 @@ +{ + "ceph_conf_overrides": { + "global": { + "osd_pool_default_pg_num": 12, + "osd_pool_default_size": 1, + "mon_allow_pool_size_one": true, + "mon_warn_on_pool_no_redundancy": false + } + }, + "cephfs_pools": [ + { + "name": "cephfs_data", + "pg_num": 8, + "pgp_num": 8, + "rule_name": "replicated_rule", + "type": 1, + "erasure_profile": "", + "expected_num_objects": "", + "application": "cephfs", + "size": 3, + "min_size": 0 + }, + { + "name": "cephfs_metadata", + "pg_num": 8, + "pgp_num": 8, + "rule_name": "replicated_rule", + "type": 1, + "erasure_profile": "", + "expected_num_objects": "", + "application": "cephfs", + "size": 3, + "min_size": 0 + } + ], + "ceph_mon_docker_memory_limit": "2g" +} diff --git a/tests/functional/docker2podman/group_vars/all b/tests/functional/docker2podman/group_vars/all new file mode 100644 index 0000000..1d2d2c7 --- /dev/null +++ b/tests/functional/docker2podman/group_vars/all @@ -0,0 +1,29 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True +container_binary: docker + +containerized_deployment: True +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.58.0/24" +cluster_network: "192.168.59.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" diff --git a/tests/functional/docker2podman/group_vars/clients b/tests/functional/docker2podman/group_vars/clients new file mode 100644 index 0000000..d1d7677 --- /dev/null +++ b/tests/functional/docker2podman/group_vars/clients @@ -0,0 +1,12 @@ +--- +user_config: True +copy_admin_key: True +test: + name: "test" + rule_name: "HDD" +test2: + name: "test2" + rule_name: "HDD" +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/docker2podman/group_vars/iscsigws b/tests/functional/docker2podman/group_vars/iscsigws new file mode 100644 index 0000000..8d0932a --- /dev/null +++ b/tests/functional/docker2podman/group_vars/iscsigws @@ -0,0 +1,2 @@ +--- +generate_crt: True diff --git a/tests/functional/docker2podman/group_vars/mons b/tests/functional/docker2podman/group_vars/mons new file mode 100644 index 0000000..f7f3792 --- /dev/null +++ b/tests/functional/docker2podman/group_vars/mons @@ -0,0 +1,11 @@ +--- +create_crush_tree: False +crush_rule_config: False +crush_rule_hdd: + name: HDD + root: default + type: host + class: hdd + default: true +crush_rules: + - "{{ crush_rule_hdd }}" diff --git a/tests/functional/docker2podman/group_vars/osds b/tests/functional/docker2podman/group_vars/osds new file mode 100644 index 0000000..27268d4 --- /dev/null +++ b/tests/functional/docker2podman/group_vars/osds @@ -0,0 +1,9 @@ +--- +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git 
a/tests/functional/docker2podman/group_vars/rgws b/tests/functional/docker2podman/group_vars/rgws new file mode 100644 index 0000000..18a2be6 --- /dev/null +++ b/tests/functional/docker2podman/group_vars/rgws @@ -0,0 +1,7 @@ +--- +copy_admin_key: True +rgw_create_pools: + foo: + pg_num: 16 + bar: + pg_num: 16 diff --git a/tests/functional/docker2podman/hosts b/tests/functional/docker2podman/hosts new file mode 100644 index 0000000..0b3fc09 --- /dev/null +++ b/tests/functional/docker2podman/hosts @@ -0,0 +1,11 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 + +[monitoring] +mon0 \ No newline at end of file diff --git a/tests/functional/docker2podman/vagrant_variables.yml b/tests/functional/docker2podman/vagrant_variables.yml new file mode 100644 index 0000000..854bb68 --- /dev/null +++ b/tests/functional/docker2podman/vagrant_variables.yml @@ -0,0 +1,32 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.58 +cluster_subnet: 192.168.59 + +# MEMORY +# set 1024 for CentOS +memory: 2048 + +vagrant_box: centos/stream9 +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. 
+vagrant_disable_synced_folder: true diff --git a/tests/functional/external_clients/Vagrantfile b/tests/functional/external_clients/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/external_clients/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/external_clients/ceph-override.json b/tests/functional/external_clients/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/external_clients/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/external_clients/container/Vagrantfile b/tests/functional/external_clients/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/external_clients/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/external_clients/container/ceph-override.json b/tests/functional/external_clients/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/external_clients/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/external_clients/container/inventory/external_clients-hosts b/tests/functional/external_clients/container/inventory/external_clients-hosts new file mode 100644 index 0000000..5378903 --- /dev/null +++ b/tests/functional/external_clients/container/inventory/external_clients-hosts @@ -0,0 +1,3 @@ +[clients] +client0 +client1 \ No newline at end of file diff --git a/tests/functional/external_clients/container/inventory/group_vars/all b/tests/functional/external_clients/container/inventory/group_vars/all new file mode 100644 index 0000000..77dd9fc --- /dev/null +++ b/tests/functional/external_clients/container/inventory/group_vars/all @@ -0,0 +1,40 @@ +--- +# this is 
only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +radosgw_num_instances: 2 +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.31.0/24" +cluster_network: "192.168.32.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +dashboard_enabled: false +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +rgw_create_pools: + foo: + pg_num: 16 + bar: + pg_num: 16 +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +fsid: 40358a87-ab6e-4bdc-83db-1d909147861c +generate_fsid: false +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/external_clients/container/inventory/group_vars/clients b/tests/functional/external_clients/container/inventory/group_vars/clients new file mode 100644 index 0000000..fff928d --- /dev/null +++ b/tests/functional/external_clients/container/inventory/group_vars/clients @@ -0,0 +1,10 @@ +--- +copy_admin_key: True +user_config: True +test: + name: "test" +test2: + name: "test2" +pools: + - "{{ test }}" + - "{{ test2 }}" \ No newline at end of file diff --git a/tests/functional/external_clients/container/inventory/hosts b/tests/functional/external_clients/container/inventory/hosts new file mode 100644 index 0000000..a63f471 --- /dev/null +++ b/tests/functional/external_clients/container/inventory/hosts @@ -0,0 +1,7 @@ +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mon0 diff --git a/tests/functional/external_clients/container/vagrant_variables.yml 
b/tests/functional/external_clients/container/vagrant_variables.yml new file mode 100644 index 0000000..74930c0 --- /dev/null +++ b/tests/functional/external_clients/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 0 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 2 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.31 +cluster_subnet: 192.168.32 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +# client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. 
+vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/external_clients/inventory/external_clients-hosts b/tests/functional/external_clients/inventory/external_clients-hosts new file mode 100644 index 0000000..5378903 --- /dev/null +++ b/tests/functional/external_clients/inventory/external_clients-hosts @@ -0,0 +1,3 @@ +[clients] +client0 +client1 \ No newline at end of file diff --git a/tests/functional/external_clients/inventory/group_vars/all b/tests/functional/external_clients/inventory/group_vars/all new file mode 100644 index 0000000..29c0ed4 --- /dev/null +++ b/tests/functional/external_clients/inventory/group_vars/all @@ -0,0 +1,34 @@ +--- +containerized_deployment: False +ceph_origin: repository +ceph_repository: community +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_mon_docker_subnet: "{{ public_network }}" +dashboard_enabled: False +public_network: "192.168.31.0/24" +cluster_network: "192.168.32.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +osd_objectstore: 
"bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +rgw_create_pools: + foo: + pg_num: 16 + bar: + pg_num: 16 +fsid: 40358a87-ab6e-4bdc-83db-1d909147861c +generate_fsid: false \ No newline at end of file diff --git a/tests/functional/external_clients/inventory/group_vars/clients b/tests/functional/external_clients/inventory/group_vars/clients new file mode 100644 index 0000000..fff928d --- /dev/null +++ b/tests/functional/external_clients/inventory/group_vars/clients @@ -0,0 +1,10 @@ +--- +copy_admin_key: True +user_config: True +test: + name: "test" +test2: + name: "test2" +pools: + - "{{ test }}" + - "{{ test2 }}" \ No newline at end of file diff --git a/tests/functional/external_clients/inventory/hosts b/tests/functional/external_clients/inventory/hosts new file mode 100644 index 0000000..a63f471 --- /dev/null +++ b/tests/functional/external_clients/inventory/hosts @@ -0,0 +1,7 @@ +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mon0 diff --git a/tests/functional/external_clients/vagrant_variables.yml b/tests/functional/external_clients/vagrant_variables.yml new file mode 100644 index 0000000..935b5ae --- /dev/null +++ b/tests/functional/external_clients/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 0 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 2 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.31 +cluster_subnet: 192.168.32 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +# client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/external_clients_admin_key.yml b/tests/functional/external_clients_admin_key.yml new file mode 100644 index 0000000..5d1e5c5 --- /dev/null +++ b/tests/functional/external_clients_admin_key.yml @@ -0,0 +1,27 @@ +--- +- hosts: clients + gather_facts: false + become: yes + tasks: + + - name: get keys from monitors + command: "{{ 'podman exec ceph-mon-mon0' if containerized_deployment | bool else '' }} ceph --cluster ceph auth get client.admin" + register: _key + delegate_to: "{{ groups.get('mons')[0] }}" + run_once: true + + - name: create /etc/ceph + file: + path: /etc/ceph + state: directory + owner: 167 + group: 167 + mode: "0755" + + - name: copy ceph key(s) if needed + copy: + dest: "/etc/ceph/ceph.client.admin.keyring" + content: "{{ _key.stdout + '\n' }}" + owner: 167 + group: 167 + mode: "0600" diff --git a/tests/functional/infra_lv_create/Vagrantfile b/tests/functional/infra_lv_create/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/infra_lv_create/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/infra_lv_create/group_vars/all b/tests/functional/infra_lv_create/group_vars/all new file mode 100644 index 0000000..06d56b9 --- /dev/null +++ b/tests/functional/infra_lv_create/group_vars/all @@ -0,0 +1,33 @@ +--- + +logfile_path: ./lv-create.log +# Path of nvme device primed for LV creation for journals and data. 
Only one NVMe device is allowed at a time. Providing a list will not work in this case. +nvme_device: /dev/sdb + +# Path of hdd devices designated for LV creation. +hdd_devices: + - /dev/sdc + +journal_size: 1024 + +# This var is a list of bucket index LVs created on the NVMe device. We recommend one be created but you can add others +nvme_device_lvs: + - lv_name: "ceph-bucket-index-1" + size: 100%FREE + journal_name: "ceph-journal-bucket-index-1-{{ nvme_device_basename }}" + +## TYPICAL USERS WILL NOT NEED TO CHANGE VARS FROM HERE DOWN ## + +# all hdd's have to be the same size and the LVs on them are dedicated for OSD data +hdd_lv_size: 100%FREE + +# Since this playbook can be run multiple times across different devices, {{ var.split('/')[-1] }} is used quite frequently in this playbook. +# This is used to strip the device name away from its path (ex: sdc from /dev/sdc) to differentiate the names of vgs, journals, or lvs if the prefixes are not changed across multiple runs. +nvme_device_basename: "{{ nvme_device.split('/')[-1] }}" + +# Only one volume group is created in the playbook for all the LVs on NVMe. This volume group takes up the entire device specified in "nvme_device". 
+nvme_vg_name: "ceph-nvme-vg-{{ nvme_device_basename }}" + +hdd_vg_prefix: "ceph-hdd-vg" +hdd_lv_prefix: "ceph-hdd-lv" +hdd_journal_prefix: "ceph-journal" diff --git a/tests/functional/infra_lv_create/hosts b/tests/functional/infra_lv_create/hosts new file mode 100644 index 0000000..02b4c16 --- /dev/null +++ b/tests/functional/infra_lv_create/hosts @@ -0,0 +1,2 @@ +[osds] +osd0 diff --git a/tests/functional/infra_lv_create/vagrant_variables.yml b/tests/functional/infra_lv_create/vagrant_variables.yml new file mode 100644 index 0000000..6a1aaba --- /dev/null +++ b/tests/functional/infra_lv_create/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 0 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 192.168.40 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/lvm-auto-discovery/Vagrantfile b/tests/functional/lvm-auto-discovery/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/lvm-auto-discovery/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/lvm-auto-discovery/ceph-override.json b/tests/functional/lvm-auto-discovery/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/lvm-auto-discovery/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/lvm-auto-discovery/container/Vagrantfile b/tests/functional/lvm-auto-discovery/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/lvm-auto-discovery/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/lvm-auto-discovery/container/ceph-override.json b/tests/functional/lvm-auto-discovery/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/lvm-auto-discovery/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/lvm-auto-discovery/container/group_vars/all b/tests/functional/lvm-auto-discovery/container/group_vars/all 
new file mode 100644 index 0000000..102399f --- /dev/null +++ b/tests/functional/lvm-auto-discovery/container/group_vars/all @@ -0,0 +1,31 @@ +--- + +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.39.0/24" +cluster_network: "192.168.40.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +crush_device_class: test +copy_admin_key: true +osd_auto_discovery: true +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 diff --git a/tests/functional/lvm-auto-discovery/container/hosts b/tests/functional/lvm-auto-discovery/container/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/lvm-auto-discovery/container/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/lvm-auto-discovery/container/vagrant_variables.yml b/tests/functional/lvm-auto-discovery/container/vagrant_variables.yml new file mode 100644 index 0000000..0aae459 --- /dev/null +++ b/tests/functional/lvm-auto-discovery/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 192.168.40 + 
+# MEMORY +# set 1024 for CentOS +memory: 2048 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/lvm-auto-discovery/group_vars/all b/tests/functional/lvm-auto-discovery/group_vars/all new file mode 100644 index 0000000..157cb79 --- /dev/null +++ b/tests/functional/lvm-auto-discovery/group_vars/all @@ -0,0 +1,22 @@ +--- + +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.39.0/24" +cluster_network: "192.168.40.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +osd_objectstore: "bluestore" +crush_device_class: test +copy_admin_key: true +osd_auto_discovery: true +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 \ No newline at end of file diff --git a/tests/functional/lvm-auto-discovery/hosts b/tests/functional/lvm-auto-discovery/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/lvm-auto-discovery/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/lvm-auto-discovery/vagrant_variables.yml b/tests/functional/lvm-auto-discovery/vagrant_variables.yml new file mode 100644 index 0000000..48d5f1c --- /dev/null +++ b/tests/functional/lvm-auto-discovery/vagrant_variables.yml @@ -0,0 
+1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 192.168.40 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. 
+vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/lvm-batch/Vagrantfile b/tests/functional/lvm-batch/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/lvm-batch/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/lvm-batch/ceph-override.json b/tests/functional/lvm-batch/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/lvm-batch/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/lvm-batch/container/Vagrantfile b/tests/functional/lvm-batch/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/lvm-batch/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/lvm-batch/container/ceph-override.json b/tests/functional/lvm-batch/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/lvm-batch/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/lvm-batch/container/group_vars/all 
b/tests/functional/lvm-batch/container/group_vars/all new file mode 100644 index 0000000..27b18ea --- /dev/null +++ b/tests/functional/lvm-batch/container/group_vars/all @@ -0,0 +1,31 @@ +--- + +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.39.0/24" +cluster_network: "192.168.40.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +crush_device_class: test +copy_admin_key: true +devices: + - /dev/sdb + - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00003 +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/lvm-batch/container/hosts b/tests/functional/lvm-batch/container/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/lvm-batch/container/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/lvm-batch/container/vagrant_variables.yml b/tests/functional/lvm-batch/container/vagrant_variables.yml new file mode 100644 index 0000000..aeb0d2b --- /dev/null +++ b/tests/functional/lvm-batch/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 
192.168.40 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/lvm-batch/group_vars/all b/tests/functional/lvm-batch/group_vars/all new file mode 100644 index 0000000..22c46d1 --- /dev/null +++ b/tests/functional/lvm-batch/group_vars/all @@ -0,0 +1,23 @@ +--- + +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.39.0/24" +cluster_network: "192.168.40.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +crush_device_class: test +copy_admin_key: true +devices: + - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00002 + - /dev/sdc +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 \ No newline at end of file diff --git a/tests/functional/lvm-batch/hosts b/tests/functional/lvm-batch/hosts new file mode 100644 index 0000000..f6a265a --- /dev/null +++ b/tests/functional/lvm-batch/hosts @@ -0,0 +1,5 @@ +[mons] +mon0 + +[osds] +osd0 diff --git a/tests/functional/lvm-batch/vagrant_variables.yml b/tests/functional/lvm-batch/vagrant_variables.yml new file mode 100644 index 0000000..48d5f1c --- /dev/null +++ b/tests/functional/lvm-batch/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# 
DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 192.168.40 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. 
vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/lvm-osds/Vagrantfile b/tests/functional/lvm-osds/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/lvm-osds/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/lvm-osds/ceph-override.json b/tests/functional/lvm-osds/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/lvm-osds/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/lvm-osds/container/Vagrantfile b/tests/functional/lvm-osds/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/lvm-osds/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/lvm-osds/container/ceph-override.json b/tests/functional/lvm-osds/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/lvm-osds/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/lvm-osds/container/group_vars/all b/tests/functional/lvm-osds/container/group_vars/all new file mode 100644 index 0000000..4089f38 --- /dev/null +++ 
b/tests/functional/lvm-osds/container/group_vars/all @@ -0,0 +1,26 @@ +--- + +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +ceph_origin: repository +ceph_repository: community +public_network: "192.168.33.0/24" +cluster_network: "192.168.34.0/24" +copy_admin_key: true +containerized_deployment: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 diff --git a/tests/functional/lvm-osds/container/hosts b/tests/functional/lvm-osds/container/hosts new file mode 100644 index 0000000..98db36a --- /dev/null +++ b/tests/functional/lvm-osds/container/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]" +osd1 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true +osd2 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]" +osd3 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true diff --git a/tests/functional/lvm-osds/container/vagrant_variables.yml b/tests/functional/lvm-osds/container/vagrant_variables.yml new file mode 100644 index 0000000..65b49dd --- /dev/null +++ b/tests/functional/lvm-osds/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 4 
+mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.33 +cluster_subnet: 192.168.34 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/lvm-osds/group_vars/all b/tests/functional/lvm-osds/group_vars/all new file mode 100644 index 0000000..c39cfd4 --- /dev/null +++ b/tests/functional/lvm-osds/group_vars/all @@ -0,0 +1,19 @@ +--- + +ceph_origin: repository +ceph_repository: community +public_network: "192.168.39.0/24" +cluster_network: "192.168.40.0/24" +copy_admin_key: true +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 + diff --git a/tests/functional/lvm-osds/hosts b/tests/functional/lvm-osds/hosts new file mode 100644 index 0000000..98db36a --- /dev/null +++ b/tests/functional/lvm-osds/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]" +osd1 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true +osd2 lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]" +osd3 lvm_volumes="[{'data': 
'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true diff --git a/tests/functional/lvm-osds/vagrant_variables.yml b/tests/functional/lvm-osds/vagrant_variables.yml new file mode 100644 index 0000000..3dac6a0 --- /dev/null +++ b/tests/functional/lvm-osds/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 4 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.39 +cluster_subnet: 192.168.40 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/lvm_setup.yml b/tests/functional/lvm_setup.yml new file mode 100644 index 0000000..94149d9 --- /dev/null +++ b/tests/functional/lvm_setup.yml @@ -0,0 +1,83 @@ +--- +- hosts: all + gather_facts: false + become: yes + tasks: + - import_tasks: ../../raw_install_python.yml + +- hosts: osds + gather_facts: false + become: yes + tasks: + - name: check if it is atomic host + stat: + path: /run/ostree-booted + register: stat_ostree + tags: always + + - name: set_fact is_atomic + set_fact: + is_atomic: '{{ stat_ostree.stat.exists }}' + tags: always + + # Some images may not have lvm2 installed + - name: install lvm2 + package: + name: lvm2 + state: present + register: result + until: result is succeeded + when: not is_atomic | bool + + - name: create volume group + lvg: + vg: test_group + pvs: "{{ pv_devices[0] | default('/dev/sdb') }}" + + - name: create logical volume 1 + lvol: + vg: test_group + lv: data-lv1 + size: 50%FREE + shrink: false + + - name: create logical volume 2 + lvol: + vg: test_group + lv: data-lv2 + size: 100%FREE + shrink: false + + - name: partition "{{ pv_devices[1] | default('/dev/sdc') }}"for journals + parted: + device: "{{ pv_devices[1] | default('/dev/sdc') }}" + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + tags: partitions + + - name: partition "{{ pv_devices[1] | default('/dev/sdc') }}"for journals + 
parted: + device: "{{ pv_devices[1] | default('/dev/sdc') }}" + number: 2 + part_start: 50% + part_end: 100% + unit: '%' + state: present + label: gpt + tags: partitions + + - name: create journals vg from "{{ pv_devices[1] | default('/dev/sdc') }}2" + lvg: + vg: journals + pvs: "{{ pv_devices[1] | default('/dev/sdc') }}2" + + - name: create journal1 lv + lvol: + vg: journals + lv: journal1 + size: 100%FREE + shrink: false diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/Vagrantfile b/tests/functional/migrate_ceph_disk_to_ceph_volume/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all b/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all new file mode 100644 index 0000000..657e301 --- /dev/null +++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all @@ -0,0 +1,25 @@ +--- + +ceph_origin: repository +ceph_repository: community +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +journal_size: 100 +osd_objectstore: "bluestore" +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+grafana_admin_password: +xFRe+RES@7vg24n \ No newline at end of file diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/hosts b/tests/functional/migrate_ceph_disk_to_ceph_volume/hosts new file mode 100644 index 0000000..157cccb --- /dev/null +++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/hosts @@ -0,0 +1,10 @@ +[mons] +mon0 +mon1 +mon2 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml b/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml new file mode 100644 index 0000000..5eb82a8 --- /dev/null +++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/tests/functional/podman/Vagrantfile b/tests/functional/podman/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/podman/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/podman/ceph-override.json b/tests/functional/podman/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/podman/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/podman/group_vars/all b/tests/functional/podman/group_vars/all new file mode 100644 index 0000000..04311e5 --- /dev/null +++ b/tests/functional/podman/group_vars/all @@ -0,0 +1,28 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.30.0/24" +cluster_network: "192.168.31.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +handler_health_mon_check_delay: 10 
+handler_health_osd_check_delay: 10 +dashboard_admin_password: $sX!cD$rYU6qR^B! +grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" diff --git a/tests/functional/podman/group_vars/clients b/tests/functional/podman/group_vars/clients new file mode 100644 index 0000000..d1d7677 --- /dev/null +++ b/tests/functional/podman/group_vars/clients @@ -0,0 +1,12 @@ +--- +user_config: True +copy_admin_key: True +test: + name: "test" + rule_name: "HDD" +test2: + name: "test2" + rule_name: "HDD" +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/podman/group_vars/iscsigws b/tests/functional/podman/group_vars/iscsigws new file mode 100644 index 0000000..8d0932a --- /dev/null +++ b/tests/functional/podman/group_vars/iscsigws @@ -0,0 +1,2 @@ +--- +generate_crt: True diff --git a/tests/functional/podman/group_vars/mons b/tests/functional/podman/group_vars/mons new file mode 100644 index 0000000..7b31aa9 --- /dev/null +++ b/tests/functional/podman/group_vars/mons @@ -0,0 +1,11 @@ +--- +create_crush_tree: True +crush_rule_config: True +crush_rule_hdd: + name: HDD + root: default + type: host + class: hdd + default: true +crush_rules: + - "{{ crush_rule_hdd }}" diff --git a/tests/functional/podman/group_vars/osds b/tests/functional/podman/group_vars/osds new file mode 100644 index 0000000..27268d4 --- /dev/null +++ b/tests/functional/podman/group_vars/osds @@ -0,0 +1,9 @@ +--- +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/podman/group_vars/rgws 
b/tests/functional/podman/group_vars/rgws new file mode 100644 index 0000000..6b7abc9 --- /dev/null +++ b/tests/functional/podman/group_vars/rgws @@ -0,0 +1,7 @@ +--- +copy_admin_key: True +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: diff --git a/tests/functional/podman/hosts b/tests/functional/podman/hosts new file mode 100644 index 0000000..dea6a9e --- /dev/null +++ b/tests/functional/podman/hosts @@ -0,0 +1,30 @@ +[mons] +mon0 +mon1 +mon2 + +[osds] +osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }" +osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }" + +[mdss] +mds0 + +[rgws] +rgw0 + +#[nfss] +#nfs0 + +[clients] +client0 +client1 + +[rbdmirrors] +rbd-mirror0 + +[monitoring] +mon0 + +#[all:vars] +#ansible_python_interpreter=/usr/bin/python3 \ No newline at end of file diff --git a/tests/functional/podman/vagrant_variables.yml b/tests/functional/podman/vagrant_variables.yml new file mode 100644 index 0000000..d9b6333 --- /dev/null +++ b/tests/functional/podman/vagrant_variables.yml @@ -0,0 +1,32 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 2 +mds_vms: 1 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 2 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.30 +cluster_subnet: 192.168.31 + +# MEMORY +# set 1024 for CentOS +memory: 2048 + +vagrant_box: centos/stream9 +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. 
+vagrant_disable_synced_folder: true diff --git a/tests/functional/rbd_map_devices.yml b/tests/functional/rbd_map_devices.yml new file mode 100644 index 0000000..47bfc29 --- /dev/null +++ b/tests/functional/rbd_map_devices.yml @@ -0,0 +1,58 @@ +--- +- hosts: client0 + gather_facts: false + become: yes + tasks: + - name: check if it is atomic host + stat: + path: /run/ostree-booted + register: stat_ostree + tags: always + +# all our containerized job are based on atomic os, so we can rely on is_atomic to detect +# whether we are running a containerized job + - name: set_fact is_atomic + set_fact: + is_atomic: '{{ stat_ostree.stat.exists }}' + tags: always + + - name: load rbd module + modprobe: + name: rbd + state: present + delegate_to: "{{ item }}" + with_items: + - mon0 + - client0 + + - name: create an rbd image - non container + command: "rbd create --size=1024 test/rbd_test" + delegate_to: "mon0" + when: + - not is_atomic | bool + - not containerized_deployment | default(false) | bool + + - name: create an rbd image - container + command: "podman run --rm -v /etc/ceph:/etc/ceph --net=host --entrypoint=rbd {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} create --size=1024 test/rbd_test" + delegate_to: "mon0" + when: is_atomic | bool or containerized_deployment | default(false) | bool + + - name: non container + when: + - not is_atomic | bool + - not containerized_deployment | default(false) | bool + block: + - name: disable features unsupported by the kernel + command: rbd feature disable test/rbd_test object-map fast-diff deep-flatten + + - name: map a device + command: rbd map test/rbd_test + + - name: container + when: is_atomic | bool or containerized_deployment | default(false) | bool + block: + - name: disable features unsupported by the kernel + command: "podman run --rm -v /etc/ceph:/etc/ceph --net=host --entrypoint=rbd {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} feature disable 
test/rbd_test object-map fast-diff deep-flatten" + + - name: map a device + command: "podman run --rm --privileged -v /etc/ceph:/etc/ceph -v /dev:/dev --net=host --entrypoint=rbd {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} map test/rbd_test" diff --git a/tests/functional/rbdmirror.yml b/tests/functional/rbdmirror.yml new file mode 100644 index 0000000..d2f94c1 --- /dev/null +++ b/tests/functional/rbdmirror.yml @@ -0,0 +1,32 @@ +--- +- hosts: mon0 + gather_facts: True + become: True + tasks: + - name: import_role ceph-defaults + import_role: + name: ceph-defaults + + - name: import_role ceph-facts + include_role: + name: ceph-facts + tasks_from: "container_binary.yml" + + - name: set_fact ceph_cmd + set_fact: + rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rbd' }}" + + - name: create an image in rbd mirrored pool + command: "{{ rbd_cmd }} create foo --size 1024 --pool {{ ceph_rbd_mirror_pool }} --image-feature exclusive-lock,journaling" + changed_when: false + tags: primary + + - name: check the image is replicated + command: "{{ rbd_cmd }} --pool {{ ceph_rbd_mirror_pool }} ls --format json" + register: rbd_ls + changed_when: false + tags: secondary + retries: 30 + delay: 1 + until: "'foo' in (rbd_ls.stdout | default('{}') | from_json)" + diff --git a/tests/functional/rbdmirror/Vagrantfile b/tests/functional/rbdmirror/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/rbdmirror/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/rbdmirror/container/Vagrantfile b/tests/functional/rbdmirror/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ 
b/tests/functional/rbdmirror/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/rbdmirror/container/group_vars/all b/tests/functional/rbdmirror/container/group_vars/all new file mode 100644 index 0000000..d429481 --- /dev/null +++ b/tests/functional/rbdmirror/container/group_vars/all @@ -0,0 +1,31 @@ +--- +docker: True +containerized_deployment: true +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.144.0/24" +cluster_network: "192.168.145.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 512 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 diff --git a/tests/functional/rbdmirror/container/hosts b/tests/functional/rbdmirror/container/hosts new file mode 100644 index 0000000..fb02068 --- /dev/null +++ b/tests/functional/rbdmirror/container/hosts @@ -0,0 +1,11 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 + +[rbdmirrors] +osd0 diff --git a/tests/functional/rbdmirror/container/secondary/Vagrantfile b/tests/functional/rbdmirror/container/secondary/Vagrantfile new file mode 120000 index 0000000..dfd7436 --- /dev/null +++ b/tests/functional/rbdmirror/container/secondary/Vagrantfile @@ -0,0 +1 @@ +../../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/rbdmirror/container/secondary/group_vars/all b/tests/functional/rbdmirror/container/secondary/group_vars/all 
new file mode 100644 index 0000000..3d1d4be --- /dev/null +++ b/tests/functional/rbdmirror/container/secondary/group_vars/all @@ -0,0 +1,31 @@ +--- +docker: True +containerized_deployment: true +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.146.0/24" +cluster_network: "192.168.147.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 512 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 diff --git a/tests/functional/rbdmirror/container/secondary/hosts b/tests/functional/rbdmirror/container/secondary/hosts new file mode 100644 index 0000000..56d5ae7 --- /dev/null +++ b/tests/functional/rbdmirror/container/secondary/hosts @@ -0,0 +1,12 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 + +[rbdmirrors] +osd0 + diff --git a/tests/functional/rbdmirror/container/secondary/vagrant_variables.yml b/tests/functional/rbdmirror/container/secondary/vagrant_variables.yml new file mode 100644 index 0000000..d673eb2 --- /dev/null +++ b/tests/functional/rbdmirror/container/secondary/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS 
+public_subnet: 192.168.146 +cluster_subnet: 192.168.147 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/rbdmirror/container/vagrant_variables.yml b/tests/functional/rbdmirror/container/vagrant_variables.yml new file mode 100644 index 0000000..da84f1f --- /dev/null +++ b/tests/functional/rbdmirror/container/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: true + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.144 +cluster_subnet: 192.168.145 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/rbdmirror/group_vars/all b/tests/functional/rbdmirror/group_vars/all new file mode 100644 index 0000000..ef706ca --- /dev/null +++ b/tests/functional/rbdmirror/group_vars/all @@ -0,0 +1,26 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.140.0/24" +cluster_network: "192.168.141.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 512 +dashboard_enabled: False diff --git a/tests/functional/rbdmirror/hosts b/tests/functional/rbdmirror/hosts new file mode 100644 index 0000000..56d5ae7 --- /dev/null +++ b/tests/functional/rbdmirror/hosts @@ -0,0 +1,12 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 + +[rbdmirrors] +osd0 + diff --git a/tests/functional/rbdmirror/secondary/Vagrantfile b/tests/functional/rbdmirror/secondary/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ 
b/tests/functional/rbdmirror/secondary/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/rbdmirror/secondary/group_vars/all b/tests/functional/rbdmirror/secondary/group_vars/all new file mode 100644 index 0000000..b5bf0ea --- /dev/null +++ b/tests/functional/rbdmirror/secondary/group_vars/all @@ -0,0 +1,26 @@ +--- +ceph_origin: repository +ceph_repository: community +cluster: ceph +public_network: "192.168.142.0/24" +cluster_network: "192.168.143.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +journal_size: 100 +osd_objectstore: "bluestore" +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 512 +dashboard_enabled: False diff --git a/tests/functional/rbdmirror/secondary/hosts b/tests/functional/rbdmirror/secondary/hosts new file mode 100644 index 0000000..56d5ae7 --- /dev/null +++ b/tests/functional/rbdmirror/secondary/hosts @@ -0,0 +1,12 @@ +[mons] +mon0 + +[mgrs] +mon0 + +[osds] +osd0 + +[rbdmirrors] +osd0 + diff --git a/tests/functional/rbdmirror/secondary/vagrant_variables.yml b/tests/functional/rbdmirror/secondary/vagrant_variables.yml new file mode 100644 index 0000000..987318d --- /dev/null +++ b/tests/functional/rbdmirror/secondary/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# 
SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.142 +cluster_subnet: 192.168.143 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/rbdmirror/vagrant_variables.yml b/tests/functional/rbdmirror/vagrant_variables.yml new file mode 100644 index 0000000..1e29ce5 --- /dev/null +++ b/tests/functional/rbdmirror/vagrant_variables.yml @@ -0,0 +1,70 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.140 +cluster_subnet: 192.168.141 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/reboot.yml b/tests/functional/reboot.yml new file mode 100644 index 0000000..e79cc13 --- /dev/null +++ b/tests/functional/reboot.yml @@ -0,0 +1,9 @@ +--- +- hosts: all + gather_facts: true + tasks: + - name: reboot the machines + reboot: + reboot_timeout: 180 + test_command: uptime + become: yes diff --git a/tests/functional/setup.yml b/tests/functional/setup.yml new file mode 100644 index 0000000..683e456 --- /dev/null +++ b/tests/functional/setup.yml @@ -0,0 +1,104 @@ +--- +- hosts: all + gather_facts: true + become: yes + tasks: + + - name: check if it is Atomic host + stat: path=/run/ostree-booted + register: stat_ostree + check_mode: no + + - name: set fact for using Atomic host + set_fact: + is_atomic: '{{ stat_ostree.stat.exists }}' + + # - name: List repo files + # find: + # paths: /etc/yum.repos.d/ + # file_type: file + # patterns: 'CentOS-Stream-*.repo' + # register: pre_stream_repo_files + # when: + # - ansible_facts['distribution'] == 'CentOS' + # - ansible_facts['distribution_major_version'] | int > 7 + # - not is_atomic | bool + # + # # From ansible docs: 'replace: If not set, matches are removed entirely.' 
+ # - name: Remove all mirrorlists + # replace: + # path: "{{ item.path }}" + # regexp: '^mirrorlist=.*' + # with_items: "{{ pre_stream_repo_files.files }}" + # when: + # - ansible_facts['distribution'] == 'CentOS' + # - ansible_facts['distribution_major_version'] | int > 7 + # - not is_atomic | bool + # + # - name: Uncomment baseurls + # replace: + # path: "{{ item.path }}" + # regexp: '^mirrorlist=.*' + # regexp: '^\s*#*\s*(baseurl=.*)' + # replace: '\1' + # with_items: "{{ pre_stream_repo_files.files }}" + # when: + # - ansible_facts['distribution'] == 'CentOS' + # - ansible_facts['distribution_major_version'] | int > 7 + # - not is_atomic | bool + # + # - name: Point baseurls to archive server + # replace: + # path: "{{ item.path }}" + # regexp: 'mirror.centos.org/\$contentdir/\$stream' + # replace: 'apt-mirror.front.sepia.ceph.com/centos/8-stream' + # with_items: "{{ pre_stream_repo_files.files }}" + # when: + # - ansible_facts['distribution'] == 'CentOS' + # - ansible_facts['distribution_major_version'] | int > 7 + # - not is_atomic | bool + + - name: update the system on RHEL-based OS # noqa: package-latest + ansible.builtin.yum: + name: '*' + state: latest + register: yum_upgrade + when: ansible_facts['os_family'] == 'RedHat' + + - name: update the system on Debian-based OS # noqa: package-latest + ansible.builtin.apt: + name: '*' + state: latest + update_cache: true + when: ansible_facts['os_family'] == 'Debian' + + - name: get root mount information + set_fact: + rootmount: "{{ ansible_facts['mounts']|json_query('[?mount==`/`]|[0]') }}" + + # mount -o remount doesn't work on RHEL 8 for now + - name: add mount options to / + ansible.posix.mount: + path: '{{ rootmount.mount }}' + src: '{{ rootmount.device }}' + opts: "noatime,nodiratime,nobarrier" + fstype: '{{ rootmount.fstype }}' + state: mounted + + # we need to install this so the Socket testinfra module + # can use netcat for testing + - name: install net-tools + package: + name: net-tools + state: 
present + register: result + until: result is succeeded + when: not is_atomic | bool + + - name: Resize logical volume for root partition to fill remaining free space + lvol: + lv: root + vg: atomicos + size: +100%FREE + resizefs: yes + when: is_atomic | bool diff --git a/tests/functional/shrink_mds/Vagrantfile b/tests/functional/shrink_mds/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/shrink_mds/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_mds/ceph-override.json b/tests/functional/shrink_mds/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/shrink_mds/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_mds/container/Vagrantfile b/tests/functional/shrink_mds/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/shrink_mds/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_mds/container/ceph-override.json b/tests/functional/shrink_mds/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/shrink_mds/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_mds/container/group_vars/all b/tests/functional/shrink_mds/container/group_vars/all new file mode 100644 index 0000000..0cd22f6 --- /dev/null +++ b/tests/functional/shrink_mds/container/group_vars/all @@ -0,0 +1,19 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.79.0/24" +cluster_network: "192.168.80.0/24" +ceph_conf_overrides: + global: + 
mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +copy_admin_key: True +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/shrink_mds/container/group_vars/mons b/tests/functional/shrink_mds/container/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_mds/container/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_mds/container/group_vars/osds b/tests/functional/shrink_mds/container/group_vars/osds new file mode 100644 index 0000000..27268d4 --- /dev/null +++ b/tests/functional/shrink_mds/container/group_vars/osds @@ -0,0 +1,9 @@ +--- +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/shrink_mds/container/hosts b/tests/functional/shrink_mds/container/hosts new file mode 100644 index 0000000..b192b10 --- /dev/null +++ b/tests/functional/shrink_mds/container/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mdss] +mds0 diff --git a/tests/functional/shrink_mds/container/vagrant_variables.yml b/tests/functional/shrink_mds/container/vagrant_variables.yml new file mode 100644 index 0000000..7c3b22b --- /dev/null +++ b/tests/functional/shrink_mds/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 1 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.79 +cluster_subnet: 192.168.80 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ 
'/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/shrink_mds/group_vars/all b/tests/functional/shrink_mds/group_vars/all new file mode 100644 index 0000000..b82aa45 --- /dev/null +++ b/tests/functional/shrink_mds/group_vars/all @@ -0,0 +1,15 @@ +--- +ceph_origin: repository +ceph_repository: community +public_network: "192.168.77.0/24" +cluster_network: "192.168.78.0/24" +radosgw_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +copy_admin_key: true +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False \ No newline at end of file diff --git a/tests/functional/shrink_mds/group_vars/mons b/tests/functional/shrink_mds/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_mds/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_mds/group_vars/osds b/tests/functional/shrink_mds/group_vars/osds new file mode 100644 index 0000000..3ec1d6e --- /dev/null +++ b/tests/functional/shrink_mds/group_vars/osds @@ -0,0 +1,11 @@ +--- +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/shrink_mds/hosts 
b/tests/functional/shrink_mds/hosts new file mode 100644 index 0000000..b192b10 --- /dev/null +++ b/tests/functional/shrink_mds/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mdss] +mds0 diff --git a/tests/functional/shrink_mds/vagrant_variables.yml b/tests/functional/shrink_mds/vagrant_variables.yml new file mode 100644 index 0000000..df4f9fa --- /dev/null +++ b/tests/functional/shrink_mds/vagrant_variables.yml @@ -0,0 +1,65 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 1 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.77 +cluster_subnet: 192.168.78 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For Xenial use disks: [ '/dev/sdb', '/dev/sdc' ] +# For CentOS7 use disks: [ '/dev/sda', '/dev/sdb' ] +disks: [ '/dev/sdb', '/dev/sdc' ] + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial or bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# openSUSE: opensuse/openSUSE-42.3-x86_64 +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +vagrant_sync_dir: /home/vagrant/sync +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# Debug mode, runs Ansible with -vvvv +debug: false diff --git a/tests/functional/shrink_mgr/Vagrantfile b/tests/functional/shrink_mgr/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/shrink_mgr/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_mgr/ceph-override.json b/tests/functional/shrink_mgr/ceph-override.json new file mode 120000 index 
0000000..fe2ff40 --- /dev/null +++ b/tests/functional/shrink_mgr/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_mgr/container/Vagrantfile b/tests/functional/shrink_mgr/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/shrink_mgr/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_mgr/container/ceph-override.json b/tests/functional/shrink_mgr/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/shrink_mgr/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_mgr/container/group_vars/all b/tests/functional/shrink_mgr/container/group_vars/all new file mode 100644 index 0000000..3bff880 --- /dev/null +++ b/tests/functional/shrink_mgr/container/group_vars/all @@ -0,0 +1,18 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.83.0/24" +cluster_network: "192.168.84.0/24" +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/shrink_mgr/container/group_vars/mons b/tests/functional/shrink_mgr/container/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_mgr/container/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_mgr/container/group_vars/osds 
b/tests/functional/shrink_mgr/container/group_vars/osds new file mode 100644 index 0000000..27268d4 --- /dev/null +++ b/tests/functional/shrink_mgr/container/group_vars/osds @@ -0,0 +1,9 @@ +--- +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/shrink_mgr/container/hosts b/tests/functional/shrink_mgr/container/hosts new file mode 100644 index 0000000..ae0f0f9 --- /dev/null +++ b/tests/functional/shrink_mgr/container/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mgr0 +mgr1 diff --git a/tests/functional/shrink_mgr/container/vagrant_variables.yml b/tests/functional/shrink_mgr/container/vagrant_variables.yml new file mode 100644 index 0000000..dd05994 --- /dev/null +++ b/tests/functional/shrink_mgr/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 2 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.83 +cluster_subnet: 192.168.84 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/shrink_mgr/group_vars/all b/tests/functional/shrink_mgr/group_vars/all new file mode 100644 index 0000000..05bd9ff --- /dev/null +++ b/tests/functional/shrink_mgr/group_vars/all @@ -0,0 +1,12 @@ +--- +ceph_origin: repository +ceph_repository: community +public_network: "192.168.81.0/24" +cluster_network: "192.168.82.0/24" +radosgw_interface: eth1 +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False diff --git a/tests/functional/shrink_mgr/group_vars/mons b/tests/functional/shrink_mgr/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_mgr/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_mgr/group_vars/osds b/tests/functional/shrink_mgr/group_vars/osds new file mode 100644 index 0000000..3ec1d6e --- /dev/null +++ b/tests/functional/shrink_mgr/group_vars/osds @@ -0,0 +1,11 @@ +--- +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/shrink_mgr/hosts b/tests/functional/shrink_mgr/hosts new file mode 100644 index 0000000..ae0f0f9 --- /dev/null +++ 
b/tests/functional/shrink_mgr/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mgr0 +mgr1 diff --git a/tests/functional/shrink_mgr/vagrant_variables.yml b/tests/functional/shrink_mgr/vagrant_variables.yml new file mode 100644 index 0000000..4a9b1e6 --- /dev/null +++ b/tests/functional/shrink_mgr/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 2 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.81 +cluster_subnet: 192.168.82 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# VM prefix name, need to match the hostname +# label_prefix: ceph diff --git a/tests/functional/shrink_mon/Vagrantfile b/tests/functional/shrink_mon/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/shrink_mon/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_mon/ceph-override.json b/tests/functional/shrink_mon/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/shrink_mon/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_mon/container/Vagrantfile b/tests/functional/shrink_mon/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/shrink_mon/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_mon/container/ceph-override.json b/tests/functional/shrink_mon/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/shrink_mon/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_mon/container/group_vars/all b/tests/functional/shrink_mon/container/group_vars/all new file mode 100644 index 0000000..57d2690 
--- /dev/null +++ b/tests/functional/shrink_mon/container/group_vars/all @@ -0,0 +1,18 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.17.0/24" +cluster_network: "192.168.18.0/24" +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/shrink_mon/container/group_vars/mons b/tests/functional/shrink_mon/container/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_mon/container/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_mon/container/group_vars/osds b/tests/functional/shrink_mon/container/group_vars/osds new file mode 100644 index 0000000..27268d4 --- /dev/null +++ b/tests/functional/shrink_mon/container/group_vars/osds @@ -0,0 +1,9 @@ +--- +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/shrink_mon/container/hosts b/tests/functional/shrink_mon/container/hosts new file mode 100644 index 0000000..5d91b7d --- /dev/null +++ b/tests/functional/shrink_mon/container/hosts @@ -0,0 +1,7 @@ +[mons] +mon0 +mon1 +mon2 + +[osds] +osd0 \ No newline at end of file diff --git a/tests/functional/shrink_mon/container/vagrant_variables.yml b/tests/functional/shrink_mon/container/vagrant_variables.yml new file mode 100644 index 0000000..c90c9db --- /dev/null +++ b/tests/functional/shrink_mon/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# 
DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.17 +cluster_subnet: 192.168.18 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/shrink_mon/group_vars/all b/tests/functional/shrink_mon/group_vars/all new file mode 100644 index 0000000..1496d53 --- /dev/null +++ b/tests/functional/shrink_mon/group_vars/all @@ -0,0 +1,11 @@ +--- +ceph_origin: repository +ceph_repository: community +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False \ No newline at end of file diff --git a/tests/functional/shrink_mon/group_vars/mons b/tests/functional/shrink_mon/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_mon/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_mon/group_vars/osds b/tests/functional/shrink_mon/group_vars/osds new file mode 100644 index 0000000..3ec1d6e --- /dev/null +++ b/tests/functional/shrink_mon/group_vars/osds @@ -0,0 +1,11 @@ +--- +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/shrink_mon/hosts b/tests/functional/shrink_mon/hosts new file mode 100644 index 0000000..5d91b7d --- /dev/null +++ 
b/tests/functional/shrink_mon/hosts @@ -0,0 +1,7 @@ +[mons] +mon0 +mon1 +mon2 + +[osds] +osd0 \ No newline at end of file diff --git a/tests/functional/shrink_mon/hosts-switch-to-containers b/tests/functional/shrink_mon/hosts-switch-to-containers new file mode 100644 index 0000000..3ab7231 --- /dev/null +++ b/tests/functional/shrink_mon/hosts-switch-to-containers @@ -0,0 +1,19 @@ +[all:vars] +docker=True + +[mons] +mon0 +mon1 +mon2 + +[osds] +osd0 + +[mdss] +mds0 + +[rgws] +rgw0 + +[clients] +client0 diff --git a/tests/functional/shrink_mon/vagrant_variables.yml b/tests/functional/shrink_mon/vagrant_variables.yml new file mode 100644 index 0000000..69dfe82 --- /dev/null +++ b/tests/functional/shrink_mon/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# VM prefix name, need to match the hostname +# label_prefix: ceph diff --git a/tests/functional/shrink_osd/Vagrantfile b/tests/functional/shrink_osd/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/shrink_osd/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_osd/ceph-override.json b/tests/functional/shrink_osd/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/shrink_osd/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_osd/container/Vagrantfile b/tests/functional/shrink_osd/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/shrink_osd/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_osd/container/ceph-override.json b/tests/functional/shrink_osd/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/shrink_osd/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_osd/container/group_vars/all b/tests/functional/shrink_osd/container/group_vars/all new file mode 100644 index 0000000..593997b 
--- /dev/null +++ b/tests/functional/shrink_osd/container/group_vars/all @@ -0,0 +1,19 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.73.0/24" +cluster_network: "192.168.74.0/24" +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +copy_admin_key: True +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/shrink_osd/container/hosts b/tests/functional/shrink_osd/container/hosts new file mode 100644 index 0000000..3dc07cb --- /dev/null +++ b/tests/functional/shrink_osd/container/hosts @@ -0,0 +1,6 @@ +[mons] +mon0 + +[osds] +osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]" +osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true diff --git a/tests/functional/shrink_osd/container/vagrant_variables.yml b/tests/functional/shrink_osd/container/vagrant_variables.yml new file mode 100644 index 0000000..a519324 --- /dev/null +++ b/tests/functional/shrink_osd/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.73 +cluster_subnet: 192.168.74 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', 
'/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/shrink_osd/group_vars/all b/tests/functional/shrink_osd/group_vars/all new file mode 100644 index 0000000..956f325 --- /dev/null +++ b/tests/functional/shrink_osd/group_vars/all @@ -0,0 +1,10 @@ +--- +ceph_origin: repository +ceph_repository: community +public_network: "192.168.71.0/24" +cluster_network: "192.168.72.0/24" +ceph_conf_overrides: + global: + osd_pool_default_size: 3 +dashboard_enabled: False +copy_admin_key: True \ No newline at end of file diff --git a/tests/functional/shrink_osd/group_vars/osds b/tests/functional/shrink_osd/group_vars/osds new file mode 100644 index 0000000..77f10f0 --- /dev/null +++ b/tests/functional/shrink_osd/group_vars/osds @@ -0,0 +1,3 @@ +--- +os_tuning_params: + - { name: fs.file-max, value: 26234859 } diff --git a/tests/functional/shrink_osd/hosts b/tests/functional/shrink_osd/hosts new file mode 100644 index 0000000..9b68b7b --- /dev/null +++ b/tests/functional/shrink_osd/hosts @@ -0,0 +1,6 @@ +[mons] +mon0 monitor_address=192.168.71.10 + +[osds] +osd0 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]" +osd1 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true diff --git a/tests/functional/shrink_osd/vagrant_variables.yml b/tests/functional/shrink_osd/vagrant_variables.yml new 
file mode 100644 index 0000000..976293b --- /dev/null +++ b/tests/functional/shrink_osd/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.71 +cluster_subnet: 192.168.72 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. 
Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# VM prefix name, need to match the hostname +# label_prefix: ceph diff --git a/tests/functional/shrink_rbdmirror/Vagrantfile b/tests/functional/shrink_rbdmirror/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/shrink_rbdmirror/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/ceph-override.json b/tests/functional/shrink_rbdmirror/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/container/Vagrantfile b/tests/functional/shrink_rbdmirror/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/container/ceph-override.json b/tests/functional/shrink_rbdmirror/container/ceph-override.json new 
file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/container/group_vars/all b/tests/functional/shrink_rbdmirror/container/group_vars/all new file mode 100644 index 0000000..0998402 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/container/group_vars/all @@ -0,0 +1,18 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True +public_network: "192.168.87.0/24" +cluster_network: "192.168.88.0/24" +containerized_deployment: True +ceph_mon_docker_subnet: "{{ public_network }}" +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +copy_admin_key: True +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/container/group_vars/mons b/tests/functional/shrink_rbdmirror/container/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/container/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/container/group_vars/osds b/tests/functional/shrink_rbdmirror/container/group_vars/osds new file mode 100644 index 0000000..27268d4 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/container/group_vars/osds @@ -0,0 +1,9 @@ +--- +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/container/hosts b/tests/functional/shrink_rbdmirror/container/hosts new file mode 100644 index 
0000000..b87d739 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/container/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[rbdmirrors] +rbd-mirror0 diff --git a/tests/functional/shrink_rbdmirror/container/vagrant_variables.yml b/tests/functional/shrink_rbdmirror/container/vagrant_variables.yml new file mode 100644 index 0000000..5fa978c --- /dev/null +++ b/tests/functional/shrink_rbdmirror/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.87 +cluster_subnet: 192.168.88 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/shrink_rbdmirror/group_vars/all b/tests/functional/shrink_rbdmirror/group_vars/all new file mode 100644 index 0000000..1a90c97 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/group_vars/all @@ -0,0 +1,13 @@ +--- +ceph_origin: repository +ceph_repository: community +public_network: "192.168.85.0/24" +cluster_network: "192.168.86.0/24" +osd_objectstore: "bluestore" +copy_admin_key: true +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/group_vars/mons b/tests/functional/shrink_rbdmirror/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/group_vars/osds b/tests/functional/shrink_rbdmirror/group_vars/osds new file mode 100644 index 0000000..3ec1d6e --- /dev/null +++ b/tests/functional/shrink_rbdmirror/group_vars/osds @@ -0,0 +1,11 @@ +--- +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git 
a/tests/functional/shrink_rbdmirror/hosts b/tests/functional/shrink_rbdmirror/hosts new file mode 100644 index 0000000..b87d739 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[rbdmirrors] +rbd-mirror0 diff --git a/tests/functional/shrink_rbdmirror/vagrant_variables.yml b/tests/functional/shrink_rbdmirror/vagrant_variables.yml new file mode 100644 index 0000000..825b7b9 --- /dev/null +++ b/tests/functional/shrink_rbdmirror/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 1 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.85 +cluster_subnet: 192.168.86 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# VM prefix name, need to match the hostname +# label_prefix: ceph diff --git a/tests/functional/shrink_rgw/Vagrantfile b/tests/functional/shrink_rgw/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/shrink_rgw/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_rgw/ceph-override.json b/tests/functional/shrink_rgw/ceph-override.json new file mode 120000 index 0000000..fe2ff40 --- /dev/null +++ b/tests/functional/shrink_rgw/ceph-override.json @@ -0,0 +1 @@ +../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_rgw/container/Vagrantfile b/tests/functional/shrink_rgw/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/shrink_rgw/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/shrink_rgw/container/ceph-override.json b/tests/functional/shrink_rgw/container/ceph-override.json new file mode 120000 index 0000000..8417cc0 --- /dev/null +++ b/tests/functional/shrink_rgw/container/ceph-override.json @@ -0,0 +1 @@ +../../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/shrink_rgw/container/group_vars/all b/tests/functional/shrink_rgw/container/group_vars/all new file mode 100644 index 0000000..ed7717d 
--- /dev/null +++ b/tests/functional/shrink_rgw/container/group_vars/all @@ -0,0 +1,20 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +public_network: "192.168.91.0/24" +cluster_network: "192.168.92.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_mon_docker_subnet: "{{ public_network }}" +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False +copy_admin_key: True +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 \ No newline at end of file diff --git a/tests/functional/shrink_rgw/container/group_vars/mons b/tests/functional/shrink_rgw/container/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_rgw/container/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_rgw/container/group_vars/osds b/tests/functional/shrink_rgw/container/group_vars/osds new file mode 100644 index 0000000..27268d4 --- /dev/null +++ b/tests/functional/shrink_rgw/container/group_vars/osds @@ -0,0 +1,9 @@ +--- +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git a/tests/functional/shrink_rgw/container/group_vars/rgws b/tests/functional/shrink_rgw/container/group_vars/rgws new file mode 100644 index 0000000..66e660d --- /dev/null +++ b/tests/functional/shrink_rgw/container/group_vars/rgws @@ -0,0 +1,10 @@ +--- +copy_admin_key: true +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 diff --git 
a/tests/functional/shrink_rgw/container/hosts b/tests/functional/shrink_rgw/container/hosts new file mode 100644 index 0000000..f05a2ad --- /dev/null +++ b/tests/functional/shrink_rgw/container/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[rgws] +rgw0 diff --git a/tests/functional/shrink_rgw/container/vagrant_variables.yml b/tests/functional/shrink_rgw/container/vagrant_variables.yml new file mode 100644 index 0000000..a3d85da --- /dev/null +++ b/tests/functional/shrink_rgw/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.91 +cluster_subnet: 192.168.92 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/shrink_rgw/group_vars/all b/tests/functional/shrink_rgw/group_vars/all new file mode 100644 index 0000000..bc1e286 --- /dev/null +++ b/tests/functional/shrink_rgw/group_vars/all @@ -0,0 +1,14 @@ +--- +ceph_origin: repository +ceph_repository: community +public_network: "192.168.89.0/24" +cluster_network: "192.168.90.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +osd_objectstore: "bluestore" +copy_admin_key: true +ceph_conf_overrides: + global: + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 +dashboard_enabled: False diff --git a/tests/functional/shrink_rgw/group_vars/mons b/tests/functional/shrink_rgw/group_vars/mons new file mode 100644 index 0000000..0e67979 --- /dev/null +++ b/tests/functional/shrink_rgw/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: False +crush_rule_config: False \ No newline at end of file diff --git a/tests/functional/shrink_rgw/group_vars/osds b/tests/functional/shrink_rgw/group_vars/osds new file mode 100644 index 0000000..3ec1d6e --- /dev/null +++ b/tests/functional/shrink_rgw/group_vars/osds @@ -0,0 +1,11 @@ +--- +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +osd_objectstore: "bluestore" +lvm_volumes: + - data: data-lv1 + data_vg: test_group + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals \ No newline at end of file diff --git 
a/tests/functional/shrink_rgw/group_vars/rgws b/tests/functional/shrink_rgw/group_vars/rgws new file mode 100644 index 0000000..66e660d --- /dev/null +++ b/tests/functional/shrink_rgw/group_vars/rgws @@ -0,0 +1,10 @@ +--- +copy_admin_key: true +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 diff --git a/tests/functional/shrink_rgw/hosts b/tests/functional/shrink_rgw/hosts new file mode 100644 index 0000000..f05a2ad --- /dev/null +++ b/tests/functional/shrink_rgw/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[rgws] +rgw0 diff --git a/tests/functional/shrink_rgw/vagrant_variables.yml b/tests/functional/shrink_rgw/vagrant_variables.yml new file mode 100644 index 0000000..a9ebb3d --- /dev/null +++ b/tests/functional/shrink_rgw/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 1 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.89 +cluster_subnet: 192.168.90 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# VM prefix name, need to match the hostname +# label_prefix: ceph diff --git a/tests/functional/subset_update/Vagrantfile b/tests/functional/subset_update/Vagrantfile new file mode 120000 index 0000000..706a5bb --- /dev/null +++ b/tests/functional/subset_update/Vagrantfile @@ -0,0 +1 @@ +../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/subset_update/ceph-override.json b/tests/functional/subset_update/ceph-override.json new file mode 100644 index 0000000..4643d45 --- /dev/null +++ b/tests/functional/subset_update/ceph-override.json @@ -0,0 +1,15 @@ +{ + "ceph_conf_overrides": { + "global": { + "auth_allow_insecure_global_id_reclaim": false, + "osd_pool_default_pg_num": 12, + "osd_pool_default_size": 1, + "mon_allow_pool_size_one": true, + "mon_warn_on_pool_no_redundancy": false, + "mon_max_pg_per_osd": 300 + } + }, + + "ceph_mon_docker_memory_limit": "2g", + "radosgw_num_instances": 2 +} diff --git a/tests/functional/subset_update/container/Vagrantfile b/tests/functional/subset_update/container/Vagrantfile new file mode 120000 index 0000000..16076e4 --- /dev/null +++ b/tests/functional/subset_update/container/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/subset_update/container/ceph-override.json b/tests/functional/subset_update/container/ceph-override.json new file mode 120000 index 0000000..772bdc5
--- /dev/null +++ b/tests/functional/subset_update/container/ceph-override.json @@ -0,0 +1 @@ +../ceph-override.json \ No newline at end of file diff --git a/tests/functional/subset_update/container/group_vars/all b/tests/functional/subset_update/container/group_vars/all new file mode 100644 index 0000000..8e10ffd --- /dev/null +++ b/tests/functional/subset_update/container/group_vars/all @@ -0,0 +1,34 @@ +--- +# this is only here to let the CI tests know +# that this scenario is using docker +docker: True + +containerized_deployment: True +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_mon_docker_subnet: "{{ public_network }}" +public_network: "192.168.5.0/24" +cluster_network: "192.168.6.0/24" +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 +ceph_conf_overrides: + global: + auth_allow_insecure_global_id_reclaim: false + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +docker_pull_timeout: 600s +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +mds_max_mds: 2 +# TODO: add monitoring later +dashboard_enabled: false +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +ceph_docker_image: ceph/ceph +ceph_docker_image_tag: v19 +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" diff --git a/tests/functional/subset_update/container/group_vars/clients b/tests/functional/subset_update/container/group_vars/clients new file mode 100644 index 0000000..ec0bb3e --- /dev/null +++ b/tests/functional/subset_update/container/group_vars/clients @@ -0,0 +1,13 @@ +--- +user_config: True +copy_admin_key: True +test: + name: "test" + rule_name: "HDD" + size: 1 +test2: + name: "test2" + size: 1 +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/subset_update/container/group_vars/iscsigws b/tests/functional/subset_update/container/group_vars/iscsigws new file mode 100644 index 0000000..8d0932a --- /dev/null +++ b/tests/functional/subset_update/container/group_vars/iscsigws @@ -0,0 +1,2 @@ +--- +generate_crt: True diff --git a/tests/functional/subset_update/container/group_vars/mons b/tests/functional/subset_update/container/group_vars/mons new file mode 100644 index 0000000..441a4aa --- /dev/null +++ b/tests/functional/subset_update/container/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: false +crush_rule_config: false diff --git a/tests/functional/subset_update/container/group_vars/osds b/tests/functional/subset_update/container/group_vars/osds new file mode 100644 index 0000000..2a4cfbb --- /dev/null +++ b/tests/functional/subset_update/container/group_vars/osds @@ -0,0 +1,6 @@ +--- +osd_objectstore: "bluestore" +devices: + - /dev/sda + - /dev/sdb + - /dev/sdc diff --git a/tests/functional/subset_update/container/group_vars/rgws b/tests/functional/subset_update/container/group_vars/rgws new file mode 100644 index 
0000000..639ade9 --- /dev/null +++ b/tests/functional/subset_update/container/group_vars/rgws @@ -0,0 +1,8 @@ +--- +copy_admin_key: True +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 diff --git a/tests/functional/subset_update/container/hosts b/tests/functional/subset_update/container/hosts new file mode 100644 index 0000000..8823f1a --- /dev/null +++ b/tests/functional/subset_update/container/hosts @@ -0,0 +1,17 @@ +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mon0 +mon1 + +[osds] +osd0 +osd1 +osd2 + +[rgws] +rgw0 +rgw1 diff --git a/tests/functional/subset_update/container/vagrant_variables.yml b/tests/functional/subset_update/container/vagrant_variables.yml new file mode 100644 index 0000000..cd342bd --- /dev/null +++ b/tests/functional/subset_update/container/vagrant_variables.yml @@ -0,0 +1,60 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: True + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 3 +mds_vms: 0 +rgw_vms: 2 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.5 +cluster_subnet: 192.168.6 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sda', '/dev/sdb' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. 
These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#client_vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/subset_update/group_vars/all b/tests/functional/subset_update/group_vars/all new file mode 100644 index 0000000..4161dde --- /dev/null +++ b/tests/functional/subset_update/group_vars/all @@ -0,0 +1,26 @@ +--- +ceph_origin: repository +ceph_repository: community +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" +ceph_conf_overrides: + global: + auth_allow_insecure_global_id_reclaim: false + mon_allow_pool_size_one: true + mon_warn_on_pool_no_redundancy: false + osd_pool_default_size: 1 + mon_max_pg_per_osd: 300 +handler_health_mon_check_delay: 10 +handler_health_osd_check_delay: 10 +mds_max_mds: 2 +# TODO: add monitoring later +dashboard_enabled: false +dashboard_admin_password: $sX!cD$rYU6qR^B! 
+grafana_admin_password: +xFRe+RES@7vg24n +ceph_docker_registry: quay.io +node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" +prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" +alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" +grafana_server_group_name: ceph_monitoring diff --git a/tests/functional/subset_update/group_vars/clients b/tests/functional/subset_update/group_vars/clients new file mode 100644 index 0000000..4c37898 --- /dev/null +++ b/tests/functional/subset_update/group_vars/clients @@ -0,0 +1,13 @@ +--- +copy_admin_key: True +user_config: True +test: + name: "test" + rule_name: "HDD" + size: 1 +test2: + name: "test2" + size: 1 +pools: + - "{{ test }}" + - "{{ test2 }}" diff --git a/tests/functional/subset_update/group_vars/iscsigws b/tests/functional/subset_update/group_vars/iscsigws new file mode 100644 index 0000000..8d0932a --- /dev/null +++ b/tests/functional/subset_update/group_vars/iscsigws @@ -0,0 +1,2 @@ +--- +generate_crt: True diff --git a/tests/functional/subset_update/group_vars/mons b/tests/functional/subset_update/group_vars/mons new file mode 100644 index 0000000..441a4aa --- /dev/null +++ b/tests/functional/subset_update/group_vars/mons @@ -0,0 +1,3 @@ +--- +create_crush_tree: false +crush_rule_config: false diff --git a/tests/functional/subset_update/group_vars/nfss b/tests/functional/subset_update/group_vars/nfss new file mode 100644 index 0000000..fc280e2 --- /dev/null +++ b/tests/functional/subset_update/group_vars/nfss @@ -0,0 +1,10 @@ +copy_admin_key: true +nfs_file_gw: false +nfs_obj_gw: true +ganesha_conf_overrides: | + CACHEINODE { + Entries_HWMark = 100000; + } +nfs_ganesha_stable: true +nfs_ganesha_dev: false +nfs_ganesha_flavor: "ceph_main" diff --git a/tests/functional/subset_update/group_vars/osds b/tests/functional/subset_update/group_vars/osds new file mode 100644 index 0000000..9f9e8a0 --- /dev/null 
+++ b/tests/functional/subset_update/group_vars/osds @@ -0,0 +1,8 @@ +--- +os_tuning_params: + - { name: fs.file-max, value: 26234859 } +osd_objectstore: "bluestore" +devices: + - /dev/sda + - /dev/sdb + - /dev/sdc diff --git a/tests/functional/subset_update/group_vars/rgws b/tests/functional/subset_update/group_vars/rgws new file mode 100644 index 0000000..d9c09f8 --- /dev/null +++ b/tests/functional/subset_update/group_vars/rgws @@ -0,0 +1,9 @@ +copy_admin_key: true +rgw_create_pools: + foo: + pg_num: 16 + type: replicated + bar: + pg_num: 16 +rgw_override_bucket_index_max_shards: 16 +rgw_bucket_default_quota_max_objects: 1638400 diff --git a/tests/functional/subset_update/hosts b/tests/functional/subset_update/hosts new file mode 100644 index 0000000..ce63629 --- /dev/null +++ b/tests/functional/subset_update/hosts @@ -0,0 +1,18 @@ +[mons] +mon0 +mon1 +mon2 + +[mgrs] +mon0 +mon1 + +[osds] +osd0 +osd1 +osd2 + +[rgws] +rgw0 +rgw1 + diff --git a/tests/functional/subset_update/vagrant_variables.yml b/tests/functional/subset_update/vagrant_variables.yml new file mode 100644 index 0000000..f6331dd --- /dev/null +++ b/tests/functional/subset_update/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 3 +mds_vms: 0 +rgw_vms: 2 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. 
They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/stream9 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +vagrant_sync_dir: /vagrant +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# VM prefix name, need to match the hostname +# label_prefix: ceph diff --git a/tests/functional/tests/__init__.py b/tests/functional/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/functional/tests/grafana/test_grafana.py b/tests/functional/tests/grafana/test_grafana.py new file mode 100644 index 0000000..fb12009 --- /dev/null +++ b/tests/functional/tests/grafana/test_grafana.py @@ -0,0 +1,26 @@ +import pytest + + +class TestGrafanas(object): + + @pytest.mark.dashboard + @pytest.mark.no_docker + def test_grafana_dashboard_is_installed(self, node, host): + assert host.package("ceph-grafana-dashboards").is_installed + + @pytest.mark.dashboard + @pytest.mark.parametrize('svc', [ + 'alertmanager', 'grafana-server', 'prometheus' + ]) + def test_grafana_service_enabled_and_running(self, node, host, svc): + s = host.service(svc) + assert s.is_enabled + assert s.is_running + + @pytest.mark.dashboard + @pytest.mark.parametrize('port', [ + '3000', '9092', '9093' + ]) + def test_grafana_socket(self, node, host, setup, port): + s = host.socket('tcp://%s:%s' % (setup["address"], port)) + assert s.is_listening diff --git a/tests/functional/tests/mds/__init__.py b/tests/functional/tests/mds/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/functional/tests/mds/test_mds.py b/tests/functional/tests/mds/test_mds.py new file mode 100644 index 
0000000..2e1da22 --- /dev/null +++ b/tests/functional/tests/mds/test_mds.py @@ -0,0 +1,25 @@ +import pytest +import json + + +class TestMDSs(object): + + @pytest.mark.no_docker + def test_mds_is_installed(self, node, host): + assert host.package("ceph-mds").is_installed + + def test_mds_service_enabled_and_running(self, node, host): + service_name = "ceph-mds@{hostname}".format( + hostname=node["vars"]["inventory_hostname"] + ) + s = host.service(service_name) + assert s.is_enabled + assert s.is_running + + def test_mds_is_up(self, node, setup, ceph_status): + cluster = setup["cluster_name"] + name = 'client.bootstrap-mds' + output = ceph_status(f'/var/lib/ceph/bootstrap-mds/{cluster}.keyring', name=name) + cluster_status = json.loads(output) + assert (cluster_status['fsmap'].get('up', 0) + cluster_status['fsmap'].get( # noqa E501 + 'up:standby', 0)) == len(node["vars"]["groups"]["mdss"]) diff --git a/tests/functional/tests/mgr/__init__.py b/tests/functional/tests/mgr/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/functional/tests/mgr/test_mgr.py b/tests/functional/tests/mgr/test_mgr.py new file mode 100644 index 0000000..a14fed6 --- /dev/null +++ b/tests/functional/tests/mgr/test_mgr.py @@ -0,0 +1,39 @@ +import pytest +import json + + +class TestMGRs(object): + + @pytest.mark.no_docker + def test_mgr_is_installed(self, node, host): + assert host.package("ceph-mgr").is_installed + + @pytest.mark.dashboard + @pytest.mark.no_docker + def test_mgr_dashboard_is_installed(self, node, host): + assert host.package("ceph-mgr-dashboard").is_installed + + def test_mgr_service_is_enabled_and_running(self, node, host): + service_name = "ceph-mgr@{hostname}".format( + hostname=node["vars"]["inventory_hostname"] + ) + s = host.service(service_name) + assert s.is_enabled + assert s.is_running + + @pytest.mark.dashboard + @pytest.mark.parametrize('port', [ + '8443', '9283' + ]) + def test_mgr_dashboard_is_listening(self, node, host, setup, port): + s = 
host.socket('tcp://%s:%s' % (setup["address"], port)) + assert s.is_listening + + def test_mgr_is_up(self, node, setup, ceph_status): + hostname = node["vars"]["inventory_hostname"] + cluster = setup["cluster_name"] + name = f"mgr.{hostname}" + output_raw = ceph_status(f'/var/lib/ceph/mgr/{cluster}-{hostname}/keyring', name=name) + output_json = json.loads(output_raw) + + assert output_json['mgrmap']['available'] diff --git a/tests/functional/tests/mon/__init__.py b/tests/functional/tests/mon/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/functional/tests/mon/test_mons.py b/tests/functional/tests/mon/test_mons.py new file mode 100644 index 0000000..d50f798 --- /dev/null +++ b/tests/functional/tests/mon/test_mons.py @@ -0,0 +1,29 @@ +import pytest + + +class TestMons(object): + + @pytest.mark.no_docker + def test_ceph_mon_package_is_installed(self, node, host): + assert host.package("ceph-mon").is_installed + + @pytest.mark.parametrize("mon_port", [3300, 6789]) + def test_mon_listens(self, node, host, setup, mon_port): + assert host.socket("tcp://{address}:{port}".format( + address=setup["address"], + port=mon_port + )).is_listening + + def test_mon_service_enabled_and_running(self, node, host): + service_name = "ceph-mon@{hostname}".format( + hostname=node["vars"]["inventory_hostname"] + ) + s = host.service(service_name) + assert s.is_enabled + assert s.is_running + + @pytest.mark.no_docker + def test_can_get_cluster_health(self, node, host, setup): + cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(setup["cluster_name"]) # noqa E501 + output = host.check_output(cmd) + assert output.strip().startswith("cluster") diff --git a/tests/functional/tests/nfs/test_nfs_ganesha.py b/tests/functional/tests/nfs/test_nfs_ganesha.py new file mode 100644 index 0000000..fda75ad --- /dev/null +++ b/tests/functional/tests/nfs/test_nfs_ganesha.py @@ -0,0 +1,48 @@ +import json +import pytest + + +class TestNFSs(object): + + 
@pytest.mark.no_docker + @pytest.mark.parametrize('pkg', [ + 'nfs-ganesha', + 'nfs-ganesha-rgw' + ]) + def test_nfs_ganesha_package_is_installed(self, node, host, pkg): + assert host.package(pkg).is_installed + + @pytest.mark.no_docker + def test_nfs_service_enabled_and_running(self, node, host): + s = host.service("nfs-ganesha") + assert s.is_enabled + assert s.is_running + + @pytest.mark.no_docker + def test_nfs_config_override(self, node, host): + assert host.file( + "/etc/ganesha/ganesha.conf").contains("Entries_HWMark") + + def test_nfs_is_up(self, node, setup, ceph_status): + hostname = node["vars"]["inventory_hostname"] + cluster = setup["cluster_name"] + name = f"client.rgw.{hostname}" + output = ceph_status(f'/var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring', name=name) + keys = list(json.loads( + output)["servicemap"]["services"]["rgw-nfs"]["daemons"].keys()) + keys.remove('summary') + daemons = json.loads(output)["servicemap"]["services"]["rgw-nfs"]["daemons"] + hostnames = [] + for key in keys: + hostnames.append(daemons[key]['metadata']['hostname']) + assert hostname in hostnames + +# NOTE (guits): This check must be fixed.
(Permission denied error) +# @pytest.mark.no_docker +# def test_nfs_rgw_fsal_export(self, node, host): +# if(host.mount_point("/mnt").exists): +# cmd = host.run("sudo umount /mnt") +# assert cmd.rc == 0 +# cmd = host.run("sudo mount.nfs localhost:/ceph /mnt/") +# assert cmd.rc == 0 +# assert host.mount_point("/mnt").exists diff --git a/tests/functional/tests/node-exporter/test_node_exporter.py b/tests/functional/tests/node-exporter/test_node_exporter.py new file mode 100644 index 0000000..9925295 --- /dev/null +++ b/tests/functional/tests/node-exporter/test_node_exporter.py @@ -0,0 +1,14 @@ +import pytest + + +class TestNodeExporter(object): + + @pytest.mark.dashboard + def test_node_exporter_service_enabled_and_running(self, node, host): + s = host.service("node_exporter") + assert s.is_enabled + assert s.is_running + + @pytest.mark.dashboard + def test_node_exporter_socket(self, node, host): + assert host.socket('tcp://9100').is_listening diff --git a/tests/functional/tests/osd/__init__.py b/tests/functional/tests/osd/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/functional/tests/osd/test_osds.py b/tests/functional/tests/osd/test_osds.py new file mode 100644 index 0000000..800531f --- /dev/null +++ b/tests/functional/tests/osd/test_osds.py @@ -0,0 +1,81 @@ +import pytest +import json + + +class TestOSDs(object): + + @pytest.mark.no_docker + def test_ceph_osd_package_is_installed(self, node, host): + assert host.package("ceph-osd").is_installed + + def test_osds_listen_on_public_network(self, node, host, setup): + # TODO: figure out way to paramaterize this test + nb_port = (setup["num_osds"] * 4) + assert host.check_output( + "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (setup["address"])) == str(nb_port) # noqa E501 + + def test_osds_listen_on_cluster_network(self, node, host, setup): + # TODO: figure out way to paramaterize this test + nb_port = (setup["num_osds"] * 4) + assert host.check_output("netstat -lntp | grep ceph-osd 
| grep %s | wc -l" % # noqa E501 + (setup["cluster_address"])) == str(nb_port) + + def test_osd_service_enabled_and_running(self, node, host, setup): + # TODO: figure out way to paramaterize node['osds'] for this test + for osd in setup["osds"]: + s = host.service("ceph-osd@%s" % osd) + assert s.is_enabled + assert s.is_running + + @pytest.mark.no_docker + def test_osd_are_mounted(self, node, host, setup): + # TODO: figure out way to paramaterize setup['osd_ids'] for this test + for osd_id in setup["osd_ids"]: + osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format( + cluster=setup["cluster_name"], + osd_id=osd_id, + ) + assert host.mount_point(osd_path).exists + + @pytest.mark.no_docker + @pytest.mark.parametrize('cmd', [ + 'ceph-volume', + 'ceph-volume-systemd' + ]) + def test_ceph_volume_command_exists(self, node, host, cmd): + assert host.exists(cmd) + + def _get_osd_id_from_host(self, node, osd_tree): + children = [] + for n in osd_tree['nodes']: + if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host': # noqa E501 + children = n['children'] + return children + + def _get_nb_up_osds_from_ids(self, node, osd_tree): + nb_up = 0 + ids = self._get_osd_id_from_host(node, osd_tree) + for n in osd_tree['nodes']: + if n['id'] in ids and n['status'] == 'up': + nb_up += 1 + return nb_up + + @pytest.mark.no_docker + def test_all_osds_are_up_and_in(self, node, host, setup): + cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( # noqa E501 + cluster=setup["cluster_name"]) + output = json.loads(host.check_output(cmd)) + assert setup["num_osds"] == self._get_nb_up_osds_from_ids(node, output) + + @pytest.mark.docker + def test_all_docker_osds_are_up_and_in(self, node, host, setup): + container_binary = setup["container_binary"] + osd_id = host.check_output(container_binary + " ps -q --filter='name=" + "ceph-osd' | head -1") + cmd = "sudo 
{container_binary} exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( # noqa E501 + osd_id=osd_id, + cluster=setup["cluster_name"], + container_binary=container_binary + ) + output = json.loads(host.check_output(cmd)) + assert setup["num_osds"] == self._get_nb_up_osds_from_ids(node, output) diff --git a/tests/functional/tests/rbd-mirror/__init__.py b/tests/functional/tests/rbd-mirror/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/functional/tests/rbd-mirror/test_rbd_mirror.py b/tests/functional/tests/rbd-mirror/test_rbd_mirror.py new file mode 100644 index 0000000..12af2e1 --- /dev/null +++ b/tests/functional/tests/rbd-mirror/test_rbd_mirror.py @@ -0,0 +1,33 @@ +import pytest +import json + + +class TestRbdMirrors(object): + + @pytest.mark.rbdmirror_secondary + @pytest.mark.no_docker + def test_rbd_mirror_is_installed(self, node, host): + assert host.package("rbd-mirror").is_installed + + @pytest.mark.rbdmirror_secondary + def test_rbd_mirror_service_enabled_and_running(self, node, host): + service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format( + hostname=node["vars"]["inventory_hostname"] + ) + s = host.service(service_name) + assert s.is_enabled + assert s.is_running + + @pytest.mark.rbdmirror_secondary + def test_rbd_mirror_is_up(self, node, setup, ceph_status): + hostname = node["vars"]["inventory_hostname"] + cluster = setup["cluster_name"] + output = ceph_status(f'/var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring') + status = json.loads(output) + daemon_ids = [i for i in status["servicemap"]["services"] + ["rbd-mirror"]["daemons"].keys() if i != "summary"] + daemons = [] + for daemon_id in daemon_ids: + daemons.append(status["servicemap"]["services"]["rbd-mirror"] + ["daemons"][daemon_id]["metadata"]["hostname"]) + assert hostname in daemons diff --git a/tests/functional/tests/rgw/__init__.py 
b/tests/functional/tests/rgw/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/functional/tests/rgw/test_rgw.py b/tests/functional/tests/rgw/test_rgw.py new file mode 100644 index 0000000..6d3d159 --- /dev/null +++ b/tests/functional/tests/rgw/test_rgw.py @@ -0,0 +1,47 @@ +import pytest +import json + + +class TestRGWs(object): + + @pytest.mark.no_docker + def test_rgw_is_installed(self, node, host): + result = host.package("radosgw").is_installed + if not result: + result = host.package("ceph-radosgw").is_installed + assert result + + def test_rgw_service_enabled_and_running(self, node, host): + for i in range(int(node["radosgw_num_instances"])): + service_name = "ceph-radosgw@rgw.{rgw_zone}.{hostname}.rgw{seq}".format( + hostname=node["vars"]["inventory_hostname"], + seq=i, + rgw_zone=node["vars"].get("rgw_zone", "default"), + ) + s = host.service(service_name) + assert s.is_enabled + assert s.is_running + + def test_rgw_is_up(self, node, setup, ceph_status): + hostname = node["vars"]["inventory_hostname"] + cluster = setup["cluster_name"] + name = "client.bootstrap-rgw" + output = ceph_status(f'/var/lib/ceph/bootstrap-rgw/{cluster}.keyring', name=name) + keys = list(json.loads( + output)["servicemap"]["services"]["rgw"]["daemons"].keys()) + keys.remove('summary') + daemons = json.loads(output)["servicemap"]["services"]["rgw"]["daemons"] + hostnames = [] + for key in keys: + hostnames.append(daemons[key]['metadata']['hostname']) + assert hostname in hostnames + + @pytest.mark.no_docker + def test_rgw_http_endpoint(self, node, host, setup): + # rgw frontends ip_addr is configured on public_interface + ip_addr = host.interface(setup['public_interface']).addresses[0] + for i in range(int(node["radosgw_num_instances"])): + assert host.socket( + "tcp://{ip_addr}:{port}".format(ip_addr=ip_addr, + port=(8080+i)) + ).is_listening # noqa E501 diff --git a/tests/functional/tests/test_install.py b/tests/functional/tests/test_install.py new file mode 
100644 index 0000000..38d87c2 --- /dev/null +++ b/tests/functional/tests/test_install.py @@ -0,0 +1,56 @@ +import pytest +import re + + +class TestInstall(object): + + def test_ceph_dir_exists_and_is_directory(self, host, node): + f = host.file('/etc/ceph') + assert f.exists + assert f.is_directory + + def test_ceph_conf_exists_and_is_file(self, host, node, setup): + f = host.file(setup["conf_path"]) + assert f.exists + assert f.is_file + + @pytest.mark.no_docker + def test_ceph_command_exists(self, host, node): + assert host.exists("ceph") + + +class TestCephConf(object): + + def test_mon_host_line_has_correct_value(self, node, host, setup): + mon_host_line = host.check_output("grep 'mon host = ' /etc/ceph/{cluster}.conf".format(cluster=setup['cluster_name'])) # noqa E501 + result = True + for x in range(0, setup["num_mons"]): + pattern = re.compile(("v2:{subnet}.1{x}:3300,v1:{subnet}.1{x}:6789".format(subnet=setup["subnet"], x=x))) # noqa E501 + if pattern.search(mon_host_line) is None: + result = False + assert result + + +class TestCephCrash(object): + @pytest.mark.no_docker + @pytest.mark.ceph_crash + def test_ceph_crash_service_enabled_and_running(self, node, host): + s = host.service("ceph-crash") + assert s.is_enabled + assert s.is_running + + @pytest.mark.docker + @pytest.mark.ceph_crash + def test_ceph_crash_service_enabled_and_running_container(self, node, host): + s = host.service("ceph-crash@{hostname}".format(hostname=node["vars"]["inventory_hostname"])) + assert s.is_enabled + assert s.is_running + + +class TestCephExporter(object): + @pytest.mark.docker + @pytest.mark.ceph_exporter + def test_ceph_exporter_service_enabled_and_running_container(self, node, host): + s = host.service("ceph-exporter@{hostname}".format(hostname=node["vars"]["inventory_hostname"])) + assert s.is_enabled + assert s.is_running diff --git a/tests/inventories/single-machine.yml b/tests/inventories/single-machine.yml new file mode 100644 index 0000000..f291227 --- /dev/null 
+++ b/tests/inventories/single-machine.yml @@ -0,0 +1,11 @@ +[mons] +localhost + +[osds] +localhost + +[rgws] +localhost + +[mdss] +localhost diff --git a/tests/library/ca_test_common.py b/tests/library/ca_test_common.py new file mode 100644 index 0000000..eaa0bd6 --- /dev/null +++ b/tests/library/ca_test_common.py @@ -0,0 +1,29 @@ +from ansible.module_utils import basic +from ansible.module_utils._text import to_bytes +import json + + +def set_module_args(args): + if '_ansible_remote_tmp' not in args: + args['_ansible_remote_tmp'] = '/tmp' + if '_ansible_keep_remote_files' not in args: + args['_ansible_keep_remote_files'] = False + + args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + basic._ANSIBLE_ARGS = to_bytes(args) + + +class AnsibleExitJson(Exception): + pass + + +class AnsibleFailJson(Exception): + pass + + +def exit_json(*args, **kwargs): + raise AnsibleExitJson(kwargs) + + +def fail_json(*args, **kwargs): + raise AnsibleFailJson(kwargs) diff --git a/tests/library/test_ceph_crush.py b/tests/library/test_ceph_crush.py new file mode 100644 index 0000000..ea09c21 --- /dev/null +++ b/tests/library/test_ceph_crush.py @@ -0,0 +1,112 @@ +import sys +import pytest + +sys.path.append('./library') +import ceph_crush # noqa: E402 + + +class TestCephCrushModule(object): + + def test_no_host(self): + location = [ + ("chassis", "monchassis"), + ("rack", "monrack"), + ("row", "marow"), + ("pdu", "monpdu"), + ("pod", "monpod"), + ("room", "maroom"), + ("datacenter", "mondc"), + ("region", "maregion"), + ("root", "maroute"), + ] + with pytest.raises(Exception): + ceph_crush.sort_osd_crush_location(location, None) + + def test_lower_than_two_bucket(self): + location = [ + ("chassis", "monchassis"), + ] + with pytest.raises(Exception): + ceph_crush.sort_osd_crush_location(location, None) + + def test_invalid_bucket_type(self): + location = [ + ("host", "monhost"), + ("chassis", "monchassis"), + ("rackyyyyy", "monrack"), + ] + with pytest.raises(Exception): + 
ceph_crush.sort_osd_crush_location(location, None) + + def test_ordering(self): + expected_result = [ + ("host", "monhost"), + ("chassis", "monchassis"), + ("rack", "monrack"), + ("row", "marow"), + ("pdu", "monpdu"), + ("pod", "monpod"), + ("room", "maroom"), + ("datacenter", "mondc"), + ("region", "maregion"), + ("root", "maroute"), + ] + expected_result_reverse = expected_result[::-1] + result = ceph_crush.sort_osd_crush_location( + expected_result_reverse, None) + assert expected_result == result + + def test_generate_commands(self): + cluster = "test" + expected_command_list = [ + ['ceph', '--cluster', cluster, 'osd', + 'crush', "add-bucket", "monhost", "host"], + ['ceph', '--cluster', cluster, 'osd', 'crush', + "add-bucket", "monchassis", "chassis"], + ['ceph', '--cluster', cluster, 'osd', 'crush', + "move", "monhost", "chassis=monchassis"], + ['ceph', '--cluster', cluster, 'osd', + 'crush', "add-bucket", "monrack", "rack"], + ['ceph', '--cluster', cluster, 'osd', 'crush', + "move", "monchassis", "rack=monrack"], + ] + + location = [ + ("host", "monhost"), + ("chassis", "monchassis"), + ("rack", "monrack"), + ] + + crush_map = {"nodes": []} + + result = ceph_crush.create_and_move_buckets_list( + cluster, location, crush_map) + assert result == expected_command_list + + def test_generate_commands_container(self): + cluster = "test" + containerized = "docker exec -ti ceph-mon" + expected_command_list = [ + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', + cluster, 'osd', 'crush', "add-bucket", "monhost", "host"], + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', + cluster, 'osd', 'crush', "add-bucket", "monchassis", "chassis"], + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, + 'osd', 'crush', "move", "monhost", "chassis=monchassis"], + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', + cluster, 'osd', 'crush', "add-bucket", "monrack", "rack"], + ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', + 
cluster, 'osd', 'crush', "move", "monchassis", "rack=monrack"], + ] + + location = [ + ("host", "monhost"), + ("chassis", "monchassis"), + ("rack", "monrack"), + ] + + crush_map = {"nodes": []} + + result = ceph_crush.create_and_move_buckets_list( + cluster, location, crush_map, containerized) + assert result == expected_command_list diff --git a/tests/library/test_ceph_crush_rule.py b/tests/library/test_ceph_crush_rule.py new file mode 100644 index 0000000..4351ab2 --- /dev/null +++ b/tests/library/test_ceph_crush_rule.py @@ -0,0 +1,439 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_crush_rule +import ceph_crush_rule_info + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_name = 'foo' +fake_bucket_root = 'default' +fake_bucket_type = 'host' +fake_device_class = 'ssd' +fake_profile = 'default' +fake_user = 'client.admin' +fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user) + + +class TestCephCrushRuleModule(object): + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_with_name_only(self, m_fail_json): + ca_test_common.set_module_args({ + 'name': fake_name + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['msg'] == 'state is present but all of the following are missing: rule_type' + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not 
result['changed'] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_non_existing_replicated_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type + }) + m_exit_json.side_effect = ca_test_common.exit_json + get_rc = 2 + get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + get_stdout = '' + create_rc = 0 + create_stderr = '' + create_stdout = '' + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (create_rc, create_stdout, create_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'create-replicated', fake_name, fake_bucket_root, fake_bucket_type] + assert result['rc'] == create_rc + assert result['stderr'] == create_stderr + assert result['stdout'] == create_stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_existing_replicated_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"rule_name":"{}","type":1,"steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + 
ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_non_existing_replicated_rule_device_class(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type, + 'device_class': fake_device_class + }) + m_exit_json.side_effect = ca_test_common.exit_json + get_rc = 2 + get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + get_stdout = '' + create_rc = 0 + create_stderr = '' + create_stdout = '' + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (create_rc, create_stdout, create_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'create-replicated', fake_name, fake_bucket_root, fake_bucket_type, fake_device_class] + assert result['rc'] == create_rc + assert result['stderr'] == create_stderr + assert result['stdout'] == create_stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_existing_replicated_rule_device_class(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type, + 'device_class': 
fake_device_class + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"rule_name":"{}","type":1,"steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_non_existing_erasure_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'erasure', + 'profile': fake_profile + }) + m_exit_json.side_effect = ca_test_common.exit_json + get_rc = 2 + get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + get_stdout = '' + create_rc = 0 + create_stderr = '' + create_stdout = 'created rule {} at 1'.format(fake_name) + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (create_rc, create_stdout, create_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'create-erasure', fake_name, fake_profile] + assert result['rc'] == create_rc + assert result['stderr'] == create_stderr + assert result['stdout'] == create_stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def 
test_create_existing_erasure_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'erasure', + 'profile': fake_profile + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"type":3,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_update_existing_replicated_rule(self, m_run_command, m_fail_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'replicated', + 'bucket_root': fake_bucket_root, + 'bucket_type': fake_bucket_type, + 'device_class': fake_device_class + }) + m_fail_json.side_effect = ca_test_common.fail_json + rc = 0 + stderr = '' + stdout = '{{"type":3,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + print(result) + assert not result['changed'] + assert result['msg'] == 'Can not convert crush rule {} to replicated'.format(fake_name) + assert result['rc'] == 1 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_update_existing_erasure_rule(self, m_run_command, m_fail_json): + 
ca_test_common.set_module_args({ + 'name': fake_name, + 'rule_type': 'erasure', + 'profile': fake_profile + }) + m_fail_json.side_effect = ca_test_common.fail_json + rc = 0 + stderr = '' + stdout = '{{"type":1,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + print(result) + assert not result['changed'] + assert result['msg'] == 'Can not convert crush rule {} to erasure'.format(fake_name) + assert result['rc'] == 1 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_remove_non_existing_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'state': 'absent' + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 2 + stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + stdout = '' + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == "Crush Rule {} doesn't exist".format(fake_name) + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_remove_existing_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'state': 'absent' + }) + m_exit_json.side_effect = ca_test_common.exit_json + get_rc = 0 + get_stderr = '' + get_stdout = 
'{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + remove_rc = 0 + remove_stderr = '' + remove_stdout = '' + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (remove_rc, remove_stdout, remove_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'rm', fake_name] + assert result['rc'] == remove_rc + assert result['stderr'] == remove_stderr + assert result['stdout'] == remove_stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_get_non_existing_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 2 + stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name) + stdout = '' + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule_info.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_get_existing_rule(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = 
'{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule_info.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', fake_name, '--format=json'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_get_all_rules(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': str(), + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = '' + stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule_info.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', 'rule', + 'dump', '', '--format=json'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_container(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + }) + m_exit_json.side_effect = ca_test_common.exit_json + rc = 0 + stderr = 
'' + stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type) + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_crush_rule_info.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', fake_container_image, + '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', 'crush', + 'rule', 'dump', fake_name, '--format=json'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout diff --git a/tests/library/test_ceph_dashboard_user.py b/tests/library/test_ceph_dashboard_user.py new file mode 100644 index 0000000..d0d6e9c --- /dev/null +++ b/tests/library/test_ceph_dashboard_user.py @@ -0,0 +1,170 @@ +from mock.mock import MagicMock, patch +import pytest +import os +import ca_test_common +import ceph_dashboard_user + +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' + + +class TestCephDashboardUserModule(object): + def setup_method(self): + self.fake_binary = 'ceph' + self.fake_cluster = 'ceph' + self.fake_name = 'foo' + self.fake_user = 'foo' + self.fake_password = 'bar' + self.fake_roles = ['read-only', 'block-manager'] + self.fake_params = {'cluster': self.fake_cluster, + 'name': self.fake_user, + 'password': self.fake_password, + 'roles': self.fake_roles} + self.fake_module = MagicMock() + self.fake_module.params = self.fake_params + + def test_create_user(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-create', + '-i', '-', 
+ self.fake_user + ] + + assert ceph_dashboard_user.create_user(self.fake_module) == expected_cmd + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_create_user_container(self): + fake_container_cmd = [ + fake_container_binary, + 'run', + '--interactive', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + self.fake_binary, + fake_container_image + ] + self.fake_module.params = self.fake_params + expected_cmd = fake_container_cmd + [ + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-create', + '-i', '-', + self.fake_user + ] + + assert ceph_dashboard_user.create_user(self.fake_module, container_image=fake_container_image) == expected_cmd + + def test_set_roles(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-set-roles', + self.fake_user + ] + expected_cmd.extend(self.fake_roles) + + assert ceph_dashboard_user.set_roles(self.fake_module) == expected_cmd + + def test_set_password(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-set-password', + '-i', '-', + self.fake_user + ] + + assert ceph_dashboard_user.set_password(self.fake_module) == expected_cmd + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_set_password_container(self): + fake_container_cmd = [ + fake_container_binary, + 'run', + '--interactive', + '--rm', + 
'--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + self.fake_binary, + fake_container_image + ] + self.fake_module.params = self.fake_params + expected_cmd = fake_container_cmd + [ + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-set-password', + '-i', '-', + self.fake_user + ] + + assert ceph_dashboard_user.set_password(self.fake_module, container_image=fake_container_image) == expected_cmd + + def test_get_user(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-show', + self.fake_user, + '--format=json' + ] + + assert ceph_dashboard_user.get_user(self.fake_module) == expected_cmd + + def test_remove_user(self): + self.fake_module.params = self.fake_params + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'dashboard', 'ac-user-delete', + self.fake_user + ] + + assert ceph_dashboard_user.remove_user(self.fake_module) == expected_cmd + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_create_user_fail_with_weak_password(self, m_run_command, m_fail_json): + ca_test_common.set_module_args(self.fake_module.params) + m_fail_json.side_effect = ca_test_common.fail_json + get_rc = 2 + get_stderr = 'Error ENOENT: User {} does not exist.'.format(self.fake_user) + get_stdout = '' + create_rc = 22 + create_stderr = 'Error EINVAL: Password is too weak.' 
+ create_stdout = '' + m_run_command.side_effect = [ + (get_rc, get_stdout, get_stderr), + (create_rc, create_stdout, create_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_dashboard_user.main() + + result = result.value.args[0] + assert result['msg'] == create_stderr + assert result['rc'] == 1 diff --git a/tests/library/test_ceph_ec_profile.py b/tests/library/test_ceph_ec_profile.py new file mode 100644 index 0000000..c20cb0b --- /dev/null +++ b/tests/library/test_ceph_ec_profile.py @@ -0,0 +1,243 @@ +from mock.mock import MagicMock, patch +import ca_test_common +import ceph_ec_profile +import pytest + + +class TestCephEcProfile(object): + def setup_method(self): + self.fake_params = [] + self.fake_binary = 'ceph' + self.fake_cluster = 'ceph' + self.fake_name = 'foo' + self.fake_k = 2 + self.fake_m = 4 + self.fake_module = MagicMock() + self.fake_module.params = self.fake_params + + def test_get_profile(self): + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'osd', 'erasure-code-profile', + 'get', self.fake_name, + '--format=json' + ] + + assert ceph_ec_profile.get_profile(self.fake_name) == expected_cmd + + @pytest.mark.parametrize("stripe_unit,crush_device_class,force", [(False, None, False), + (32, None, True), + (False, None, True), + (32, None, False), + (False, 'hdd', False), + (32, 'ssd', True), + (False, 'nvme', True), + (32, 'hdd', False)]) + def test_create_profile(self, stripe_unit, crush_device_class, force): + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'osd', 'erasure-code-profile', + 'set', self.fake_name, + 'k={}'.format(self.fake_k), 'm={}'.format(self.fake_m), + ] + if stripe_unit: + expected_cmd.append('stripe_unit={}'.format(stripe_unit)) + if crush_device_class: + 
expected_cmd.append('crush-device-class={}'.format(crush_device_class)) + if force: + expected_cmd.append('--force') + + user_profile = { + "k": self.fake_k, + "m": self.fake_m + } + + if stripe_unit: + user_profile["stripe_unit"] = stripe_unit + if crush_device_class: + user_profile["crush-device-class"] = crush_device_class + + assert ceph_ec_profile.create_profile(self.fake_name, + user_profile, + force) == expected_cmd + + def test_delete_profile(self): + expected_cmd = [ + self.fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', self.fake_cluster, + 'osd', 'erasure-code-profile', + 'rm', self.fake_name + ] + + assert ceph_ec_profile.delete_profile(self.fake_name, + self.fake_cluster) == expected_cmd + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_present_nothing_to_update(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "present", + "name": "foo", + "k": 2, + "m": 4, + "stripe_unit": 32, + }) + m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.return_value = (0, + ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'], + '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}', # noqa: E501 + '') + + with pytest.raises(ca_test_common.AnsibleExitJson) as r: + ceph_ec_profile.run_module() + + result = r.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'] + assert result['stdout'] == 
'{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}' # noqa: E501 + assert not result['stderr'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_present_profile_to_update(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "present", + "name": "foo", + "k": 2, + "m": 6, + "stripe_unit": 32 + }) + m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.side_effect = [ + (0, + ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'], + '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}', # noqa: E501 + ''), + (0, + ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force'], + '', + '' + ) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as r: + ceph_ec_profile.run_module() + + result = r.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force'] + assert not result['stdout'] + assert not result['stderr'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_present_profile_doesnt_exist(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "present", + "name": "foo", + "k": 2, + "m": 4, + "stripe_unit": 32 + }) + 
m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.side_effect = [ + (2, + ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'], + '', + "Error ENOENT: unknown erasure code profile 'foo'"), + (0, + ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force'], + '', + '' + ) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as r: + ceph_ec_profile.run_module() + + result = r.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force'] + assert not result['stdout'] + assert not result['stderr'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_absent_on_existing_profile(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "absent", + "name": "foo" + }) + m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.return_value = (0, + ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'], + '', + '') + + with pytest.raises(ca_test_common.AnsibleExitJson) as r: + ceph_ec_profile.run_module() + + result = r.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'] + assert result['stdout'] == 'Profile foo removed.' 
+ assert not result['stderr'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ceph_ec_profile.exec_command') + def test_state_absent_on_nonexisting_profile(self, m_exec_command, m_exit_json, m_fail_json): + ca_test_common.set_module_args({"state": "absent", + "name": "foo" + }) + m_exit_json.side_effect = ca_test_common.exit_json + m_fail_json.side_effect = ca_test_common.fail_json + m_exec_command.return_value = (0, + ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'], + '', + 'erasure-code-profile foo does not exist') + + with pytest.raises(ca_test_common.AnsibleExitJson) as r: + ceph_ec_profile.run_module() + + result = r.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'] + assert result['stdout'] == "Skipping, the profile foo doesn't exist" + assert result['stderr'] == 'erasure-code-profile foo does not exist' + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'name': 'foo', + 'k': 2, + 'm': 4, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_ec_profile.run_module() + + result = result.value.args[0] + assert not result['changed'] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] diff --git a/tests/library/test_ceph_fs.py b/tests/library/test_ceph_fs.py new file mode 100644 index 0000000..f18b506 --- /dev/null +++ b/tests/library/test_ceph_fs.py @@ -0,0 +1,107 @@ +from mock.mock import MagicMock +import ceph_fs + + +fake_binary = 'ceph' +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'docker.io/ceph/daemon:latest' +fake_container_cmd = [ + fake_container_binary, + 'run', + 
'--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=' + fake_binary, + fake_container_image +] +fake_fs = 'foo' +fake_data_pool = 'bar_data' +fake_metadata_pool = 'bar_metadata' +fake_max_mds = 2 +fake_params = {'cluster': fake_cluster, + 'name': fake_fs, + 'data': fake_data_pool, + 'metadata': fake_metadata_pool, + 'max_mds': fake_max_mds} + + +class TestCephFsModule(object): + + def test_create_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'new', + fake_fs, + fake_metadata_pool, + fake_data_pool + ] + + assert ceph_fs.create_fs(fake_module) == expected_cmd + + def test_set_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'set', + fake_fs, + 'max_mds', + str(fake_max_mds) + ] + + assert ceph_fs.set_fs(fake_module) == expected_cmd + + def test_get_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'get', + fake_fs, + '--format=json' + ] + + assert ceph_fs.get_fs(fake_module) == expected_cmd + + def test_remove_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'rm', + fake_fs, + '--yes-i-really-mean-it' + ] + + assert ceph_fs.remove_fs(fake_module) == expected_cmd + + def test_fail_fs(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '-n', 'client.admin', 
+ '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', fake_cluster, + 'fs', 'fail', + fake_fs + ] + + assert ceph_fs.fail_fs(fake_module) == expected_cmd diff --git a/tests/library/test_ceph_key.py b/tests/library/test_ceph_key.py new file mode 100644 index 0000000..81d9023 --- /dev/null +++ b/tests/library/test_ceph_key.py @@ -0,0 +1,589 @@ +import json +import os +import mock +import pytest +import ca_test_common +import ceph_key +import ceph_key_info + + +@mock.patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'docker'}) +class TestCephKeyModule(object): + + def test_generate_secret(self): + expected_length = 40 + result = len(ceph_key.generate_secret()) + assert result == expected_length + + def test_generate_caps_ceph_authtool(self): + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_type = "ceph-authtool" + expected_command_list = [ + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx' + ] + result = ceph_key.generate_caps(fake_type, fake_caps) + assert result == expected_command_list + + def test_generate_caps_not_ceph_authtool(self): + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_type = "" + expected_command_list = [ + 'mon', + 'allow *', + 'osd', + 'allow rwx' + ] + result = ceph_key.generate_caps(fake_type, fake_caps) + assert result == expected_command_list + + def test_generate_ceph_cmd_list_non_container(self): + fake_cluster = "fake" + fake_args = ['arg'] + fake_user = "fake-user" + fake_user_key = "/tmp/my-key" + expected_command_list = [ + 'ceph', + '-n', + "fake-user", + '-k', + "/tmp/my-key", + '--cluster', + fake_cluster, + 'auth', + 'arg' + ] + result = ceph_key.generate_cmd( + sub_cmd=['auth'], + args=fake_args, + cluster=fake_cluster, + user=fake_user, + user_key=fake_user_key) + assert result == expected_command_list + + def test_generate_ceph_cmd_list_container(self): + fake_cluster = "fake" + fake_args = ['arg'] + fake_user = "fake-user" + fake_user_key = "/tmp/my-key" + 
fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = ['docker', + 'run', + '--rm', + '--net=host', # noqa E501 + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', + "fake-user", + '-k', + "/tmp/my-key", + '--cluster', + fake_cluster, + 'auth', + 'arg'] + result = ceph_key.generate_cmd( + sub_cmd=['auth'], + args=fake_args, + cluster=fake_cluster, + user=fake_user, + user_key=fake_user_key, + container_image=fake_container_image) + assert result == expected_command_list + + def test_generate_ceph_authtool_cmd_non_container_no_auid(self): + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_dest = "/fake/ceph" + fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + expected_command_list = [ + 'ceph-authtool', + '--create-keyring', + fake_file_destination, + '--name', + fake_name, + '--add-key', + fake_secret, + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx', + ] + result = ceph_key.generate_ceph_authtool_cmd( + fake_cluster, fake_name, fake_secret, fake_caps, fake_file_destination) # noqa E501 + assert result == expected_command_list + + def test_generate_ceph_authtool_cmd_container(self): + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_dest = "/fake/ceph" + fake_keyring_filename = fake_cluster + "." 
+ fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = ['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph-authtool', + 'quay.io/ceph/daemon:latest-luminous', + '--create-keyring', + fake_file_destination, + '--name', + fake_name, + '--add-key', + fake_secret, + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx'] + result = ceph_key.generate_ceph_authtool_cmd( + fake_cluster, fake_name, fake_secret, fake_caps, fake_file_destination, fake_container_image) # noqa E501 + assert result == expected_command_list + + def test_create_key_non_container(self): + fake_module = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_import_key = True + fake_dest = "/fake/ceph" + fake_keyring_filename = fake_cluster + "." 
+ fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + expected_command_list = [ + ['ceph-authtool', '--create-keyring', fake_file_destination, '--name', fake_name, + '--add-key', fake_secret, '--cap', 'mon', 'allow *', '--cap', 'osd', 'allow rwx'], + ['ceph', '-n', fake_user, '-k', fake_user_key, '--cluster', fake_cluster, 'auth', + 'import', '-i', fake_file_destination], + ] + result = ceph_key.create_key(fake_module, fake_cluster, fake_user, fake_user_key, + fake_name, fake_secret, fake_caps, fake_import_key, + fake_file_destination) + assert result == expected_command_list + + def test_create_key_container(self): + fake_module = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_dest = "/fake/ceph" + fake_import_key = True + fake_keyring_filename = fake_cluster + "." 
+ fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [ + ['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph-authtool', + 'quay.io/ceph/daemon:latest-luminous', + '--create-keyring', fake_file_destination, + '--name', fake_name, + '--add-key', fake_secret, + '--cap', 'mon', 'allow *', + '--cap', 'osd', 'allow rwx'], + ['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', 'client.admin', + '-k', '/etc/ceph/fake.client.admin.keyring', + '--cluster', fake_cluster, + 'auth', 'import', + '-i', fake_file_destination]] + result = ceph_key.create_key(fake_module, fake_cluster, fake_user, fake_user_key, fake_name, + fake_secret, fake_caps, fake_import_key, fake_file_destination, + fake_container_image) + assert result == expected_command_list + + def test_create_key_non_container_no_import(self): + fake_module = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_dest = "/fake/ceph" + fake_import_key = False + fake_keyring_filename = fake_cluster + "." 
+ fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + # create_key passes (one for ceph-authtool and one for itself) itw own array so the expected result is an array within an array # noqa E501 + expected_command_list = [[ + 'ceph-authtool', + '--create-keyring', + fake_file_destination, + '--name', + fake_name, + '--add-key', + fake_secret, + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx', ] + ] + result = ceph_key.create_key(fake_module, fake_cluster, fake_user, fake_user_key, + fake_name, fake_secret, fake_caps, fake_import_key, + fake_file_destination) # noqa E501 + assert result == expected_command_list + + def test_create_key_container_no_import(self): + fake_module = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_secret = "super-secret" + fake_caps = { + 'mon': 'allow *', + 'osd': 'allow rwx', + } + fake_dest = "/fake/ceph" + fake_import_key = False + fake_keyring_filename = fake_cluster + "." 
+ fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + # create_key passes (one for ceph-authtool and one for itself) itw own array so the expected result is an array within an array # noqa E501 + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', # noqa E128 + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph-authtool', + 'quay.io/ceph/daemon:latest-luminous', + '--create-keyring', + fake_file_destination, + '--name', + fake_name, + '--add-key', + fake_secret, + '--cap', + 'mon', + 'allow *', + '--cap', + 'osd', + 'allow rwx']] + result = ceph_key.create_key(fake_module, fake_cluster, fake_user, fake_user_key, fake_name, + fake_secret, fake_caps, fake_import_key, fake_file_destination, + fake_container_image) + assert result == expected_command_list + + def test_delete_key_non_container(self): + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + expected_command_list = [ + ['ceph', '-n', 'client.admin', '-k', '/etc/ceph/fake.client.admin.keyring', + '--cluster', fake_cluster, 'auth', 'del', fake_name], + ] + result = ceph_key.delete_key(fake_cluster, fake_user, fake_user_key, fake_name) + assert result == expected_command_list + + def test_delete_key_container(self): + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', 'client.admin', + '-k', 
'/etc/ceph/fake.client.admin.keyring', + '--cluster', fake_cluster, + 'auth', 'del', fake_name]] + result = ceph_key.delete_key( + fake_cluster, fake_user, fake_user_key, fake_name, fake_container_image) + assert result == expected_command_list + + @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml']) + def test_info_key_non_container(self, output_format): + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_cluster = "fake" + fake_name = "client.fake" + fake_user = "fake-user" + expected_command_list = [ + ['ceph', '-n', fake_user, '-k', fake_user_key, '--cluster', fake_cluster, 'auth', + 'get', fake_name, '-f', output_format], + ] + result = ceph_key.info_key( + fake_cluster, fake_name, fake_user, fake_user_key, output_format) + assert result == expected_command_list + + @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml']) + def test_info_key_container_json(self, output_format): + fake_cluster = "fake" + fake_name = "client.fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', # noqa E128 + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', fake_user, + '-k', fake_user_key, + '--cluster', fake_cluster, + 'auth', 'get', fake_name, + '-f', output_format]] + result = ceph_key.info_key( + fake_cluster, fake_name, fake_user, fake_user_key, output_format, fake_container_image) # noqa E501 + assert result == expected_command_list + + def test_list_key_non_container(self): + fake_cluster = "fake" + fake_user = "fake-user" + fake_key = "/tmp/my-key" + expected_command_list = [ + ['ceph', '-n', "fake-user", '-k', "/tmp/my-key", + '--cluster', fake_cluster, 'auth', 'ls', '-f', 'json'], 
+ ] + result = ceph_key.list_keys(fake_cluster, fake_user, fake_key) + assert result == expected_command_list + + def test_get_key_container(self): + fake_cluster = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_name = "client.fake" + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + fake_dest = "/fake/ceph" + fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + expected_command_list = [['docker', # noqa E128 + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', fake_user, + '-k', fake_user_key, + '--cluster', fake_cluster, + 'auth', 'get', + fake_name, '-o', fake_file_destination]] + result = ceph_key.get_key( + fake_cluster, fake_user, fake_user_key, fake_name, fake_file_destination, fake_container_image) + assert result == expected_command_list + + def test_get_key_non_container(self): + fake_cluster = "fake" + fake_user = 'client.admin' + fake_user_key = '/etc/ceph/fake.client.admin.keyring' + fake_dest = "/fake/ceph" + fake_name = "client.fake" + fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring" + fake_file_destination = os.path.join(fake_dest, fake_keyring_filename) + expected_command_list = [ + ['ceph', '-n', fake_user, '-k', fake_user_key, + '--cluster', fake_cluster, 'auth', 'get', fake_name, '-o', fake_file_destination], + ] + result = ceph_key.get_key( + fake_cluster, fake_user, fake_user_key, fake_name, fake_file_destination) + assert result == expected_command_list + + def test_list_key_non_container_with_mon_key(self): + fake_hostname = "mon01" + fake_cluster = "fake" + fake_user = "mon." 
+ fake_keyring_dirname = fake_cluster + "-" + fake_hostname + fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring') + expected_command_list = [ + ['ceph', '-n', "mon.", '-k', "/var/lib/ceph/mon/fake-mon01/keyring", + '--cluster', fake_cluster, 'auth', 'ls', '-f', 'json'], + ] + result = ceph_key.list_keys(fake_cluster, fake_user, fake_key) + assert result == expected_command_list + + def test_list_key_container_with_mon_key(self): + fake_hostname = "mon01" + fake_cluster = "fake" + fake_user = "mon." + fake_keyring_dirname = fake_cluster + "-" + fake_hostname + fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring') + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', "mon.", + '-k', "/var/lib/ceph/mon/fake-mon01/keyring", + '--cluster', fake_cluster, + 'auth', 'ls', + '-f', 'json'], ] + result = ceph_key.list_keys(fake_cluster, fake_user, fake_key, fake_container_image) + assert result == expected_command_list + + def test_list_key_container(self): + fake_cluster = "fake" + fake_user = "fake-user" + fake_key = "/tmp/my-key" + fake_container_image = "quay.io/ceph/daemon:latest-luminous" + expected_command_list = [['docker', + 'run', + '--rm', + '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + 'quay.io/ceph/daemon:latest-luminous', + '-n', "fake-user", + '-k', "/tmp/my-key", + '--cluster', fake_cluster, + 'auth', 'ls', + '-f', 'json'], ] + result = ceph_key.list_keys( + fake_cluster, fake_user, fake_key, fake_container_image) + assert result == expected_command_list + + def test_lookup_ceph_initial_entities(self): + fake_module = "fake" + 
fake_ceph_dict = { "auth_dump":[ { "entity":"osd.0", "key":"AQAJkMhbszeBBBAA4/V1tDFXGlft1GnHJS5wWg==", "caps":{ "mgr":"allow profile osd", "mon":"allow profile osd", "osd":"allow *" } }, { "entity":"osd.1", "key":"AQAjkMhbshueAhAAjZec50aBgd1NObLz57SQvg==", "caps":{ "mgr":"allow profile osd", "mon":"allow profile osd", "osd":"allow *" } }, { "entity":"client.admin", "key":"AQDZjshbrJv6EhAAY9v6LzLYNDpPdlC3HD5KHA==", "auid":0, "caps":{ "mds":"allow", "mgr":"allow *", "mon":"allow *", "osd":"allow *" } }, { "entity":"client.bootstrap-mds", "key":"AQDojshbc4QCHhAA1ZTrkt9dbSZRVU2GzI6U4A==", "caps":{ "mon":"allow profile bootstrap-mds" } }, { "entity":"client.bootstrap-mgr", "key":"AQBfiu5bAAAAABAARcNG24hUMlk4AdstVA5MVQ==", "caps":{ "mon":"allow profile bootstrap-mgr" } }, { "entity":"client.bootstrap-osd", "key":"AQDjjshbYW+uGxAAyHcPCXXmVoL8VsTBI8z1Ng==", "caps":{ "mon":"allow profile bootstrap-osd" } }, { "entity":"client.bootstrap-rbd", "key":"AQDyjshb522eIhAAtAz6nUPMOdG4H9u0NgpXhA==", "caps":{ "mon":"allow profile bootstrap-rbd" } }, { "entity":"client.bootstrap-rbd-mirror", "key":"AQDfh+5bAAAAABAAEGBD59Lj2vAKIdN8pq4lbQ==", "caps":{ "mon":"allow profile bootstrap-rbd-mirror" } }, { "entity":"client.bootstrap-rgw", "key":"AQDtjshbDl8oIBAAq1SfSYQKDR49hJNWJVwDQw==", "caps":{ "mon":"allow profile bootstrap-rgw" } }, { "entity":"mgr.mon0", "key":"AQA0j8hbgGapORAAoDkyAvXVkM5ej4wNn4cwTQ==", "caps":{ "mds":"allow *", "mon":"allow profile mgr", "osd":"allow *" } } ] } # noqa E501 + fake_ceph_dict_str = json.dumps(fake_ceph_dict) # convert to string + expected_entity_list = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa E501 + 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa E501 + result = ceph_key.lookup_ceph_initial_entities(fake_module, fake_ceph_dict_str) + assert result == expected_entity_list + + def test_build_key_path_admin(self): + fake_cluster = "fake" + entity = "client.admin" + 
expected_result = "/etc/ceph/fake.client.admin.keyring" + result = ceph_key.build_key_path(fake_cluster, entity) + assert result == expected_result + + def test_build_key_path_bootstrap_osd(self): + fake_cluster = "fake" + entity = "client.bootstrap-osd" + expected_result = "/var/lib/ceph/bootstrap-osd/fake.keyring" + result = ceph_key.build_key_path(fake_cluster, entity) + assert result == expected_result + + @mock.patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @mock.patch('ceph_key_info.exec_commands') + @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml']) + def test_state_info(self, m_exec_commands, m_exit_json, output_format): + ca_test_common.set_module_args({"state": "info", + "cluster": "ceph", + "name": "client.admin", + "output_format": output_format}) + m_exit_json.side_effect = ca_test_common.exit_json + m_exec_commands.return_value = (0, + ['ceph', 'auth', 'get', 'client.admin', '-f', output_format], + '[{"entity":"client.admin","key":"AQC1tw5fF156GhAAoJCvHGX/jl/k7/N4VZm8iQ==","caps":{"mds":"allow *","mgr":"allow *","mon":"allow *","osd":"allow *"}}]', # noqa: E501 + 'exported keyring for client.admin') + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_key_info.run_module() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', 'auth', 'get', 'client.admin', '-f', output_format] + assert result['stdout'] == '[{"entity":"client.admin","key":"AQC1tw5fF156GhAAoJCvHGX/jl/k7/N4VZm8iQ==","caps":{"mds":"allow *","mgr":"allow *","mon":"allow *","osd":"allow *"}}]' # noqa: E501 + assert result['stderr'] == 'exported keyring for client.admin' + assert result['rc'] == 0 + + @mock.patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_state_info_invalid_format(self, m_fail_json): + invalid_format = 'txt' + ca_test_common.set_module_args({"state": "info", + "cluster": "ceph", + "name": "client.admin", + "output_format": invalid_format}) + 
m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_key_info.run_module() + + result = result.value.args[0] + assert result['msg'] == 'value of output_format must be one of: json, plain, xml, yaml, got: {}'.format(invalid_format) + + @mock.patch('ceph_key.generate_secret') + @mock.patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_generate_key(self, m_exit_json, m_generate_secret): + fake_secret = b'AQDaLb1fAAAAABAAsIMKdGEKu+lGOyXnRfT0Hg==' + ca_test_common.set_module_args({"state": "generate_secret"}) + m_exit_json.side_effect = ca_test_common.exit_json + m_generate_secret.return_value = fake_secret + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_key.run_module() + assert result.value.args[0]['stdout'] == fake_secret.decode() diff --git a/tests/library/test_ceph_mgr_module.py b/tests/library/test_ceph_mgr_module.py new file mode 100644 index 0000000..d426a95 --- /dev/null +++ b/tests/library/test_ceph_mgr_module.py @@ -0,0 +1,162 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_mgr_module + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_module = 'noup' +fake_user = 'client.admin' +fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user) + + +class TestCephMgrModuleModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_without_parameters(self, m_fail_json): + ca_test_common.set_module_args({}) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['msg'] == 'missing required arguments: name' + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'name': 
fake_module, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'Error ENOENT: all mgr daemons do not support module \'{}\', pass --force to force enablement'.format(fake_module) + rc = 2 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_enable_module(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module, + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 
'module', 'enable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_already_enable_module(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module, + }) + m_exit_json.side_effect = ca_test_common.exit_json + stderr = 'module \'{}\' is already enabled'.format(fake_module) + stdout = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_disable_module(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module, + 'state': 'disable' + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'disable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + 
@patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_container(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_module, + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '{} is set'.format(fake_module) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_mgr_module.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', fake_container_image, + '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout diff --git a/tests/library/test_ceph_osd.py b/tests/library/test_ceph_osd.py new file mode 100644 index 0000000..6d1f314 --- /dev/null +++ b/tests/library/test_ceph_osd.py @@ -0,0 +1,244 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_osd + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_id = '42' +fake_ids = ['0', '7', '13'] +fake_user = 'client.admin' +fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user) +invalid_state = 'foo' + + +class TestCephOSDModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_without_parameters(self, m_fail_json): + ca_test_common.set_module_args({}) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['msg'] == 'missing required 
arguments: ids, state' + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_with_invalid_state(self, m_fail_json): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': invalid_state, + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['msg'] == ('value of state must be one of: destroy, down, ' + 'in, out, purge, rm, got: {}'.format(invalid_state)) + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': 'rm', + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'rm', fake_id] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': 'rm' + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'Error EBUSY: osd.{} is still up; must be down before removal.'.format(fake_id) + rc = 16 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'rm', fake_id] + assert result['rc'] == rc + assert result['stderr'] == stderr + + 
@patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + @pytest.mark.parametrize('state', ['destroy', 'down', 'in', 'out', 'purge', 'rm']) + def test_set_state(self, m_run_command, m_exit_json, state): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': state + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'marked {} osd.{}'.format(state, fake_id) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state, fake_id] + if state in ['destroy', 'purge']: + cmd.append('--yes-i-really-mean-it') + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == cmd + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + @pytest.mark.parametrize('state', ['down', 'in', 'out', 'rm']) + def test_set_state_multiple_ids(self, m_run_command, m_exit_json, state): + ca_test_common.set_module_args({ + 'ids': fake_ids, + 'state': state + }) + m_exit_json.side_effect = ca_test_common.exit_json + stderr = '' + stdout = '' + for osd in fake_ids: + stderr += 'marked {} osd.{} '.format(state, osd) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state] + cmd.extend(fake_ids) + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == cmd + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + 
@patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + @pytest.mark.parametrize('state', ['destroy', 'purge']) + def test_invalid_state_multiple_ids(self, m_run_command, m_fail_json, state): + ca_test_common.set_module_args({ + 'ids': fake_ids, + 'state': state + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['msg'] == 'destroy and purge only support one OSD at at time' + assert result['rc'] == 1 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + @pytest.mark.parametrize('state', ['down', 'in', 'out']) + def test_already_set_state(self, m_run_command, m_exit_json, state): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': state + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'osd.{} is already {}.'.format(fake_id, state) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state, fake_id] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == cmd + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + @pytest.mark.parametrize('state', ['down', 'in', 'out', 'rm']) + def test_one_already_set_state_multiple_ids(self, m_run_command, m_exit_json, state): + ca_test_common.set_module_args({ + 'ids': fake_ids, + 'state': state + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'marked {} osd.{}. osd.{} does not exist. 
osd.{} does not exist.'.format(state, fake_ids[0], fake_ids[1], fake_ids[2]) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state] + cmd.extend(fake_ids) + if state in ['destroy', 'purge']: + cmd.append('--yes-i-really-mean-it') + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == cmd + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + @pytest.mark.parametrize('state', ['destroy', 'down', 'in', 'out', 'purge', 'rm']) + def test_set_state_with_container(self, m_run_command, m_exit_json, state): + ca_test_common.set_module_args({ + 'ids': fake_id, + 'state': state + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'marked {} osd.{}'.format(state, fake_id) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + cmd = [fake_container_binary, 'run', '--rm', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', fake_container_image, + '-n', fake_user, '-k', fake_keyring, + '--cluster', fake_cluster, 'osd', state, fake_id] + if state in ['destroy', 'purge']: + cmd.append('--yes-i-really-mean-it') + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_osd.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == cmd + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout diff --git 
from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_osd_flag

fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.io/ceph/daemon:latest'
fake_flag = 'noup'
fake_user = 'client.admin'
fake_keyring = f'/etc/ceph/{fake_cluster}.{fake_user}.keyring'
invalid_flag = 'nofoo'

# Bare-metal command prefix shared by every expected `ceph osd ...` call below.
BASE_CMD = ['ceph', '-n', fake_user, '-k', fake_keyring,
            '--cluster', fake_cluster, 'osd']


class TestCephOSDFlagModule(object):
    """Unit tests for the ceph_osd_flag Ansible module."""

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    def test_without_parameters(self, m_fail_json):
        """The flag name is a mandatory argument."""
        ca_test_common.set_module_args({})
        m_fail_json.side_effect = ca_test_common.fail_json

        with pytest.raises(ca_test_common.AnsibleFailJson) as exc:
            ceph_osd_flag.main()

        failure = exc.value.args[0]
        assert failure['msg'] == 'missing required arguments: name'

    @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
    def test_with_invalid_flag(self, m_fail_json):
        """Unknown flag names are rejected by argument validation."""
        ca_test_common.set_module_args({'name': invalid_flag})
        m_fail_json.side_effect = ca_test_common.fail_json

        with pytest.raises(ca_test_common.AnsibleFailJson) as exc:
            ceph_osd_flag.main()

        failure = exc.value.args[0]
        assert failure['msg'] == ('value of name must be one of: noup, nodown, '
                                  'noout, nobackfill, norebalance, norecover, '
                                  'noscrub, nodeep-scrub, noautoscale, '
                                  'got: {}'.format(invalid_flag))

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    def test_with_check_mode(self, m_exit_json):
        """Check mode reports the would-be command without running it."""
        ca_test_common.set_module_args({'name': fake_flag,
                                        '_ansible_check_mode': True})
        m_exit_json.side_effect = ca_test_common.exit_json

        with pytest.raises(ca_test_common.AnsibleExitJson) as exc:
            ceph_osd_flag.main()

        outcome = exc.value.args[0]
        assert not outcome['changed']
        assert outcome['cmd'] == BASE_CMD + ['set', fake_flag]
        assert outcome['rc'] == 0
        assert not outcome['stdout']
        assert not outcome['stderr']

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    def test_with_failure(self, m_run_command, m_exit_json):
        """A CLI failure propagates rc and stderr into the module result."""
        ca_test_common.set_module_args({'name': fake_flag})
        m_exit_json.side_effect = ca_test_common.exit_json
        failure_rc = 22
        failure_stderr = 'Error EINVAL: invalid command'
        m_run_command.return_value = failure_rc, '', failure_stderr

        with pytest.raises(ca_test_common.AnsibleExitJson) as exc:
            ceph_osd_flag.main()

        outcome = exc.value.args[0]
        assert outcome['changed']
        assert outcome['cmd'] == BASE_CMD + ['set', fake_flag]
        assert outcome['rc'] == failure_rc
        assert outcome['stderr'] == failure_stderr

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    def test_set_flag(self, m_run_command, m_exit_json):
        """The default state issues `ceph osd set <flag>`."""
        ca_test_common.set_module_args({'name': fake_flag})
        m_exit_json.side_effect = ca_test_common.exit_json
        fake_stderr = f'{fake_flag} is set'
        m_run_command.return_value = 0, '', fake_stderr

        with pytest.raises(ca_test_common.AnsibleExitJson) as exc:
            ceph_osd_flag.main()

        outcome = exc.value.args[0]
        assert outcome['changed']
        assert outcome['cmd'] == BASE_CMD + ['set', fake_flag]
        assert outcome['rc'] == 0
        assert outcome['stderr'] == fake_stderr
        assert outcome['stdout'] == ''

    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    def test_unset_flag(self, m_run_command, m_exit_json):
        """state=absent issues `ceph osd unset <flag>`."""
        ca_test_common.set_module_args({'name': fake_flag, 'state': 'absent'})
        m_exit_json.side_effect = ca_test_common.exit_json
        fake_stderr = f'{fake_flag} is unset'
        m_run_command.return_value = 0, '', fake_stderr

        with pytest.raises(ca_test_common.AnsibleExitJson) as exc:
            ceph_osd_flag.main()

        outcome = exc.value.args[0]
        assert outcome['changed']
        assert outcome['cmd'] == BASE_CMD + ['unset', fake_flag]
        assert outcome['rc'] == 0
        assert outcome['stderr'] == fake_stderr
        assert outcome['stdout'] == ''

    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
    @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
    @patch('ansible.module_utils.basic.AnsibleModule.run_command')
    def test_with_container(self, m_run_command, m_exit_json):
        """With container env vars set, the ceph call runs inside podman."""
        ca_test_common.set_module_args({'name': fake_flag})
        m_exit_json.side_effect = ca_test_common.exit_json
        fake_stderr = f'{fake_flag} is set'
        m_run_command.return_value = 0, '', fake_stderr

        with pytest.raises(ca_test_common.AnsibleExitJson) as exc:
            ceph_osd_flag.main()

        outcome = exc.value.args[0]
        assert outcome['changed']
        assert outcome['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host',
                                  '-v', '/etc/ceph:/etc/ceph:z',
                                  '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                  '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                  '--entrypoint=ceph', fake_container_image,
                                  '-n', fake_user, '-k', fake_keyring,
                                  '--cluster', fake_cluster, 'osd', 'set', fake_flag]
        assert outcome['rc'] == 0
        assert outcome['stderr'] == fake_stderr
        assert outcome['stdout'] == ''
import os
import sys

# FIX: the library path must be on sys.path BEFORE `import ceph_pool` —
# the original appended it after the import, which only worked by accident
# of pytest's rootdir. This matches the ordering used in test_ceph_volume.py.
sys.path.append('./library')

import ceph_pool  # noqa: E402
from mock.mock import patch  # noqa: E402
import pytest  # noqa: E402

fake_user = 'client.admin'
fake_user_key = '/etc/ceph/ceph.client.admin.keyring'
fake_pool_name = 'foo'
fake_cluster_name = 'ceph'
fake_container_image_name = 'quay.io/ceph/daemon:latest-luminous'

# Credentials/cluster options shared by every expected ceph/rbd invocation.
AUTH_ARGS = ['-n', fake_user, '-k', fake_user_key, '--cluster', fake_cluster_name]


def container_prefix(entrypoint='ceph'):
    """Expected `podman run` wrapper for a containerized ceph/rbd call.

    Deduplicates the 20-line prefix previously repeated in every test.
    """
    return ['podman', 'run', '--rm', '--net=host',
            '-v', '/etc/ceph:/etc/ceph:z',
            '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
            '-v', '/var/log/ceph/:/var/log/ceph/:z',
            '--entrypoint=' + entrypoint,
            fake_container_image_name]


@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'podman'})
class TestCephPoolModule(object):
    """Unit tests for the command builders of the ceph_pool module."""

    def setup_method(self):
        # Pool details as `ceph osd pool ls detail -f json` would report them.
        self.fake_running_pool_details = {
            'pool_id': 39,
            'pool_name': 'foo2',
            'create_time': '2020-05-12T12:32:03.696673+0000',
            'flags': 32769,
            'flags_names': 'hashpspool,creating',
            'type': 1,
            'size': 2,
            'min_size': 1,
            'crush_rule': 'replicated_rule',
            'object_hash': 2,
            'pg_autoscale_mode': 'on',
            'pg_num': 32,
            'pg_placement_num': 32,
            'pg_placement_num_target': 32,
            'pg_num_target': 32,
            'pg_num_pending': 32,
            'last_pg_merge_meta': {
                'source_pgid': '0.0',
                'ready_epoch': 0,
                'last_epoch_started': 0,
                'last_epoch_clean': 0,
                'source_version': "0'0",
                'target_version': "0'0",
            },
            'last_change': '109',
            'last_force_op_resend': '0',
            'last_force_op_resend_prenautilus': '0',
            'last_force_op_resend_preluminous': '0',
            'auid': 0,
            'snap_mode': 'selfmanaged',
            'snap_seq': 0,
            'snap_epoch': 0,
            'pool_snaps': [],
            'removed_snaps': '[]',
            'quota_max_bytes': 0,
            'quota_max_objects': 0,
            'tiers': [],
            'tier_of': -1,
            'read_tier': -1,
            'write_tier': -1,
            'cache_mode': 'none',
            'target_max_bytes': 0,
            'target_max_objects': 0,
            'cache_target_dirty_ratio_micro': 400000,
            'cache_target_dirty_high_ratio_micro': 600000,
            'cache_target_full_ratio_micro': 800000,
            'cache_min_flush_age': 0,
            'cache_min_evict_age': 0,
            'erasure_code_profile': '',
            'hit_set_params': {'type': 'none'},
            'hit_set_period': 0,
            'hit_set_count': 0,
            'use_gmt_hitset': True,
            'min_read_recency_for_promote': 0,
            'min_write_recency_for_promote': 0,
            'hit_set_grade_decay_rate': 0,
            'hit_set_search_last_n': 0,
            'grade_table': [],
            'stripe_width': 0,
            'expected_num_objects': 0,
            'fast_read': False,
            'options': {},
            # 'target_size_ratio' normally lives in 'options'; it is kept
            # flattened here — see the comment in get_pool_details().
            'target_size_ratio': 0.3,
            'application_metadata': {'rbd': {}},
            'application': 'rbd',
        }
        # What a playbook user would request (all values are CLI strings).
        self.fake_user_pool_config = {
            'pool_name': {'value': 'foo2'},
            'pg_num': {'value': '32', 'cli_set_opt': 'pg_num'},
            'pgp_num': {'value': '0', 'cli_set_opt': 'pgp_num'},
            'pg_autoscale_mode': {'value': 'on', 'cli_set_opt': 'pg_autoscale_mode'},
            'target_size_ratio': {'value': '0.3', 'cli_set_opt': 'target_size_ratio'},
            'application': {'value': 'rbd'},
            'type': {'value': 'replicated'},
            'erasure_profile': {'value': 'default'},
            'crush_rule': {'value': 'replicated_rule', 'cli_set_opt': 'crush_rule'},
            'expected_num_objects': {'value': '0'},
            'size': {'value': '2', 'cli_set_opt': 'size'},
            'min_size': {'value': '0', 'cli_set_opt': 'min_size'},
            'pg_placement_num': {'value': '32', 'cli_set_opt': 'pgp_num'},
        }

    def test_check_pool_exist(self):
        pool = self.fake_user_pool_config['pool_name']['value']
        expected = container_prefix() + AUTH_ARGS + \
            ['osd', 'pool', 'stats', pool, '-f', 'json']

        cmd = ceph_pool.check_pool_exist(fake_cluster_name, pool, fake_user,
                                         fake_user_key, output_format='json',
                                         container_image=fake_container_image_name)
        assert cmd == expected

    def test_get_default_running_config(self):
        """One `ceph config get mon.*` command is generated per default option."""
        params = ['osd_pool_default_size',
                  'osd_pool_default_min_size',
                  'osd_pool_default_pg_num',
                  'osd_pool_default_pgp_num']
        for param in params:
            expected = container_prefix() + AUTH_ARGS + \
                ['config', 'get', 'mon.*', param]
            cmd = ceph_pool.generate_get_config_cmd(
                param, fake_cluster_name, fake_user, fake_user_key,
                container_image=fake_container_image_name)
            assert cmd == expected

    def test_get_application_pool(self):
        pool = self.fake_user_pool_config['pool_name']['value']
        expected = container_prefix() + AUTH_ARGS + \
            ['osd', 'pool', 'application', 'get', pool, '-f', 'json']

        cmd = ceph_pool.get_application_pool(
            fake_cluster_name, pool, fake_user, fake_user_key, 'json',
            container_image=fake_container_image_name)
        assert cmd == expected

    def test_get_crush_rule_pool(self):
        pool = self.fake_user_pool_config['pool_name']['value']
        expected = container_prefix() + AUTH_ARGS + \
            ['osd', 'pool', 'get', pool, 'crush_rule', '-f', 'json']

        cmd = ceph_pool.get_crush_rule_pool(
            fake_cluster_name, pool, fake_user, fake_user_key, 'json',
            container_image=fake_container_image_name)
        assert cmd == expected

    def test_enable_application_pool(self):
        pool = self.fake_user_pool_config['pool_name']['value']
        expected = container_prefix() + AUTH_ARGS + \
            ['osd', 'pool', 'application', 'enable', pool, 'rbd']

        cmd = ceph_pool.enable_application_pool(
            fake_cluster_name, pool, 'rbd', fake_user, fake_user_key,
            container_image=fake_container_image_name)
        assert cmd == expected

    @pytest.mark.parametrize("container_image", [None, fake_container_image_name])
    def test_init_rbd_pool(self, container_image):
        pool = self.fake_user_pool_config['pool_name']['value']
        if container_image:
            expected = container_prefix('rbd') + AUTH_ARGS + ['pool', 'init', pool]
        else:
            expected = ['rbd'] + AUTH_ARGS + ['pool', 'init', pool]

        cmd = ceph_pool.init_rbd_pool(fake_cluster_name, pool, fake_user,
                                      fake_user_key, container_image)
        assert cmd == expected

    def test_disable_application_pool(self):
        pool = self.fake_user_pool_config['pool_name']['value']
        expected = container_prefix() + AUTH_ARGS + \
            ['osd', 'pool', 'application', 'disable', pool, 'rbd',
             '--yes-i-really-mean-it']

        cmd = ceph_pool.disable_application_pool(
            fake_cluster_name, pool, 'rbd', fake_user, fake_user_key,
            container_image=fake_container_image_name)
        assert cmd == expected

    def test_compare_pool_config_no_diff(self):
        delta = ceph_pool.compare_pool_config(self.fake_user_pool_config,
                                              self.fake_running_pool_details)
        assert delta == {}

    def test_compare_pool_config_std_diff(self):
        self.fake_user_pool_config['size']['value'] = '3'
        delta = ceph_pool.compare_pool_config(self.fake_user_pool_config,
                                              self.fake_running_pool_details)
        assert delta == {'size': {'cli_set_opt': 'size', 'value': '3'}}

    def test_compare_pool_config_target_size_ratio_diff(self):
        self.fake_user_pool_config['target_size_ratio']['value'] = '0.5'
        delta = ceph_pool.compare_pool_config(self.fake_user_pool_config,
                                              self.fake_running_pool_details)
        assert delta == {'target_size_ratio': {'cli_set_opt': 'target_size_ratio',
                                               'value': '0.5'}}

    def test_compare_pool_config_application_diff(self):
        self.fake_user_pool_config['application']['value'] = 'foo'
        delta = ceph_pool.compare_pool_config(self.fake_user_pool_config,
                                              self.fake_running_pool_details)
        assert delta == {'application': {'new_application': 'foo',
                                         'old_application': 'rbd',
                                         'value': 'foo'}}

    def test_list_pools_details(self):
        expected = container_prefix() + AUTH_ARGS + \
            ['osd', 'pool', 'ls', 'detail', '-f', 'json']

        cmd = ceph_pool.list_pools(fake_cluster_name, fake_user, fake_user_key,
                                   True, 'json',
                                   container_image=fake_container_image_name)
        assert cmd == expected

    def test_list_pools_nodetails(self):
        expected = container_prefix() + AUTH_ARGS + \
            ['osd', 'pool', 'ls', '-f', 'json']

        cmd = ceph_pool.list_pools(fake_cluster_name, fake_user, fake_user_key,
                                   False, 'json',
                                   container_image=fake_container_image_name)
        assert cmd == expected

    def test_create_replicated_pool_pg_autoscaler_enabled(self):
        conf = self.fake_user_pool_config
        conf['type']['value'] = 'replicated'
        expected = container_prefix() + AUTH_ARGS + [
            'osd', 'pool', 'create',
            conf['pool_name']['value'],
            conf['type']['value'],
            '--target_size_ratio', conf['target_size_ratio']['value'],
            conf['crush_rule']['value'],
            '--expected_num_objects', conf['expected_num_objects']['value'],
            '--autoscale-mode', conf['pg_autoscale_mode']['value'],
            '--size', conf['size']['value'],
        ]

        cmd = ceph_pool.create_pool(fake_cluster_name, fake_user, fake_user_key,
                                    conf,
                                    container_image=fake_container_image_name)
        assert cmd == expected

    def test_create_replicated_pool_pg_autoscaler_disabled(self):
        conf = self.fake_user_pool_config
        conf['type']['value'] = 'replicated'
        conf['pg_autoscale_mode']['value'] = 'off'
        expected = container_prefix() + AUTH_ARGS + [
            'osd', 'pool', 'create',
            conf['pool_name']['value'],
            conf['type']['value'],
            '--pg_num', conf['pg_num']['value'],
            '--pgp_num', conf['pgp_num']['value'],
            conf['crush_rule']['value'],
            '--expected_num_objects', conf['expected_num_objects']['value'],
            '--autoscale-mode', conf['pg_autoscale_mode']['value'],
            '--size', conf['size']['value'],
        ]

        cmd = ceph_pool.create_pool(fake_cluster_name, fake_user, fake_user_key,
                                    conf,
                                    container_image=fake_container_image_name)
        assert cmd == expected

    def test_create_erasure_pool_pg_autoscaler_enabled(self):
        conf = self.fake_user_pool_config
        conf['type']['value'] = 'erasure'
        conf['erasure_profile']['value'] = 'erasure-default'
        conf['crush_rule']['value'] = 'erasure_rule'
        expected = container_prefix() + AUTH_ARGS + [
            'osd', 'pool', 'create',
            conf['pool_name']['value'],
            conf['type']['value'],
            '--target_size_ratio', conf['target_size_ratio']['value'],
            conf['erasure_profile']['value'],
            conf['crush_rule']['value'],
            '--expected_num_objects', conf['expected_num_objects']['value'],
            '--autoscale-mode', conf['pg_autoscale_mode']['value'],
        ]

        cmd = ceph_pool.create_pool(fake_cluster_name, fake_user, fake_user_key,
                                    conf,
                                    container_image=fake_container_image_name)
        assert cmd == expected

    def test_create_erasure_pool_pg_autoscaler_disabled(self):
        conf = self.fake_user_pool_config
        conf['type']['value'] = 'erasure'
        conf['erasure_profile']['value'] = 'erasure-default'
        conf['crush_rule']['value'] = 'erasure_rule'
        conf['pg_autoscale_mode']['value'] = 'off'
        expected = container_prefix() + AUTH_ARGS + [
            'osd', 'pool', 'create',
            conf['pool_name']['value'],
            conf['type']['value'],
            '--pg_num', conf['pg_num']['value'],
            '--pgp_num', conf['pgp_num']['value'],
            conf['erasure_profile']['value'],
            conf['crush_rule']['value'],
            '--expected_num_objects', conf['expected_num_objects']['value'],
            '--autoscale-mode', conf['pg_autoscale_mode']['value'],
        ]

        cmd = ceph_pool.create_pool(fake_cluster_name, fake_user, fake_user_key,
                                    conf,
                                    container_image=fake_container_image_name)
        assert cmd == expected

    @pytest.mark.parametrize("target_size_ratio", ['', '0.3'])
    @pytest.mark.parametrize("pg_num", ['0', '32'])
    @pytest.mark.parametrize("pg_autoscale_mode", ["on", "off", "warn"])
    def test_create_pool_autoscale_pgnum_targetsize(self, pg_autoscale_mode,
                                                    pg_num, target_size_ratio):
        """pg_num/pgp_num and target_size_ratio are emitted per autoscale mode."""
        conf = self.fake_user_pool_config
        conf['type']['value'] = 'erasure'
        conf['erasure_profile']['value'] = 'erasure-default'
        conf['crush_rule']['value'] = 'erasure_rule'
        conf['pg_autoscale_mode']['value'] = pg_autoscale_mode
        conf['pg_num']['value'] = pg_num
        conf['pgp_num']['value'] = pg_num
        conf['target_size_ratio']['value'] = target_size_ratio

        expected = container_prefix() + AUTH_ARGS + [
            'osd', 'pool', 'create',
            conf['pool_name']['value'],
            conf['type']['value'],
        ]
        # explicit pg counts only make sense when the autoscaler is not 'on'
        if pg_autoscale_mode in ('off', 'warn'):
            expected.extend(['--pg_num', pg_num, '--pgp_num', pg_num])
        if pg_autoscale_mode in ('on', 'warn') and target_size_ratio:
            expected.extend(['--target_size_ratio', target_size_ratio])
        expected.extend([
            conf['erasure_profile']['value'],
            conf['crush_rule']['value'],
            '--expected_num_objects', conf['expected_num_objects']['value'],
            '--autoscale-mode', conf['pg_autoscale_mode']['value'],
        ])

        cmd = ceph_pool.create_pool(fake_cluster_name, fake_user, fake_user_key,
                                    conf,
                                    container_image=fake_container_image_name)
        assert cmd == expected
assert cmd == expected_command + + def test_remove_pool(self): + expected_command = [ + 'podman', + 'run', + '--rm', + '--net=host', + '-v', + '/etc/ceph:/etc/ceph:z', + '-v', + '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', + '/var/log/ceph/:/var/log/ceph/:z', + '--entrypoint=ceph', + fake_container_image_name, + '-n', + 'client.admin', + '-k', + '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + 'ceph', + 'osd', + 'pool', + 'rm', + self.fake_user_pool_config['pool_name']['value'], + self.fake_user_pool_config['pool_name']['value'], + '--yes-i-really-really-mean-it' + ] + + cmd = ceph_pool.remove_pool(fake_cluster_name, self.fake_user_pool_config['pool_name']['value'], + fake_user, fake_user_key, container_image=fake_container_image_name) + + assert cmd == expected_command diff --git a/tests/library/test_ceph_volume.py b/tests/library/test_ceph_volume.py new file mode 100644 index 0000000..0499a53 --- /dev/null +++ b/tests/library/test_ceph_volume.py @@ -0,0 +1,480 @@ +import sys +import mock +import os +import pytest +import ca_test_common +sys.path.append('./library') +import ceph_volume # noqa: E402 + + +# Python 3 +try: + from unittest.mock import MagicMock, patch +except ImportError: + # Python 2 + try: + from mock import MagicMock, patch + except ImportError: + print('You need the mock library installed on python2.x to run tests') + + +def get_mounts(mounts=None): + volumes = {} + volumes['/run/lock/lvm'] = '/run/lock/lvm:z' + volumes['/var/run/udev'] = '/var/run/udev:z' + volumes['/dev'] = '/dev' + volumes['/etc/ceph'] = '/etc/ceph:z' + volumes['/run/lvm'] = '/run/lvm' + volumes['/var/lib/ceph'] = '/var/lib/ceph:z' + volumes['/var/log/ceph'] = '/var/log/ceph:z' + if mounts is not None: + volumes.update(mounts) + + return sum([['-v', '{}:{}'.format(src_dir, dst_dir)] for src_dir, dst_dir in volumes.items()], []) + + +def get_container_cmd(mounts=None): + + return ['docker', 'run', '--rm', '--privileged', + '--net=host', '--ipc=host'] + \ + 
import sys
import mock
import os
import pytest
import ca_test_common
sys.path.append('./library')
import ceph_volume  # noqa: E402

# MagicMock/patch live in unittest.mock on python3, in the mock package on
# python2; fall back gracefully.
try:
    from unittest.mock import MagicMock, patch  # noqa: F401
except ImportError:
    try:
        from mock import MagicMock, patch  # noqa: F401
    except ImportError:
        print('You need the mock library installed on python2.x to run tests')


def get_mounts(mounts=None):
    """Return the `-v src:dst` pairs a ceph-volume container is expected to mount.

    `mounts` may override or extend the default volume map.
    """
    volumes = {
        '/run/lock/lvm': '/run/lock/lvm:z',
        '/var/run/udev': '/var/run/udev:z',
        '/dev': '/dev',
        '/etc/ceph': '/etc/ceph:z',
        '/run/lvm': '/run/lvm',
        '/var/lib/ceph': '/var/lib/ceph:z',
        '/var/log/ceph': '/var/log/ceph:z',
    }
    if mounts is not None:
        volumes.update(mounts)

    args = []
    for src_dir, dst_dir in volumes.items():
        args.extend(['-v', f'{src_dir}:{dst_dir}'])
    return args


def get_container_cmd(mounts=None):
    """Expected `docker run` wrapper around a containerized ceph-volume call."""
    return (['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host']
            + get_mounts(mounts)
            + ['--entrypoint=ceph-volume'])


@mock.patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'docker'})
class TestCephVolumeModule(object):
    """Unit tests for the command builders of the ceph_volume module."""

    def test_data_no_vg(self):
        assert ceph_volume.get_data("/dev/sda", None) == "/dev/sda"

    def test_data_with_vg(self):
        assert ceph_volume.get_data("data-lv", "data-vg") == "data-vg/data-lv"

    def test_journal_no_vg(self):
        assert ceph_volume.get_journal("/dev/sda1", None) == "/dev/sda1"

    def test_journal_with_vg(self):
        assert ceph_volume.get_journal("journal-lv", "journal-vg") == "journal-vg/journal-lv"

    def test_db_no_vg(self):
        assert ceph_volume.get_db("/dev/sda1", None) == "/dev/sda1"

    def test_db_with_vg(self):
        assert ceph_volume.get_db("db-lv", "db-vg") == "db-vg/db-lv"

    def test_wal_no_vg(self):
        assert ceph_volume.get_wal("/dev/sda1", None) == "/dev/sda1"

    def test_wal_with_vg(self):
        assert ceph_volume.get_wal("wal-lv", "wal-vg") == "wal-vg/wal-lv"

    def test_container_exec(self):
        image = "quay.io/ceph/daemon:latest"
        expected = get_container_cmd() + [image]
        assert ceph_volume.container_exec("ceph-volume", image) == expected

    def test_zap_osd_container(self):
        fake_module = MagicMock()
        fake_module.params = {'data': '/dev/sda'}
        image = "quay.io/ceph/daemon:latest"
        expected = get_container_cmd() + [image, '--cluster', 'ceph',
                                          'lvm', 'zap', '--destroy', '/dev/sda']
        assert ceph_volume.zap_devices(fake_module, image) == expected

    def test_zap_osd(self):
        fake_module = MagicMock()
        fake_module.params = {'data': '/dev/sda'}
        expected = ['ceph-volume', '--cluster', 'ceph',
                    'lvm', 'zap', '--destroy', '/dev/sda']
        assert ceph_volume.zap_devices(fake_module, None) == expected

    def test_zap_osd_fsid(self):
        fake_module = MagicMock()
        fake_module.params = {'osd_fsid': 'a_uuid'}
        expected = ['ceph-volume', '--cluster', 'ceph',
                    'lvm', 'zap', '--destroy', '--osd-fsid', 'a_uuid']
        assert ceph_volume.zap_devices(fake_module, None) == expected

    def test_zap_osd_id(self):
        fake_module = MagicMock()
        fake_module.params = {'osd_id': '123'}
        expected = ['ceph-volume', '--cluster', 'ceph',
                    'lvm', 'zap', '--destroy', '--osd-id', '123']
        assert ceph_volume.zap_devices(fake_module, None) == expected

    def test_activate_osd(self):
        expected = ['ceph-volume', '--cluster', 'ceph', 'lvm', 'activate', '--all']
        assert ceph_volume.activate_osd() == expected

    def test_list_osd(self):
        fake_module = MagicMock()
        fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
        expected = ['ceph-volume', '--cluster', 'ceph',
                    'lvm', 'list', '/dev/sda', '--format=json']
        assert ceph_volume.list_osd(fake_module, None) == expected

    def test_list_osd_container(self):
        fake_module = MagicMock()
        fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
        image = "quay.io/ceph/daemon:latest"
        # listing remounts /var/lib/ceph read-only
        expected = get_container_cmd({'/var/lib/ceph': '/var/lib/ceph:ro'}) + \
            [image, '--cluster', 'ceph',
             'lvm', 'list', '/dev/sda', '--format=json']
        assert ceph_volume.list_osd(fake_module, image) == expected

    def test_list_storage_inventory(self):
        fake_module = MagicMock()
        expected = ['ceph-volume', '--cluster', 'ceph',
                    'inventory', '--format=json']
        assert ceph_volume.list_storage_inventory(fake_module, None) == expected

    def test_list_storage_inventory_container(self):
        fake_module = MagicMock()
        image = "quay.io/ceph/daemon:latest"
        expected = get_container_cmd() + [image, '--cluster', 'ceph',
                                          'inventory', '--format=json']
        assert ceph_volume.list_storage_inventory(fake_module, image) == expected

    @pytest.mark.parametrize('objectstore', ['bluestore'])
    def test_create_osd_container(self, objectstore):
        fake_module = MagicMock()
        fake_module.params = {'data': '/dev/sda',
                              'objectstore': objectstore,
                              'cluster': 'ceph', }
        image = "quay.io/ceph/daemon:latest"
        expected = get_container_cmd() + [image, '--cluster', 'ceph',
                                          'lvm', 'create',
                                          f'--{objectstore}',
                                          '--data', '/dev/sda']
        assert ceph_volume.prepare_or_create_osd(fake_module, "create", image) == expected

    @pytest.mark.parametrize('objectstore', ['bluestore'])
    def test_create_osd(self, objectstore):
        fake_module = MagicMock()
        fake_module.params = {'data': '/dev/sda',
                              'objectstore': objectstore,
                              'cluster': 'ceph', }
        expected = ['ceph-volume', '--cluster', 'ceph', 'lvm', 'create',
                    f'--{objectstore}', '--data', '/dev/sda']
        assert ceph_volume.prepare_or_create_osd(fake_module, "create", None) == expected

    @pytest.mark.parametrize('objectstore', ['bluestore'])
    def test_prepare_osd_container(self, objectstore):
        fake_module = MagicMock()
        fake_module.params = {'data': '/dev/sda',
                              'objectstore': objectstore,
                              'cluster': 'ceph', }
        image = "quay.io/ceph/daemon:latest"
        expected = get_container_cmd() + [image, '--cluster', 'ceph',
                                          'lvm', 'prepare',
                                          f'--{objectstore}',
                                          '--data', '/dev/sda']
        assert ceph_volume.prepare_or_create_osd(fake_module, "prepare", image) == expected

    @pytest.mark.parametrize('objectstore', ['bluestore'])
    def test_prepare_osd(self, objectstore):
        fake_module = MagicMock()
        fake_module.params = {'data': '/dev/sda',
                              'objectstore': objectstore,
                              'cluster': 'ceph', }
        expected = ['ceph-volume', '--cluster', 'ceph', 'lvm', 'prepare',
                    f'--{objectstore}', '--data', '/dev/sda']
        assert ceph_volume.prepare_or_create_osd(fake_module, "prepare", None) == expected

    @pytest.mark.parametrize('objectstore', ['bluestore'])
    def test_batch_osd_container(self, objectstore):
        fake_module = MagicMock()
        fake_module.params = {'data': '/dev/sda',
                              'objectstore': objectstore,
                              'block_db_size': '4096',
                              'journal_size': '4096',
                              'cluster': 'ceph',
                              'batch_devices': ["/dev/sda", "/dev/sdb"]}
        image = "quay.io/ceph/daemon:latest"
        expected = get_container_cmd() + [image, '--cluster', 'ceph',
                                          'lvm', 'batch',
                                          f'--{objectstore}',
                                          '--yes', '--prepare',
                                          '--block-db-size', '4096',
                                          '/dev/sda', '/dev/sdb']
        assert ceph_volume.batch(fake_module, image) == expected
{'data': '/dev/sda', + 'objectstore': objectstore, + 'block_db_size': '4096', + 'journal_size': '4096', + 'cluster': 'ceph', + 'batch_devices': ["/dev/sda", "/dev/sdb"]} + + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'batch', + '--%s' % objectstore, + '--yes', + '--block-db-size', + '4096', + '/dev/sda', + '/dev/sdb'] + result = ceph_volume.batch( + fake_module, fake_container_image) + assert result == expected_command_list + + def test_batch_bluestore_with_dedicated_db(self): + fake_module = MagicMock() + fake_module.params = {'objectstore': 'bluestore', + 'block_db_size': '-1', + 'cluster': 'ceph', + 'batch_devices': ["/dev/sda", "/dev/sdb"], + 'block_db_devices': ["/dev/sdc", "/dev/sdd"]} + + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'batch', + '--bluestore', + '--yes', + '/dev/sda', + '/dev/sdb', + '--db-devices', + '/dev/sdc', + '/dev/sdd'] + result = ceph_volume.batch( + fake_module, fake_container_image) + assert result == expected_command_list + + def test_batch_bluestore_with_dedicated_wal(self): + fake_module = MagicMock() + fake_module.params = {'objectstore': 'bluestore', + 'cluster': 'ceph', + 'block_db_size': '-1', + 'batch_devices': ["/dev/sda", "/dev/sdb"], + 'wal_devices': ["/dev/sdc", "/dev/sdd"]} + + fake_container_image = None + expected_command_list = ['ceph-volume', + '--cluster', + 'ceph', + 'lvm', + 'batch', + '--bluestore', + '--yes', + '/dev/sda', + '/dev/sdb', + '--wal-devices', + '/dev/sdc', + '/dev/sdd'] + result = ceph_volume.batch( + fake_module, fake_container_image) + assert result == expected_command_list + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_prepare_no_keyring_in_output(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({'data': '/dev/sda', + 'objectstore': 'bluestore', + 'cluster': 
'ceph', + 'action': 'prepare'}) + keyring = 'AQBqkhNhQDlqEhAAXKxu87L3Mh3mHY+agonKZA==' + m_exit_json.side_effect = ca_test_common.exit_json + list_rc = 0 + list_stderr = '' + list_stdout = '{}' + prepare_rc = 0 + prepare_stderr = """ + Running command: /usr/bin/ceph-authtool --gen-print-key + Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 + Running command: /usr/bin/chown -h ceph:ceph /dev/test_group/data-lv1 + Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 + Running command: /usr/bin/ln -s /dev/test_group/data-lv1 /var/lib/ceph/osd/ceph-1/block + stderr: got monmap epoch 1 + Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-1/keyring --create-keyring --name osd.1 --add-key {} + stdout: creating /var/lib/ceph/osd/ceph-1/keyring + added entity osd.1 auth(key={}) +""".format(keyring, keyring) + prepare_stdout = '' + m_run_command.side_effect = [ + (list_rc, list_stdout, list_stderr), + (prepare_rc, prepare_stdout, prepare_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', 'ceph', 'lvm', 'prepare', '--bluestore', '--data', '/dev/sda'] + assert result['rc'] == 0 + assert keyring not in result['stderr'] + assert '*' * 8 in result['stderr'] + assert not result['stdout'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_batch_no_keyring_in_output(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({'batch_devices': ['/dev/sda'], + 'objectstore': 'bluestore', + 'cluster': 'ceph', + 'action': 'batch'}) + keyring = 'AQBUixJhnDF1NRAAhl2xrnmOHCCI/T+W6FjqmA==' + m_exit_json.side_effect = ca_test_common.exit_json + report_rc = 0 + report_stderr = '' + report_stdout = '[{"data": "/dev/sda", "data_size": "50.00 GB", "encryption": "None"}]' + batch_rc = 0 + batch_stderr 
= """ + Running command: /usr/bin/ceph-authtool --gen-print-key + Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 + Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-863337c4-bef9-4b96-aaac-27cde8c42b8f/osd-block-b1d1036f-0d6e-493b-9d1a-6f6b96df64b1 + Running command: /usr/bin/chown -R ceph:ceph /dev/mapper/ceph--863337c4--bef9--4b96--aaac--27cde8c42b8f-osd--block--b1d1036f--0d6e--493b--9d1a--6f6b96df64b1 + Running command: /usr/bin/ln -s /dev/ceph-863337c4-bef9-4b96-aaac-27cde8c42b8f/osd-block-b1d1036f-0d6e-493b-9d1a-6f6b96df64b1 /var/lib/ceph/osd/ceph-0/block + stderr: got monmap epoch 1 + Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key {} + stdout: creating /var/lib/ceph/osd/ceph-0/keyring + added entity osd.0 auth(key={}) +""".format(keyring, keyring) + batch_stdout = '' + m_run_command.side_effect = [ + (report_rc, report_stdout, report_stderr), + (batch_rc, batch_stdout, batch_stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', 'ceph', 'lvm', 'batch', '--bluestore', '--yes', '/dev/sda'] + assert result['rc'] == 0 + assert keyring not in result['stderr'] + assert '*' * 8 in result['stderr'] + assert not result['stdout'] diff --git a/tests/library/test_ceph_volume_simple_activate.py b/tests/library/test_ceph_volume_simple_activate.py new file mode 100644 index 0000000..8eb1707 --- /dev/null +++ b/tests/library/test_ceph_volume_simple_activate.py @@ -0,0 +1,174 @@ +from mock.mock import patch +import os +import pytest +import ca_test_common +import ceph_volume_simple_activate + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_id = '42' +fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52' +fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, 
fake_uuid) + + +class TestCephVolumeSimpleActivateModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'osd_id': fake_id, + 'osd_fsid': fake_uuid, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'osd_id': fake_id, + 'osd_fsid': fake_uuid + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'error' + rc = 2 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid] + assert result['rc'] == rc + assert result['stderr'] == stderr + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_activate_all_osds(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'osd_all': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] 
+ assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.object(os.path, 'exists', return_value=True) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.object(os.path, 'exists', return_value=False) + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_activate_path_not_exists(self, m_fail_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['msg'] == '{} does not exist'.format(fake_path) + assert result['rc'] == 1 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_activate_without_systemd(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'osd_id': fake_id, + 'osd_fsid': fake_uuid, + 'systemd': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + 
m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_activate_with_container(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'osd_id': fake_id, + 'osd_fsid': fake_uuid, + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_activate.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == [fake_container_binary, + 'run', '--rm', '--privileged', + '--ipc=host', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '-v', '/run/lvm/:/run/lvm/', + '-v', '/run/lock/lvm/:/run/lock/lvm/', + '--entrypoint=ceph-volume', fake_container_image, + '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout diff --git a/tests/library/test_ceph_volume_simple_scan.py b/tests/library/test_ceph_volume_simple_scan.py new file mode 100644 index 0000000..f43dec4 --- /dev/null +++ b/tests/library/test_ceph_volume_simple_scan.py @@ -0,0 +1,166 @@ +from mock.mock import patch +import os +import pytest 
+import ca_test_common +import ceph_volume_simple_scan + +fake_cluster = 'ceph' +fake_container_binary = 'podman' +fake_container_image = 'quay.io/ceph/daemon:latest' +fake_path = '/var/lib/ceph/osd/ceph-0' + + +class TestCephVolumeSimpleScanModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan'] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'error' + rc = 2 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan'] + assert result['rc'] == rc + assert result['stderr'] == stderr + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_scan_all_osds(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = 
result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.object(os.path, 'exists', return_value=True) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_scan_path_exists(self, m_run_command, m_exit_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', fake_path] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.object(os.path, 'exists', return_value=False) + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_scan_path_not_exists(self, m_fail_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path + }) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['msg'] == '{} does not exist'.format(fake_path) + assert result['rc'] == 1 + + @patch.object(os.path, 'exists', return_value=True) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_scan_path_stdout_force(self, m_run_command, m_exit_json, m_os_path): + ca_test_common.set_module_args({ + 'path': fake_path, + 'force': True, + 'stdout': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + 
stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', '--force', '--stdout', fake_path] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_scan_with_container(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + ceph_volume_simple_scan.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == [fake_container_binary, + 'run', '--rm', '--privileged', + '--ipc=host', '--net=host', + '-v', '/etc/ceph:/etc/ceph:z', + '-v', '/var/lib/ceph/:/var/lib/ceph/:z', + '-v', '/var/log/ceph/:/var/log/ceph/:z', + '-v', '/run/lvm/:/run/lvm/', + '-v', '/run/lock/lvm/:/run/lock/lvm/', + '--entrypoint=ceph-volume', fake_container_image, + '--cluster', fake_cluster, 'simple', 'scan'] + assert result['rc'] == rc + assert result['stderr'] == stderr + assert result['stdout'] == stdout diff --git a/tests/library/test_cephadm_adopt.py b/tests/library/test_cephadm_adopt.py new file mode 100644 index 0000000..36e3bbf --- /dev/null +++ b/tests/library/test_cephadm_adopt.py @@ -0,0 +1,208 @@ +from mock.mock import patch +import pytest +import ca_test_common +import cephadm_adopt + +fake_cluster = 'ceph' +fake_image = 
'quay.io/ceph/daemon-base:latest' +fake_name = 'mon.foo01' + + +class TestCephadmAdoptModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_without_parameters(self, m_fail_json): + ca_test_common.set_module_args({}) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['msg'] == 'missing required arguments: name' + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + '_ansible_check_mode': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['cephadm', 'ls', '--no-detail'] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_fail_json): + ca_test_common.set_module_args({ + 'name': fake_name + }) + m_fail_json.side_effect = ca_test_common.fail_json + stdout = '' + stderr = 'ERROR: cephadm should be run as root' + rc = 1 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['rc'] == 1 + assert result['msg'] == 'ERROR: cephadm should be run as root' + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_default_values(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name + }) + m_exit_json.side_effect = ca_test_common.exit_json + 
stdout = 'Stopping old systemd unit ceph-mon@{}...\n' \ + 'Disabling old systemd unit ceph-mon@{}...\n' \ + 'Moving data...\n' \ + 'Chowning content...\n' \ + 'Moving logs...\n' \ + 'Creating new units...\n' \ + 'firewalld ready'.format(fake_name, fake_name) + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == stdout + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_already_adopted(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name + }) + m_exit_json.side_effect = ca_test_common.exit_json + stderr = '' + stdout = '[{{"style":"cephadm:v1","name":"{}"}}]'.format(fake_name) + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['cephadm', 'ls', '--no-detail'] + assert result['rc'] == 0 + assert result['stderr'] == stderr + assert result['stdout'] == '{} is already adopted'.format(fake_name) + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_docker(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'docker': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, 
'[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', '--docker', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_custom_image(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'image': fake_image + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', '--image', fake_image, 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_without_pull(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'pull': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy', '--skip-pull'] + assert result['rc'] == 0 + + 
@patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_without_firewalld(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'name': fake_name, + 'firewalld': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.side_effect = [ + (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''), + (rc, stdout, stderr) + ] + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_adopt.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy', '--skip-firewalld'] + assert result['rc'] == 0 diff --git a/tests/library/test_cephadm_bootstrap.py b/tests/library/test_cephadm_bootstrap.py new file mode 100644 index 0000000..80a0e12 --- /dev/null +++ b/tests/library/test_cephadm_bootstrap.py @@ -0,0 +1,304 @@ +from mock.mock import patch +import pytest +import ca_test_common +import cephadm_bootstrap + +fake_fsid = '0f1e0605-db0b-485c-b366-bd8abaa83f3b' +fake_image = 'quay.io/ceph/ceph:v19' +fake_ip = '192.168.42.1' +fake_registry = 'quay.io' +fake_registry_user = 'foo' +fake_registry_pass = 'bar' +fake_registry_json = 'registry.json' + + +class TestCephadmBootstrapModule(object): + + @patch('ansible.module_utils.basic.AnsibleModule.fail_json') + def test_without_parameters(self, m_fail_json): + ca_test_common.set_module_args({}) + m_fail_json.side_effect = ca_test_common.fail_json + + with pytest.raises(ca_test_common.AnsibleFailJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['msg'] == 'missing required arguments: mon_ip' + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + def test_with_check_mode(self, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + '_ansible_check_mode': True + }) + 
m_exit_json.side_effect = ca_test_common.exit_json + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert not result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip] + assert result['rc'] == 0 + assert not result['stdout'] + assert not result['stderr'] + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_failure(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = 'ERROR: cephadm should be run as root' + rc = 1 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip] + assert result['rc'] == 1 + assert result['stderr'] == 'ERROR: cephadm should be run as root' + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_default_values(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = 'Bootstrap complete.' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip] + assert result['rc'] == 0 + assert result['stdout'] == 'Bootstrap complete.' 
+ + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_docker(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'docker': True + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', '--docker', 'bootstrap', '--mon-ip', fake_ip] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_custom_image(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'image': fake_image + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', '--image', fake_image, 'bootstrap', '--mon-ip', fake_ip] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_custom_fsid(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'fsid': fake_fsid + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', 
fake_ip, '--fsid', fake_fsid] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_without_pull(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'pull': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--skip-pull'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_dashboard_user_password(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'dashboard': True, + 'dashboard_user': 'foo', + 'dashboard_password': 'bar' + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--initial-dashboard-user', 'foo', '--initial-dashboard-password', 'bar'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_without_dashboard(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'dashboard': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with 
pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--skip-dashboard'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_without_monitoring(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'monitoring': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--skip-monitoring-stack'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_without_firewalld(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'firewalld': False + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--skip-firewalld'] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_registry_credentials(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'registry_url': fake_registry, + 'registry_username': fake_registry_user, + 
'registry_password': fake_registry_pass + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, + '--registry-url', fake_registry, + '--registry-username', fake_registry_user, + '--registry-password', fake_registry_pass] + assert result['rc'] == 0 + + @patch('ansible.module_utils.basic.AnsibleModule.exit_json') + @patch('ansible.module_utils.basic.AnsibleModule.run_command') + def test_with_registry_json_file(self, m_run_command, m_exit_json): + ca_test_common.set_module_args({ + 'mon_ip': fake_ip, + 'registry_json': fake_registry_json + }) + m_exit_json.side_effect = ca_test_common.exit_json + stdout = '' + stderr = '' + rc = 0 + m_run_command.return_value = rc, stdout, stderr + + with pytest.raises(ca_test_common.AnsibleExitJson) as result: + cephadm_bootstrap.main() + + result = result.value.args[0] + assert result['changed'] + assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, + '--registry-json', fake_registry_json] + assert result['rc'] == 0 diff --git a/tests/library/test_radosgw_caps.py b/tests/library/test_radosgw_caps.py new file mode 100644 index 0000000..1fd7a16 --- /dev/null +++ b/tests/library/test_radosgw_caps.py @@ -0,0 +1,101 @@ +import os +import sys +from mock.mock import patch, MagicMock +import pytest + +sys.path.append("./library") +import radosgw_caps # noqa: E402 + + +fake_binary = "radosgw-admin" +fake_cluster = "ceph" +fake_container_binary = "podman" +fake_container_image = "docker.io/ceph/daemon:latest" +fake_container_cmd = [ + fake_container_binary, + "run", + "--rm", + "--net=host", + "-v", + "/etc/ceph:/etc/ceph:z", + "-v", + "/var/lib/ceph/:/var/lib/ceph/:z", + "-v", + "/var/log/ceph/:/var/log/ceph/:z", + 
fake_user = "foo"
fake_caps = ["users=write", "zone=*", "metadata=read,write"]
fake_params = {
    "cluster": fake_cluster,
    "name": fake_user,
    "caps": fake_caps,
}


class TestRadosgwCapsModule(object):
    """Unit tests for the command lines built by the radosgw_caps module."""

    @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary})
    def test_container_exec(self):
        """container_exec() wraps the binary in the container run prefix."""
        cmd = radosgw_caps.container_exec(fake_binary, fake_container_image)
        assert cmd == fake_container_cmd

    def test_not_is_containerized(self):
        """Without CEPH_CONTAINER_IMAGE in the env, no image is detected."""
        assert radosgw_caps.is_containerized() is None

    @patch.dict(os.environ, {"CEPH_CONTAINER_IMAGE": fake_container_image})
    def test_is_containerized(self):
        """CEPH_CONTAINER_IMAGE in the env is returned as the image."""
        assert radosgw_caps.is_containerized() == fake_container_image

    @pytest.mark.parametrize("image", [None, fake_container_image])
    @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary})
    def test_pre_generate_radosgw_cmd(self, image):
        """The command prefix is the container wrapper or the bare binary."""
        if image:
            expected_cmd = fake_container_cmd
        else:
            expected_cmd = [fake_binary]

        assert radosgw_caps.pre_generate_radosgw_cmd(image) == expected_cmd

    @pytest.mark.parametrize("image", [None, fake_container_image])
    @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary})
    def test_generate_radosgw_cmd(self, image):
        """generate_radosgw_cmd() appends the cluster options and 'caps'."""
        if image:
            # Copy the shared fixture: extend() below would otherwise mutate
            # the module-level fake_container_cmd and leak the extra elements
            # into any test that runs after this one.
            expected_cmd = list(fake_container_cmd)
        else:
            expected_cmd = [fake_binary]

        expected_cmd.extend(["--cluster", fake_cluster, "caps"])
        assert (
            radosgw_caps.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd
        )

    def test_add_caps(self):
        """add_caps builds 'caps add' with the uid and ';'-joined caps."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            "--cluster",
            fake_cluster,
            "caps",
            "add",
            "--uid=" + fake_user,
            "--caps=" + ";".join(fake_caps),
        ]

        assert radosgw_caps.add_caps(fake_module) == expected_cmd

    def test_remove_caps(self):
        """remove_caps builds 'caps rm' with the uid and ';'-joined caps."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            "--cluster",
            fake_cluster,
            "caps",
            "rm",
            "--uid=" + fake_user,
            "--caps=" + ";".join(fake_caps),
        ]

        assert radosgw_caps.remove_caps(fake_module) == expected_cmd
radosgw_realm.pre_generate_radosgw_cmd(image) == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_radosgw_cmd(self, image): + if image: + expected_cmd = fake_container_cmd + else: + expected_cmd = [fake_binary] + + expected_cmd.extend([ + '--cluster', + fake_cluster, + 'realm' + ]) + assert radosgw_realm.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd + + def test_create_realm(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'create', + '--rgw-realm=' + fake_realm, + '--default' + ] + + assert radosgw_realm.create_realm(fake_module) == expected_cmd + + def test_get_realm(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'get', + '--rgw-realm=' + fake_realm, + '--format=json' + ] + + assert radosgw_realm.get_realm(fake_module) == expected_cmd + + def test_remove_realm(self): + fake_module = MagicMock() + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'delete', + '--rgw-realm=' + fake_realm + ] + + assert radosgw_realm.remove_realm(fake_module) == expected_cmd + + def test_pull_realm(self): + fake_module = MagicMock() + fake_params.update({'url': fake_url, 'access_key': fake_access_key, 'secret_key': fake_secret_key}) + fake_module.params = fake_params + expected_cmd = [ + fake_binary, + '--cluster', fake_cluster, + 'realm', 'pull', + '--rgw-realm=' + fake_realm, + '--url=' + fake_url, + '--access-key=' + fake_access_key, + '--secret=' + fake_secret_key, + '--default' + ] + + assert radosgw_realm.pull_realm(fake_module) == expected_cmd diff --git a/tests/library/test_radosgw_user.py b/tests/library/test_radosgw_user.py new file mode 100644 index 0000000..374c512 --- /dev/null +++ 
fake_binary = 'radosgw-admin'
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'docker.io/ceph/daemon:latest'
fake_container_cmd = [
    fake_container_binary,
    'run',
    '--rm',
    '--net=host',
    '-v', '/etc/ceph:/etc/ceph:z',
    '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
    '-v', '/var/log/ceph/:/var/log/ceph/:z',
    '--entrypoint=' + fake_binary,
    fake_container_image
]
fake_user = 'foo'
fake_realm = 'canada'
fake_zonegroup = 'quebec'
fake_zone = 'montreal'
fake_params = {'cluster': fake_cluster,
               'name': fake_user,
               'display_name': fake_user,
               'email': fake_user,
               'access_key': 'PC7NPg87QWhOzXTkXIhX',
               'secret_key': 'jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz',
               'realm': fake_realm,
               'zonegroup': fake_zonegroup,
               'zone': fake_zone,
               'system': True,
               'admin': True}


class TestRadosgwUserModule(object):
    """Unit tests for the command lines built by the radosgw_user module."""

    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_container_exec(self):
        """container_exec() wraps the binary in the container run prefix."""
        cmd = radosgw_user.container_exec(fake_binary, fake_container_image)
        assert cmd == fake_container_cmd

    def test_not_is_containerized(self):
        """Without CEPH_CONTAINER_IMAGE in the env, no image is detected."""
        assert radosgw_user.is_containerized() is None

    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
    def test_is_containerized(self):
        """CEPH_CONTAINER_IMAGE in the env is returned as the image."""
        assert radosgw_user.is_containerized() == fake_container_image

    @pytest.mark.parametrize('image', [None, fake_container_image])
    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_pre_generate_radosgw_cmd(self, image):
        """The command prefix is the container wrapper or the bare binary."""
        if image:
            expected_cmd = fake_container_cmd
        else:
            expected_cmd = [fake_binary]

        assert radosgw_user.pre_generate_radosgw_cmd(image) == expected_cmd

    @pytest.mark.parametrize('image', [None, fake_container_image])
    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_generate_radosgw_cmd(self, image):
        """generate_radosgw_cmd() appends the cluster options and 'user'."""
        if image:
            # Copy the shared fixture: extend() below would otherwise mutate
            # the module-level fake_container_cmd and leak the extra elements
            # into any test that runs after this one.
            expected_cmd = list(fake_container_cmd)
        else:
            expected_cmd = [fake_binary]

        expected_cmd.extend([
            '--cluster',
            fake_cluster,
            'user'
        ])
        assert radosgw_user.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd

    def test_create_user(self):
        """create_user builds 'user create' with identity, keys and flags."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'user', 'create',
            '--uid=' + fake_user,
            '--display_name=' + fake_user,
            '--email=' + fake_user,
            '--access-key=PC7NPg87QWhOzXTkXIhX',
            '--secret-key=jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone,
            '--system',
            '--admin'
        ]

        assert radosgw_user.create_user(fake_module) == expected_cmd

    def test_modify_user(self):
        """modify_user builds 'user modify' with the same option set."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'user', 'modify',
            '--uid=' + fake_user,
            '--display_name=' + fake_user,
            '--email=' + fake_user,
            '--access-key=PC7NPg87QWhOzXTkXIhX',
            '--secret-key=jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone,
            '--system',
            '--admin'
        ]

        assert radosgw_user.modify_user(fake_module) == expected_cmd

    def test_get_user(self):
        """get_user builds 'user info' with JSON output and placement opts."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'user', 'info',
            '--uid=' + fake_user,
            '--format=json',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone
        ]

        assert radosgw_user.get_user(fake_module) == expected_cmd

    def test_remove_user(self):
        """remove_user builds 'user rm' with the uid and placement opts."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'user', 'rm',
            '--uid=' + fake_user,
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone
        ]

        assert radosgw_user.remove_user(fake_module) == expected_cmd

    def test_caps_add(self):
        """caps_add joins the given caps with ';' into a single --caps arg."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'caps', 'add',
            '--uid=' + fake_user,
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone,
            '--caps=metadata=read;buckets=read'
        ]

        caps = [
            {
                'type': 'metadata',
                'perm': 'read',
            },
            {
                'type': 'buckets',
                'perm': 'read',
            },
        ]

        assert radosgw_user.caps_add(fake_module, caps) == expected_cmd

    def test_caps_rm(self):
        """caps_rm builds 'caps rm' with the same ';'-joined caps arg."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'caps', 'rm',
            '--uid=' + fake_user,
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone,
            '--caps=metadata=read;buckets=read'
        ]

        caps = [
            {
                'type': 'metadata',
                'perm': 'read',
            },
            {
                'type': 'buckets',
                'perm': 'read',
            },
        ]

        assert radosgw_user.caps_rm(fake_module, caps) == expected_cmd
fake_zonegroup = 'bar'
fake_zone = 'z1'
fake_endpoints = ['http://192.168.1.10:8080', 'http://192.168.1.11:8080']
fake_params = {'cluster': fake_cluster,
               'name': fake_zone,
               'realm': fake_realm,
               'zonegroup': fake_zonegroup,
               'endpoints': fake_endpoints,
               'default': True,
               'master': True}


class TestRadosgwZoneModule(object):
    """Unit tests for the command lines built by the radosgw_zone module."""

    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_container_exec(self):
        """container_exec() wraps the binary in the container run prefix."""
        cmd = radosgw_zone.container_exec(fake_binary, fake_container_image)
        assert cmd == fake_container_cmd

    def test_not_is_containerized(self):
        """Without CEPH_CONTAINER_IMAGE in the env, no image is detected."""
        assert radosgw_zone.is_containerized() is None

    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
    def test_is_containerized(self):
        """CEPH_CONTAINER_IMAGE in the env is returned as the image."""
        assert radosgw_zone.is_containerized() == fake_container_image

    @pytest.mark.parametrize('image', [None, fake_container_image])
    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_pre_generate_radosgw_cmd(self, image):
        """The command prefix is the container wrapper or the bare binary."""
        if image:
            expected_cmd = fake_container_cmd
        else:
            expected_cmd = [fake_binary]

        assert radosgw_zone.pre_generate_radosgw_cmd(image) == expected_cmd

    @pytest.mark.parametrize('image', [None, fake_container_image])
    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_generate_radosgw_cmd(self, image):
        """generate_radosgw_cmd() appends the cluster options and 'zone'."""
        if image:
            # Copy the shared fixture: extend() below would otherwise mutate
            # the module-level fake_container_cmd and leak the extra elements
            # into any test that runs after this one.
            expected_cmd = list(fake_container_cmd)
        else:
            expected_cmd = [fake_binary]

        expected_cmd.extend([
            '--cluster',
            fake_cluster,
            'zone'
        ])
        assert radosgw_zone.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd

    @pytest.mark.parametrize('image', [fake_container_image])
    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_generate_radosgw_cmd_container_args(self, image):
        """Extra container args are inserted before the standard volumes."""
        container_args = [
            '-v', '/test:/test:ro',
        ]
        expected_cmd = [
            fake_container_binary,
            'run',
            '--rm',
            '--net=host',
            '-v', '/test:/test:ro',
            '-v', '/etc/ceph:/etc/ceph:z',
            '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
            '-v', '/var/log/ceph/:/var/log/ceph/:z',
            '--entrypoint=' + fake_binary,
            fake_container_image
        ]

        expected_cmd.extend([
            '--cluster',
            fake_cluster,
            'zone'
        ])
        assert radosgw_zone.generate_radosgw_cmd(fake_cluster, [], image, container_args) == expected_cmd

    def test_create_zone(self):
        """create_zone includes placement, endpoints and default/master."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zone', 'create',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone,
            '--endpoints=' + ','.join(fake_endpoints),
            '--default',
            '--master'
        ]

        assert radosgw_zone.create_zone(fake_module) == expected_cmd

    def test_modify_zone(self):
        """modify_zone builds 'zone modify' with the same option set."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zone', 'modify',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone,
            '--endpoints=' + ','.join(fake_endpoints),
            '--default',
            '--master'
        ]

        assert radosgw_zone.modify_zone(fake_module) == expected_cmd

    def test_get_zone(self):
        """get_zone builds 'zone get' with JSON output requested."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zone', 'get',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone,
            '--format=json'
        ]

        assert radosgw_zone.get_zone(fake_module) == expected_cmd

    def test_get_zonegroup(self):
        """get_zonegroup builds 'zonegroup get' with JSON output."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zonegroup', 'get',
            '--rgw-zone=' + fake_zone,
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--format=json'
        ]

        assert radosgw_zone.get_zonegroup(fake_module) == expected_cmd

    def test_get_realm(self):
        """get_realm builds 'realm get' with JSON output."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'realm', 'get',
            '--rgw-realm=' + fake_realm,
            '--format=json'
        ]

        assert radosgw_zone.get_realm(fake_module) == expected_cmd

    def test_remove_zone(self):
        """remove_zone builds 'zone delete' with the placement options."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zone', 'delete',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--rgw-zone=' + fake_zone
        ]

        assert radosgw_zone.remove_zone(fake_module) == expected_cmd

    def test_set_zone(self):
        """set_zone builds 'zone set' reading the doc from a tmpdir infile."""
        fake_module = MagicMock()
        fake_module.params = {
            'cluster': fake_cluster,
            'name': fake_zone,
            'realm': fake_realm,
            'zonegroup': fake_zonegroup,
            'zone_doc': {'id': 'fake_id'},
        }

        zonefile = fake_module.tmpdir + '/zone.json'

        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zone', 'set',
            '--rgw-realm=' + fake_realm,
            '--infile=' + zonefile,
        ]

        assert radosgw_zone.set_zone(fake_module) == expected_cmd
class TestRadosgwZonegroupModule(object):
    """Unit tests for the command lines built by the radosgw_zonegroup module."""

    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_container_exec(self):
        """container_exec() wraps the binary in the container run prefix."""
        cmd = radosgw_zonegroup.container_exec(fake_binary, fake_container_image)
        assert cmd == fake_container_cmd

    def test_not_is_containerized(self):
        """Without CEPH_CONTAINER_IMAGE in the env, no image is detected."""
        assert radosgw_zonegroup.is_containerized() is None

    @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
    def test_is_containerized(self):
        """CEPH_CONTAINER_IMAGE in the env is returned as the image."""
        assert radosgw_zonegroup.is_containerized() == fake_container_image

    @pytest.mark.parametrize('image', [None, fake_container_image])
    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_pre_generate_radosgw_cmd(self, image):
        """The command prefix is the container wrapper or the bare binary."""
        if image:
            expected_cmd = fake_container_cmd
        else:
            expected_cmd = [fake_binary]

        assert radosgw_zonegroup.pre_generate_radosgw_cmd(image) == expected_cmd

    @pytest.mark.parametrize('image', [None, fake_container_image])
    @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
    def test_generate_radosgw_cmd(self, image):
        """generate_radosgw_cmd() appends the cluster options and 'zonegroup'."""
        if image:
            # Copy the shared fixture: extend() below would otherwise mutate
            # the module-level fake_container_cmd and leak the extra elements
            # into any test that runs after this one.
            expected_cmd = list(fake_container_cmd)
        else:
            expected_cmd = [fake_binary]

        expected_cmd.extend([
            '--cluster',
            fake_cluster,
            'zonegroup'
        ])
        assert radosgw_zonegroup.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd

    def test_create_zonegroup(self):
        """create_zonegroup includes realm, endpoints and default/master."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zonegroup', 'create',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--endpoints=' + ','.join(fake_endpoints),
            '--default',
            '--master'
        ]

        assert radosgw_zonegroup.create_zonegroup(fake_module) == expected_cmd

    def test_modify_zonegroup(self):
        """modify_zonegroup builds 'zonegroup modify' with the same options."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zonegroup', 'modify',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--endpoints=' + ','.join(fake_endpoints),
            '--default',
            '--master'
        ]

        assert radosgw_zonegroup.modify_zonegroup(fake_module) == expected_cmd

    def test_get_zonegroup(self):
        """get_zonegroup builds 'zonegroup get' with JSON output."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zonegroup', 'get',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup,
            '--format=json'
        ]

        assert radosgw_zonegroup.get_zonegroup(fake_module) == expected_cmd

    def test_get_realm(self):
        """get_realm builds 'realm get' with JSON output."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'realm', 'get',
            '--rgw-realm=' + fake_realm,
            '--format=json'
        ]

        assert radosgw_zonegroup.get_realm(fake_module) == expected_cmd

    def test_remove_zonegroup(self):
        """remove_zonegroup builds 'zonegroup delete' for the named group."""
        fake_module = MagicMock()
        fake_module.params = fake_params
        expected_cmd = [
            fake_binary,
            '--cluster', fake_cluster,
            'zonegroup', 'delete',
            '--rgw-realm=' + fake_realm,
            '--rgw-zonegroup=' + fake_zonegroup
        ]

        assert radosgw_zonegroup.remove_zonegroup(fake_module) == expected_cmd
{'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_container_exec(self): + cmd = ca_common.container_exec(self.fake_binary, fake_container_image) + assert cmd == self.fake_container_cmd + + def test_not_is_containerized(self): + assert ca_common.is_containerized() is None + + @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) + def test_is_containerized(self): + assert ca_common.is_containerized() == fake_container_image + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_pre_generate_cmd(self, image): + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + assert ca_common.pre_generate_cmd(self.fake_binary, image) == expected_cmd # noqa: E501 + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_cmd(self, image): + sub_cmd = ['osd', 'pool'] + args = ['create', 'foo'] + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + expected_cmd.extend([ + '-n', 'client.admin', + '-k', '/etc/ceph/ceph.client.admin.keyring', + '--cluster', + self.fake_cluster, + 'osd', 'pool', + 'create', 'foo' + ]) + assert ca_common.generate_cmd(sub_cmd=sub_cmd, args=args, cluster=self.fake_cluster, container_image=image) == expected_cmd # noqa: E501 + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_cmd_different_cluster_name(self, image): + sub_cmd = ['osd', 'pool'] + args = ['create', 'foo'] + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + expected_cmd.extend([ + '-n', 'client.admin', + '-k', '/etc/ceph/foo.client.admin.keyring', + '--cluster', + 'foo', + 'osd', 'pool', + 'create', 'foo' + ]) + 
result = ca_common.generate_cmd(sub_cmd=sub_cmd, args=args, cluster='foo', container_image=image) # noqa: E501 + assert result == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_cmd_different_cluster_name_and_user(self, image): + sub_cmd = ['osd', 'pool'] + args = ['create', 'foo'] + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + expected_cmd.extend([ + '-n', 'client.foo', + '-k', '/etc/ceph/foo.client.foo.keyring', + '--cluster', + 'foo', + 'osd', 'pool', + 'create', 'foo' + ]) + result = ca_common.generate_cmd(sub_cmd=sub_cmd, args=args, cluster='foo', user='client.foo', container_image=image) # noqa: E501 + assert result == expected_cmd + + @pytest.mark.parametrize('image', [None, fake_container_image]) + @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) + def test_generate_cmd_different_user(self, image): + sub_cmd = ['osd', 'pool'] + args = ['create', 'foo'] + if image: + expected_cmd = self.fake_container_cmd + else: + expected_cmd = [self.fake_binary] + + expected_cmd.extend([ + '-n', 'client.foo', + '-k', '/etc/ceph/ceph.client.foo.keyring', + '--cluster', + 'ceph', + 'osd', 'pool', + 'create', 'foo' + ]) + result = ca_common.generate_cmd(sub_cmd=sub_cmd, args=args, user='client.foo', container_image=image) # noqa: E501 + assert result == expected_cmd + + @pytest.mark.parametrize('stdin', [None, 'foo']) + def test_exec_command(self, stdin): + fake_module = MagicMock() + rc = 0 + stderr = '' + stdout = 'ceph version 1.2.3' + fake_module.run_command.return_value = 0, stdout, stderr + expected_cmd = [self.fake_binary, '--version'] + _rc, _cmd, _out, _err = ca_common.exec_command(fake_module, expected_cmd, stdin=stdin) # noqa: E501 + assert _rc == rc + assert _cmd == expected_cmd + assert _err == stderr + assert _out == stdout diff --git 
a/tests/plugins/filter/test_ipaddrs_in_ranges.py b/tests/plugins/filter/test_ipaddrs_in_ranges.py new file mode 100644 index 0000000..4b111e0 --- /dev/null +++ b/tests/plugins/filter/test_ipaddrs_in_ranges.py @@ -0,0 +1,63 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import AnsibleFilterError + +import ipaddrs_in_ranges +import pytest + +pytest.importorskip('netaddr') + +filter_plugin = ipaddrs_in_ranges.FilterModule() + + +class TestIpaddrsInRanges(object): + + def test_one_ip_one_range(self): + ips = ['10.10.10.1'] + ranges = ['10.10.10.1/24'] + result = filter_plugin.ips_in_ranges(ips, ranges) + assert ips[0] in result + assert len(result) == 1 + + def test_two_ip_one_range(self): + ips = ['192.168.1.1', '10.10.10.1'] + ranges = ['10.10.10.1/24'] + result = filter_plugin.ips_in_ranges(ips, ranges) + assert ips[0] not in result + assert ips[1] in result + assert len(result) == 1 + + def test_one_ip_two_ranges(self): + ips = ['10.10.10.1'] + ranges = ['192.168.1.0/24', '10.10.10.1/24'] + result = filter_plugin.ips_in_ranges(ips, ranges) + assert ips[0] in result + assert len(result) == 1 + + def test_multiple_ips_multiple_ranges(self): + ips = ['10.10.10.1', '192.168.1.1', '172.16.10.1'] + ranges = ['192.168.1.0/24', '10.10.10.1/24', '172.16.17.0/24'] + result = filter_plugin.ips_in_ranges(ips, ranges) + assert ips[0] in result + assert ips[1] in result + assert ips[2] not in result + assert len(result) == 2 + + def test_no_ips_in_ranges(self): + ips = ['10.10.20.1', '192.168.2.1', '172.16.10.1'] + ranges = ['192.168.1.0/24', '10.10.10.1/24', '172.16.17.0/24'] + result = filter_plugin.ips_in_ranges(ips, ranges) + assert len(result) == 0 + + def test_ips_in_ranges_in_filters_dict(self): + assert 'ips_in_ranges' in filter_plugin.filters() + + def test_missing_netaddr_module(self): + ipaddrs_in_ranges.netaddr = None + + with pytest.raises(AnsibleFilterError) as result: + filter_plugin.filters() + 
+ assert result.type == AnsibleFilterError + assert str(result.value) == "The ips_in_ranges filter requires python's netaddr be installed on the ansible controller." diff --git a/tests/pytest.ini b/tests/pytest.ini new file mode 100644 index 0000000..d4c1563 --- /dev/null +++ b/tests/pytest.ini @@ -0,0 +1,18 @@ +# this is just a placeholder so that we can define what the 'root' of the tests +# dir really is. +[pytest] +markers = + ceph_exporter: environment with ceph exporter enabled + ceph_crash: environment with ceph crash enabled + dashboard: environment with dashboard enabled + no_docker: environment without containers + docker: environment with containers + all: for all nodes + mdss: for mds nodes + mgrs: for mgr nodes + mons: for mon nodes + nfss: for nfs nodes + osds: for osd nodes + rbdmirrors: for rbdmirror nodes + rgws: for rgw nodes + grafanas: for grafana nodes diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 0000000..2ddd5e2 --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1,11 @@ +# These are Python requirements needed to run the functional tests +pytest-testinfra +pytest-xdist +pytest +ansible-core>=2.15,<2.17,!=2.9.10 +netaddr +mock +jmespath +pytest-rerunfailures +pytest-cov +setuptools diff --git a/tests/scripts/generate_ssh_config.sh b/tests/scripts/generate_ssh_config.sh new file mode 100644 index 0000000..feebaf6 --- /dev/null +++ b/tests/scripts/generate_ssh_config.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Generate a custom ssh config from Vagrant so that it can then be used by +# ansible.cfg + +path=$1 + +if [ $# -eq 0 ] + then + echo "A path to the scenario is required as an argument and it wasn't provided" + exit 1 +fi + +cd "$path" + +# Let's print vagrant status for debug purposes and to give the VMs a second to +# settle before asking vagrant for SSH config. 
+vagrant status || true + +n=0 +until [ "$n" -ge 5 ] +do + vagrant ssh-config > vagrant_ssh_config && break + n=$((n+1)) + echo "\`vagrant ssh-config\` failed. Retrying." + sleep 3 +done + +if [ "$n" -eq 5 ]; then + echo "\`vagrant ssh-config\` failed 5 times. This is a fatal error." + cat vagrant_ssh_config + exit 1 +fi diff --git a/tests/scripts/vagrant_up.sh b/tests/scripts/vagrant_up.sh new file mode 100644 index 0000000..05b1758 --- /dev/null +++ b/tests/scripts/vagrant_up.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -x +if [[ -n $1 ]]; then + DIRECTORY=$1 + shift +else + DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +fi +pushd "${DIRECTORY}" + +if [[ "${CEPH_ANSIBLE_VAGRANT_BOX}" =~ "centos/stream" ]]; then + EL_VERSION="${CEPH_ANSIBLE_VAGRANT_BOX: -1}" + LATEST_IMAGE="$(curl -s https://cloud.centos.org/centos/${EL_VERSION}-stream/x86_64/images/CHECKSUM | sed -nE 's/^SHA256.*\((.*-([0-9]+).*vagrant-libvirt.box)\).*$/\1/p' | sort -u | tail -n1)" + vagrant box remove "${CEPH_ANSIBLE_VAGRANT_BOX}" --all --force || true + vagrant box add --force --provider libvirt --name "${CEPH_ANSIBLE_VAGRANT_BOX}" "https://cloud.centos.org/centos/${EL_VERSION}-stream/x86_64/images/${LATEST_IMAGE}" --force +fi + +retries=0 +until [ $retries -ge 5 ] +do + echo "Attempting to start VMs. 
Attempts: $retries" + timeout 10m time vagrant up "$@" && break + retries=$((retries+1)) + sleep 5 +done + +sleep 10 +popd diff --git a/tests/scripts/workflows/defaults.sh b/tests/scripts/workflows/defaults.sh new file mode 100755 index 0000000..cb847bb --- /dev/null +++ b/tests/scripts/workflows/defaults.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +set -ex + +function git_diff_to_head { + git diff --diff-filter=MT --no-color origin/"${GITHUB_BASE_REF}"..HEAD +} + +function match_file { + git_diff_to_head | sed -n "s|^+++.*\\($1.*\\)|\\1|p" +} + +# group_vars / defaults +match_file "/defaults/main.yml" +nb=$(match_file "/defaults/main.yml" | wc -l) +if [[ "$nb" -eq 0 ]]; then + echo "group_vars has not been touched." +else + match_file "group_vars/" + nb_group_vars=$(match_file "group_vars/" | wc -l) + if [[ "$nb" -gt "$nb_group_vars" ]]; then + echo "One or more files containing default variables has/have been modified." + echo "You must run 'generate_group_vars_sample.sh' to generate the group_vars template files." + exit 1 + fi +fi + +# ceph_release_num[ceph_release] statements check +if match_file "roles/ceph-defaults/" | grep -E '^[<>+].*- ceph_release_num\[ceph_release\]'; then + echo "Do not use statements like '- ceph_release_num[ceph_release]' in ceph-defaults role!" + echo "'ceph_release' is only populated **after** the play of ceph-defaults, typically in ceph-common or ceph-docker-common." + exit 1 +fi +echo "No '- ceph_release_num[ceph_release]' statements found in ceph-defaults role!" diff --git a/tests/scripts/workflows/signed-off.sh b/tests/scripts/workflows/signed-off.sh new file mode 100755 index 0000000..4901fbc --- /dev/null +++ b/tests/scripts/workflows/signed-off.sh @@ -0,0 +1,9 @@ +#!/bin/bash +set -x + +if [[ "$(git log --oneline --no-merges origin/"${GITHUB_BASE_REF}"..HEAD | wc -l)" -ne "$(git log --no-merges origin/"${GITHUB_BASE_REF}"..HEAD | grep -c Signed-off-by)" ]]; then + echo "One or more commits is/are missing a Signed-off-by. 
Add it with 'git commit -s'." + exit 1 +else + echo "Sign-off ok!" +fi \ No newline at end of file diff --git a/tox-cephadm.ini b/tox-cephadm.ini new file mode 100644 index 0000000..f84357d --- /dev/null +++ b/tox-cephadm.ini @@ -0,0 +1,43 @@ +[tox] +envlist = centos-container-cephadm + +skipsdist = True + +[testenv] +allowlist_externals = + vagrant + bash + pip + rm +passenv=* +sitepackages=True +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_CALLBACK_ENABLED = profile_tasks + ANSIBLE_KEEP_REMOTE_FILES = 1 + ANSIBLE_CACHE_PLUGIN = memory + ANSIBLE_GATHERING = implicit + # only available for ansible >= 2.5 + ANSIBLE_STDOUT_CALLBACK = yaml + # Set the vagrant box image to use + CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + +deps= -r{toxinidir}/tests/requirements.txt +changedir= {toxinidir}/tests/functional/cephadm + +commands= + ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/cephadm.yml --extra-vars "\ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + ceph_repository=dev \ + " + + vagrant destroy -f diff --git a/tox-docker2podman.ini b/tox-docker2podman.ini new file mode 100644 index 0000000..33eb779 --- /dev/null +++ b/tox-docker2podman.ini @@ -0,0 +1,53 @@ +[tox] +envlist = centos-container-docker_to_podman + +skipsdist = True + +[testenv] +allowlist_externals = + vagrant + bash + pip + rm 
+passenv=* +sitepackages=True +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_CALLBACK_ENABLED = profile_tasks + ANSIBLE_KEEP_REMOTE_FILES = 1 + ANSIBLE_CACHE_PLUGIN = memory + ANSIBLE_GATHERING = implicit + # only available for ansible >= 2.5 + ANSIBLE_STDOUT_CALLBACK = yaml + # Set the vagrant box image to use + CEPH_ANSIBLE_VAGRANT_BOX = centos/7 + +deps= -r{toxinidir}/tests/requirements.txt +changedir= {toxinidir}/tests/functional/docker2podman + +commands= + ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + # configure lvm + ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml + + ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml + + ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/site-container.yml.sample --extra-vars "\ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/docker-to-podman.yml --extra-vars "\ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + " + + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + + vagrant destroy -f diff --git a/tox-external_clients.ini b/tox-external_clients.ini new file mode 100644 index 
0000000..b03f302 --- /dev/null +++ b/tox-external_clients.ini @@ -0,0 +1,85 @@ +[tox] +envlist = centos-{container,non_container}-external_clients + +skipsdist = True + +[testenv] +allowlist_externals = + vagrant + bash + git + pip +passenv=* +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_CALLBACK_ENABLED = profile_tasks + ANSIBLE_CACHE_PLUGIN = memory + ANSIBLE_GATHERING = implicit + # only available for ansible >= 2.5 + ANSIBLE_STDOUT_CALLBACK = yaml +# non_container: DEV_SETUP = True + # Set the vagrant box image to use + centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + + container: CONTAINER_DIR = /container + container: PLAYBOOK = site-container.yml.sample + non_container: PLAYBOOK = site.yml.sample + +deps= -r{toxinidir}/tests/requirements.txt +changedir={toxinidir}/tests/functional/external_clients{env:CONTAINER_DIR:} +commands= + ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + ansible-playbook -vv --diff -i {changedir}/inventory {toxinidir}/tests/functional/setup.yml + + # configure lvm + ansible-playbook -vv --diff -i {changedir}/inventory/hosts {toxinidir}/tests/functional/lvm_setup.yml + + non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch=main ceph_dev_sha1=latest" --tags "vagrant_setup" + ansible-playbook -vv --diff -i {changedir}/inventory/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' 
--extra-vars "\ + yes_i_know=true \ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_dev_branch=main \ + ceph_dev_sha1=latest \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + ansible-playbook -vv --diff -i {changedir}/inventory {toxinidir}/tests/functional/external_clients_admin_key.yml + + ansible-playbook -vv --diff -i {changedir}/inventory/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + yes_i_know=true \ + ireallymeanit=yes \ + fsid=40358a87-ab6e-4bdc-83db-1d909147861c \ + external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \ + generate_fsid=false \ + ceph_dev_branch=main \ + ceph_dev_sha1=latest \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf + + ansible-playbook -vv --diff -i {changedir}/inventory/external_clients-hosts {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\ + ireallymeanit=yes \ + fsid=40358a87-ab6e-4bdc-83db-1d909147861c \ + external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \ + generate_fsid=false \ + ceph_dev_branch=main \ + ceph_dev_sha1=latest \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf 
+ + vagrant destroy --force diff --git a/tox-podman.ini b/tox-podman.ini new file mode 100644 index 0000000..6dfd6f9 --- /dev/null +++ b/tox-podman.ini @@ -0,0 +1,60 @@ +[tox] +envlist = centos-container-podman + +skipsdist = True + +[testenv] +allowlist_externals = + vagrant + bash + pip + rm +passenv=* +sitepackages=True +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_CALLBACK_ENABLED = profile_tasks + ANSIBLE_KEEP_REMOTE_FILES = 1 + ANSIBLE_CACHE_PLUGIN = memory + ANSIBLE_GATHERING = implicit + # only available for ansible >= 2.5 + ANSIBLE_STDOUT_CALLBACK = yaml + # Set the vagrant box image to use + CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + + # Set the ansible inventory host file to be used according to which distrib we are running on + INVENTORY = {env:_INVENTORY:hosts} + PLAYBOOK = site-container.yml.sample + +deps= -r{toxinidir}/tests/requirements.txt +changedir= {toxinidir}/tests/functional/podman + +commands= + ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + # configure lvm + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + # test 
cluster state using ceph-ansible tests + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + + # reboot all vms + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml + + # retest to ensure cluster came back up correctly after rebooting + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + + vagrant destroy -f diff --git a/tox-rbdmirror.ini b/tox-rbdmirror.ini new file mode 100644 index 0000000..4790022 --- /dev/null +++ b/tox-rbdmirror.ini @@ -0,0 +1,97 @@ +[tox] +envlist = centos-{container,non_container}-rbdmirror + +skipsdist = True + +[testenv] +allowlist_externals = + vagrant + bash + git + pip +passenv=* +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_CALLBACK_WHITELIST = profile_tasks + ANSIBLE_CACHE_PLUGIN = memory + ANSIBLE_GATHERING = implicit + ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections + # only available for ansible >= 2.5 + ANSIBLE_KEEP_REMOTE_FILES = 1 + ANSIBLE_STDOUT_CALLBACK = yaml +# non_container: DEV_SETUP = True + # Set the vagrant box image to use + centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + + INVENTORY = {env:_INVENTORY:hosts} + container: CONTAINER_DIR = /container + container: PLAYBOOK = site-container.yml.sample + non_container: PLAYBOOK = site.yml.sample + container: CEPH_RBD_MIRROR_REMOTE_MON_HOSTS = 192.168.144.10 + non_container: CEPH_RBD_MIRROR_REMOTE_MON_HOSTS = 192.168.140.10 + + UPDATE_CEPH_DEV_BRANCH = main + 
UPDATE_CEPH_DEV_SHA1 = latest + ROLLING_UPDATE = True +deps= -r{toxinidir}/tests/requirements.txt +changedir={toxinidir}/tests/functional/rbdmirror{env:CONTAINER_DIR:} +commands= + ansible-galaxy install -r {toxinidir}/requirements.yml -v + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml + + # configure lvm + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + ceph_rbd_mirror_configure=true \ + ceph_rbd_mirror_pool=rbd \ + ceph_rbd_mirror_local_user_secret=AQC+eM1iKKBXFBAAVpunJvqpkodHSYmljCFCnw== \ + yes_i_know=true \ + ireallymeanit=yes \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir}/secondary --no-provision {posargs:--provider=virtualbox}" + bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary" + ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts 
{toxinidir}/tests/functional/setup.yml + ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir}/secondary ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" + ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/lvm_setup.yml + # ensure the rule isn't already present + ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent' + ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=present' + ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + yes_i_know=true \ + ceph_rbd_mirror_configure=true \ + ceph_rbd_mirror_pool=rbd \ + ceph_rbd_mirror_remote_user=client.rbd-mirror-peer \ + ceph_rbd_mirror_remote_mon_hosts={env:CEPH_RBD_MIRROR_REMOTE_MON_HOSTS} \ + ceph_rbd_mirror_remote_key=AQC+eM1iKKBXFBAAVpunJvqpkodHSYmljCFCnw== \ + ceph_rbd_mirror_remote_cluster=remote \ + ireallymeanit=yes \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/tests/functional/rbdmirror.yml --skip-tags=secondary --extra-vars "\ + ceph_rbd_mirror_pool=rbd \ 
+ " + ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rbdmirror.yml --skip-tags=primary -e 'ceph_rbd_mirror_pool=rbd' + vagrant destroy --force + bash -c "cd {changedir}/secondary && vagrant destroy --force" + # clean rule after the scenario is complete + ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent' + + diff --git a/tox-shrink_osd.ini b/tox-shrink_osd.ini new file mode 100644 index 0000000..5ab070d --- /dev/null +++ b/tox-shrink_osd.ini @@ -0,0 +1,121 @@ +[tox] +envlist = {centos}-{container,non_container}-{shrink_osd_single,shrink_osd_multiple} +skipsdist = True + +[shrink-osd-single] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill=0 \ + " + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill=1 \ + " + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill=2 \ + " + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill=3 \ + " + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill=4 \ + " + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill=5 \ + " + ansible-playbook -vv --diff -i 
{changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill=6 \ + " + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill=7 \ + " + +[shrink-osd-multiple] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill=0,1,2,3,4,5,6,7 \ + " + +[testenv] +allowlist_externals = + vagrant + bash +passenv=* +sitepackages=False +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_CALLBACK_ENABLED = profile_tasks + ANSIBLE_KEEP_REMOTE_FILES = 1 + ANSIBLE_CACHE_PLUGIN = memory + ANSIBLE_GATHERING = implicit + # only available for ansible >= 2.5 + ANSIBLE_STDOUT_CALLBACK = yaml + non_container: DEV_SETUP = True + # Set the vagrant box image to use + centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + INVENTORY = {env:_INVENTORY:hosts} + container: CONTAINER_DIR = /container + container: PLAYBOOK = site-container.yml.sample + container: PURGE_PLAYBOOK = purge-container-cluster.yml + non_container: PLAYBOOK = site.yml.sample + +deps= -r{toxinidir}/tests/requirements.txt +changedir= + shrink_osd_single: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:} + shrink_osd_multiple: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:} + + +commands= + ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} 
change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" + + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + # configure lvm + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + yes_i_know=true \ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + # test cluster state using ceph-ansible tests + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + + shrink_osd_single: {[shrink-osd-single]commands} + shrink_osd_multiple: {[shrink-osd-multiple]commands} + + # configure lvm + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit osds --extra-vars "\ + yes_i_know=true \ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + # retest to ensure OSDs are well redeployed + py.test 
--reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + + vagrant destroy --force diff --git a/tox-subset_update.ini b/tox-subset_update.ini new file mode 100644 index 0000000..d2d274b --- /dev/null +++ b/tox-subset_update.ini @@ -0,0 +1,123 @@ +[tox] +envlist = centos-{container,non_container}-subset_update + +skipsdist = True + +[testenv] +allowlist_externals = + vagrant + bash + git + pip +passenv=* +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_CALLBACK_ENABLED = profile_tasks + ANSIBLE_CACHE_PLUGIN = memory + ANSIBLE_GATHERING = implicit + # only available for ansible >= 2.5 + ANSIBLE_STDOUT_CALLBACK = yaml +# non_container: DEV_SETUP = True + # Set the vagrant box image to use + centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + + INVENTORY = {env:_INVENTORY:hosts} + container: CONTAINER_DIR = /container + container: PLAYBOOK = site-container.yml.sample + non_container: PLAYBOOK = site.yml.sample + + UPDATE_CEPH_DEV_BRANCH = main + UPDATE_CEPH_DEV_SHA1 = latest + ROLLING_UPDATE = True +deps= -r{toxinidir}/tests/requirements.txt +changedir={toxinidir}/tests/functional/subset_update{env:CONTAINER_DIR:} +commands= + ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml + + non_container: ansible-playbook -vv --diff 
-i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + yes_i_know=true \ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + +# upgrade mons +# mon1 + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit mon1 --tags=mons --extra-vars "\ + ireallymeanit=yes \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " +# mon0 and mon2 + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit 'mons:!mon1' --tags=mons --extra-vars "\ + ireallymeanit=yes \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " +# upgrade mgrs + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mgrs --extra-vars "\ + ireallymeanit=yes \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + 
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " +# upgrade osd1 + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit=osd1 --tags=osds --extra-vars "\ + ireallymeanit=yes \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " +# upgrade remaining osds (serially) + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit='osds:!osd1' --tags=osds --extra-vars "\ + ireallymeanit=yes \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " +# upgrade rgws + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=rgws --extra-vars "\ + ireallymeanit=yes \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " +# post upgrade actions + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=post_upgrade --extra-vars "\ + ireallymeanit=yes \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + + bash -c 
"CEPH_STABLE_RELEASE=squid py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests" + + vagrant destroy --force diff --git a/tox-update.ini b/tox-update.ini new file mode 100644 index 0000000..9a7590d --- /dev/null +++ b/tox-update.ini @@ -0,0 +1,81 @@ +[tox] +envlist = centos-{container,non_container}-update + +skipsdist = True + +[testenv] +allowlist_externals = + vagrant + bash + git + pip +passenv=* +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_CALLBACK_ENABLED = profile_tasks + ANSIBLE_CACHE_PLUGIN = memory + ANSIBLE_GATHERING = implicit + # only available for ansible >= 2.5 + ANSIBLE_STDOUT_CALLBACK = yaml +# non_container: DEV_SETUP = True + # Set the vagrant box image to use + centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + + INVENTORY = {env:_INVENTORY:hosts} + container: CONTAINER_DIR = /container + container: PLAYBOOK = site-container.yml.sample + non_container: PLAYBOOK = site.yml.sample + + UPDATE_CEPH_DEV_BRANCH = main + UPDATE_CEPH_DEV_SHA1 = latest + ROLLING_UPDATE = True +deps= -r{toxinidir}/tests/requirements.txt +changedir={toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:} +commands= + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml + +# # use the stable-7.0 branch to 
deploy an octopus cluster +# git clone -b stable-7.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible +# pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt +# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml' +# # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file) +# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm"' + + # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file) + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2' + + non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + yes_i_know=true \ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + +# pip uninstall -y ansible +# pip install -r {toxinidir}/tests/requirements.txt +# ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + + 
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\ + ireallymeanit=yes \ + ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + bash -c "CEPH_STABLE_RELEASE=squid py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests" + + vagrant destroy --force diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..d0f6fe6 --- /dev/null +++ b/tox.ini @@ -0,0 +1,400 @@ +[tox] +envlist = {centos,ubuntu,rocky}-{container,non_container}-{all_daemons,all_daemons_ipv6,collocation,lvm_osds,shrink_mon,shrink_mgr,shrink_mds,shrink_rbdmirror,shrink_rgw,lvm_batch,add_mons,add_mgrs,add_mdss,add_rbdmirrors,add_rgws,purge,storage_inventory,lvm_auto_discovery,all_in_one,cephadm_adopt,purge_dashboard} + centos-non_container-{switch_to_containers} + infra_lv_create + migrate_ceph_disk_to_ceph_volume + flake8 + +skipsdist = True + +# a test scenario for the lv-create.yml and lv-teardown playbooks +[testenv:infra_lv_create] +allowlist_externals = + vagrant + bash + mkdir + cat +passenv=* +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions + ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback + ANSIBLE_CALLBACK_WHITELIST = profile_tasks + # only available for ansible >= 2.5 + ANSIBLE_STDOUT_CALLBACK = yaml +deps= -r{toxinidir}/tests/requirements.txt +changedir={toxinidir}/tests/functional/infra_lv_create +commands= + bash 
{toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-create.yml + + ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-teardown.yml --extra-vars "ireallymeanit=yes" + + cat {toxinidir}/infrastructure-playbooks/lv-create.log + + vagrant destroy --force + +# extra commands for purging clusters +# that purge the cluster and then set it up again to +# ensure that a purge can clear nodes well enough that they +# can be redployed to. +[purge] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\ + ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \ + ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/ceph} \ + ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:v19} \ + " + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\ + ireallymeanit=yes \ + remove_packages=yes \ + ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \ + ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/ceph} \ + ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:v19} \ + " + + # re-setup lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file) + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2' + + # set up the cluster again + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\ + no_log_on_ceph_key_tasks=false \ + yes_i_know=true \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + 
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + # test that the cluster can be redeployed in a healthy state + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + +[purge-dashboard] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/purge-dashboard.yml --extra-vars "\ + ireallymeanit=yes \ + ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \ + ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/ceph} \ + ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:v19} \ + " + + # set up the cluster again + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\ + no_log_on_ceph_key_tasks=false \ + yes_i_know=true \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + # test that the cluster can be redeployed in a healthy state + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + +[purge-lvm] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\ + ireallymeanit=yes \ + remove_packages=yes \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} 
{toxinidir}/tests/functional/lvm_setup.yml + + # set up the cluster again + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + yes_i_know=true \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + " + # test that the cluster can be redeployed in a healthy state + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + +[shrink-mon] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mon.yml --extra-vars "\ + ireallymeanit=yes \ + mon_to_kill={env:MON_TO_KILL:mon2} \ + " +[shrink-osd] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\ + ireallymeanit=yes \ + osd_to_kill={env:OSD_TO_KILL:0} \ + " + +[shrink-mgr] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mgr.yml --extra-vars "\ + ireallymeanit=yes \ + mgr_to_kill={env:MGR_TO_KILL:mgr1} \ + " + +[shrink-mds] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mds.yml --extra-vars "\ + ireallymeanit=yes \ + mds_to_kill={env:MDS_TO_KILL:mds0} \ + " + +[shrink-rbdmirror] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rbdmirror.yml --extra-vars "\ + ireallymeanit=yes \ + rbdmirror_to_kill={env:RBDMIRROR_TO_KILL:rbd-mirror0} \ + " + +[shrink-rgw] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rgw.yml --extra-vars "\ + ireallymeanit=yes \ + rgw_to_kill={env:RGW_TO_KILL:rgw0.rgw0} \ + " + +[switch-to-containers] +commands= + 
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\ + ireallymeanit=yes \ + ceph_docker_image_tag=v19 \ + ceph_docker_registry=quay.io \ + ceph_docker_image=ceph/ceph \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml + + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + +[add-mons] +commands= + ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mon1 {toxinidir}/tests/functional/setup.yml + ansible-playbook -vv --diff -i {changedir}/hosts-2 {toxinidir}/infrastructure-playbooks/add-mon.yml --extra-vars "\ + ireallymeanit=yes \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + " + py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + +[add-mgrs] +commands= + ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml + ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + no_log_on_ceph_key_tasks=false \ + yes_i_know=true \ + ireallymeanit=yes \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + 
ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + +[add-mdss] +commands= + ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml + ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + no_log_on_ceph_key_tasks=false \ + yes_i_know=true \ + ireallymeanit=yes \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + +[add-rbdmirrors] +commands= + ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/tests/functional/setup.yml + ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + no_log_on_ceph_key_tasks=false \ + yes_i_know=true \ + ireallymeanit=yes \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + +[add-rgws] +commands= + 
ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rgws {toxinidir}/tests/functional/setup.yml + ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + no_log_on_ceph_key_tasks=false \ + yes_i_know=true \ + ireallymeanit=yes \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + +[storage-inventory] +commands= + ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/storage-inventory.yml --extra-vars "\ + ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:v19} \ + " + +[cephadm-adopt] +commands= + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/cephadm-adopt.yml --extra-vars "\ + ireallymeanit=yes \ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_repository=dev \ + " + # idempotency test + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/cephadm-adopt.yml --extra-vars "\ + ireallymeanit=yes \ + delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ + ceph_repository=dev \ + " + +[testenv] +allowlist_externals = + vagrant + bash + pip + rm +passenv=* +sitepackages=False +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_CONFIG = {toxinidir}/ansible.cfg + ANSIBLE_CALLBACKS_ENABLED = profile_tasks + ANSIBLE_KEEP_REMOTE_FILES = 1 + ANSIBLE_CACHE_PLUGIN = memory + ANSIBLE_GATHERING = implicit + ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections + # only available 
for ansible >= 2.5 + ANSIBLE_STDOUT_CALLBACK = yaml + non_container: DEV_SETUP = True + # Set the vagrant box image to use + ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = generic/ubuntu2204 + centos: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + rocky: CEPH_ANSIBLE_VAGRANT_BOX = rockylinux/9 + INVENTORY = {env:_INVENTORY:hosts} + container: CONTAINER_DIR = /container + container: PLAYBOOK = site-container.yml.sample + container: PURGE_PLAYBOOK = purge-container-cluster.yml + non_container: PLAYBOOK = site.yml.sample + shrink_mds: MDS_TO_KILL = mds0 + shrink_mgr: MGR_TO_KILL = mgr1 + shrink_mon: MON_TO_KILL = mon2 + shrink_rbdmirror: RBDMIRROR_TO_KILL = rbd-mirror0 + shrink_rgw: RGW_TO_KILL = rgw0.rgw0 + +deps= -r{toxinidir}/tests/requirements.txt +changedir= + all_daemons: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:} + all_daemons_ipv6: {toxinidir}/tests/functional/all_daemons_ipv6{env:CONTAINER_DIR:} + cluster: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:} + shrink_mon: {toxinidir}/tests/functional/shrink_mon{env:CONTAINER_DIR:} + shrink_mgn: {toxinidir}/tests/functional/shrink_mon{env:CONTAINER_DIR:} + shrink_mgr: {toxinidir}/tests/functional/shrink_mgr{env:CONTAINER_DIR:} + shrink_mds: {toxinidir}/tests/functional/shrink_mds{env:CONTAINER_DIR:} + shrink_rbdmirror: {toxinidir}/tests/functional/shrink_rbdmirror{env:CONTAINER_DIR:} + shrink_rgw: {toxinidir}/tests/functional/shrink_rgw{env:CONTAINER_DIR:} + # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker + collocation: {toxinidir}/tests/functional/collocation{env:CONTAINER_DIR:} + purge: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:} + purge_dashboard: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:} + switch_to_containers: {toxinidir}/tests/functional/all_daemons + lvm_osds: {toxinidir}/tests/functional/lvm-osds{env:CONTAINER_DIR:} + lvm_batch: {toxinidir}/tests/functional/lvm-batch{env:CONTAINER_DIR:} + add_mons: 
{toxinidir}/tests/functional/add-mons{env:CONTAINER_DIR:} + add_mgrs: {toxinidir}/tests/functional/add-mgrs{env:CONTAINER_DIR:} + add_mdss: {toxinidir}/tests/functional/add-mdss{env:CONTAINER_DIR:} + add_rbdmirrors: {toxinidir}/tests/functional/add-rbdmirrors{env:CONTAINER_DIR:} + add_rgws: {toxinidir}/tests/functional/add-rgws{env:CONTAINER_DIR:} + storage_inventory: {toxinidir}/tests/functional/lvm-osds{env:CONTAINER_DIR:} + lvm_auto_discovery: {toxinidir}/tests/functional/lvm-auto-discovery{env:CONTAINER_DIR:} + all_in_one: {toxinidir}/tests/functional/all-in-one{env:CONTAINER_DIR:} + cephadm_adopt: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:} + +commands= + ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" + + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + + # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file) + !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2' + lvm_osds: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd2' + all_in_one: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml + + ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + 
no_log_on_ceph_key_tasks=false \ + yes_i_know=true \ + ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ + ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ + ceph_docker_registry_auth=True \ + ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ + ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ + " + + # test cluster state using ceph-ansible tests + py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + + # reboot all vms + all_daemons,all_daemons_ipv6,collocation: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml + + # retest to ensure cluster came back up correctly after rebooting + all_daemons,all_daemons_ipv6,collocation: py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests + + # handlers/idempotency test + all_daemons,all_daemon_ipv6,all_in_one,collocation: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "no_log_on_ceph_key_tasks=false delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-main} ceph_docker_image={env:CEPH_DOCKER_IMAGE_BIS:ceph/daemon-base} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} yes_i_know=true" --extra-vars @ceph-override.json + + purge: {[purge]commands} + purge_dashboard: {[purge-dashboard]commands} + switch_to_containers: {[switch-to-containers]commands} + shrink_mon: {[shrink-mon]commands} + shrink_mgr: {[shrink-mgr]commands} + shrink_mds: {[shrink-mds]commands} + shrink_rbdmirror: {[shrink-rbdmirror]commands} + shrink_rgw: {[shrink-rgw]commands} + add_mons: {[add-mons]commands} + add_mgrs: 
{[add-mgrs]commands} + add_mdss: {[add-mdss]commands} + add_rbdmirrors: {[add-rbdmirrors]commands} + add_rgws: {[add-rgws]commands} + storage_inventory: {[storage-inventory]commands} + cephadm_adopt: {[cephadm-adopt]commands} + + vagrant destroy --force + +[testenv:flake8] +max-line-length = 100 +ignore = + E501, + W503, +exclude = + .tox \ + .vagrant \ + __pycache__ \ + *.pyc \ + templates \ + .eggs +statistics = True +deps = + flake8 +commands = + flake8 --max-line-length=160 ./library ./module_utils/ ./tests/library/ ./tests/module_utils/ ./tests/functional/tests/ {posargs} diff --git a/vagrant.yaml b/vagrant.yaml new file mode 100644 index 0000000..9d39fde --- /dev/null +++ b/vagrant.yaml @@ -0,0 +1,32 @@ +all: + hosts: + ceph1: + ansible_host: 192.168.1.38 + ansible_user: foo + ansible_password: qweqwe + become_password: qweqwe + ceph2: + ansible_host: 192.168.1.149 + ansible_user: foo + ansible_password: qweqwe + become_password: qweqwe + ceph3: + ansible_host: 192.168.1.74 + ansible_user: foo + ansible_password: qweqwe + become_password: qweqwe + vars: + ansible_ssh_common_args: '-o StrictHostKeyChecking=no' + + +mons: + hosts: + ceph1: +osds: + hosts: + ceph1: + ceph2: + ceph3: +mgrs: + hosts: + ceph1: diff --git a/vagrant_variables.yml.sample b/vagrant_variables.yml.sample new file mode 100644 index 0000000..376f3a5 --- /dev/null +++ b/vagrant_variables.yml.sample @@ -0,0 +1,65 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 3 +osd_vms: 3 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +grafana_server_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.42 +cluster_subnet: 192.168.43 + +# MEMORY +# set 1024 for CentOS +memory: 1024 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For Xenial use disks: [ '/dev/sdb', '/dev/sdc' ] +# For CentOS7 use disks: [ '/dev/sda', 
'/dev/sdb' ] +disks: [ '/dev/sdb', '/dev/sdc' ] + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial or bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# openSUSE: opensuse/openSUSE-42.3-x86_64 +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +vagrant_sync_dir: /home/vagrant/sync +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: fs.file-max, value: 26234859 } + +# Debug mode, runs Ansible with -vvvv +debug: false