Bookstack storage on host

a.pivkin 2024-10-26 14:35:25 +03:00
parent b0e2ba125c
commit ce8252b4dc
7 changed files with 118 additions and 30 deletions

host_vars/ceph01.yml Normal file

@@ -0,0 +1,6 @@
location:
  root: default
labels:
  - mon
  - osd
  - mgr

host_vars/ceph02.yml Normal file

@@ -0,0 +1,6 @@
location:
  root: default
labels:
  - mon
  - osd
  - mgr

host_vars/ceph03.yml Normal file

@@ -0,0 +1,5 @@
location:
  root: default
labels:
  - mon
  - osd

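The location and labels values in these host_vars are consumed by the cluster_spec.yml.j2 template added later in this commit, which renders one "service_type: host" document per inventory host. As a rough sketch, the entry for ceph03 would come out like the following (the addr shown is hypothetical; the real value comes from ansible_host in the inventory, which is not part of this diff):

service_type: host
addr: 10.63.1.23          # hypothetical -- taken from hostvars['ceph03']['ansible_host']
hostname: ceph03
location:
  root: default
labels:
- mon
- osd
---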

@@ -5,19 +5,27 @@
gather_facts: true
diff: true
vars:
ceph_repo: "https://download.ceph.com/debian-squid"
ceph_repo: "https://download.ceph.com/debian-reef"
ntp_server: "ntp.int.nt-com.ru"
first_mon_ip: "10.63.1.19"
cluster_network: "10.63.0.0/24"
public_network: "10.63.1.0/24"
CEPH_RELEASE: '19.2.0'
initial_dashboard_user: 'admin'
initial_dashboard_password: 'qweqwe'
allow_fqdn_hostname: 'true'
tasks:
- name: download asc from ceph
get_url:
url: https://download.ceph.com/keys/release.asc
dest: /tmp
- name: get gpg key and import it
- name: Delete previously installed ceph.gpg
file:
path: /usr/share/keyrings/ceph.gpg
state: absent
- name: import downloaded asc
shell:
chdir: /tmp
cmd: echo y | gpg -o /usr/share/keyrings/ceph.gpg --dearmor release.asc;
@@ -28,33 +36,58 @@
repo: "deb [signed-by=/usr/share/keyrings/ceph.gpg] {{ ceph_repo }} {{ ansible_distribution_release }} main"
state: present
block: # here bootstrap the cluster
- name: download cephadm bootstrapper
get_url:
url: https://download.ceph.com/rpm-${CEPH_RELEASE}/el9/noarch/cephadm
dest: /tmp
mode: '777'
# - name: here bootstrap the first node
# block:
# - name: download cephadm bootstrapper
# get_url:
# url: https://download.ceph.com/rpm-{{CEPH_RELEASE}}/el9/noarch/cephadm
# dest: /tmp
# mode: '777'
- name: Prepare customized ceph.conf
template:
src: ceph.conf.j2
dest: /root/ceph.conf
- name: install packages
apt:
pkg:
- ceph-common
- cephadm
- lvm2
- podman
- chrony
- mc
state: present
- name: Setup ntp client
template:
src: chrony.yaml.j2
dest: /etc/chrony/chrony.conf
notify:
- ceph-rollout:restart
- name: Templating cluster-spec
template:
src: cluster_spec.yml.j2
dest: /root/cluster_spec.yml
trim_blocks: true
lstrip_blocks: true
run_once: true
delegate_to: ceph01
- name: Flush restart handlers
meta: flush_handlers
- name: running ceph bootstrap
pause:
prompt: run this command to bootstrap 1st node "cephadm bootstrap --mon-ip {{ first_mon_ip }} --cluster-network {{ cluster_network }}
--initial-dashboard-user {{ initial_dashboard_user }} --initial-dashboard-password {{ initial_dashboard_password }} --apply-spec /root/cluster_spec.yml"
handlers:
- name: ceph-rollout:restart
ansible.builtin.systemd_service:
state: restarted
name: chronyd
listen: "restart chronyd service"
# - name: install packages
# apt:
# pkg:
# - cephadm
# - lvm2
# - podman
# - chrony
# state: present
#
# - name: Setup ntp client
# template:
# src: chrony.yaml.j2
# dest: /etc/chrony/chrony.conf
# notify:
# - ceph-rollout:restart
#
# handlers:
# - name: ceph-rollout:restart
# ansible.builtin.systemd_service:
# state: restarted
# name: chronyd
# listen: "restart chronyd service"
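The chrony.yaml.j2 template referenced by the "Setup ntp client" task is not part of this commit. A minimal sketch of what it might contain, assuming only the ntp_server var defined above is used (everything here is illustrative, not the repository's actual template):

# use the internal NTP server defined in the playbook vars
server {{ ntp_server }} iburst
# common Debian chrony defaults
driftfile /var/lib/chrony/chrony.drift
makestep 1.0 3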

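With the vars defined at the top of the playbook, the pause prompt should render to roughly this command on ceph01 (a sketch of the substitution, not an extra step):

cephadm bootstrap --mon-ip 10.63.1.19 --cluster-network 10.63.0.0/24 --initial-dashboard-user admin --initial-dashboard-password qweqwe --apply-spec /root/cluster_spec.yml

Note that /root/ceph.conf is templated but not referenced in the prompt, and allow_fqdn_hostname is defined but unused; presumably they could be passed with --config /root/ceph.conf and --allow-fqdn-hostname if that is intended.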
templates/ceph.conf.j2 Normal file

@@ -0,0 +1,3 @@
[global]
public_network = {{ public_network }}
cluster_network = {{ cluster_network }}

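With public_network and cluster_network from the playbook vars, this template should render to:

[global]
public_network = 10.63.1.0/24
cluster_network = 10.63.0.0/24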
templates/cluster_spec.yml.j2 Normal file

@@ -0,0 +1,35 @@
{% macro nodes() %}
{%- for _host in groups['all'] %}
service_type: host
addr: {{ hostvars[_host]['ansible_host'] }}
hostname: {{ _host }}
location:
{% for key, value in hostvars[_host]['location'].items() %}
  {{ key }}: {{ value }}
{% endfor %}
labels:
{% for i in hostvars[_host]['labels'] %}
- {{ i }}
{% endfor %}
---
{% endfor %}
{% endmacro %}
{{ nodes() }}
service_type: mon
placement:
  label: "mon"
---
service_type: mgr
service_name: mgr
placement:
  label: "mgr"
---
service_type: osd
service_id: 12_SDD
service_name: osd.all-available-devices
placement:
  label: "osd"
spec:
  data_devices:
    all: true
---
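The rendered /root/cluster_spec.yml is a multi-document YAML consumed once by cephadm bootstrap via --apply-spec. If the spec changes later, it can presumably be re-applied against the running cluster through the orchestrator, along the lines of:

ceph orch apply -i /root/cluster_spec.yml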