This commit is contained in:
a.pivkin 2025-12-22 17:39:04 +03:00
parent e125ead164
commit ab3a912c5a
2 changed files with 230 additions and 0 deletions

72
rbd_exporter/Dockerfile Normal file
View File

@ -0,0 +1,72 @@
# syntax=docker/dockerfile:1
# Multi-stage build: compile the CGO-based exporter against the Ceph Reef
# development libraries, then ship only the binary plus the Reef runtime libs.
FROM harbor.mws-team.ru/docker.io/library/ubuntu:22.04 AS builder

# Fail the wget|gpg pipeline on upstream errors instead of masking them (DL4006).
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Add the Ceph Reef repository using the modern keyring layout (apt-key is
# deprecated) and install the build dependencies in the same layer so the
# apt cache is never stale and the lists are cleaned up where they were made.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        gcc \
        gnupg \
        pkg-config \
        wget \
    && wget -q -O- 'https://download.ceph.com/keys/release.asc' \
        | gpg --dearmor -o /etc/apt/trusted.gpg.d/ceph.gpg \
    && echo "deb https://download.ceph.com/debian-reef/ jammy main" > /etc/apt/sources.list.d/ceph.list \
    && apt-get update && apt-get install -y --no-install-recommends \
        librados-dev \
        librbd-dev \
    && rm -rf /var/lib/apt/lists/*

# Install Go. Build-time-only value, so ARG rather than ENV (keeps the
# runtime environment of derived images clean).
ARG GO_VERSION=1.25.5
RUN wget -q https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz \
    && tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz \
    && rm go${GO_VERSION}.linux-amd64.tar.gz

# Go toolchain environment for subsequent build steps.
ENV GOPATH=/go \
    PATH=$PATH:/usr/local/go/bin:/go/bin

WORKDIR /app

# Copy module manifests first so the (slow) dependency download layer is
# cached until go.mod/go.sum actually change.
COPY go.mod go.sum ./
RUN go mod download

COPY . .
# CGO is required to link against librbd/librados.
RUN CGO_ENABLED=1 GOOS=linux go build -o rbd-exporter .

# ---------------------------------------------------------------------------
# Final stage: only the runtime shared libraries (librbd1/librados2), not the
# -dev packages — the headers are build-time baggage.
FROM harbor.mws-team.ru/docker.io/library/ubuntu:22.04 AS final

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        gnupg \
        lsb-release \
        wget \
    && wget -q -O- 'https://download.ceph.com/keys/release.asc' \
        | gpg --dearmor -o /etc/apt/trusted.gpg.d/ceph.gpg \
    && echo "deb https://download.ceph.com/debian-reef/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/ceph.list \
    && apt-get update && apt-get install -y --no-install-recommends \
        ceph-common \
        librados2 \
        librbd1 \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /rbd-exporter

# --chmod makes the binary executable at copy time, replacing the extra
# `RUN chmod +x` layer.
COPY --from=builder --chmod=0755 /app/rbd-exporter .

# Run as the same non-root UID/GID that the Kubernetes securityContext
# declares (runAsUser/runAsGroup: 2016), so the image is non-root by default.
RUN groupadd --gid 2016 exporter \
    && useradd --uid 2016 --gid 2016 --system --no-create-home exporter
USER 2016:2016

EXPOSE 9040
CMD ["./rbd-exporter"]

158
rbd_exporter/deploy.yaml Normal file
View File

@ -0,0 +1,158 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-exporter
  namespace: dev-unstable-rook-ceph-common
  labels:
    app: rbd-exporter
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-exporter
  template:
    metadata:
      labels:
        app: rbd-exporter
      annotations:
        # Scrape configuration consumed by the obs.mws.ru metrics collector.
        obs.mws.ru/path: /metrics
        obs.mws.ru/port: "9040"
        obs.mws.ru/scheme: "http"
        obs.mws.ru/scrape: "true"
        obs.mws.ru/probe-scrape-interval: "1m"
    spec:
      imagePullSecrets:
        - name: ceph-registry-secret
      containers:
        - name: rbd-exporter
          image: harbor.mws-team.ru/mws-storage-junk/rbd-exporter:0.01
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9040
              name: metrics
          command:
            - /bin/bash
            - -c
            - |
              # Replicate the script from toolbox.sh inline so the ceph image
              # can be run directly, instead of requiring the rook toolbox
              CEPH_CONFIG="/etc/ceph/ceph.conf"
              MON_CONFIG="/etc/rook/mon-endpoints"
              KEYRING_FILE="/etc/ceph/keyring"

              # create a ceph config file in its default location so ceph/rados tools can be used
              # without specifying any arguments
              write_endpoints() {
                endpoints=$(cat ${MON_CONFIG})

                # filter out the mon names
                # external cluster can have numbers or hyphens in mon names, handling them in regex
                # shellcheck disable=SC2001
                mon_endpoints=$(echo "${endpoints}"| sed 's/[a-z0-9_-]\+=//g')

                DATE=$(date)
                echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
                cat <<EOF > ${CEPH_CONFIG}
              [global]
              mon_host = ${mon_endpoints}

              [client.admin]
              keyring = ${KEYRING_FILE}
              EOF
              }

              # watch the endpoints config file and update if the mon endpoints ever change
              watch_endpoints() {
                # get the timestamp for the target of the soft link
                real_path=$(realpath ${MON_CONFIG})
                initial_time=$(stat -c %Z "${real_path}")
                while true; do
                  echo "I am watching for mon changes!!!"
                  real_path=$(realpath ${MON_CONFIG})
                  latest_time=$(stat -c %Z "${real_path}")
                  if [[ "${latest_time}" != "${initial_time}" ]]; then
                    write_endpoints
                    initial_time=${latest_time}
                  fi
                  sleep 10
                done
              }

              # read the secret from an env var (for backward compatibility), or from the secret file
              ceph_secret=${ROOK_CEPH_SECRET}
              if [[ "$ceph_secret" == "" ]]; then
                ceph_secret=$(cat /var/lib/rook-ceph-mon/secret.keyring)
              fi

              # create the keyring file
              cat <<EOF > ${KEYRING_FILE}
              [${ROOK_CEPH_USERNAME}]
              key = ${ceph_secret}
              EOF

              # write the initial config file
              write_endpoints

              # Run the mon-endpoint watcher in the background and keep the
              # exporter in the foreground as the container's main process, so
              # the container exits (and Kubernetes restarts it) if the
              # exporter dies. The previous `exec ... &` backgrounded the
              # exporter and left the watch loop as the main process, so a
              # crashed exporter left the pod looking healthy.
              watch_endpoints &
              exec /rbd-exporter/rbd-exporter --keyring /etc/ceph/keyring
          tty: true
          securityContext:
            runAsNonRoot: true
            runAsUser: 2016
            runAsGroup: 2016
            capabilities:
              drop: ["ALL"]
          env:
            # Ceph client name used in the generated keyring section header.
            - name: ROOK_CEPH_USERNAME
              valueFrom:
                secretKeyRef:
                  name: rook-ceph-mon
                  key: ceph-username
          volumeMounts:
            # Writable scratch dir where the startup script generates
            # ceph.conf and the keyring.
            - name: ceph-config
              mountPath: /etc/ceph
            # Rook-managed mon endpoint list, watched for failovers.
            - name: mon-endpoint-volume
              mountPath: /etc/rook
            # Admin secret; read once at startup to build the keyring.
            - name: ceph-admin-secret
              mountPath: /var/lib/rook-ceph-mon
              readOnly: true
      volumes:
        - name: ceph-admin-secret
          secret:
            secretName: rook-ceph-mon
            optional: false
            items:
              - key: ceph-secret
                path: secret.keyring
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
              - key: data
                path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
# ---
# apiVersion: v1
# kind: Service
# metadata:
# name: export-service
# namespace: rook-ceph
# labels:
# svc_app: export
# rook_cluster: rook-ceph
# spec:
# selector:
# app: export-box
# ports:
# - name: metrics
# port: 9040
# targetPort: 9040
# protocol: TCP
# type: ClusterIP