linter defeated!

a.pivkin 2025-12-23 19:20:42 +03:00
parent 5ca21e635d
commit 2294202009
6 changed files with 56 additions and 50 deletions

View File

@@ -7,11 +7,11 @@ import (
 )
 func Connect(params mytypes.Params) (_ mytypes.CephConnection, err error) {
-	var cephConn mytypes.CephConnection = mytypes.CephConnection{}
+	var cephConn = mytypes.CephConnection{}
 	defer func() {
 		if err != nil {
-			err = fmt.Errorf("Error in func connect() %w", err)
+			err = fmt.Errorf("error in func connect() %w", err)
 		}
 	}()

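Review note: both changes above are classic linter fixes. Go style (staticcheck ST1005) wants error strings uncapitalized, and spelling out `mytypes.CephConnection` on both sides of the declaration was redundant. A minimal sketch of the wrap-on-named-return pattern, with a hypothetical `connect()` standing in for the real `Connect`:

```go
package main

import (
	"errors"
	"fmt"
)

// connect is a stand-in for the real Connect: a named error return lets
// the deferred function annotate whatever error the body produced.
func connect() (err error) {
	defer func() {
		if err != nil {
			// %w keeps the original error reachable via errors.Unwrap/Is/As.
			err = fmt.Errorf("error in func connect() %w", err)
		}
	}()
	return errors.New("connection refused") // stand-in failure
}

func main() {
	err := connect()
	fmt.Println(err)                // error in func connect() connection refused
	fmt.Println(errors.Unwrap(err)) // connection refused
}
```

Because the message is attached with `%w` rather than `%v`, callers can still reach the root cause with `errors.Unwrap`, which is exactly what the error-printing block in main.go below relies on.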
View File

@@ -5,19 +5,17 @@ import (
 	"flag"
 	"fmt"
 	"net/http"
-	"os"
-	"rbd_exporter/mytypes"
-	"rbd_exporter/metrics"
-	"rbd_exporter/logger"
 	"rbd_exporter/connection"
+	"rbd_exporter/logger"
+	"rbd_exporter/metrics"
+	"rbd_exporter/mytypes"
 	"time"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
 )
-var params mytypes.Params
-var mainlogger *zap.SugaredLogger
 // Here I initialize logger
 func loggerInit() *zap.SugaredLogger {
@@ -28,17 +26,11 @@ func loggerInit() *zap.SugaredLogger {
 	if err != nil {
 		panic(fmt.Sprintf("Logger set up failed: %v", err))
 	}
-	defer func() {
-		err := logger.Sync()
-		if err != nil {
-			logger.Error("couldn't sinc logger")
-		}
-	}()
 	return logger.Sugar()
 }
-// This func runs even before main()
-func init() {
+func globalInit() (mainlogger *zap.SugaredLogger, params mytypes.Params) {
 	configFile := flag.String("config", "/etc/ceph/ceph.conf", "placement of ceph config file")
 	_keyring := flag.String("keyring", "/etc/ceph/ceph.client.admin.keyring", "placement of ceph keyring file")
 	flag.Parse()
@@ -50,33 +42,54 @@ func init() {
 	logger.SetLogger(mainlogger)
 	mainlogger.Info("Setting up logger is complete successfully")
 	mainlogger.Info("Registering prom metrics")
+	return mainlogger, params
 }
 func main() {
+	mainlogger, params := globalInit()
+	defer func() {
+		err := logger.Logger.Sync()
+		if err != nil {
+			logger.Logger.Error("couldn't sync logger")
+		}
+	}()
 	cephConn, err := connection.Connect(params)
 	if err != nil {
-		fmt.Println(err)
+		logger.Logger.Error(err)
 		if wrapped := errors.Unwrap(err); wrapped != nil {
-			fmt.Println(wrapped)
+			logger.Logger.Error(wrapped)
 		}
-		os.Exit(1)
+		panic(err)
 	}
 	defer cephConn.Conn.Shutdown()
 	mainlogger.Info("Successfully connected to a cluster")
 	http.Handle("/metrics", promhttp.Handler())
+	server := http.Server{
+		Addr:        ":9040",
+		ReadTimeout: 10 * time.Second,
+		Handler:     http.DefaultServeMux,
+	}
 	// HTTP runs in a separate goroutine because it blocks further execution of main
 	go func() {
 		mainlogger.Info("Starting http server")
 		// Here I check for errors if HTTP fails
-		if err := http.ListenAndServe(":9040", nil); err != nil {
+		if err := server.ListenAndServe(); err != nil {
 			mainlogger.Fatalf("HTTP server failed to start %v", err)
 		}
 		mainlogger.Info("HTTP server started")
 	}()
+	// if err := server.Shutdown(); err != nil {
+	// 	logger.Logger.Errorf("https server shutdown failed", err)
+	// }
 	go metrics.GetMetrics(cephConn)

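Review note: three separate fixes land in this file. The implicit `init()` plus package-level mutable globals becomes an explicit `globalInit()`; the `logger.Sync()` defer moves out of `loggerInit()`, where it fired as soon as that function returned, into `main()`, where it actually flushes at process exit; and for the same reason `os.Exit(1)`, which skips deferred calls, becomes `panic(err)`, which runs them. Finally, the explicit `http.Server` replaces `http.ListenAndServe`, whose package-level default applies no timeouts at all. A condensed sketch of the last two points, assuming only the stdlib and zap:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"go.uber.org/zap"
)

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		log.Fatal(err)
	}
	// Deferred in main, Sync flushes buffered entries when main returns or
	// panics; deferred inside a constructor like loggerInit it would have
	// fired as soon as the constructor returned. os.Exit would skip this
	// defer entirely, which is why the commit switches to panic(err).
	defer func() {
		if err := logger.Sync(); err != nil {
			logger.Error("couldn't sync logger")
		}
	}()

	// An explicit http.Server instead of http.ListenAndServe(":9040", nil):
	// the helper's default server has no timeouts, so a slow client could
	// hold a connection open indefinitely.
	server := http.Server{
		Addr:        ":9040",
		ReadTimeout: 10 * time.Second,
		Handler:     http.DefaultServeMux,
	}
	if err := server.ListenAndServe(); err != nil {
		logger.Sugar().Errorf("HTTP server failed: %v", err)
	}
}
```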
View File

@@ -12,14 +12,14 @@ import (
 func GetMetrics(cephConn mytypes.CephConnector) {
 	metrics := InitMetrics()
 	prometheus.MustRegister(
-		metrics.Total_rbd_requested_size_per_pool,
+		metrics.TotalRbdRequestedSizePerPool,
 	)
 	ticker := time.NewTicker(2 * time.Second)
 	defer ticker.Stop()
 	for range ticker.C {
-		var result []mytypes.Pool = []mytypes.Pool{}
+		var result = []mytypes.Pool{}
 		poolList, err := cephConn.ListPools()
 		if err != nil {
 			logger.Logger.Error("Cannot get list of pools")
@@ -32,7 +32,7 @@ func GetMetrics(cephConn mytypes.CephConnector) {
 			result = append(result, x)
 		}
-		metrics.Total_rbd_requested_size_per_pool.Reset()
+		metrics.TotalRbdRequestedSizePerPool.Reset()
 		for _, v := range result {
 			if !v.HasRBD {continue}
 			FillMetrics(v, metrics)
@@ -45,7 +45,7 @@ func GetMetrics(cephConn mytypes.CephConnector) {
 func InitMetrics() *mytypes.Metrics {
 	m := &mytypes.Metrics{
-		Total_rbd_requested_size_per_pool: prometheus.NewGaugeVec(
+		TotalRbdRequestedSizePerPool: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{
 				Name: "total_rbd_requested_size_per_pool",
 				Help: "total size of all requested RBDs in a specific pool",
@@ -69,11 +69,11 @@ func FillMetrics(pool mytypes.Pool, metrics *mytypes.Metrics) {
 	logger.Logger.Debugf("Processing pool %s", pool.Name)
 	for _, v := range pool.RBDlist {
-		totalSizePerPool += uint64(v.GetSize())
+		totalSizePerPool += v.GetSize()
 	}
 	logger.Logger.Debugf("Total size of RBDs in pool %s is %d", pool.Name, totalSizePerPool)
-	metrics.Total_rbd_requested_size_per_pool.WithLabelValues(pool.Name,
+	metrics.TotalRbdRequestedSizePerPool.WithLabelValues(pool.Name,
 	).Set(
 		float64(totalSizePerPool))
 }

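Review note: besides the rename to Go-style camel case (`Total_rbd_requested_size_per_pool` → `TotalRbdRequestedSizePerPool`), the code keeps the `Reset()`-then-refill pattern: resetting the `GaugeVec` each tick drops the label sets of pools that no longer exist, so deleted pools disappear from the scrape instead of freezing at their last value. A self-contained sketch of that pattern (the gauge name mirrors the one above; the label name and pool value are made up, since the diff doesn't show them):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Same shape as TotalRbdRequestedSizePerPool: one gauge per pool label.
	sizePerPool := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "total_rbd_requested_size_per_pool",
			Help: "total size of all requested RBDs in a specific pool",
		},
		[]string{"pool"}, // assumed label name
	)
	prometheus.MustRegister(sizePerPool)

	for tick := 0; tick < 2; tick++ {
		// Reset drops every labeled child, so pools deleted between ticks
		// stop being reported rather than repeating their last value.
		sizePerPool.Reset()
		sizePerPool.WithLabelValues("pool-a").Set(float64(42 << 30))
	}
	fmt.Println("metrics refreshed")
}
```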
View File

@@ -83,7 +83,7 @@ func (riw RBDImageWrapper) Stat() (*rbd.ImageInfo,error) {
 type IRBD interface {
 	GetName() string
-	GetSize() int64
+	GetSize() uint64
 }
 type RBDUsage struct {
@@ -99,8 +99,8 @@ func (r RBD) GetName() string {
 	return r.Name
 }
-func (r RBD) GetSize() int64 {
-	return int64(r.Size)
+func (r RBD) GetSize() uint64 {
+	return r.Size
 }
 type Pool struct {
@@ -110,6 +110,6 @@ type Pool struct {
 }
 type Metrics struct {
-	Total_rbd_requested_size_per_pool *prometheus.GaugeVec
-	Total_rbd_requested_size prometheus.Gauge
+	TotalRbdRequestedSizePerPool *prometheus.GaugeVec
+	// Total_rbd_requested_size prometheus.Gauge
 }

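Review note: the underlying `Size` field is already an unsigned 64-bit value, so declaring `GetSize() int64` forced a pointless `int64`/`uint64` round trip at every call site (hence the `uint64(v.GetSize())` removal in the metrics file above). A sketch of the aligned signatures, with stand-in types:

```go
package main

import "fmt"

// Stand-in types: the interface now mirrors the underlying uint64 field,
// so callers no longer need a uint64(v.GetSize()) conversion.
type IRBD interface {
	GetName() string
	GetSize() uint64
}

type RBD struct {
	Name string
	Size uint64
}

func (r RBD) GetName() string { return r.Name }
func (r RBD) GetSize() uint64 { return r.Size } // previously: int64(r.Size)

func main() {
	var d IRBD = RBD{Name: "disk1", Size: 1 << 30}
	fmt.Println(d.GetName(), d.GetSize())
}
```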
View File

@@ -7,7 +7,7 @@ import (
 )
 func PoolFactory(cephConn mytypes.CephConnector, poolName string) (mytypes.Pool, error) {
-	var rbdlist []mytypes.IRBD = []mytypes.IRBD{}
+	var rbdlist = []mytypes.IRBD{}
 	ioctx, err := cephConn.OpenIOContext(poolName)
 	if err != nil {
@@ -16,36 +16,29 @@ func PoolFactory(cephConn mytypes.CephConnector, poolName string) (mytypes.Pool,
 	defer ioctx.Destroy()
-	imageList, err := ioctx.GetImageNames()
+	imageNames, err := ioctx.GetImageNames()
 	if err != nil {
 		return mytypes.Pool{}, fmt.Errorf("couldn't get list of rbds %w", err)
 	}
-	for _, rbdname := range imageList {
-		stat, err := RBDFacroty(ioctx, rbdname)
+	for _, rbdName := range imageNames {
+		stat, err := RBDFacroty(ioctx, rbdName)
 		if err != nil {
-			fmt.Errorf("coundn't get stat from disk %s %w", rbdname, err)
-			panic(err)
+			logger.Logger.Errorf("couldn't get stat from disk %s %v", rbdName, err)
 		}
 		rbdlist = append(rbdlist, stat)
 	}
 	return mytypes.Pool{
 		Name:    poolName,
-		HasRBD:  len(imageList) != 0,
+		HasRBD:  len(imageNames) != 0,
 		RBDlist: rbdlist,
 	}, nil
 }
-func RBDFacroty(ioctx mytypes.IOContexter, rbdname string) (mytypes.RBD, error) {
-	defer func() {
-		if v := recover(); v != nil {
-			logger.Logger.Errorf("no such RBD exists (probably just deleted)")
-		}
-	}()
-	image := ioctx.GetImage(rbdname)
+func RBDFacroty(ioctx mytypes.IOContexter, rbdName string) (mytypes.RBD, error) {
+	image := ioctx.GetImage(rbdName)
 	err := image.Open()
 	if err != nil {
-		panic(err)
+		logger.Logger.Errorf("Couldn't open RBD %s", rbdName)
 		// logger.Logger.Errorf("Shit happened in RBDFactory")
 	}
@@ -54,11 +47,11 @@ func RBDFacroty(ioctx mytypes.IOContexter, rbdname string) (mytypes.RBD, error)
 	info, err := image.Stat()
 	if err != nil {
-		return mytypes.RBD{}, fmt.Errorf("couldn't get stats for image %s %w", rbdname, err)
+		return mytypes.RBD{}, fmt.Errorf("couldn't get stats for image %s %w", rbdName, err)
 	}
 	return mytypes.RBD{
-		Name: rbdname,
+		Name: rbdName,
 		ImageInfo: *info,
 	}, nil
 }

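Review note: the panic/recover pair is replaced with plain error logging, so one unreadable image no longer takes the whole exporter down. Two details worth flagging: zap's `Errorf` is `Sprintf`-style, so `%v` is the correct verb there (`%w` only works in `fmt.Errorf`), and as committed the loop still appends the zero-value `stat` after logging the failure. A sketch of the log-and-skip variant of the same idea, with a hypothetical `statImage` in place of `RBDFacroty`:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// statImage is a hypothetical stand-in for RBDFacroty: it fails for one
// image the way a just-deleted RBD would.
func statImage(name string) (uint64, error) {
	if name == "just-deleted" {
		return 0, errors.New("no such image")
	}
	return uint64(len(name)), nil
}

func main() {
	var sizes []uint64
	for _, name := range []string{"disk1", "just-deleted", "disk2"} {
		size, err := statImage(name)
		if err != nil {
			// Log and skip instead of panicking: one missing image should
			// not stop the whole pool scan. (The commit as written logs
			// but still appends the zero-value result.)
			log.Printf("couldn't get stat from disk %s: %v", name, err)
			continue
		}
		sizes = append(sizes, size)
	}
	fmt.Println(sizes) // [5 5]
}
```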
View File

@@ -34,7 +34,7 @@ func TestRBDFactory(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, rbdName, result.Name)
-	assert.Equal(t, expectedImageInfo.Size, result.ImageInfo.Size)
+	assert.Equal(t, expectedImageInfo.Size, result.Size)
 }
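Review note: this assertion only compiles both ways because `RBD` embeds its image-info type (the factory's `ImageInfo: *info` literal sets the embedded field), so `Size` is promoted and `result.Size` names the same field as `result.ImageInfo.Size`. A tiny sketch of that promotion, with stand-in types:

```go
package main

import "fmt"

type ImageInfo struct{ Size uint64 }

// RBD embeds ImageInfo, so its fields are promoted: r.Size and
// r.ImageInfo.Size refer to the same field.
type RBD struct {
	Name string
	ImageInfo
}

func main() {
	r := RBD{Name: "disk1", ImageInfo: ImageInfo{Size: 1024}}
	fmt.Println(r.Size == r.ImageInfo.Size) // true
}
```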