diff --git a/agent/agent.go b/agent/agent.go index 9d6a85f91..43b4d8879 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -96,11 +96,9 @@ func serve(a *Agent) { var wg sync.WaitGroup var exit bool - var reload bool for { ctx, cancel := context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, "reload", reload) wg.Add(1) go func() { defer wg.Done(); a.run(ctx) }() @@ -136,7 +134,6 @@ func serve(a *Agent) { os.Exit(0) } - reload = true time.Sleep(time.Second) } } @@ -169,7 +166,7 @@ func (a *Agent) run(ctx context.Context) { discCfg := a.buildDiscoveryConf(enabledModules) - discoveryManager, err := discovery.NewManager(discCfg) + discMgr, err := discovery.NewManager(discCfg) if err != nil { a.Error(err) if isTerminal { @@ -178,46 +175,32 @@ func (a *Agent) run(ctx context.Context) { return } - functionsManager := functions.NewManager() - - jobsManager := jobmgr.NewManager() - jobsManager.PluginName = a.Name - jobsManager.Out = a.Out - jobsManager.Modules = enabledModules - - // TODO: API will be changed in https://github.com/netdata/netdata/pull/16702 - //if logger.Level.Enabled(slog.LevelDebug) { - // dyncfgDiscovery, _ := dyncfg.NewDiscovery(dyncfg.Config{ - // Plugin: a.Name, - // API: netdataapi.New(a.Out), - // Modules: enabledModules, - // ModuleConfigDefaults: discCfg.Registry, - // Functions: functionsManager, - // }) - // - // discoveryManager.Add(dyncfgDiscovery) - // - // jobsManager.Dyncfg = dyncfgDiscovery - //} + fnMgr := functions.NewManager() + + jobMgr := jobmgr.New() + jobMgr.PluginName = a.Name + jobMgr.Out = a.Out + jobMgr.Modules = enabledModules + jobMgr.FnReg = fnMgr if reg := a.setupVnodeRegistry(); reg == nil || reg.Len() == 0 { vnodes.Disabled = true } else { - jobsManager.Vnodes = reg + jobMgr.Vnodes = reg } if a.LockDir != "" { - jobsManager.FileLock = filelock.New(a.LockDir) + jobMgr.FileLock = filelock.New(a.LockDir) } - var statusSaveManager *filestatus.Manager + var fsMgr *filestatus.Manager if !isTerminal && a.StateFile != "" { - statusSaveManager = filestatus.NewManager(a.StateFile) - jobsManager.StatusSaver = statusSaveManager + fsMgr = filestatus.NewManager(a.StateFile) + jobMgr.FileStatus = fsMgr if store, err := filestatus.LoadStore(a.StateFile); err != nil { a.Warningf("couldn't load state file: %v", err) } else { - jobsManager.StatusStore = store + jobMgr.FileStatusStore = store } } @@ -225,17 +208,17 @@ func (a *Agent) run(ctx context.Context) { var wg sync.WaitGroup wg.Add(1) - go func() { defer wg.Done(); functionsManager.Run(ctx) }() + go func() { defer wg.Done(); fnMgr.Run(ctx) }() wg.Add(1) - go func() { defer wg.Done(); jobsManager.Run(ctx, in) }() + go func() { defer wg.Done(); jobMgr.Run(ctx, in) }() wg.Add(1) - go func() { defer wg.Done(); discoveryManager.Run(ctx, in) }() + go func() { defer wg.Done(); discMgr.Run(ctx, in) }() - if statusSaveManager != nil { + if fsMgr != nil { wg.Add(1) - go func() { defer wg.Done(); statusSaveManager.Run(ctx) }() + go func() { defer wg.Done(); fsMgr.Run(ctx) }() } wg.Wait() diff --git a/agent/agent_test.go b/agent/agent_test.go index 2a15a6b73..2abbdb31a 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -74,17 +74,17 @@ func prepareRegistry(mux *sync.Mutex, stats map[string]int, names ...string) mod func prepareMockModule(name string, mux *sync.Mutex, stats map[string]int) module.Module { return &module.MockModule{ - InitFunc: func() bool { + InitFunc: func() error { mux.Lock() defer mux.Unlock() stats[name+"_init"]++ - return true + return nil }, - CheckFunc: func() bool { + 
CheckFunc: func() error { mux.Lock() defer mux.Unlock() stats[name+"_check"]++ - return true + return nil }, ChartsFunc: func() *module.Charts { mux.Lock() diff --git a/agent/confgroup/cache.go b/agent/confgroup/cache.go deleted file mode 100644 index 40c8071d5..000000000 --- a/agent/confgroup/cache.go +++ /dev/null @@ -1,93 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package confgroup - -func NewCache() *Cache { - return &Cache{ - hashes: make(map[uint64]uint), - sources: make(map[string]map[uint64]Config), - } -} - -type Cache struct { - hashes map[uint64]uint // map[cfgHash]cfgCount - sources map[string]map[uint64]Config // map[cfgSource]map[cfgHash]cfg -} - -func (c *Cache) Add(group *Group) (added, removed []Config) { - if group == nil { - return nil, nil - } - - if len(group.Configs) == 0 { - return c.addEmpty(group) - } - - return c.addNotEmpty(group) -} - -func (c *Cache) addEmpty(group *Group) (added, removed []Config) { - set, ok := c.sources[group.Source] - if !ok { - return nil, nil - } - - for hash, cfg := range set { - c.hashes[hash]-- - if c.hashes[hash] == 0 { - removed = append(removed, cfg) - } - delete(set, hash) - } - - delete(c.sources, group.Source) - - return nil, removed -} - -func (c *Cache) addNotEmpty(group *Group) (added, removed []Config) { - set, ok := c.sources[group.Source] - if !ok { - set = make(map[uint64]Config) - c.sources[group.Source] = set - } - - seen := make(map[uint64]struct{}) - - for _, cfg := range group.Configs { - hash := cfg.Hash() - seen[hash] = struct{}{} - - if _, ok := set[hash]; ok { - continue - } - - set[hash] = cfg - if c.hashes[hash] == 0 { - added = append(added, cfg) - } - c.hashes[hash]++ - } - - if !ok { - return added, nil - } - - for hash, cfg := range set { - if _, ok := seen[hash]; ok { - continue - } - - delete(set, hash) - c.hashes[hash]-- - if c.hashes[hash] == 0 { - removed = append(removed, cfg) - } - } - - if ok && len(set) == 0 { - delete(c.sources, group.Source) - } - - return added, removed -} diff --git a/agent/confgroup/cache_test.go b/agent/confgroup/cache_test.go deleted file mode 100644 index a2bbd4919..000000000 --- a/agent/confgroup/cache_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package confgroup - -import ( - "sort" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestConfigCache_Add(t *testing.T) { - tests := map[string]struct { - prepareGroups []Group - groups []Group - expectedAdd []Config - expectedRemove []Config - }{ - "new group, new configs": { - groups: []Group{ - prepareGroup("source", prepareCfg("name", "module")), - }, - expectedAdd: []Config{ - prepareCfg("name", "module"), - }, - }, - "several equal updates for the same group": { - groups: []Group{ - prepareGroup("source", prepareCfg("name", "module")), - prepareGroup("source", prepareCfg("name", "module")), - prepareGroup("source", prepareCfg("name", "module")), - prepareGroup("source", prepareCfg("name", "module")), - prepareGroup("source", prepareCfg("name", "module")), - }, - expectedAdd: []Config{ - prepareCfg("name", "module"), - }, - }, - "empty group update for cached group": { - prepareGroups: []Group{ - prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - }, - groups: []Group{ - prepareGroup("source"), - }, - expectedRemove: []Config{ - prepareCfg("name1", "module"), - prepareCfg("name2", "module"), - }, - }, - "changed group update for cached group": { - prepareGroups: []Group{ - prepareGroup("source", prepareCfg("name1", 
"module"), prepareCfg("name2", "module")), - }, - groups: []Group{ - prepareGroup("source", prepareCfg("name2", "module")), - }, - expectedRemove: []Config{ - prepareCfg("name1", "module"), - }, - }, - "empty group update for uncached group": { - groups: []Group{ - prepareGroup("source"), - prepareGroup("source"), - }, - }, - "several updates with different source but same context": { - groups: []Group{ - prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - }, - expectedAdd: []Config{ - prepareCfg("name1", "module"), - prepareCfg("name2", "module"), - }, - }, - "have equal configs from 2 sources, get empty group for the 1st source": { - prepareGroups: []Group{ - prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - }, - groups: []Group{ - prepareGroup("source2"), - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - cache := NewCache() - - for _, group := range test.prepareGroups { - cache.Add(&group) - } - - var added, removed []Config - for _, group := range test.groups { - a, r := cache.Add(&group) - added = append(added, a...) - removed = append(removed, r...) - } - - sortConfigs(added) - sortConfigs(removed) - sortConfigs(test.expectedAdd) - sortConfigs(test.expectedRemove) - - assert.Equalf(t, test.expectedAdd, added, "added configs") - assert.Equalf(t, test.expectedRemove, removed, "removed configs") - }) - } -} - -func prepareGroup(source string, cfgs ...Config) Group { - return Group{ - Configs: cfgs, - Source: source, - } -} - -func prepareCfg(name, module string) Config { - return Config{ - "name": name, - "module": module, - } -} - -func sortConfigs(cfgs []Config) { - if len(cfgs) == 0 { - return - } - sort.Slice(cfgs, func(i, j int) bool { return cfgs[i].FullName() < cfgs[j].FullName() }) -} diff --git a/agent/confgroup/config.go b/agent/confgroup/config.go new file mode 100644 index 000000000..92d52b9d4 --- /dev/null +++ b/agent/confgroup/config.go @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package confgroup + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/netdata/go.d.plugin/agent/hostinfo" + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/ilyam8/hashstructure" + "gopkg.in/yaml.v2" +) + +const ( + keyName = "name" + keyModule = "module" + keyUpdateEvery = "update_every" + keyDetectRetry = "autodetection_retry" + keyPriority = "priority" + keyLabels = "labels" + keyVnode = "vnode" + + ikeySource = "__source__" + ikeySourceType = "__source_type__" + ikeyProvider = "__provider__" +) + +type Config map[string]any + +func (c Config) HashIncludeMap(_ string, k, _ any) (bool, error) { + s := k.(string) + return !(strings.HasPrefix(s, "__") || strings.HasSuffix(s, "__")), nil +} + +func (c Config) Set(key string, value any) Config { c[key] = value; return c } +func (c Config) Get(key string) any { return c[key] } + +func (c Config) Name() string { v, _ := c.Get(keyName).(string); return v } +func (c Config) Module() string { v, _ := c.Get(keyModule).(string); return v } +func (c Config) FullName() string { return fullName(c.Name(), c.Module()) } +func (c Config) UpdateEvery() int { v, _ := c.Get(keyUpdateEvery).(int); return v } +func (c Config) AutoDetectionRetry() int { v, _ := c.Get(keyDetectRetry).(int); return v } +func (c Config) Priority() 
int { v, _ := c.Get(keyPriority).(int); return v } +func (c Config) Labels() map[any]any { v, _ := c.Get(keyLabels).(map[any]any); return v } +func (c Config) Hash() uint64 { return calcHash(c) } +func (c Config) Vnode() string { v, _ := c.Get(keyVnode).(string); return v } + +func (c Config) SetName(v string) Config { return c.Set(keyName, v) } +func (c Config) SetModule(v string) Config { return c.Set(keyModule, v) } + +func (c Config) UID() string { + return fmt.Sprintf("%s_%s_%s_%s_%d", c.SourceType(), c.Provider(), c.Source(), c.FullName(), c.Hash()) +} + +func (c Config) Source() string { v, _ := c.Get(ikeySource).(string); return v } +func (c Config) SourceType() string { v, _ := c.Get(ikeySourceType).(string); return v } +func (c Config) Provider() string { v, _ := c.Get(ikeyProvider).(string); return v } +func (c Config) SetSource(v string) Config { return c.Set(ikeySource, v) } +func (c Config) SetSourceType(v string) Config { return c.Set(ikeySourceType, v) } +func (c Config) SetProvider(v string) Config { return c.Set(ikeyProvider, v) } + +func (c Config) SourceTypePriority() int { + switch c.SourceType() { + default: + return 0 + case "stock": + return 2 + case "discovered": + return 4 + case "user": + return 8 + case "dyncfg": + return 16 + } +} + +func (c Config) Clone() (Config, error) { + type plain Config + bytes, err := yaml.Marshal((plain)(c)) + if err != nil { + return nil, err + } + var newConfig Config + if err := yaml.Unmarshal(bytes, &newConfig); err != nil { + return nil, err + } + return newConfig, nil +} + +func (c Config) ApplyDefaults(def Default) { + if c.UpdateEvery() <= 0 { + v := firstPositive(def.UpdateEvery, module.UpdateEvery) + c.Set("update_every", v) + } + if c.AutoDetectionRetry() <= 0 { + v := firstPositive(def.AutoDetectionRetry, module.AutoDetectionRetry) + c.Set("autodetection_retry", v) + } + if c.Priority() <= 0 { + v := firstPositive(def.Priority, module.Priority) + c.Set("priority", v) + } + if c.UpdateEvery() < def.MinUpdateEvery && def.MinUpdateEvery > 0 { + c.Set("update_every", def.MinUpdateEvery) + } + if c.Name() == "" { + c.Set("name", c.Module()) + } else { + c.Set("name", cleanName(jobNameResolveHostname(c.Name()))) + } + + if v, ok := c.Get("url").(string); ok { + c.Set("url", urlResolveHostname(v)) + } +} + +var reInvalidCharacters = regexp.MustCompile(`\s+|\.+`) + +func cleanName(name string) string { + return reInvalidCharacters.ReplaceAllString(name, "_") +} + +func fullName(name, module string) string { + if name == module { + return name + } + return module + "_" + name +} + +func calcHash(obj any) uint64 { + hash, _ := hashstructure.Hash(obj, nil) + return hash +} + +func firstPositive(value int, others ...int) int { + if value > 0 || len(others) == 0 { + return value + } + return firstPositive(others[0], others[1:]...) 
+} + +func urlResolveHostname(rawURL string) string { + if hostinfo.Hostname == "" || !strings.Contains(rawURL, "hostname") { + return rawURL + } + + u, err := url.Parse(rawURL) + if err != nil || (u.Hostname() != "hostname" && !strings.Contains(u.Hostname(), "hostname.")) { + return rawURL + } + + u.Host = strings.Replace(u.Host, "hostname", hostinfo.Hostname, 1) + + return u.String() +} + +func jobNameResolveHostname(name string) string { + if hostinfo.Hostname == "" || !strings.Contains(name, "hostname") { + return name + } + + if name != "hostname" && !strings.HasPrefix(name, "hostname.") && !strings.HasPrefix(name, "hostname_") { + return name + } + + return strings.Replace(name, "hostname", hostinfo.Hostname, 1) +} diff --git a/agent/confgroup/group.go b/agent/confgroup/group.go index 649a145d7..286a0f922 100644 --- a/agent/confgroup/group.go +++ b/agent/confgroup/group.go @@ -2,126 +2,8 @@ package confgroup -import ( - "fmt" - "net/url" - "regexp" - "strings" - - "github.com/netdata/go.d.plugin/agent/hostinfo" - "github.com/netdata/go.d.plugin/agent/module" - - "github.com/ilyam8/hashstructure" -) - type Group struct { - Configs []Config - Source string -} - -type Config map[string]interface{} - -func (c Config) HashIncludeMap(_ string, k, _ interface{}) (bool, error) { - s := k.(string) - return !(strings.HasPrefix(s, "__") && strings.HasSuffix(s, "__")), nil -} - -func (c Config) NameWithHash() string { return fmt.Sprintf("%s_%d", c.Name(), c.Hash()) } -func (c Config) Name() string { v, _ := c.get("name").(string); return v } -func (c Config) Module() string { v, _ := c.get("module").(string); return v } -func (c Config) FullName() string { return fullName(c.Name(), c.Module()) } -func (c Config) UpdateEvery() int { v, _ := c.get("update_every").(int); return v } -func (c Config) AutoDetectionRetry() int { v, _ := c.get("autodetection_retry").(int); return v } -func (c Config) Priority() int { v, _ := c.get("priority").(int); return v } -func (c Config) Labels() map[any]any { v, _ := c.get("labels").(map[any]any); return v } -func (c Config) Hash() uint64 { return calcHash(c) } -func (c Config) Source() string { v, _ := c.get("__source__").(string); return v } -func (c Config) Provider() string { v, _ := c.get("__provider__").(string); return v } -func (c Config) Vnode() string { v, _ := c.get("vnode").(string); return v } - -func (c Config) SetName(v string) { c.set("name", v) } -func (c Config) SetModule(v string) { c.set("module", v) } -func (c Config) SetSource(v string) { c.set("__source__", v) } -func (c Config) SetProvider(v string) { c.set("__provider__", v) } - -func (c Config) set(key string, value interface{}) { c[key] = value } -func (c Config) get(key string) interface{} { return c[key] } - -func (c Config) Apply(def Default) { - if c.UpdateEvery() <= 0 { - v := firstPositive(def.UpdateEvery, module.UpdateEvery) - c.set("update_every", v) - } - if c.AutoDetectionRetry() <= 0 { - v := firstPositive(def.AutoDetectionRetry, module.AutoDetectionRetry) - c.set("autodetection_retry", v) - } - if c.Priority() <= 0 { - v := firstPositive(def.Priority, module.Priority) - c.set("priority", v) - } - if c.UpdateEvery() < def.MinUpdateEvery && def.MinUpdateEvery > 0 { - c.set("update_every", def.MinUpdateEvery) - } - if c.Name() == "" { - c.set("name", c.Module()) - } else { - c.set("name", cleanName(jobNameResolveHostname(c.Name()))) - } - - if v, ok := c.get("url").(string); ok { - c.set("url", urlResolveHostname(v)) - } -} - -func cleanName(name string) string { - return 
reInvalidCharacters.ReplaceAllString(name, "_") -} - -var reInvalidCharacters = regexp.MustCompile(`\s+|\.+`) - -func fullName(name, module string) string { - if name == module { - return name - } - return module + "_" + name -} - -func calcHash(obj interface{}) uint64 { - hash, _ := hashstructure.Hash(obj, nil) - return hash -} - -func firstPositive(value int, others ...int) int { - if value > 0 || len(others) == 0 { - return value - } - return firstPositive(others[0], others[1:]...) -} - -func urlResolveHostname(rawURL string) string { - if hostinfo.Hostname == "" || !strings.Contains(rawURL, "hostname") { - return rawURL - } - - u, err := url.Parse(rawURL) - if err != nil || (u.Hostname() != "hostname" && !strings.Contains(u.Hostname(), "hostname.")) { - return rawURL - } - - u.Host = strings.Replace(u.Host, "hostname", hostinfo.Hostname, 1) - - return u.String() -} - -func jobNameResolveHostname(name string) string { - if hostinfo.Hostname == "" || !strings.Contains(name, "hostname") { - return name - } - - if name != "hostname" && !strings.HasPrefix(name, "hostname.") && !strings.HasPrefix(name, "hostname_") { - return name - } - - return strings.Replace(name, "hostname", hostinfo.Hostname, 1) + Configs []Config + Source string + SourceType string } diff --git a/agent/confgroup/group_test.go b/agent/confgroup/group_test.go index af9a804e8..beac8e61b 100644 --- a/agent/confgroup/group_test.go +++ b/agent/confgroup/group_test.go @@ -316,7 +316,7 @@ func TestConfig_Apply(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - test.origCfg.Apply(test.def) + test.origCfg.ApplyDefaults(test.def) assert.Equal(t, test.expectedCfg, test.origCfg) }) diff --git a/agent/discovery/dummy/discovery.go b/agent/discovery/dummy/discovery.go index acd0b8f1c..36fb76710 100644 --- a/agent/discovery/dummy/discovery.go +++ b/agent/discovery/dummy/discovery.go @@ -65,15 +65,17 @@ func (d *Discovery) newCfgGroup(name string) *confgroup.Group { return nil } + src := "internal" cfg := confgroup.Config{} cfg.SetModule(name) - cfg.SetSource(name) + cfg.SetSource(src) + cfg.SetSourceType("stock") cfg.SetProvider("dummy") - cfg.Apply(def) + cfg.ApplyDefaults(def) group := &confgroup.Group{ Configs: []confgroup.Config{cfg}, - Source: name, + Source: src, } return group } diff --git a/agent/discovery/dyncfg/config.go b/agent/discovery/dyncfg/config.go deleted file mode 100644 index ebda00f50..000000000 --- a/agent/discovery/dyncfg/config.go +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package dyncfg - -import ( - "github.com/netdata/go.d.plugin/agent/confgroup" - "github.com/netdata/go.d.plugin/agent/functions" - "github.com/netdata/go.d.plugin/agent/module" -) - -type Config struct { - Plugin string - API NetdataDyncfgAPI - Functions FunctionRegistry - Modules module.Registry - ModuleConfigDefaults confgroup.Registry -} - -type NetdataDyncfgAPI interface { - DynCfgEnable(string) error - DynCfgReset() error - DyncCfgRegisterModule(string) error - DynCfgRegisterJob(_, _, _ string) error - DynCfgReportJobStatus(_, _, _, _ string) error - FunctionResultSuccess(_, _, _ string) error - FunctionResultReject(_, _, _ string) error -} - -type FunctionRegistry interface { - Register(name string, reg func(functions.Function)) -} - -func validateConfig(cfg Config) error { - return nil -} diff --git a/agent/discovery/dyncfg/dyncfg.go b/agent/discovery/dyncfg/dyncfg.go deleted file mode 100644 index 2f3c34234..000000000 --- a/agent/discovery/dyncfg/dyncfg.go +++ 
/dev/null @@ -1,256 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package dyncfg - -import ( - "bytes" - "context" - "fmt" - "log/slog" - "strings" - "sync" - - "github.com/netdata/go.d.plugin/agent/confgroup" - "github.com/netdata/go.d.plugin/agent/functions" - "github.com/netdata/go.d.plugin/agent/module" - "github.com/netdata/go.d.plugin/logger" - - "gopkg.in/yaml.v2" -) - -const dynCfg = "dyncfg" - -func NewDiscovery(cfg Config) (*Discovery, error) { - if err := validateConfig(cfg); err != nil { - return nil, err - } - - mgr := &Discovery{ - Logger: logger.New().With( - slog.String("component", "discovery dyncfg"), - ), - Plugin: cfg.Plugin, - API: cfg.API, - Modules: cfg.Modules, - ModuleConfigDefaults: nil, - mux: &sync.Mutex{}, - configs: make(map[string]confgroup.Config), - } - - mgr.registerFunctions(cfg.Functions) - - return mgr, nil -} - -type Discovery struct { - *logger.Logger - - Plugin string - API NetdataDyncfgAPI - Modules module.Registry - ModuleConfigDefaults confgroup.Registry - - in chan<- []*confgroup.Group - - mux *sync.Mutex - configs map[string]confgroup.Config -} - -func (d *Discovery) String() string { - return d.Name() -} - -func (d *Discovery) Name() string { - return "dyncfg discovery" -} - -func (d *Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) { - d.Info("instance is started") - defer func() { d.Info("instance is stopped") }() - - d.in = in - - if reload, ok := ctx.Value("reload").(bool); ok && reload { - _ = d.API.DynCfgReset() - } - - _ = d.API.DynCfgEnable(d.Plugin) - - for k := range d.Modules { - _ = d.API.DyncCfgRegisterModule(k) - } - - <-ctx.Done() -} - -func (d *Discovery) registerFunctions(r FunctionRegistry) { - r.Register("get_plugin_config", d.getPluginConfig) - r.Register("get_plugin_config_schema", d.getModuleConfigSchema) - r.Register("set_plugin_config", d.setPluginConfig) - - r.Register("get_module_config", d.getModuleConfig) - r.Register("get_module_config_schema", d.getModuleConfigSchema) - r.Register("set_module_config", d.setModuleConfig) - - r.Register("get_job_config", d.getJobConfig) - r.Register("get_job_config_schema", d.getJobConfigSchema) - r.Register("set_job_config", d.setJobConfig) - r.Register("delete_job", d.deleteJobName) -} - -func (d *Discovery) getPluginConfig(fn functions.Function) { d.notImplemented(fn) } -func (d *Discovery) getPluginConfigSchema(fn functions.Function) { d.notImplemented(fn) } -func (d *Discovery) setPluginConfig(fn functions.Function) { d.notImplemented(fn) } - -func (d *Discovery) getModuleConfig(fn functions.Function) { d.notImplemented(fn) } -func (d *Discovery) getModuleConfigSchema(fn functions.Function) { d.notImplemented(fn) } -func (d *Discovery) setModuleConfig(fn functions.Function) { d.notImplemented(fn) } - -func (d *Discovery) getJobConfig(fn functions.Function) { - if err := d.verifyFn(fn, 2); err != nil { - d.apiReject(fn, err.Error()) - return - } - - moduleName, jobName := fn.Args[0], fn.Args[1] - - bs, err := d.getConfigBytes(moduleName + "_" + jobName) - if err != nil { - d.apiReject(fn, err.Error()) - return - } - - d.apiSuccessYAML(fn, string(bs)) -} - -func (d *Discovery) getJobConfigSchema(fn functions.Function) { - if err := d.verifyFn(fn, 1); err != nil { - d.apiReject(fn, err.Error()) - return - } - - name := fn.Args[0] - - v, ok := d.Modules[name] - if !ok { - msg := jsonErrorf("module %s is not registered", name) - d.apiReject(fn, msg) - return - } - - d.apiSuccessJSON(fn, v.JobConfigSchema) -} - -func (d *Discovery) setJobConfig(fn 
functions.Function) { - if err := d.verifyFn(fn, 2); err != nil { - d.apiReject(fn, err.Error()) - return - } - - var cfg confgroup.Config - if err := yaml.NewDecoder(bytes.NewBuffer(fn.Payload)).Decode(&cfg); err != nil { - d.apiReject(fn, err.Error()) - return - } - - modName, jobName := fn.Args[0], fn.Args[1] - def, _ := d.ModuleConfigDefaults.Lookup(modName) - src := source(modName, jobName) - - cfg.SetProvider(dynCfg) - cfg.SetSource(src) - cfg.SetModule(modName) - cfg.SetName(jobName) - cfg.Apply(def) - - d.in <- []*confgroup.Group{ - { - Configs: []confgroup.Config{cfg}, - Source: src, - }, - } - - d.apiSuccessJSON(fn, "") -} - -func (d *Discovery) deleteJobName(fn functions.Function) { - if err := d.verifyFn(fn, 2); err != nil { - d.apiReject(fn, err.Error()) - return - } - - modName, jobName := fn.Args[0], fn.Args[1] - - cfg, ok := d.getConfig(modName + "_" + jobName) - if !ok { - d.apiReject(fn, jsonErrorf("module '%s' job '%s': not registered", modName, jobName)) - return - } - if cfg.Provider() != dynCfg { - d.apiReject(fn, jsonErrorf("module '%s' job '%s': can't remove non Dyncfg job", modName, jobName)) - return - } - - d.in <- []*confgroup.Group{ - { - Configs: []confgroup.Config{}, - Source: source(modName, jobName), - }, - } - - d.apiSuccessJSON(fn, "") -} - -func (d *Discovery) apiSuccessJSON(fn functions.Function, payload string) { - _ = d.API.FunctionResultSuccess(fn.UID, "application/json", payload) -} - -func (d *Discovery) apiSuccessYAML(fn functions.Function, payload string) { - _ = d.API.FunctionResultSuccess(fn.UID, "application/x-yaml", payload) -} - -func (d *Discovery) apiReject(fn functions.Function, msg string) { - _ = d.API.FunctionResultReject(fn.UID, "application/json", msg) -} - -func (d *Discovery) notImplemented(fn functions.Function) { - d.Infof("not implemented: '%s'", fn.String()) - msg := jsonErrorf("function '%s' is not implemented", fn.Name) - d.apiReject(fn, msg) -} - -func (d *Discovery) verifyFn(fn functions.Function, wantArgs int) error { - if got := len(fn.Args); got != wantArgs { - msg := jsonErrorf("wrong number of arguments: want %d, got %d (args: '%v')", wantArgs, got, fn.Args) - return fmt.Errorf(msg) - } - - if isSetFunction(fn) && len(fn.Payload) == 0 { - msg := jsonErrorf("no payload") - return fmt.Errorf(msg) - } - - return nil -} - -func jsonErrorf(format string, a ...any) string { - msg := fmt.Sprintf(format, a...) 
- msg = strings.ReplaceAll(msg, "\n", " ") - - return fmt.Sprintf(`{ "error": "%s" }`+"\n", msg) -} - -func source(modName, jobName string) string { - return fmt.Sprintf("%s/%s/%s", dynCfg, modName, jobName) -} - -func cfgJobName(cfg confgroup.Config) string { - if strings.HasPrefix(cfg.Source(), "dyncfg") { - return cfg.Name() - } - return cfg.NameWithHash() -} - -func isSetFunction(fn functions.Function) bool { - return strings.HasPrefix(fn.Name, "set_") -} diff --git a/agent/discovery/dyncfg/dyncfg_test.go b/agent/discovery/dyncfg/dyncfg_test.go deleted file mode 100644 index 3eee1cef3..000000000 --- a/agent/discovery/dyncfg/dyncfg_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package dyncfg - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/netdata/go.d.plugin/agent/confgroup" - "github.com/netdata/go.d.plugin/agent/functions" - "github.com/netdata/go.d.plugin/agent/module" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewDiscovery(t *testing.T) { - -} - -func TestDiscovery_Register(t *testing.T) { - tests := map[string]struct { - regConfigs []confgroup.Config - wantApiStats *mockApi - wantConfigs int - }{ - "register jobs created by Dyncfg and other providers": { - regConfigs: []confgroup.Config{ - prepareConfig( - "__provider__", dynCfg, - "module", "test", - "name", "first", - ), - prepareConfig( - "__provider__", "test", - "module", "test", - "name", "second", - ), - }, - wantConfigs: 2, - wantApiStats: &mockApi{ - callsDynCfgRegisterJob: 1, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - var mock mockApi - d := &Discovery{ - API: &mock, - mux: &sync.Mutex{}, - configs: make(map[string]confgroup.Config), - } - - for _, v := range test.regConfigs { - d.Register(v) - } - - assert.Equal(t, test.wantApiStats, &mock) - assert.Equal(t, test.wantConfigs, len(d.configs)) - }) - } -} - -func TestDiscovery_Unregister(t *testing.T) { - tests := map[string]struct { - regConfigs []confgroup.Config - unregConfigs []confgroup.Config - wantApiStats *mockApi - wantConfigs int - }{ - "register/unregister jobs created by Dyncfg and other providers": { - wantConfigs: 0, - wantApiStats: &mockApi{ - callsDynCfgRegisterJob: 1, - }, - regConfigs: []confgroup.Config{ - prepareConfig( - "__provider__", dynCfg, - "module", "test", - "name", "first", - ), - prepareConfig( - "__provider__", "test", - "module", "test", - "name", "second", - ), - }, - unregConfigs: []confgroup.Config{ - prepareConfig( - "__provider__", dynCfg, - "module", "test", - "name", "first", - ), - prepareConfig( - "__provider__", "test", - "module", "test", - "name", "second", - ), - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - var mock mockApi - d := &Discovery{ - API: &mock, - mux: &sync.Mutex{}, - configs: make(map[string]confgroup.Config), - } - - for _, v := range test.regConfigs { - d.Register(v) - } - for _, v := range test.unregConfigs { - d.Unregister(v) - } - - assert.Equal(t, test.wantApiStats, &mock) - assert.Equal(t, test.wantConfigs, len(d.configs)) - }) - } -} - -func TestDiscovery_UpdateStatus(t *testing.T) { - -} - -func TestDiscovery_Run(t *testing.T) { - tests := map[string]struct { - wantApiStats *mockApi - }{ - "default run": { - wantApiStats: &mockApi{ - callsDynCfgEnable: 1, - callsDyncCfgRegisterModule: 2, - callsRegister: 10, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - var mock 
mockApi - d, err := NewDiscovery(Config{ - Plugin: "test", - API: &mock, - Functions: &mock, - Modules: module.Registry{ - "module1": module.Creator{}, - "module2": module.Creator{}, - }, - ModuleConfigDefaults: nil, - }) - require.Nil(t, err) - - testTime := time.Second * 3 - ctx, cancel := context.WithTimeout(context.Background(), testTime) - defer cancel() - - in := make(chan<- []*confgroup.Group) - done := make(chan struct{}) - - go func() { defer close(done); d.Run(ctx, in) }() - - timeout := testTime + time.Second*2 - tk := time.NewTimer(timeout) - defer tk.Stop() - - select { - case <-done: - assert.Equal(t, test.wantApiStats, &mock) - case <-tk.C: - t.Errorf("timed out after %s", timeout) - } - }) - } -} - -type mockApi struct { - callsDynCfgEnable int - callsDyncCfgRegisterModule int - callsDynCfgRegisterJob int - callsDynCfgReportJobStatus int - callsFunctionResultSuccess int - callsFunctionResultReject int - - callsRegister int -} - -func (m *mockApi) Register(string, func(functions.Function)) { - m.callsRegister++ -} - -func (m *mockApi) DynCfgEnable(string) error { - m.callsDynCfgEnable++ - return nil -} - -func (m *mockApi) DynCfgReset() error { - return nil -} - -func (m *mockApi) DyncCfgRegisterModule(string) error { - m.callsDyncCfgRegisterModule++ - return nil -} - -func (m *mockApi) DynCfgRegisterJob(_, _, _ string) error { - m.callsDynCfgRegisterJob++ - return nil -} - -func (m *mockApi) DynCfgReportJobStatus(_, _, _, _ string) error { - m.callsDynCfgReportJobStatus++ - return nil -} - -func (m *mockApi) FunctionResultSuccess(_, _, _ string) error { - m.callsFunctionResultSuccess++ - return nil -} - -func (m *mockApi) FunctionResultReject(_, _, _ string) error { - m.callsFunctionResultReject++ - return nil -} - -func prepareConfig(values ...string) confgroup.Config { - cfg := confgroup.Config{} - for i := 1; i < len(values); i += 2 { - cfg[values[i-1]] = values[i] - } - return cfg -} diff --git a/agent/discovery/dyncfg/ext.go b/agent/discovery/dyncfg/ext.go deleted file mode 100644 index 910475c3d..000000000 --- a/agent/discovery/dyncfg/ext.go +++ /dev/null @@ -1,79 +0,0 @@ -package dyncfg - -import ( - "errors" - "os" - "strings" - - "github.com/netdata/go.d.plugin/agent/confgroup" - - "gopkg.in/yaml.v2" -) - -func (d *Discovery) Register(cfg confgroup.Config) { - name := cfgJobName(cfg) - if cfg.Provider() != dynCfg { - // jobType handling in ND is not documented - _ = d.API.DynCfgRegisterJob(cfg.Module(), name, "stock") - } - - key := cfg.Module() + "_" + name - d.addConfig(key, cfg) -} - -func (d *Discovery) Unregister(cfg confgroup.Config) { - key := cfg.Module() + "_" + cfgJobName(cfg) - d.removeConfig(key) -} - -func (d *Discovery) UpdateStatus(cfg confgroup.Config, status, payload string) { - _ = d.API.DynCfgReportJobStatus(cfg.Module(), cfgJobName(cfg), status, payload) -} - -func (d *Discovery) addConfig(name string, cfg confgroup.Config) { - d.mux.Lock() - defer d.mux.Unlock() - - d.configs[name] = cfg -} - -func (d *Discovery) removeConfig(key string) { - d.mux.Lock() - defer d.mux.Unlock() - - delete(d.configs, key) -} - -func (d *Discovery) getConfig(key string) (confgroup.Config, bool) { - d.mux.Lock() - defer d.mux.Unlock() - - v, ok := d.configs[key] - return v, ok -} - -func (d *Discovery) getConfigBytes(key string) ([]byte, error) { - d.mux.Lock() - defer d.mux.Unlock() - - cfg, ok := d.configs[key] - if !ok { - return nil, errors.New("config not found") - } - - bs, err := yaml.Marshal(cfg) - if err != nil { - return nil, err - } - - return bs, nil -} 
- -var envNDStockConfigDir = os.Getenv("NETDATA_STOCK_CONFIG_DIR") - -func isStock(cfg confgroup.Config) bool { - if envNDStockConfigDir == "" { - return false - } - return strings.HasPrefix(cfg.Source(), envNDStockConfigDir) -} diff --git a/agent/discovery/file/parse.go b/agent/discovery/file/parse.go index b6ba52372..9fcb45faf 100644 --- a/agent/discovery/file/parse.go +++ b/agent/discovery/file/parse.go @@ -61,11 +61,12 @@ func parseStaticFormat(reg confgroup.Registry, path string, bs []byte) (*confgro for _, cfg := range modCfg.Jobs { cfg.SetModule(name) def := mergeDef(modCfg.Default, modDef) - cfg.Apply(def) + cfg.ApplyDefaults(def) } group := &confgroup.Group{ - Configs: modCfg.Jobs, - Source: path, + Configs: modCfg.Jobs, + Source: path, + SourceType: configSourceType(path), } return group, nil } @@ -79,16 +80,18 @@ func parseSDFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.G var i int for _, cfg := range cfgs { if def, ok := reg.Lookup(cfg.Module()); ok && cfg.Module() != "" { - cfg.Apply(def) + cfg.ApplyDefaults(def) cfgs[i] = cfg i++ } } group := &confgroup.Group{ - Configs: cfgs[:i], - Source: path, + Configs: cfgs[:i], + Source: path, + SourceType: configSourceType(path), } + return group, nil } diff --git a/agent/discovery/file/read.go b/agent/discovery/file/read.go index 3d27955ad..2c271995c 100644 --- a/agent/discovery/file/read.go +++ b/agent/discovery/file/read.go @@ -6,6 +6,7 @@ import ( "context" "os" "path/filepath" + "strings" "github.com/netdata/go.d.plugin/agent/confgroup" "github.com/netdata/go.d.plugin/logger" @@ -72,7 +73,7 @@ func (r *Reader) groups() (groups []*confgroup.Group) { continue } if group == nil { - group = &confgroup.Group{Source: path} + group = &confgroup.Group{Source: path, SourceType: configSourceType(path)} } groups = append(groups, group) } @@ -81,9 +82,17 @@ func (r *Reader) groups() (groups []*confgroup.Group) { for _, group := range groups { for _, cfg := range group.Configs { cfg.SetSource(group.Source) + cfg.SetSourceType(group.SourceType) cfg.SetProvider(r.Name()) } } return groups } + +func configSourceType(path string) string { + if strings.Contains(path, "/etc/netdata") { + return "user" + } + return "stock" +} diff --git a/agent/discovery/file/watch.go b/agent/discovery/file/watch.go index e33aac3ec..b73674274 100644 --- a/agent/discovery/file/watch.go +++ b/agent/discovery/file/watch.go @@ -148,7 +148,7 @@ func (w *Watcher) refresh(ctx context.Context, in chan<- []*confgroup.Group) { if group, err := parse(w.reg, file); err != nil { w.Warningf("parse '%s': %v", file, err) } else if group == nil { - groups = append(groups, &confgroup.Group{Source: file}) + groups = append(groups, &confgroup.Group{Source: file, SourceType: configSourceType(file)}) } else { groups = append(groups, group) } @@ -165,11 +165,13 @@ func (w *Watcher) refresh(ctx context.Context, in chan<- []*confgroup.Group) { for _, group := range groups { for _, cfg := range group.Configs { cfg.SetSource(group.Source) + cfg.SetSourceType(group.SourceType) cfg.SetProvider("file watcher") } } send(ctx, in, groups) + w.watchDirs() } @@ -202,7 +204,6 @@ func (w *Watcher) stop() { } }() - // in fact never returns an error _ = w.watcher.Close() } diff --git a/agent/discovery/manager.go b/agent/discovery/manager.go index 3ab1ab6af..433241958 100644 --- a/agent/discovery/manager.go +++ b/agent/discovery/manager.go @@ -56,9 +56,9 @@ func (m *Manager) String() string { return fmt.Sprintf("discovery manager: %v", m.discoverers) } -func (m *Manager) Add(d 
discoverer) { - m.discoverers = append(m.discoverers, d) -} +//func (m *Manager) Add(d discoverer) { +// m.discoverers = append(m.discoverers, d) +//} func (m *Manager) Run(ctx context.Context, in chan<- []*confgroup.Group) { m.Info("instance is started") @@ -91,7 +91,7 @@ func (m *Manager) registerDiscoverers(cfg Config) error { if err != nil { return err } - m.Add(d) + m.discoverers = append(m.discoverers, d) } if len(cfg.Dummy.Names) > 0 { @@ -100,7 +100,7 @@ func (m *Manager) registerDiscoverers(cfg Config) error { if err != nil { return err } - m.Add(d) + m.discoverers = append(m.discoverers, d) } if len(m.discoverers) == 0 { diff --git a/agent/discovery/sd/pipeline/pipeline.go b/agent/discovery/sd/pipeline/pipeline.go index 1a1eb69f9..025a944b2 100644 --- a/agent/discovery/sd/pipeline/pipeline.go +++ b/agent/discovery/sd/pipeline/pipeline.go @@ -151,6 +151,7 @@ func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group { for _, cfg := range configs { cfg.SetProvider(tgg.Provider()) cfg.SetSource(tgg.Source()) + cfg.SetSourceType("discovered") } targetsCache[hash] = configs changed = true @@ -175,7 +176,11 @@ func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group { } // TODO: deepcopy? - cfgGroup := &confgroup.Group{Source: tgg.Source()} + cfgGroup := &confgroup.Group{ + Source: tgg.Source(), + SourceType: "discovered", + } + for _, cfgs := range targetsCache { cfgGroup.Configs = append(cfgGroup.Configs, cfgs...) } diff --git a/agent/functions/function.go b/agent/functions/function.go index 46a728994..c23301c4d 100644 --- a/agent/functions/function.go +++ b/agent/functions/function.go @@ -13,17 +13,20 @@ import ( ) type Function struct { - key string - UID string - Timeout time.Duration - Name string - Args []string - Payload []byte + key string + UID string + Timeout time.Duration + Name string + Args []string + Payload []byte + Permissions string + Source string + ContentType string } func (f *Function) String() string { - return fmt.Sprintf("key: %s, uid: %s, timeout: %s, function: %s, args: %v, payload: %s", - f.key, f.UID, f.Timeout, f.Name, f.Args, string(f.Payload)) + return fmt.Sprintf("key: '%s', uid: '%s', timeout: '%s', function: '%s', args: '%v', permissions: '%s', source: '%s', contentType: '%s', payload: '%s'", + f.key, f.UID, f.Timeout, f.Name, f.Args, f.Permissions, f.Source, f.ContentType, string(f.Payload)) } func parseFunction(s string) (*Function, error) { @@ -34,8 +37,9 @@ func parseFunction(s string) (*Function, error) { if err != nil { return nil, err } - if len(parts) != 4 { - return nil, fmt.Errorf("unexpected number of words: want 4, got %d (%v)", len(parts), parts) + + if n := len(parts); n != 6 && n != 7 { + return nil, fmt.Errorf("unexpected number of words: want 6 or 7, got %d (%v)", n, parts) } timeout, err := strconv.ParseInt(parts[2], 10, 64) @@ -43,14 +47,21 @@ func parseFunction(s string) (*Function, error) { return nil, err } + // 'FUNCTION_PAYLOAD 5d50db31d7e446768809b95382789257 120 \"config go.d:collector:example:jobs add example3\" \"method=api,role=god,ip=10.20.4.44\" \"text/yaml\"' cmd := strings.Split(parts[3], " ") fn := &Function{ - key: parts[0], - UID: parts[1], - Timeout: time.Duration(timeout) * time.Second, - Name: cmd[0], - Args: cmd[1:], + key: parts[0], + UID: parts[1], + Timeout: time.Duration(timeout) * time.Second, + Name: cmd[0], + Args: cmd[1:], + Permissions: parts[4], + Source: parts[5], + } + + if len(parts) == 7 { + fn.ContentType = parts[6] } return fn, nil diff --git 
a/agent/functions/manager.go b/agent/functions/manager.go
index 760780cff..0353691c5 100644
--- a/agent/functions/manager.go
+++ b/agent/functions/manager.go
@@ -5,12 +5,15 @@ package functions
 import (
 	"bufio"
 	"context"
+	"fmt"
 	"io"
 	"log/slog"
 	"os"
 	"strings"
 	"sync"
 
+	"github.com/netdata/go.d.plugin/agent/netdataapi"
+	"github.com/netdata/go.d.plugin/agent/safewriter"
 	"github.com/netdata/go.d.plugin/logger"
 
 	"github.com/mattn/go-isatty"
@@ -25,6 +28,7 @@ func NewManager() *Manager {
 			slog.String("component", "functions manager"),
 		),
 		Input:            os.Stdin,
+		api:              netdataapi.New(safewriter.Stdout),
 		mux:              &sync.Mutex{},
 		FunctionRegistry: make(map[string]func(Function)),
 	}
@@ -34,18 +38,11 @@ type Manager struct {
 	*logger.Logger
 
 	Input            io.Reader
+	api              *netdataapi.API
 	mux              *sync.Mutex
 	FunctionRegistry map[string]func(Function)
 }
 
-func (m *Manager) Register(name string, fn func(Function)) {
-	if fn == nil {
-		m.Warningf("not registering '%s': nil function", name)
-		return
-	}
-	m.addFunction(name, fn)
-}
-
 func (m *Manager) Run(ctx context.Context) {
 	m.Info("instance is started")
 	defer func() { m.Info("instance is stopped") }()
@@ -84,8 +81,10 @@ func (m *Manager) run(r io.Reader) {
 		// we need to discard the current one and switch to the new one
 		switch {
 		case strings.HasPrefix(text, "FUNCTION "):
+			//m.Infof("RAW FUNCTION: '%s'", text)
 			fn, err = parseFunction(text)
 		case strings.HasPrefix(text, "FUNCTION_PAYLOAD "):
+			//m.Infof("RAW FUNCTION: '%s'", text)
 			fn, err = parseFunctionWithPayload(text, sc)
 		case text == "":
 			continue
@@ -102,19 +101,26 @@ func (m *Manager) run(r io.Reader) {
 		function, ok := m.lookupFunction(fn.Name)
 		if !ok {
 			m.Infof("skipping execution of '%s': unregistered function", fn.Name)
+			m.api.FUNCRESULT(fn.UID, "application/json", jsonErrorf("unregistered function: %s", fn.Name), "501")
 			continue
 		}
 		if function == nil {
 			m.Warningf("skipping execution of '%s': nil function registered", fn.Name)
+			m.api.FUNCRESULT(fn.UID, "application/json", jsonErrorf("nil function: %s", fn.Name), "501")
 			continue
 		}
 
-		m.Debugf("executing function: '%s'", fn.String())
+		m.Warningf("PARSED FUNCTION: '%s'", fn.String())
 		function(*fn)
 	}
 }
 
-func (m *Manager) addFunction(name string, fn func(Function)) {
+func (m *Manager) Register(name string, fn func(Function)) {
+	if fn == nil {
+		m.Warningf("not registering '%s': nil function", name)
+		return
+	}
+
 	m.mux.Lock()
 	defer m.mux.Unlock()
 
@@ -126,6 +132,16 @@ func (m *Manager) addFunction(name string, fn func(Function)) {
 	m.FunctionRegistry[name] = fn
 }
 
+func (m *Manager) Unregister(name string) {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	if _, ok := m.FunctionRegistry[name]; ok {
+		delete(m.FunctionRegistry, name)
+		m.Debugf("unregistering function '%s'", name)
+	}
+}
+
 func (m *Manager) lookupFunction(name string) (func(Function), bool) {
 	m.mux.Lock()
 	defer m.mux.Unlock()
@@ -133,3 +149,10 @@ func (m *Manager) lookupFunction(name string) (func(Function), bool) {
 	f, ok := m.FunctionRegistry[name]
 	return f, ok
 }
+
+func jsonErrorf(format string, a ...any) string {
+	msg := fmt.Sprintf(format, a...)
+ msg = strings.ReplaceAll(msg, "\n", " ") + + return fmt.Sprintf(`{ "error": "%s" }`+"\n", msg) +} diff --git a/agent/jobmgr/cache.go b/agent/jobmgr/cache.go index 53a1f7325..422674562 100644 --- a/agent/jobmgr/cache.go +++ b/agent/jobmgr/cache.go @@ -4,22 +4,72 @@ package jobmgr import ( "context" + "sync" "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/module" ) -func newRunningJobsCache() *runningJobsCache { - return &runningJobsCache{} +func newDiscoveredConfigsCache() *discoveredConfigs { + return &discoveredConfigs{ + items: make(map[string]map[uint64]confgroup.Config), + } +} + +func newSeenConfigCache() *seenConfigs { + return &seenConfigs{ + items: make(map[string]*seenConfig), + } +} + +func newExposedConfigCache() *exposedConfigs { + return &exposedConfigs{ + items: make(map[string]*seenConfig), + } } -func newRetryingJobsCache() *retryingJobsCache { - return &retryingJobsCache{} +func newRunningJobsCache() *runningJobs { + return &runningJobs{ + mux: sync.Mutex{}, + items: make(map[string]*module.Job), + } +} + +func newRetryingTasksCache() *retryingTasks { + return &retryingTasks{ + items: make(map[string]*retryTask), + } } type ( - runningJobsCache map[string]bool - retryingJobsCache map[uint64]retryTask + discoveredConfigs struct { + // [Source][Hash] + items map[string]map[uint64]confgroup.Config + } + seenConfigs struct { + // [cfg.UID()] + items map[string]*seenConfig + } + exposedConfigs struct { + // [cfg.FullName()] + items map[string]*seenConfig + } + seenConfig struct { + cfg confgroup.Config + status dyncfgStatus + } + + runningJobs struct { + mux sync.Mutex + // [cfg.FullName()] + items map[string]*module.Job + } + + retryingTasks struct { + // [cfg.UID()] + items map[string]*retryTask + } retryTask struct { cancel context.CancelFunc timeout int @@ -27,23 +77,112 @@ type ( } ) -func (c runningJobsCache) put(cfg confgroup.Config) { - c[cfg.FullName()] = true +func (c *discoveredConfigs) add(group *confgroup.Group) (added, removed []confgroup.Config) { + cfgs, ok := c.items[group.Source] + if !ok { + cfgs = make(map[uint64]confgroup.Config) + c.items[group.Source] = cfgs + } + + seen := make(map[uint64]bool) + + for _, cfg := range group.Configs { + hash := cfg.Hash() + seen[hash] = true + + if _, ok := cfgs[hash]; ok { + continue + } + + cfgs[hash] = cfg + added = append(added, cfg) + } + + for hash, cfg := range cfgs { + if !seen[hash] { + delete(cfgs, hash) + removed = append(removed, cfg) + } + } + + if len(cfgs) == 0 { + delete(c.items, group.Source) + } + + return added, removed +} + +func (c *seenConfigs) add(sj *seenConfig) { + c.items[sj.cfg.UID()] = sj +} +func (c *seenConfigs) remove(cfg confgroup.Config) { + delete(c.items, cfg.UID()) +} +func (c *seenConfigs) lookup(cfg confgroup.Config) (*seenConfig, bool) { + v, ok := c.items[cfg.UID()] + return v, ok +} + +func (c *exposedConfigs) add(sj *seenConfig) { + c.items[sj.cfg.FullName()] = sj +} +func (c *exposedConfigs) remove(cfg confgroup.Config) { + delete(c.items, cfg.FullName()) +} +func (c *exposedConfigs) lookup(cfg confgroup.Config) (*seenConfig, bool) { + v, ok := c.items[cfg.FullName()] + return v, ok } -func (c runningJobsCache) remove(cfg confgroup.Config) { - delete(c, cfg.FullName()) + +func (c *exposedConfigs) lookupByName(module, job string) (*seenConfig, bool) { + key := module + "_" + job + if module == job { + key = job + } + v, ok := c.items[key] + return v, ok +} + +func (c *runningJobs) lock() { + c.mux.Lock() +} +func (c *runningJobs) unlock() 
{ + c.mux.Unlock() +} +func (c *runningJobs) add(fullName string, job *module.Job) { + c.items[fullName] = job } -func (c runningJobsCache) has(cfg confgroup.Config) bool { - return c[cfg.FullName()] +func (c *runningJobs) remove(fullName string) { + delete(c.items, fullName) +} +func (c *runningJobs) has(fullName string) bool { + _, ok := c.lookup(fullName) + return ok +} +func (c *runningJobs) lookup(fullName string) (*module.Job, bool) { + j, ok := c.items[fullName] + return j, ok +} +func (c *runningJobs) forEach(fn func(fullName string, job *module.Job)) { + for k, j := range c.items { + fn(k, j) + } } -func (c retryingJobsCache) put(cfg confgroup.Config, retry retryTask) { - c[cfg.Hash()] = retry +func (c *retryingTasks) add(cfg confgroup.Config, retry *retryTask) { + c.items[cfg.UID()] = retry +} +func (c *retryingTasks) remove(cfg confgroup.Config) { + if v, ok := c.lookup(cfg); ok { + v.cancel() + } + delete(c.items, cfg.UID()) } -func (c retryingJobsCache) remove(cfg confgroup.Config) { - delete(c, cfg.Hash()) +func (c *retryingTasks) has(cfg confgroup.Config) bool { + _, ok := c.items[cfg.UID()] + return ok } -func (c retryingJobsCache) lookup(cfg confgroup.Config) (retryTask, bool) { - v, ok := c[cfg.Hash()] +func (c *retryingTasks) lookup(cfg confgroup.Config) (*retryTask, bool) { + v, ok := c.items[cfg.UID()] return v, ok } diff --git a/agent/jobmgr/di.go b/agent/jobmgr/di.go index fa567b2ce..98a274877 100644 --- a/agent/jobmgr/di.go +++ b/agent/jobmgr/di.go @@ -4,6 +4,7 @@ package jobmgr import ( "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/functions" "github.com/netdata/go.d.plugin/agent/vnodes" ) @@ -12,21 +13,27 @@ type FileLocker interface { Unlock(name string) error } -type Vnodes interface { - Lookup(key string) (*vnodes.VirtualNode, bool) -} - -type StatusSaver interface { +type FileStatus interface { Save(cfg confgroup.Config, state string) Remove(cfg confgroup.Config) } -type StatusStore interface { +type FileStatusStore interface { Contains(cfg confgroup.Config, states ...string) bool } -type Dyncfg interface { - Register(cfg confgroup.Config) - Unregister(cfg confgroup.Config) - UpdateStatus(cfg confgroup.Config, status, payload string) +type Vnodes interface { + Lookup(key string) (*vnodes.VirtualNode, bool) +} + +type FunctionRegistry interface { + Register(name string, reg func(functions.Function)) + Unregister(name string) +} + +type DyncfgAPI interface { + CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string) + CONFIGDELETE(id string) + CONFIGSTATUS(id, status string) + FUNCRESULT(uid, contentType, payload, code string) } diff --git a/agent/jobmgr/dyncfg.go b/agent/jobmgr/dyncfg.go new file mode 100644 index 000000000..f24ac2147 --- /dev/null +++ b/agent/jobmgr/dyncfg.go @@ -0,0 +1,588 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package jobmgr + +import ( + "encoding/json" + "fmt" + "log/slog" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/functions" + "github.com/netdata/go.d.plugin/logger" + + "gopkg.in/yaml.v2" +) + +type dyncfgStatus int + +const ( + _ dyncfgStatus = iota + dyncfgAccepted + dyncfgRunning + dyncfgFailed + dyncfgIncomplete + dyncfgDisabled +) + +func (s dyncfgStatus) String() string { + switch s { + case dyncfgAccepted: + return "accepted" + case dyncfgRunning: + return "running" + case dyncfgFailed: + return "failed" + case dyncfgIncomplete: + return "incomplete" + case dyncfgDisabled: 
+ return "disabled" + default: + return "unknown" + } +} + +func dyncfgModPath(name string) string { + return fmt.Sprintf("/collectors/%s", name) +} +func dyncfgJobPath(cfg confgroup.Config) string { + return fmt.Sprintf("/collectors/%s", cfg.Module()) +} + +func dyncfgModID(name string) string { + return fmt.Sprintf("go.d:collector:%s:jobs", name) +} +func dyncfgJobID(cfg confgroup.Config) string { + return fmt.Sprintf("go.d:collector:%s:jobs:%s", cfg.Module(), cfg.Name()) +} + +func dyncfgModCommands() string { + return "add schema enable disable test" +} +func dyncfgJobCommands(cfg confgroup.Config) string { + cmds := "schema get enable disable update restart" + if cfg.SourceType() == "dyncfg" { + cmds += " remove" + } + return cmds +} + +func (m *Manager) dyncfgNotifyAddModule(name string) { + id := dyncfgModID(name) + path := dyncfgModPath(name) + cmds := dyncfgModCommands() + typ := "template" + src := "internal" + m.api.CONFIGCREATE(id, dyncfgAccepted.String(), typ, path, src, src, cmds) +} + +func (m *Manager) dyncfgNotifyAddJob(cfg confgroup.Config, status dyncfgStatus) { + id := dyncfgJobID(cfg) + path := dyncfgJobPath(cfg) + cmds := dyncfgJobCommands(cfg) + typ := "job" + m.api.CONFIGCREATE(id, status.String(), typ, path, cfg.SourceType(), cfg.Source(), cmds) +} + +func (m *Manager) dyncfgNotifyRemoveJob(cfg confgroup.Config) { + m.api.CONFIGDELETE(dyncfgJobID(cfg)) +} + +func (m *Manager) dyncfgNotifyJobStatus(cfg confgroup.Config, status dyncfgStatus) { + m.api.CONFIGSTATUS(dyncfgJobID(cfg), status.String()) +} + +func (m *Manager) dyncfgConfig(fn functions.Function) { + if len(fn.Args) < 2 { + m.dyncfgResponsef(fn, 501, "not enough arguments, want at least 2, got %d", len(fn.Args)) + return + } + + m.mux.Lock() + defer m.mux.Unlock() + + select { + case <-m.ctx.Done(): + m.dyncfgResponsef(fn, 501, "job manager is shutting down") + return + default: + } + + action := strings.ToLower(fn.Args[1]) + + switch action { + case "test": + m.dyncfgConfigTest(fn) + case "schema": + m.dyncfgConfigSchema(fn) + case "get": + m.dyncfgConfigGet(fn) + case "remove": + m.dyncfgConfigRemove(fn) + case "restart": + m.dyncfgConfigRestart(fn) + case "enable": + m.dyncfgConfigEnable(fn) + case "disable": + m.dyncfgConfigDisable(fn) + case "add": + m.dyncfgConfigAdd(fn) + case "update": + m.dyncfgConfigUpdate(fn) + default: + m.Warningf("dyncfg: not implemented: '%s'", fn.String()) + m.dyncfgResponsef(fn, 501, "function '%s' is not implemented", fn.Name) + } +} + +func (m *Manager) dyncfgConfigTest(fn functions.Function) { + id := fn.Args[0] + mn, ok := extractModuleName(id) + if !ok { + m.Warningf("dyncfg[test]: can not extract module and from id (%s)", id) + m.dyncfgResponsef(fn, 501, "can not extract module and from id (%s)", id) + return + } + + creator, ok := m.Modules.Lookup(mn) + if !ok { + m.dyncfgResponsef(fn, 501, "module %s is not found", mn) + return + } + + cfg, err := configFromPayload(fn) + if err != nil { + m.dyncfgResponsef(fn, 501, "failed to parse config: %v", err) + return + } + + cfg.SetModule(mn) + cfg.SetName("test") + + job := creator.Create() + + if err := applyConfig(cfg, job); err != nil { + m.dyncfgResponsef(fn, 501, "failed to apply config: %v", err) + return + } + + job.GetBase().Logger = logger.New().With( + slog.String("collector", cfg.Module()), + slog.String("job", cfg.Name()), + ) + + defer job.Cleanup() + + if err := job.Init(); err != nil { + m.dyncfgResponsef(fn, 501, "job init failed: %v", err) + return + } + if err := job.Check(); err != nil { + 
m.dyncfgResponsef(fn, 501, "job detection failed: %v", err) + return + } + + m.dyncfgResponsef(fn, 200, "") +} + +func (m *Manager) dyncfgConfigSchema(fn functions.Function) { + id := fn.Args[0] + mn, ok := extractModuleName(id) + if !ok { + m.dyncfgResponsef(fn, 501, "can not extract module name from id (%s)", id) + return + } + + mod, ok := m.Modules.Lookup(mn) + if !ok { + m.dyncfgResponsef(fn, 501, "module %s is not found", mn) + return + } + + if mod.JobConfigSchema == "" { + m.dyncfgResponsef(fn, 501, "module %s has not schema", mn) + return + } + + m.dyncfgResponsePayload(fn, mod.JobConfigSchema) +} + +func (m *Manager) dyncfgConfigGet(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.dyncfgResponsef(fn, 501, "can not extract module and job name from id (%s)", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.dyncfgResponsef(fn, 501, "module %s job %s is not found", mn, jn) + return + } + + creator, ok := m.Modules.Lookup(ecfg.cfg.Module()) + if !ok { + m.dyncfgResponsef(fn, 501, "module %s is not found", mn) + return + } + + mod := creator.Create() + + if err := applyConfig(ecfg.cfg, mod); err != nil { + m.dyncfgResponsef(fn, 501, "failed to apply config: %v", err) + return + } + + conf := mod.Configuration() + if conf == nil { + m.dyncfgResponsef(fn, 501, "module does not provide configuration") + return + } + + bs, err := json.Marshal(conf) + if err != nil { + m.dyncfgResponsef(fn, 501, "failed to convert configuration to JSON: %v", err) + return + } + + m.dyncfgResponsePayload(fn, string(bs)) +} + +func (m *Manager) dyncfgConfigAdd(fn functions.Function) { + if len(fn.Args) < 3 { + m.dyncfgResponsef(fn, 501, "not enough arguments, want 3, got %d", len(fn.Args)) + return + } + if len(fn.Payload) == 0 { + m.dyncfgResponsef(fn, 501, "empty payload") + return + } + + id := fn.Args[0] + jn := fn.Args[2] + mn, ok := extractModuleName(id) + if !ok { + m.dyncfgResponsef(fn, 501, "can not extract module name from id (%s)", id) + return + } + + cfg, err := configFromPayload(fn) + if err != nil { + m.dyncfgResponsef(fn, 501, "failed to parse config: %v", err) + return + } + + m.dyncfgSetConfigMeta(cfg, mn, jn) + + scfg := &seenConfig{cfg: cfg} + m.seenConfigs.add(scfg) + + ecfg, ok := m.exposedConfigs.lookup(cfg) + if ok { + m.exposedConfigs.remove(ecfg.cfg) + m.stopRunningJob(ecfg.cfg.FullName()) + } + ecfg = scfg + m.exposedConfigs.add(ecfg) + + if _, err := m.createCollectorJob(ecfg.cfg); err != nil { + ecfg.status = dyncfgFailed + m.dyncfgResponsef(fn, 501, "failed to create config: %v", err) + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + } + + ecfg.status = dyncfgAccepted + m.dyncfgResponsef(fn, 200, "") + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) +} + +func (m *Manager) dyncfgConfigRemove(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.dyncfgResponsef(fn, 501, "can not extract module and job name from id (%s)", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.dyncfgResponsef(fn, 501, "module %s job %s is not found", mn, jn) + return + } + + if ecfg.cfg.SourceType() != "dyncfg" { + m.dyncfgResponsef(fn, 501, "job type is %s, need dyncfg", ecfg.cfg.SourceType()) + return + } + + m.seenConfigs.remove(ecfg.cfg) + m.exposedConfigs.remove(ecfg.cfg) + m.stopRunningJob(ecfg.cfg.FullName()) + + m.dyncfgResponsef(fn, 200, "") + m.dyncfgNotifyRemoveJob(ecfg.cfg) +} + +func (m *Manager) 
dyncfgConfigRestart(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.dyncfgResponsef(fn, 501, "can not extract module and job name from id (%s)", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.dyncfgResponsef(fn, 501, "job not found") + return + } + + job, err := m.createCollectorJob(ecfg.cfg) + if err != nil { + m.dyncfgResponsef(fn, 501, "failed to create job: %v", err) + return + } + + switch ecfg.status { + case dyncfgAccepted: + m.dyncfgResponsef(fn, 501, "can not restart not enabled job") + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + case dyncfgDisabled: + m.dyncfgResponsef(fn, 501, "can not restart disabled job") + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + case dyncfgRunning: + m.stopRunningJob(ecfg.cfg.FullName()) + default: + } + + if err := job.AutoDetection(); err != nil { + ecfg.status = dyncfgFailed + } else { + ecfg.status = dyncfgRunning + m.startRunningJob(job) + } + + m.dyncfgResponsef(fn, 200, "") + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) +} + +func (m *Manager) dyncfgConfigEnable(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.dyncfgResponsef(fn, 501, "can not extract module and job name from id (%s)", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.dyncfgResponsef(fn, 501, "module %s job %s is not found", mn, jn) + return + } + + switch ecfg.status { + case dyncfgAccepted, dyncfgDisabled: + default: + m.dyncfgResponsef(fn, 200, "") + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + } + + job, err := m.createCollectorJob(ecfg.cfg) + if err != nil { + m.dyncfgResponsef(fn, 501, "failed to create job: %v", err) + ecfg.status = dyncfgFailed + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + } + + // TODO: retry + if err := job.AutoDetection(); err != nil { + m.dyncfgResponsef(fn, 200, "") + ecfg.status = dyncfgFailed + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + } + + m.startRunningJob(job) + + m.dyncfgResponsef(fn, 200, "") + ecfg.status = dyncfgRunning + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + +} + +func (m *Manager) dyncfgConfigDisable(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.dyncfgResponsef(fn, 501, "can not extract module and job name from id (%s)", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.dyncfgResponsef(fn, 501, "module %s job %s is not found", mn, jn) + return + } + + switch ecfg.status { + case dyncfgDisabled: + m.dyncfgResponsef(fn, 200, "") + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + case dyncfgRunning: + m.stopRunningJob(ecfg.cfg.FullName()) + default: + } + + ecfg.status = dyncfgDisabled + m.dyncfgResponsef(fn, 200, "") + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) +} + +func (m *Manager) dyncfgConfigUpdate(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.dyncfgResponsef(fn, 501, "can not extract module and job name from id (%s)", id) + return + } + + cfg, err := configFromPayload(fn) + if err != nil { + m.dyncfgResponsef(fn, 501, "failed to parse config: %v", err) + return + } + + m.dyncfgSetConfigMeta(cfg, mn, jn) + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.dyncfgResponsef(fn, 501, "job not found") + return + } + + if ecfg.status == dyncfgRunning && ecfg.cfg.UID() == cfg.UID() { + m.dyncfgResponsef(fn, 200, "") 
+ m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + } + + job, err := m.createCollectorJob(cfg) + if err != nil { + m.dyncfgResponsef(fn, 501, "failed to create job: %v", err) + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + } + + if ecfg.status == dyncfgAccepted { + m.dyncfgResponsef(fn, 501, "can not update not enabled job") + m.dyncfgNotifyJobStatus(ecfg.cfg, ecfg.status) + return + } + + if ecfg.cfg.SourceType() == "dyncfg" { + m.seenConfigs.remove(ecfg.cfg) + } + m.exposedConfigs.remove(ecfg.cfg) + m.stopRunningJob(ecfg.cfg.FullName()) + + scfg := &seenConfig{cfg: cfg} + m.seenConfigs.add(scfg) + m.exposedConfigs.add(scfg) + + if ecfg.status == dyncfgDisabled { + scfg.status = dyncfgDisabled + } else if err := job.AutoDetection(); err != nil { + scfg.status = dyncfgFailed + } else { + scfg.status = dyncfgRunning + m.startRunningJob(job) + } + + m.dyncfgResponsef(fn, 200, "") + m.dyncfgNotifyJobStatus(cfg, scfg.status) +} + +func (m *Manager) dyncfgSetConfigMeta(cfg confgroup.Config, module, name string) { + src := fmt.Sprintf("type=dyncfg,module=%s,job=%s", module, name) + cfg.SetProvider("dyncfg") + cfg.SetSource(src) + cfg.SetSourceType("dyncfg") + cfg.SetModule(module) + cfg.SetName(name) + if def, ok := m.ConfigDefaults.Lookup(module); ok { + cfg.ApplyDefaults(def) + } +} + +func (m *Manager) dyncfgResponsePayload(fn functions.Function, payload string) { + m.api.FUNCRESULT(fn.UID, "application/json", payload, "200") +} + +func (m *Manager) dyncfgResponsef(fn functions.Function, code int, msgf string, a ...any) { + if fn.UID == "" { + return + } + + bs, _ := json.Marshal(struct { + Status int `json:"status"` + Message string `json:"message"` + }{ + Status: code, + Message: fmt.Sprintf(msgf, a...), + }) + m.api.FUNCRESULT(fn.UID, "application/json", string(bs), strconv.Itoa(code)) +} + +func configFromPayload(fn functions.Function) (confgroup.Config, error) { + var cfg confgroup.Config + + if fn.ContentType != "application/json" { + if err := yaml.Unmarshal(fn.Payload, &cfg); err != nil { + return nil, err + } + + return cfg, nil + } + + if err := json.Unmarshal(fn.Payload, &cfg); err != nil { + return nil, err + } + + return cfg.Clone() +} + +func extractModuleJobName(id string) (mn string, jn string, ok bool) { + if mn, ok = extractModuleName(id); !ok { + return "", "", false + } + if jn, ok = extractJobName(id); !ok { + return "", "", false + } + return mn, jn, true +} + +func extractModuleName(id string) (string, bool) { + id = strings.TrimPrefix(id, "go.d:collector:") + i := strings.IndexByte(id, ':') + if i == -1 { + return id, id != "" + } + return id[:i], true +} + +func extractJobName(id string) (string, bool) { + i := strings.LastIndexByte(id, ':') + if i == -1 { + return "", false + } + return id[i+1:], true +} diff --git a/agent/jobmgr/manager.go b/agent/jobmgr/manager.go index 7088f84f9..f859c21d3 100644 --- a/agent/jobmgr/manager.go +++ b/agent/jobmgr/manager.go @@ -13,57 +13,40 @@ import ( "time" "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/functions" "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/agent/netdataapi" + "github.com/netdata/go.d.plugin/agent/safewriter" + "github.com/netdata/go.d.plugin/agent/ticker" "github.com/netdata/go.d.plugin/logger" + "github.com/mattn/go-isatty" "gopkg.in/yaml.v2" ) -type Job interface { - Name() string - ModuleName() string - FullName() string - AutoDetection() bool - AutoDetectionEvery() int - RetryAutoDetection() bool - Tick(clock 
int) - Start() - Stop() - Cleanup() -} - -type jobStatus = string - -const ( - jobStatusRunning jobStatus = "running" // Check() succeeded - jobStatusRetrying jobStatus = "retrying" // Check() failed, but we need keep trying auto-detection - jobStatusStoppedFailed jobStatus = "stopped_failed" // Check() failed - jobStatusStoppedDupLocal jobStatus = "stopped_duplicate_local" // a job with the same FullName is running - jobStatusStoppedDupGlobal jobStatus = "stopped_duplicate_global" // a job with the same FullName is registered by another plugin - jobStatusStoppedRegErr jobStatus = "stopped_registration_error" // an error during registration (only 'too many open files') - jobStatusStoppedCreateErr jobStatus = "stopped_creation_error" // an error during creation (yaml unmarshal) -) +var isTerminal = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsTerminal(os.Stdin.Fd()) -func NewManager() *Manager { - np := noop{} +func New() *Manager { mgr := &Manager{ Logger: logger.New().With( slog.String("component", "job manager"), ), - Out: io.Discard, - FileLock: np, - StatusSaver: np, - StatusStore: np, - Vnodes: np, - Dyncfg: np, - - confGroupCache: confgroup.NewCache(), - - runningJobs: newRunningJobsCache(), - retryingJobs: newRetryingJobsCache(), - - addCh: make(chan confgroup.Config), - removeCh: make(chan confgroup.Config), + Out: io.Discard, + FileLock: noop{}, + FileStatus: noop{}, + FileStatusStore: noop{}, + Vnodes: noop{}, + FnReg: noop{}, + + discoveredConfigs: newDiscoveredConfigsCache(), + seenConfigs: newSeenConfigCache(), + exposedConfigs: newExposedConfigCache(), + runningJobs: newRunningJobsCache(), + retryingTasks: newRetryingTasksCache(), + + api: netdataapi.New(safewriter.Stdout), + mux: sync.Mutex{}, + started: make(chan struct{}), } return mgr @@ -72,210 +55,245 @@ func NewManager() *Manager { type Manager struct { *logger.Logger - PluginName string - Out io.Writer - Modules module.Registry + PluginName string + Out io.Writer + Modules module.Registry + ConfigDefaults confgroup.Registry - FileLock FileLocker - StatusSaver StatusSaver - StatusStore StatusStore - Vnodes Vnodes - Dyncfg Dyncfg + FileLock FileLocker + FileStatus FileStatus + FileStatusStore FileStatusStore + Vnodes Vnodes + FnReg FunctionRegistry - confGroupCache *confgroup.Cache - runningJobs *runningJobsCache - retryingJobs *retryingJobsCache + discoveredConfigs *discoveredConfigs + seenConfigs *seenConfigs + exposedConfigs *exposedConfigs + retryingTasks *retryingTasks + runningJobs *runningJobs - addCh chan confgroup.Config - removeCh chan confgroup.Config + api DyncfgAPI + ctx context.Context + mux sync.Mutex - queueMux sync.Mutex - queue []Job + started chan struct{} } func (m *Manager) Run(ctx context.Context, in chan []*confgroup.Group) { m.Info("instance is started") defer func() { m.cleanup(); m.Info("instance is stopped") }() + m.ctx = ctx + + m.FnReg.Register("config", m.dyncfgConfig) + + for name := range m.Modules { + m.dyncfgNotifyAddModule(name) + } var wg sync.WaitGroup wg.Add(1) - go func() { defer wg.Done(); m.runConfigGroupsHandling(ctx, in) }() + go func() { defer wg.Done(); m.runProcessDiscoveredConfigs(ctx, in) }() wg.Add(1) - go func() { defer wg.Done(); m.runConfigsHandling(ctx) }() + go func() { defer wg.Done(); m.runNotifyRunningJobs(ctx) }() - wg.Add(1) - go func() { defer wg.Done(); m.runRunningJobsHandling(ctx) }() + close(m.started) wg.Wait() <-ctx.Done() } -func (m *Manager) runConfigGroupsHandling(ctx context.Context, in chan []*confgroup.Group) { +func (m *Manager) 
runProcessDiscoveredConfigs(ctx context.Context, in chan []*confgroup.Group) { for { select { case <-ctx.Done(): return case groups := <-in: - for _, gr := range groups { - select { - case <-ctx.Done(): - return - default: - a, r := m.confGroupCache.Add(gr) - m.Debugf("received config group ('%s'): %d jobs (added: %d, removed: %d)", gr.Source, len(gr.Configs), len(a), len(r)) - sendConfigs(ctx, m.removeCh, r) - sendConfigs(ctx, m.addCh, a) - } - } + m.processDiscoveredConfigGroups(groups) } } } -func (m *Manager) runConfigsHandling(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case cfg := <-m.addCh: - m.addConfig(ctx, cfg) - case cfg := <-m.removeCh: - m.removeConfig(cfg) +func (m *Manager) processDiscoveredConfigGroups(groups []*confgroup.Group) { + for _, gr := range groups { + a, r := m.discoveredConfigs.add(gr) + m.Infof("received configs: %d/+%d/-%d (group '%s')", len(gr.Configs), len(a), len(r), gr.Source) + for _, cfg := range r { + m.removeDiscoveredConfig(cfg) + } + for _, cfg := range a { + m.addDiscoveredConfig(cfg) } } } -func (m *Manager) cleanup() { - for _, task := range *m.retryingJobs { - task.cancel() - } - for name := range *m.runningJobs { - _ = m.FileLock.Unlock(name) +func (m *Manager) addDiscoveredConfig(cfg confgroup.Config) { + m.mux.Lock() + defer m.mux.Unlock() + + scfg, ok := m.seenConfigs.lookup(cfg) + if !ok { + scfg = &seenConfig{cfg: cfg} + m.seenConfigs.add(scfg) } - // TODO: m.Dyncfg.Register() ? - m.stopRunningJobs() -} -func (m *Manager) addConfig(ctx context.Context, cfg confgroup.Config) { - task, isRetry := m.retryingJobs.lookup(cfg) - if isRetry { - task.cancel() - m.retryingJobs.remove(cfg) - } else { - m.Dyncfg.Register(cfg) + ecfg, ok := m.exposedConfigs.lookup(cfg) + if !ok { + ecfg = scfg + m.exposedConfigs.add(ecfg) } - if m.runningJobs.has(cfg) { - m.Infof("%s[%s] job is being served by another job, skipping it", cfg.Module(), cfg.Name()) - m.StatusSaver.Save(cfg, jobStatusStoppedDupLocal) - m.Dyncfg.UpdateStatus(cfg, "error", "duplicate, served by another job") + if !ok { + if _, err := m.createCollectorJob(cfg); err != nil { + ecfg.status = dyncfgFailed + m.dyncfgNotifyAddJob(cfg, ecfg.status) + return + } + + ecfg.status = dyncfgAccepted + m.dyncfgNotifyAddJob(cfg, ecfg.status) + if isTerminal { + m.dyncfgConfigEnable(functions.Function{ + Args: []string{dyncfgJobID(ecfg.cfg)}, + }) + } return } - job, err := m.createJob(cfg) - if err != nil { - m.Warningf("couldn't create %s[%s]: %v", cfg.Module(), cfg.Name(), err) - m.StatusSaver.Save(cfg, jobStatusStoppedCreateErr) - m.Dyncfg.UpdateStatus(cfg, "error", fmt.Sprintf("build error: %s", err)) + // TODO: fix retry + if scfg.cfg.UID() == ecfg.cfg.UID() { return } - cleanupJob := true - defer func() { - if cleanupJob { - job.Cleanup() - } - }() - - if isRetry { - job.AutoDetectEvery = task.timeout - job.AutoDetectTries = task.retries - } else if job.AutoDetectionEvery() == 0 { - switch { - case m.StatusStore.Contains(cfg, jobStatusRunning, jobStatusRetrying): - m.Infof("%s[%s] job last status is running/retrying, applying recovering settings", cfg.Module(), cfg.Name()) - job.AutoDetectEvery = 30 - job.AutoDetectTries = 11 - case isInsideK8sCluster() && cfg.Provider() == "file watcher": - m.Infof("%s[%s] is k8s job, applying recovering settings", cfg.Module(), cfg.Name()) - job.AutoDetectEvery = 10 - job.AutoDetectTries = 7 - } + sp, ep := scfg.cfg.SourceTypePriority(), ecfg.cfg.SourceTypePriority() + + if ep > sp || (ep == sp && ecfg.status == dyncfgRunning) { + 
return + } + if ep < sp { + m.stopRunningJob(ecfg.cfg.FullName()) + m.exposedConfigs.add(scfg) // replace + ecfg = scfg } - switch detection(job) { - case jobStatusRunning: - if ok, err := m.FileLock.Lock(cfg.FullName()); ok || err != nil && !isTooManyOpenFiles(err) { - cleanupJob = false - m.runningJobs.put(cfg) - m.StatusSaver.Save(cfg, jobStatusRunning) - m.Dyncfg.UpdateStatus(cfg, "running", "") - m.startJob(job) - } else if isTooManyOpenFiles(err) { - m.Error(err) - m.StatusSaver.Save(cfg, jobStatusStoppedRegErr) - m.Dyncfg.UpdateStatus(cfg, "error", "too many open files") - } else { - m.Infof("%s[%s] job is being served by another plugin, skipping it", cfg.Module(), cfg.Name()) - m.StatusSaver.Save(cfg, jobStatusStoppedDupGlobal) - m.Dyncfg.UpdateStatus(cfg, "error", "duplicate, served by another plugin") - } - case jobStatusRetrying: - m.Infof("%s[%s] job detection failed, will retry in %d seconds", cfg.Module(), cfg.Name(), job.AutoDetectionEvery()) - ctx, cancel := context.WithCancel(ctx) - m.retryingJobs.put(cfg, retryTask{ - cancel: cancel, - timeout: job.AutoDetectionEvery(), - retries: job.AutoDetectTries, + if isTerminal { + m.dyncfgConfigEnable(functions.Function{ + Args: []string{dyncfgJobID(ecfg.cfg)}, }) - go runRetryTask(ctx, m.addCh, cfg, time.Second*time.Duration(job.AutoDetectionEvery())) - m.StatusSaver.Save(cfg, jobStatusRetrying) - m.Dyncfg.UpdateStatus(cfg, "error", "job detection failed, will retry later") - case jobStatusStoppedFailed: - m.StatusSaver.Save(cfg, jobStatusStoppedFailed) - m.Dyncfg.UpdateStatus(cfg, "error", "job detection failed, stopping it") - default: - m.Warningf("%s[%s] job detection: unknown state", cfg.Module(), cfg.Name()) } + return } -func (m *Manager) removeConfig(cfg confgroup.Config) { - if m.runningJobs.has(cfg) { - m.stopJob(cfg.FullName()) - _ = m.FileLock.Unlock(cfg.FullName()) - m.runningJobs.remove(cfg) +func (m *Manager) removeDiscoveredConfig(cfg confgroup.Config) { + m.mux.Lock() + defer m.mux.Unlock() + + m.retryingTasks.remove(cfg) + + scfg, ok := m.seenConfigs.lookup(cfg) + if !ok { + return } + m.seenConfigs.remove(cfg) - if task, ok := m.retryingJobs.lookup(cfg); ok { - task.cancel() - m.retryingJobs.remove(cfg) + ecfg, ok := m.exposedConfigs.lookup(cfg) + if !ok { + return + } + if scfg.cfg.UID() == ecfg.cfg.UID() { + m.exposedConfigs.remove(cfg) + m.stopRunningJob(cfg.FullName()) + m.dyncfgNotifyRemoveJob(cfg) } - m.StatusSaver.Remove(cfg) - m.Dyncfg.Unregister(cfg) + return } -func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) { +func (m *Manager) runNotifyRunningJobs(ctx context.Context) { + tk := ticker.New(time.Second) + defer tk.Stop() + + for { + select { + case <-ctx.Done(): + return + case clock := <-tk.C: + m.runningJobs.lock() + m.runningJobs.forEach(func(_ string, job *module.Job) { + job.Tick(clock) + }) + m.runningJobs.unlock() + } + } +} + +func (m *Manager) cleanup() { + m.mux.Lock() + defer m.mux.Unlock() + + m.FnReg.Unregister("config") + + m.runningJobs.lock() + defer m.runningJobs.unlock() + + m.runningJobs.forEach(func(key string, job *module.Job) { + job.Stop() + m.runningJobs.remove(key) + }) +} + +func (m *Manager) startRunningJob(job *module.Job) { + m.runningJobs.lock() + defer m.runningJobs.unlock() + + if job, ok := m.runningJobs.lookup(job.FullName()); ok { + job.Stop() + } + + go job.Start() + m.runningJobs.add(job.FullName(), job) +} + +func (m *Manager) stopRunningJob(name string) { + m.runningJobs.lock() + defer m.runningJobs.unlock() + + if job, ok := 
m.runningJobs.lookup(name); ok { + job.Stop() + m.runningJobs.remove(name) + } +} + +func (m *Manager) createCollectorJob(cfg confgroup.Config) (*module.Job, error) { creator, ok := m.Modules[cfg.Module()] if !ok { return nil, fmt.Errorf("can not find %s module", cfg.Module()) } + var vnode struct { + guid string + hostname string + labels map[string]string + } + + if cfg.Vnode() != "" { + n, ok := m.Vnodes.Lookup(cfg.Vnode()) + if !ok { + return nil, fmt.Errorf("vnode '%s' is not found", cfg.Vnode()) + } + + vnode.guid = n.GUID + vnode.hostname = n.Hostname + vnode.labels = n.Labels + } + m.Debugf("creating %s[%s] job, config: %v", cfg.Module(), cfg.Name(), cfg) mod := creator.Create() - if err := unmarshal(cfg, mod); err != nil { - return nil, err - } - labels := make(map[string]string) - for name, value := range cfg.Labels() { - n, ok1 := name.(string) - v, ok2 := value.(string) - if ok1 && ok2 { - labels[n] = v - } + if err := applyConfig(cfg, mod); err != nil { + return nil, err } jobCfg := module.JobConfig{ @@ -286,21 +304,13 @@ func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) { UpdateEvery: cfg.UpdateEvery(), AutoDetectEvery: cfg.AutoDetectionRetry(), Priority: cfg.Priority(), - Labels: labels, - IsStock: isStockConfig(cfg), + Labels: makeLabels(cfg), + IsStock: cfg.SourceType() == "stock", Module: mod, Out: m.Out, - } - - if cfg.Vnode() != "" { - n, ok := m.Vnodes.Lookup(cfg.Vnode()) - if !ok { - return nil, fmt.Errorf("vnode '%s' is not found", cfg.Vnode()) - } - - jobCfg.VnodeGUID = n.GUID - jobCfg.VnodeHostname = n.Hostname - jobCfg.VnodeLabels = n.Labels + VnodeGUID: vnode.guid, + VnodeHostname: vnode.hostname, + VnodeLabels: vnode.labels, } job := module.NewJob(jobCfg) @@ -308,62 +318,31 @@ func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) { return job, nil } -func detection(job Job) jobStatus { - if !job.AutoDetection() { - if job.RetryAutoDetection() { - return jobStatusRetrying - } else { - return jobStatusStoppedFailed - } - } - return jobStatusRunning -} - -func runRetryTask(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config, timeout time.Duration) { - t := time.NewTimer(timeout) - defer t.Stop() - - select { - case <-ctx.Done(): - case <-t.C: - sendConfig(ctx, out, cfg) - } -} - -func sendConfigs(ctx context.Context, out chan<- confgroup.Config, cfgs []confgroup.Config) { - for _, cfg := range cfgs { - sendConfig(ctx, out, cfg) - } -} - -func sendConfig(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config) { - select { - case <-ctx.Done(): - return - case out <- cfg: - } -} - -func unmarshal(conf interface{}, module interface{}) error { - bs, err := yaml.Marshal(conf) +func applyConfig(cfg confgroup.Config, module any) error { + bs, err := yaml.Marshal(cfg) if err != nil { return err } return yaml.Unmarshal(bs, module) } +func isTooManyOpenFiles(err error) bool { + return err != nil && strings.Contains(err.Error(), "too many open files") +} + func isInsideK8sCluster() bool { host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") return host != "" && port != "" } -func isTooManyOpenFiles(err error) bool { - return err != nil && strings.Contains(err.Error(), "too many open files") -} - -func isStockConfig(cfg confgroup.Config) bool { - if !strings.HasPrefix(cfg.Provider(), "file") { - return false +func makeLabels(cfg confgroup.Config) map[string]string { + labels := make(map[string]string) + for name, value := range cfg.Labels() { + n, ok1 := name.(string) + 
v, ok2 := value.(string) + if ok1 && ok2 { + labels[n] = v + } } - return !strings.Contains(cfg.Source(), "/etc/netdata") + return labels } diff --git a/agent/jobmgr/manager_test.go b/agent/jobmgr/manager_test.go index 69dceda49..b8074f212 100644 --- a/agent/jobmgr/manager_test.go +++ b/agent/jobmgr/manager_test.go @@ -3,102 +3,1252 @@ package jobmgr import ( - "bytes" - "context" - "sync" + "encoding/json" + "fmt" "testing" - "time" "github.com/netdata/go.d.plugin/agent/confgroup" - "github.com/netdata/go.d.plugin/agent/module" - "github.com/netdata/go.d.plugin/agent/safewriter" - "github.com/stretchr/testify/assert" + "github.com/netdata/go.d.plugin/agent/functions" ) -// TODO: tech dept -func TestNewManager(t *testing.T) { +func TestManager_Run_Dyncfg_Get(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "[get] non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-get", + Args: []string{dyncfgJobID(cfg), "get"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-get 501 application/json 501 +{"status":501,"message":"module success job test is not found"} +FUNCTION_RESULT_END +`, + } + }, + }, + "get existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test"). + Set("option_str", "1"). + Set("option_int", 1) + bs, _ := json.Marshal(cfg) + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: bs, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-get", + Args: []string{dyncfgJobID(cfg), "get"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-get 200 application/json 200 +{"option_str":"1","option_int":1} +FUNCTION_RESULT_END +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } } -// TODO: tech dept -func TestManager_Run(t *testing.T) { - groups := []*confgroup.Group{ - { - Source: "source", - Configs: []confgroup.Config{ - { - "name": "name", - "module": "success", - "update_every": module.UpdateEvery, - "autodetection_retry": module.AutoDetectionRetry, - "priority": module.Priority, - }, - { - "name": "name", - "module": "success", - "update_every": module.UpdateEvery + 1, - "autodetection_retry": module.AutoDetectionRetry, - "priority": module.Priority, - }, - { - "name": "name", - "module": "fail", - "update_every": module.UpdateEvery + 1, - "autodetection_retry": module.AutoDetectionRetry, - "priority": module.Priority, - }, +func TestManager_Run_Dyncfg_Add(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "add dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + }, + wantDiscovered: nil, + wantSeen: 
[]seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted +`, + } + }, + }, + "add dyncfg:nok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted +`, + } + }, + }, + "add dyncfg:ok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted +`, + } }, }, } - var buf bytes.Buffer - mgr := NewManager() - mgr.Modules = prepareMockRegistry() - mgr.Out = safewriter.New(&buf) - mgr.PluginName = "test.plugin" - - ctx, cancel := context.WithCancel(context.Background()) - in := make(chan []*confgroup.Group) - var wg sync.WaitGroup - - wg.Add(1) - go func() { defer wg.Done(); mgr.Run(ctx, in) }() - - select { - case in <- groups: - case <-time.After(time.Second * 2): + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) } +} + +func TestManager_Run_Dyncfg_Enable(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "enable non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-enable 501 application/json 501 +{"status":501,"message":"module success job test is not found"} +FUNCTION_RESULT_END +`, + } + }, + }, + "enable dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: 
[]string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + "enable dyncfg:ok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 3-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + "enable dyncfg:nok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgFailed}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgFailed}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status failed +`, + } + }, + }, + "enable dyncfg:nok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgFailed}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgFailed}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 
+{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted - time.Sleep(time.Second * 5) - cancel() - wg.Wait() +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status failed + +FUNCTION_RESULT_BEGIN 3-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status failed +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } +} + +func TestManager_Run_Dyncfg_Disable(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "disable non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-disable 501 application/json 501 +{"status":501,"message":"module success job test is not found"} +FUNCTION_RESULT_END +`, + } + }, + }, + "disable dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled +`, + } + }, + }, + "disable dyncfg:ok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled + +FUNCTION_RESULT_BEGIN 3-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled +`, + } + }, + }, + "disable dyncfg:nok": { + createSim: func() *runSim { + cfg := 
prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status disabled +`, + } + }, + }, + "disable dyncfg:nok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status disabled + +FUNCTION_RESULT_BEGIN 3-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status disabled +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } +} + +func TestManager_Run_Dyncfg_Restart(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "restart non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-restart 501 application/json 501 +{"status":501,"message":"job not found"} +FUNCTION_RESULT_END +`, + } + }, + }, + "restart not enabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 
+{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-restart 501 application/json 501 +{"status":501,"message":"can not restart not enabled job"} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted +`, + } + }, + }, + "restart enabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 3-restart 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + "restart disabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled + +FUNCTION_RESULT_BEGIN 3-restart 501 application/json 501 +{"status":501,"message":"can not restart disabled job"} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled +`, + } + }, + }, + "restart enabled dyncfg:ok multiple times": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "4-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + 
{cfg: cfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 3-restart 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 4-restart 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } +} + +func TestManager_Run_Dyncfg_Remove(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "remove non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-remove", + Args: []string{dyncfgJobID(cfg), "remove"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-remove 501 application/json 501 +{"status":501,"message":"module success job test is not found"} +FUNCTION_RESULT_END +`, + } + }, + }, + "remove non-dyncfg": { + createSim: func() *runSim { + stockCfg := prepareStockCfg("success", "stock") + userCfg := prepareUserCfg("success", "user") + discCfg := prepareDiscoveredCfg("success", "discovered") + + return &runSim{ + do: func(mgr *Manager) { + mgr.processDiscoveredConfigGroups([]*confgroup.Group{ + prepareCfgGroup(stockCfg.Source(), "stock", stockCfg), + prepareCfgGroup(userCfg.Source(), "user", userCfg), + prepareCfgGroup(discCfg.Source(), "discovered", discCfg), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "1-remove", + Args: []string{dyncfgJobID(stockCfg), "remove"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-remove", + Args: []string{dyncfgJobID(userCfg), "remove"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-remove", + Args: []string{dyncfgJobID(discCfg), "remove"}, + }) + }, + wantDiscovered: []confgroup.Config{ + stockCfg, + userCfg, + discCfg, + }, + wantSeen: []seenConfig{ + {cfg: stockCfg, status: dyncfgAccepted}, + {cfg: userCfg, status: dyncfgAccepted}, + {cfg: discCfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: stockCfg, status: dyncfgAccepted}, + {cfg: userCfg, status: dyncfgAccepted}, + {cfg: discCfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +CONFIG go.d:collector:success:jobs:stock create accepted job /collectors/success stock 'type=stock,module=success,job=stock' 'schema get enable disable update restart' 0x0000 0x0000 + +CONFIG go.d:collector:success:jobs:user create accepted job /collectors/success user 'type=user,module=success,job=user' 'schema get enable disable update restart' 0x0000 0x0000 + +CONFIG go.d:collector:success:jobs:discovered create accepted job /collectors/success discovered 'type=discovered,module=success,job=discovered' 'schema get enable disable update restart' 0x0000 0x0000 + +FUNCTION_RESULT_BEGIN 1-remove 501 application/json 501 +{"status":501,"message":"job type is stock, need dyncfg"} 
+FUNCTION_RESULT_END + +FUNCTION_RESULT_BEGIN 2-remove 501 application/json 501 +{"status":501,"message":"job type is user, need dyncfg"} +FUNCTION_RESULT_END + +FUNCTION_RESULT_BEGIN 3-remove 501 application/json 501 +{"status":501,"message":"job type is discovered, need dyncfg"} +FUNCTION_RESULT_END + +`, + } + }, + }, + "remove not enabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-remove", + Args: []string{dyncfgJobID(cfg), "remove"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-remove 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test delete +`, + } + }, + }, + "remove enabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-remove", + Args: []string{dyncfgJobID(cfg), "remove"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 3-remove 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test delete +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } +} + +func TestManager_Run_Dyncfg_Update(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "update non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-update", + Args: []string{dyncfgJobID(cfg), "update"}, + Payload: []byte("{}"), + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-update 501 application/json 501 +{"status":501,"message":"job not found"} +FUNCTION_RESULT_END +`, + } + }, + }, + "update enabled dyncfg:ok with dyncfg:ok": { + createSim: func() *runSim { + origCfg := prepareDyncfgCfg("success", "test"). + Set("option_str", "1") + updCfg := prepareDyncfgCfg("success", "test"). 
+ Set("option_str", "2") + origBs, _ := json.Marshal(origCfg) + updBs, _ := json.Marshal(updCfg) + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(origCfg.Module()), "add", origCfg.Name()}, + Payload: origBs, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(origCfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-update", + Args: []string{dyncfgJobID(origCfg), "update"}, + Payload: updBs, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: updCfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: updCfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 3-update 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + "update disabled dyncfg:ok with dyncfg:ok": { + createSim: func() *runSim { + origCfg := prepareDyncfgCfg("success", "test"). + Set("option_str", "1") + updCfg := prepareDyncfgCfg("success", "test"). + Set("option_str", "2") + origBs, _ := json.Marshal(origCfg) + updBs, _ := json.Marshal(updCfg) + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(origCfg.Module()), "add", origCfg.Name()}, + Payload: origBs, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(origCfg), "disable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-update", + Args: []string{dyncfgJobID(origCfg), "update"}, + Payload: updBs, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: updCfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: updCfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled + +FUNCTION_RESULT_BEGIN 3-update 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } +} + +func prepareCfgGroup(src, srcType string, configs ...confgroup.Config) *confgroup.Group { + return &confgroup.Group{ + Configs: configs, + Source: src, + SourceType: srcType, + } +} + +func prepareStockCfg(module, job string) confgroup.Config { + return confgroup.Config{}. + SetSourceType("stock"). + SetProvider("test"). + SetSource(fmt.Sprintf("type=stock,module=%s,job=%s", module, job)). + SetModule(module). + SetName(job) +} + +func prepareUserCfg(module, job string) confgroup.Config { + return confgroup.Config{}. + SetSourceType("user"). + SetProvider("test"). + SetSource(fmt.Sprintf("type=user,module=%s,job=%s", module, job)). + SetModule(module). 
+ SetName(job) +} - assert.True(t, buf.String() != "") +func prepareDiscoveredCfg(module, job string) confgroup.Config { + return confgroup.Config{}. + SetSourceType("discovered"). + SetProvider("test"). + SetSource(fmt.Sprintf("type=discovered,module=%s,job=%s", module, job)). + SetModule(module). + SetName(job) } -func prepareMockRegistry() module.Registry { - reg := module.Registry{} - reg.Register("success", module.Creator{ - Create: func() module.Module { - return &module.MockModule{ - InitFunc: func() bool { return true }, - CheckFunc: func() bool { return true }, - ChartsFunc: func() *module.Charts { - return &module.Charts{ - &module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}}, - } - }, - CollectFunc: func() map[string]int64 { - return map[string]int64{"id1": 1} - }, - } - }, - }) - reg.Register("fail", module.Creator{ - Create: func() module.Module { - return &module.MockModule{ - InitFunc: func() bool { return false }, - } - }, - }) - return reg +func prepareDyncfgCfg(module, job string) confgroup.Config { + return confgroup.Config{}. + SetSourceType("dyncfg"). + SetProvider("dyncfg"). + SetSource(fmt.Sprintf("type=dyncfg,module=%s,job=%s", module, job)). + SetModule(module). + SetName(job) } diff --git a/agent/jobmgr/noop.go b/agent/jobmgr/noop.go index 15883105d..4c2801c24 100644 --- a/agent/jobmgr/noop.go +++ b/agent/jobmgr/noop.go @@ -3,18 +3,19 @@ package jobmgr import ( + "github.com/netdata/go.d.plugin/agent/functions" + "github.com/netdata/go.d.plugin/agent/confgroup" "github.com/netdata/go.d.plugin/agent/vnodes" ) type noop struct{} -func (n noop) Lock(string) (bool, error) { return true, nil } -func (n noop) Unlock(string) error { return nil } -func (n noop) Save(confgroup.Config, string) {} -func (n noop) Remove(confgroup.Config) {} -func (n noop) Contains(confgroup.Config, ...string) bool { return false } -func (n noop) Lookup(string) (*vnodes.VirtualNode, bool) { return nil, false } -func (n noop) Register(confgroup.Config) { return } -func (n noop) Unregister(confgroup.Config) { return } -func (n noop) UpdateStatus(confgroup.Config, string, string) { return } +func (n noop) Lock(string) (bool, error) { return true, nil } +func (n noop) Unlock(string) error { return nil } +func (n noop) Save(confgroup.Config, string) {} +func (n noop) Remove(confgroup.Config) {} +func (n noop) Contains(confgroup.Config, ...string) bool { return false } +func (n noop) Lookup(string) (*vnodes.VirtualNode, bool) { return nil, false } +func (n noop) Register(name string, reg func(functions.Function)) {} +func (n noop) Unregister(name string) {} diff --git a/agent/jobmgr/run.go b/agent/jobmgr/run.go deleted file mode 100644 index f1a14cadc..000000000 --- a/agent/jobmgr/run.go +++ /dev/null @@ -1,73 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package jobmgr - -import ( - "context" - "slices" - "time" - - "github.com/netdata/go.d.plugin/agent/ticker" -) - -func (m *Manager) runRunningJobsHandling(ctx context.Context) { - tk := ticker.New(time.Second) - defer tk.Stop() - - for { - select { - case <-ctx.Done(): - return - case clock := <-tk.C: - //m.Debugf("tick %d", clock) - m.notifyRunningJobs(clock) - } - } -} - -func (m *Manager) notifyRunningJobs(clock int) { - m.queueMux.Lock() - defer m.queueMux.Unlock() - - for _, v := range m.queue { - v.Tick(clock) - } -} - -func (m *Manager) startJob(job Job) { - m.queueMux.Lock() - defer m.queueMux.Unlock() - - go job.Start() - - m.queue = append(m.queue, job) -} - -func (m *Manager) stopJob(name 
string) { - m.queueMux.Lock() - defer m.queueMux.Unlock() - - idx := slices.IndexFunc(m.queue, func(job Job) bool { - return job.FullName() == name - }) - - if idx != -1 { - j := m.queue[idx] - j.Stop() - - copy(m.queue[idx:], m.queue[idx+1:]) - m.queue[len(m.queue)-1] = nil - m.queue = m.queue[:len(m.queue)-1] - } -} - -func (m *Manager) stopRunningJobs() { - m.queueMux.Lock() - defer m.queueMux.Unlock() - - for i, v := range m.queue { - v.Stop() - m.queue[i] = nil - } - m.queue = m.queue[:0] -} diff --git a/agent/jobmgr/sim_test.go b/agent/jobmgr/sim_test.go new file mode 100644 index 000000000..870f19d7c --- /dev/null +++ b/agent/jobmgr/sim_test.go @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package jobmgr + +import ( + "bytes" + "context" + "errors" + "fmt" + "slices" + "strings" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/agent/netdataapi" + "github.com/netdata/go.d.plugin/agent/safewriter" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type runSim struct { + do func(mgr *Manager) + + wantDiscovered []confgroup.Config + wantSeen []seenConfig + wantExposed []seenConfig + wantRunning []string + wantDyncfg string +} + +func (s *runSim) run(t *testing.T) { + t.Helper() + + require.NotNil(t, s.do, "s.do is nil") + + var buf bytes.Buffer + mgr := New() + mgr.api = netdataapi.New(safewriter.New(&buf)) + mgr.Modules = prepareMockRegistry() + + done := make(chan struct{}) + grpCh := make(chan []*confgroup.Group) + ctx, cancel := context.WithCancel(context.Background()) + + go func() { defer close(done); close(grpCh); mgr.Run(ctx, grpCh) }() + + timeout := time.Second * 5 + + select { + case <-mgr.started: + case <-time.After(timeout): + t.Errorf("failed to start work in %s", timeout) + } + + s.do(mgr) + cancel() + + select { + case <-done: + case <-time.After(timeout): + t.Errorf("failed to finish work in %s", timeout) + } + + parts := strings.Split(buf.String(), "\n") + parts = slices.DeleteFunc(parts, func(s string) bool { + return strings.HasPrefix(s, "CONFIG") && strings.Contains(s, " template ") + }) + + wantDyncfg, gotDyncfg := strings.TrimSpace(s.wantDyncfg), strings.TrimSpace(strings.Join(parts, "\n")) + + fmt.Println(gotDyncfg) + + assert.Equal(t, wantDyncfg, gotDyncfg, "dyncfg commands") + + var n int + for _, cfgs := range mgr.discoveredConfigs.items { + n += len(cfgs) + } + + require.Len(t, s.wantDiscovered, n, "discoveredConfigs: different len") + + for _, cfg := range s.wantDiscovered { + cfgs, ok := mgr.discoveredConfigs.items[cfg.Source()] + require.Truef(t, ok, "discoveredConfigs: source %s is not found", cfg.Source()) + _, ok = cfgs[cfg.Hash()] + require.Truef(t, ok, "discoveredConfigs: source %s config %d is not found", cfg.Source(), cfg.Hash()) + } + + require.Len(t, s.wantSeen, len(mgr.seenConfigs.items), "seenConfigs: different len") + + for _, scfg := range s.wantSeen { + v, ok := mgr.seenConfigs.lookup(scfg.cfg) + require.Truef(t, ok, "seenConfigs: config '%s' is not found", scfg.cfg.UID()) + require.Truef(t, scfg.status == v.status, "seenConfigs: wrong status, want %s got %s", scfg.status, v.status) + } + + require.Len(t, s.wantExposed, len(mgr.exposedConfigs.items), "exposedConfigs: different len") + + for _, scfg := range s.wantExposed { + v, ok := mgr.exposedConfigs.lookup(scfg.cfg) + require.Truef(t, ok && scfg.cfg.UID() == v.cfg.UID(), "exposedConfigs: config '%s' is not found", scfg.cfg.UID()) 
+		require.Truef(t, scfg.status == v.status, "exposedConfigs: wrong status, want %s got %s", scfg.status, v.status)
+	}
+}
+
+func prepareMockRegistry() module.Registry {
+	reg := module.Registry{}
+
+	reg.Register("success", module.Creator{
+		JobConfigSchema: module.MockConfigSchema,
+		Create: func() module.Module {
+			return &module.MockModule{
+				ChartsFunc: func() *module.Charts {
+					return &module.Charts{&module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}}}
+				},
+				CollectFunc: func() map[string]int64 { return map[string]int64{"id1": 1} },
+			}
+		},
+	})
+	reg.Register("fail", module.Creator{
+		Create: func() module.Module {
+			return &module.MockModule{
+				InitFunc: func() error { return errors.New("mock failed init") },
+			}
+		},
+	})
+
+	return reg
+}
diff --git a/agent/module/job.go b/agent/module/job.go
index 6200ff9f5..b9b41f03f 100644
--- a/agent/module/job.go
+++ b/agent/module/job.go
@@ -4,6 +4,7 @@ package module

import (
	"bytes"
+	"errors"
	"fmt"
	"io"
	"log/slog"
@@ -85,6 +86,10 @@ const (
func NewJob(cfg JobConfig) *Job {
	var buf bytes.Buffer

+	if cfg.UpdateEvery == 0 {
+		cfg.UpdateEvery = 1
+	}
+
	j := &Job{
		AutoDetectEvery: cfg.AutoDetectEvery,
		AutoDetectTries: infTries,
@@ -167,40 +172,44 @@ type Job struct {
const NetdataChartIDMaxLength = 1000

// FullName returns job full name.
-func (j Job) FullName() string {
+func (j *Job) FullName() string {
	return j.fullName
}

// ModuleName returns job module name.
-func (j Job) ModuleName() string {
+func (j *Job) ModuleName() string {
	return j.moduleName
}

// Name returns job name.
-func (j Job) Name() string {
+func (j *Job) Name() string {
	return j.name
}

// Panicked returns 'panicked' flag value.
-func (j Job) Panicked() bool {
+func (j *Job) Panicked() bool {
	return j.panicked
}

// AutoDetectionEvery returns value of AutoDetectEvery.
-func (j Job) AutoDetectionEvery() int {
+func (j *Job) AutoDetectionEvery() int {
	return j.AutoDetectEvery
}

// RetryAutoDetection returns whether it is needed to retry autodetection.
-func (j Job) RetryAutoDetection() bool {
+func (j *Job) RetryAutoDetection() bool {
	return j.AutoDetectEvery > 0 && (j.AutoDetectTries == infTries || j.AutoDetectTries > 0)
}

+func (j *Job) Configuration() any {
+	return j.module.Configuration()
+}
+
// AutoDetection invokes init, check and postCheck. It handles panic.
-func (j *Job) AutoDetection() (ok bool) {
+func (j *Job) AutoDetection() (err error) {
	defer func() {
		if r := recover(); r != nil {
-			ok = false
+			err = fmt.Errorf("panic %v", r)
			j.panicked = true
			j.disableAutoDetection()
@@ -209,7 +218,7 @@ func (j *Job) AutoDetection() (ok bool) {
				j.Errorf("STACK: %s", debug.Stack())
			}
		}
-		if !ok {
+		if err != nil {
			j.module.Cleanup()
		}
	}()
@@ -218,29 +227,29 @@ func (j *Job) AutoDetection() (ok bool) {
		j.Mute()
	}

-	if ok = j.init(); !ok {
+	if err = j.init(); err != nil {
		j.Error("init failed")
		j.Unmute()
		j.disableAutoDetection()
-		return
+		return err
	}

-	if ok = j.check(); !ok {
+	if err = j.check(); err != nil {
		j.Error("check failed")
		j.Unmute()
-		return
+		return err
	}

	j.Unmute()
-	j.Info("check success")
-	if ok = j.postCheck(); !ok {
+
+	if err = j.postCheck(); err != nil {
		j.Error("postCheck failed")
		j.disableAutoDetection()
-		return
+		return err
	}

-	return true
+	return nil
}

// Tick Tick.
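An aside on the pattern in the AutoDetection hunk above: with a named error return, the deferred recover must wrap the recovered value r, because the named return is usually still nil at the moment a panic fires. This is a standalone sketch of that idiom, not code from the patch:

package main

import (
	"errors"
	"fmt"
)

// detect mirrors the shape of Job.AutoDetection: a named error return plus a
// deferred recover that turns a panic in any step into an ordinary error.
func detect(step func() error) (err error) {
	defer func() {
		if r := recover(); r != nil {
			// r carries the panic payload; assigning to err is what the caller sees.
			err = fmt.Errorf("panic %v", r)
		}
	}()

	if err = step(); err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println(detect(func() error { return errors.New("check failed") })) // check failed
	fmt.Println(detect(func() error { panic("boom") }))                     // panic boom
}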
@@ -316,34 +325,40 @@ func (j *Job) Cleanup() { } } -func (j *Job) init() bool { +func (j *Job) init() error { if j.initialized { - return true + return nil + } + + if err := j.module.Init(); err != nil { + return err } - j.initialized = j.module.Init() + j.initialized = true - return j.initialized + return nil } -func (j *Job) check() bool { - ok := j.module.Check() - if !ok && j.AutoDetectTries != infTries { - j.AutoDetectTries-- +func (j *Job) check() error { + if err := j.module.Check(); err != nil { + if j.AutoDetectTries != infTries { + j.AutoDetectTries-- + } + return err } - return ok + return nil } -func (j *Job) postCheck() bool { +func (j *Job) postCheck() error { if j.charts = j.module.Charts(); j.charts == nil { j.Error("nil charts") - return false + return errors.New("nil charts") } if err := checkCharts(*j.charts...); err != nil { j.Errorf("charts check: %v", err) - return false + return err } - return true + return nil } func (j *Job) runOnce() { @@ -562,7 +577,7 @@ func (j *Job) updateChart(chart *Chart, collected map[string]int64, sinceLastRun return chart.updated } -func (j Job) penalty() int { +func (j *Job) penalty() int { v := j.retries / penaltyStep * penaltyStep * j.updateEvery / 2 if v > maxPenalty { return maxPenalty diff --git a/agent/module/job_test.go b/agent/module/job_test.go index f19fdcebd..c87f840d5 100644 --- a/agent/module/job_test.go +++ b/agent/module/job_test.go @@ -3,6 +3,7 @@ package module import ( + "errors" "fmt" "io" "testing" @@ -72,10 +73,10 @@ func TestJob_AutoDetectionEvery(t *testing.T) { func TestJob_RetryAutoDetection(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { return false }, + CheckFunc: func() error { return errors.New("check error") }, ChartsFunc: func() *Charts { return &Charts{} }, @@ -86,14 +87,14 @@ func TestJob_RetryAutoDetection(t *testing.T) { assert.True(t, job.RetryAutoDetection()) assert.Equal(t, infTries, job.AutoDetectTries) for i := 0; i < 1000; i++ { - job.check() + _ = job.check() } assert.True(t, job.RetryAutoDetection()) assert.Equal(t, infTries, job.AutoDetectTries) job.AutoDetectTries = 10 for i := 0; i < 10; i++ { - job.check() + _ = job.check() } assert.False(t, job.RetryAutoDetection()) assert.Equal(t, 0, job.AutoDetectTries) @@ -103,13 +104,13 @@ func TestJob_AutoDetection(t *testing.T) { job := newTestJob() var v int m := &MockModule{ - InitFunc: func() bool { + InitFunc: func() error { v++ - return true + return nil }, - CheckFunc: func() bool { + CheckFunc: func() error { v++ - return true + return nil }, ChartsFunc: func() *Charts { v++ @@ -118,47 +119,47 @@ func TestJob_AutoDetection(t *testing.T) { } job.module = m - assert.True(t, job.AutoDetection()) + assert.NoError(t, job.AutoDetection()) assert.Equal(t, 3, v) } func TestJob_AutoDetection_FailInit(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return false + InitFunc: func() error { + return errors.New("init error") }, } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_FailCheck(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { - return false + CheckFunc: func() error { + return errors.New("check error") }, } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, 
job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_FailPostCheck(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { - return true + CheckFunc: func() error { + return nil }, ChartsFunc: func() *Charts { return nil @@ -166,47 +167,47 @@ func TestJob_AutoDetection_FailPostCheck(t *testing.T) { } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_PanicInit(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { + InitFunc: func() error { panic("panic in Init") }, } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_PanicCheck(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { + CheckFunc: func() error { panic("panic in Check") }, } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_PanicPostCheck(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { - return true + CheckFunc: func() error { + return nil }, ChartsFunc: func() *Charts { panic("panic in PostCheck") @@ -214,7 +215,7 @@ func TestJob_AutoDetection_PanicPostCheck(t *testing.T) { } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } diff --git a/agent/module/mock.go b/agent/module/mock.go index c4353eb52..65b93debf 100644 --- a/agent/module/mock.go +++ b/agent/module/mock.go @@ -2,12 +2,40 @@ package module +const MockConfigSchema = ` +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "option_str": { + "type": "string", + "description": "Option string value" + }, + "option_int": { + "type": "integer", + "description": "Option integer value" + } + }, + "required": [ + "option_str", + "option_int" + ] +} +` + +type MockConfiguration struct { + OptionStr string `yaml:"option_str" json:"option_str"` + OptionInt int `yaml:"option_int" json:"option_int"` +} + // MockModule MockModule. type MockModule struct { Base - InitFunc func() bool - CheckFunc func() bool + Config MockConfiguration `yaml:",inline" json:",inline"` + + InitFunc func() error + CheckFunc func() error ChartsFunc func() *Charts CollectFunc func() map[string]int64 CleanupFunc func() @@ -15,23 +43,23 @@ type MockModule struct { } // Init invokes InitFunc. -func (m MockModule) Init() bool { +func (m *MockModule) Init() error { if m.InitFunc == nil { - return true + return nil } return m.InitFunc() } // Check invokes CheckFunc. -func (m MockModule) Check() bool { +func (m *MockModule) Check() error { if m.CheckFunc == nil { - return true + return nil } return m.CheckFunc() } // Charts invokes ChartsFunc. -func (m MockModule) Charts() *Charts { +func (m *MockModule) Charts() *Charts { if m.ChartsFunc == nil { return nil } @@ -39,7 +67,7 @@ func (m MockModule) Charts() *Charts { } // Collect invokes CollectDunc. 
-func (m MockModule) Collect() map[string]int64 { +func (m *MockModule) Collect() map[string]int64 { if m.CollectFunc == nil { return nil } @@ -53,3 +81,7 @@ func (m *MockModule) Cleanup() { } m.CleanupDone = true } + +func (m *MockModule) Configuration() any { + return m.Config +} diff --git a/agent/module/mock_test.go b/agent/module/mock_test.go index 9c194e893..d7521911f 100644 --- a/agent/module/mock_test.go +++ b/agent/module/mock_test.go @@ -12,17 +12,17 @@ import ( func TestMockModule_Init(t *testing.T) { m := &MockModule{} - assert.True(t, m.Init()) - m.InitFunc = func() bool { return false } - assert.False(t, m.Init()) + assert.NoError(t, m.Init()) + m.InitFunc = func() error { return nil } + assert.NoError(t, m.Init()) } func TestMockModule_Check(t *testing.T) { m := &MockModule{} - assert.True(t, m.Check()) - m.CheckFunc = func() bool { return false } - assert.False(t, m.Check()) + assert.NoError(t, m.Check()) + m.CheckFunc = func() error { return nil } + assert.NoError(t, m.Check()) } func TestMockModule_Charts(t *testing.T) { diff --git a/agent/module/module.go b/agent/module/module.go index 3421a02ee..5c88c6e04 100644 --- a/agent/module/module.go +++ b/agent/module/module.go @@ -9,15 +9,14 @@ import ( // Module is an interface that represents a module. type Module interface { // Init does initialization. - // If it returns false, the job will be disabled. - Init() bool + // If it returns error, the job will be disabled. + Init() error // Check is called after Init. - // If it returns false, the job will be disabled. - Check() bool + // If it returns error, the job will be disabled. + Check() error // Charts returns the chart definition. - // Make sure not to share returned instance. Charts() *Charts // Collect collects metrics. @@ -27,6 +26,8 @@ type Module interface { Cleanup() GetBase() *Base + + Configuration() any } // Base is a helper struct. All modules should embed this struct. 
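To make the reworked Module contract concrete, here is a minimal collector sketch against the updated interface: Init and Check report failures as errors instead of booleans, and Configuration exposes the config for dynamic configuration. It is illustrative only and not part of the patch; the Collector and Config names and the address field are invented for the example.

package example

import (
	"errors"

	"github.com/netdata/go.d.plugin/agent/module"
)

// Config is this sketch's own configuration type.
type Config struct {
	Address string `yaml:"address" json:"address"`
}

// Collector is a minimal module under the updated interface.
type Collector struct {
	module.Base
	Config `yaml:",inline" json:",inline"`

	charts *module.Charts
}

func (c *Collector) Configuration() any { return c.Config }

func (c *Collector) Init() error {
	if c.Address == "" {
		return errors.New("address not set")
	}
	c.charts = &module.Charts{}
	return nil
}

func (c *Collector) Check() error {
	if mx := c.Collect(); len(mx) == 0 {
		return errors.New("no metrics collected")
	}
	return nil
}

func (c *Collector) Charts() *module.Charts { return c.charts }

func (c *Collector) Collect() map[string]int64 { return map[string]int64{"up": 1} }

func (c *Collector) Cleanup() {}

A module.Creator registered via Registry.Register would construct this type in its Create func, the same way the mock registry in sim_test.go above builds its "success" and "fail" modules.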
diff --git a/agent/module/registry.go b/agent/module/registry.go index 4d0d2c493..f2fa661c1 100644 --- a/agent/module/registry.go +++ b/agent/module/registry.go @@ -44,3 +44,8 @@ func (r Registry) Register(name string, creator Creator) { } r[name] = creator } + +func (r Registry) Lookup(name string) (Creator, bool) { + v, ok := r[name] + return v, ok +} diff --git a/agent/netdataapi/api.go b/agent/netdataapi/api.go index 43c34d22d..b9ade50a8 100644 --- a/agent/netdataapi/api.go +++ b/agent/netdataapi/api.go @@ -165,52 +165,50 @@ func (a *API) HOSTDEFINEEND() error { } func (a *API) HOST(guid string) error { - _, err := a.Write([]byte("HOST " + "'" + guid + "'" + "\n\n")) + _, err := a.Write([]byte("HOST " + "'" + + guid + "'\n\n")) return err } -func (a *API) DynCfgEnable(pluginName string) error { - _, err := a.Write([]byte("DYNCFG_ENABLE '" + pluginName + "'\n\n")) - return err -} +func (a *API) FUNCRESULT(uid, contentType, payload, code string) { + var buf bytes.Buffer -func (a *API) DynCfgReset() error { - _, err := a.Write([]byte("DYNCFG_RESET\n")) - return err -} + buf.WriteString("FUNCTION_RESULT_BEGIN " + + uid + " " + + code + " " + + contentType + " " + + code + "\n", + ) -func (a *API) DyncCfgRegisterModule(moduleName string) error { - _, err := fmt.Fprintf(a, "DYNCFG_REGISTER_MODULE '%s' job_array\n\n", moduleName) - return err -} + if payload != "" { + buf.WriteString(payload + "\n") + } -func (a *API) DynCfgRegisterJob(moduleName, jobName, jobType string) error { - _, err := fmt.Fprintf(a, "DYNCFG_REGISTER_JOB '%s' '%s' '%s' 0\n\n", moduleName, jobName, jobType) - return err -} + buf.WriteString("FUNCTION_RESULT_END\n\n") -func (a *API) DynCfgReportJobStatus(moduleName, jobName, status, reason string) error { - _, err := fmt.Fprintf(a, "REPORT_JOB_STATUS '%s' '%s' '%s' 0 '%s'\n\n", moduleName, jobName, status, reason) - return err + _, _ = buf.WriteTo(a) } -func (a *API) FunctionResultSuccess(uid, contentType, payload string) error { - return a.functionResult(uid, contentType, payload, "1") -} +func (a *API) CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string) { + // https://learn.netdata.cloud/docs/contributing/external-plugins/#config -func (a *API) FunctionResultReject(uid, contentType, payload string) error { - return a.functionResult(uid, contentType, payload, "0") + _, _ = a.Write([]byte("CONFIG " + + id + " " + + "create" + " " + + status + " " + + configType + " " + + path + " " + + sourceType + " '" + + source + "' '" + + supportedCommands + "' 0x0000 0x0000\n\n", + )) + // supportedCommands + "' 0x7ff 0x7ff\n", } -func (a *API) functionResult(uid, contentType, payload, code string) error { - var buf bytes.Buffer - - buf.WriteString("FUNCTION_RESULT_BEGIN " + uid + " " + code + " " + contentType + " 0\n") - if payload != "" { - buf.WriteString(payload + "\n") - } - buf.WriteString("FUNCTION_RESULT_END\n\n") +func (a *API) CONFIGDELETE(id string) { + _, _ = a.Write([]byte("CONFIG " + id + " delete\n\n")) +} - _, err := buf.WriteTo(a) - return err +func (a *API) CONFIGSTATUS(id, status string) { + _, _ = a.Write([]byte("CONFIG " + id + " status " + status + "\n\n")) } diff --git a/agent/netdataapi/api_test.go b/agent/netdataapi/api_test.go index 30f019460..e5087839b 100644 --- a/agent/netdataapi/api_test.go +++ b/agent/netdataapi/api_test.go @@ -260,101 +260,6 @@ HOST_DEFINE_END ) } -func TestAPI_DynCfgEnable(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DynCfgEnable("plugin") - - assert.Equal( - t, - 
"DYNCFG_ENABLE 'plugin'\n\n", - buf.String(), - ) -} - -func TestAPI_DynCfgReset(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DynCfgReset() - - assert.Equal( - t, - "DYNCFG_RESET\n", - buf.String(), - ) -} - -func TestAPI_DyncCfgRegisterModule(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DyncCfgRegisterModule("module") - - assert.Equal( - t, - "DYNCFG_REGISTER_MODULE 'module' job_array\n\n", - buf.String(), - ) -} - -func TestAPI_DynCfgRegisterJob(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} +func TestAPI_FUNCRESULT(t *testing.T) { - _ = a.DynCfgRegisterJob("module", "job", "type") - - assert.Equal( - t, - "DYNCFG_REGISTER_JOB 'module' 'job' 'type' 0\n\n", - buf.String(), - ) -} - -func TestAPI_DynCfgReportJobStatus(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DynCfgReportJobStatus("module", "job", "status", "reason") - - assert.Equal( - t, - "REPORT_JOB_STATUS 'module' 'job' 'status' 0 'reason'\n\n", - buf.String(), - ) -} - -func TestAPI_FunctionResultSuccess(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.FunctionResultSuccess("uid", "contentType", "payload") - - assert.Equal( - t, - `FUNCTION_RESULT_BEGIN uid 1 contentType 0 -payload -FUNCTION_RESULT_END - -`, - buf.String(), - ) -} - -func TestAPI_FunctionResultReject(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.FunctionResultReject("uid", "contentType", "payload") - - assert.Equal( - t, - `FUNCTION_RESULT_BEGIN uid 0 contentType 0 -payload -FUNCTION_RESULT_END - -`, - buf.String(), - ) } diff --git a/examples/simple/main.go b/examples/simple/main.go index 9982b91fc..f497ccba2 100644 --- a/examples/simple/main.go +++ b/examples/simple/main.go @@ -3,6 +3,7 @@ package main import ( + "errors" "fmt" "log/slog" "math/rand" @@ -24,9 +25,9 @@ type example struct{ module.Base } func (example) Cleanup() {} -func (example) Init() bool { return true } +func (example) Init() error { return nil } -func (example) Check() bool { return true } +func (example) Check() error { return nil } func (example) Charts() *module.Charts { return &module.Charts{ @@ -40,6 +41,7 @@ func (example) Charts() *module.Charts { }, } } +func (example) Configuration() any { return nil } func (e *example) Collect() map[string]int64 { return map[string]int64{ @@ -116,10 +118,10 @@ func main() { func parseCLI() *cli.Option { opt, err := cli.Parse(os.Args) - if err != nil { - if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp { - os.Exit(0) - } + var flagsErr *flags.Error + if errors.As(err, &flagsErr) && errors.Is(flagsErr.Type, flags.ErrHelp) { + os.Exit(0) + } else { os.Exit(1) } return opt diff --git a/modules/activemq/activemq.go b/modules/activemq/activemq.go index 109c874de..0335f5795 100644 --- a/modules/activemq/activemq.go +++ b/modules/activemq/activemq.go @@ -4,8 +4,7 @@ package activemq import ( _ "embed" - "fmt" - "strings" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/matcher" @@ -24,46 +23,27 @@ func init() { }) } -const ( - keyQueues = "queues" - keyTopics = "topics" - keyAdvisory = "Advisory" -) - -var nameReplacer = strings.NewReplacer(".", "_", " ", "") - -const ( - defaultMaxQueues = 50 - defaultMaxTopics = 50 - defaultURL = "http://127.0.0.1:8161" - defaultHTTPTimeout = time.Second -) - -// New creates Example with default values. 
func New() *ActiveMQ { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &ActiveMQ{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8161", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, - }, - - MaxQueues: defaultMaxQueues, - MaxTopics: defaultMaxTopics, - } - return &ActiveMQ{ - Config: config, + MaxQueues: 50, + MaxTopics: 50, + }, charts: &Charts{}, activeQueues: make(map[string]bool), activeTopics: make(map[string]bool), } } -// Config is the ActiveMQ module configuration. type Config struct { web.HTTP `yaml:",inline"` Webadmin string `yaml:"webadmin"` @@ -73,7 +53,6 @@ type Config struct { TopicsFilter string `yaml:"topics_filter"` } -// ActiveMQ ActiveMQ module. type ActiveMQ struct { module.Base Config `yaml:",inline"` @@ -86,228 +65,75 @@ type ActiveMQ struct { charts *Charts } -// Cleanup makes cleanup. -func (ActiveMQ) Cleanup() {} +func (a *ActiveMQ) Configuration() any { + return a.Config +} -// Init makes initialization. -func (a *ActiveMQ) Init() bool { - if a.URL == "" { - a.Error("URL not set") - return false +func (a *ActiveMQ) Init() error { + if err := a.validateConfig(); err != nil { + a.Errorf("config validation: %v", err) + return err } - if a.Webadmin == "" { - a.Error("webadmin root path is not set") - return false + qf, err := a.initQueuesFiler() + if err != nil { + a.Error(err) + return err } - - if a.QueuesFilter != "" { - f, err := matcher.NewSimplePatternsMatcher(a.QueuesFilter) - if err != nil { - a.Errorf("error on creating queues filter : %v", err) - return false - } - a.queuesFilter = matcher.WithCache(f) + if qf != nil { + a.queuesFilter = qf } - if a.TopicsFilter != "" { - f, err := matcher.NewSimplePatternsMatcher(a.TopicsFilter) - if err != nil { - a.Errorf("error on creating topics filter : %v", err) - return false - } - a.topicsFilter = matcher.WithCache(f) + tf, err := a.initTopicsFilter() + if err != nil { + a.Error(err) + return err + } + if tf != nil { + a.topicsFilter = tf } client, err := web.NewHTTPClient(a.Client) if err != nil { a.Error(err) - return false + return err } a.apiClient = newAPIClient(client, a.Request, a.Webadmin) - return true + return nil } -// Check makes check. -func (a *ActiveMQ) Check() bool { - return len(a.Collect()) > 0 -} - -// Charts creates Charts. -func (a ActiveMQ) Charts() *Charts { - return a.charts -} - -// Collect collects metrics. 
-func (a *ActiveMQ) Collect() map[string]int64 { - metrics := make(map[string]int64) - - var ( - queues *queues - topics *topics - err error - ) - - if queues, err = a.apiClient.getQueues(); err != nil { +func (a *ActiveMQ) Check() error { + mx, err := a.collect() + if err != nil { a.Error(err) - return nil + return err } + if len(mx) == 0 { + return errors.New("no metrics collected") - if topics, err = a.apiClient.getTopics(); err != nil { - a.Error(err) - return nil } - - a.processQueues(queues, metrics) - a.processTopics(topics, metrics) - - return metrics + return nil } -func (a *ActiveMQ) processQueues(queues *queues, metrics map[string]int64) { - var ( - count = len(a.activeQueues) - updated = make(map[string]bool) - unp int - ) - - for _, q := range queues.Items { - if strings.Contains(q.Name, keyAdvisory) { - continue - } - - if !a.activeQueues[q.Name] { - if a.MaxQueues != 0 && count > a.MaxQueues { - unp++ - continue - } - - if !a.filterQueues(q.Name) { - continue - } - - a.activeQueues[q.Name] = true - a.addQueueTopicCharts(q.Name, keyQueues) - } - - rname := nameReplacer.Replace(q.Name) - - metrics["queues_"+rname+"_consumers"] = q.Stats.ConsumerCount - metrics["queues_"+rname+"_enqueued"] = q.Stats.EnqueueCount - metrics["queues_"+rname+"_dequeued"] = q.Stats.DequeueCount - metrics["queues_"+rname+"_unprocessed"] = q.Stats.EnqueueCount - q.Stats.DequeueCount - - updated[q.Name] = true - } - - for name := range a.activeQueues { - if !updated[name] { - delete(a.activeQueues, name) - a.removeQueueTopicCharts(name, keyQueues) - } - } - - if unp > 0 { - a.Debugf("%d queues were unprocessed due to max_queues limit (%d)", unp, a.MaxQueues) - } -} - -func (a *ActiveMQ) processTopics(topics *topics, metrics map[string]int64) { - var ( - count = len(a.activeTopics) - updated = make(map[string]bool) - unp int - ) - - for _, t := range topics.Items { - if strings.Contains(t.Name, keyAdvisory) { - continue - } - - if !a.activeTopics[t.Name] { - if a.MaxTopics != 0 && count > a.MaxTopics { - unp++ - continue - } - - if !a.filterTopics(t.Name) { - continue - } - - a.activeTopics[t.Name] = true - a.addQueueTopicCharts(t.Name, keyTopics) - } - - rname := nameReplacer.Replace(t.Name) - - metrics["topics_"+rname+"_consumers"] = t.Stats.ConsumerCount - metrics["topics_"+rname+"_enqueued"] = t.Stats.EnqueueCount - metrics["topics_"+rname+"_dequeued"] = t.Stats.DequeueCount - metrics["topics_"+rname+"_unprocessed"] = t.Stats.EnqueueCount - t.Stats.DequeueCount - - updated[t.Name] = true - } - - for name := range a.activeTopics { - if !updated[name] { - // TODO: delete after timeout? 
- delete(a.activeTopics, name) - a.removeQueueTopicCharts(name, keyTopics) - } - } - - if unp > 0 { - a.Debugf("%d topics were unprocessed due to max_topics limit (%d)", unp, a.MaxTopics) - } -} - -func (a ActiveMQ) filterQueues(line string) bool { - if a.queuesFilter == nil { - return true - } - return a.queuesFilter.MatchString(line) +func (a *ActiveMQ) Charts() *Charts { + return a.charts } -func (a ActiveMQ) filterTopics(line string) bool { - if a.topicsFilter == nil { - return true +func (a *ActiveMQ) Cleanup() { + if a.apiClient != nil && a.apiClient.httpClient != nil { + a.apiClient.httpClient.CloseIdleConnections() } - return a.topicsFilter.MatchString(line) } -func (a *ActiveMQ) addQueueTopicCharts(name, typ string) { - rname := nameReplacer.Replace(name) - - charts := charts.Copy() - - for _, chart := range *charts { - chart.ID = fmt.Sprintf(chart.ID, typ, rname) - chart.Title = fmt.Sprintf(chart.Title, name) - chart.Fam = typ +func (a *ActiveMQ) Collect() map[string]int64 { + mx, err := a.collect() - for _, dim := range chart.Dims { - dim.ID = fmt.Sprintf(dim.ID, typ, rname) - } + if err != nil { + a.Error(err) + return nil } - _ = a.charts.Add(*charts...) - -} - -func (a *ActiveMQ) removeQueueTopicCharts(name, typ string) { - rname := nameReplacer.Replace(name) - - chart := a.charts.Get(fmt.Sprintf("%s_%s_messages", typ, rname)) - chart.MarkRemove() - chart.MarkNotCreated() - - chart = a.charts.Get(fmt.Sprintf("%s_%s_unprocessed_messages", typ, rname)) - chart.MarkRemove() - chart.MarkNotCreated() - - chart = a.charts.Get(fmt.Sprintf("%s_%s_consumers", typ, rname)) - chart.MarkRemove() - chart.MarkNotCreated() + return mx } diff --git a/modules/activemq/activemq_test.go b/modules/activemq/activemq_test.go index e45ceecd4..5e11dfbde 100644 --- a/modules/activemq/activemq_test.go +++ b/modules/activemq/activemq_test.go @@ -9,7 +9,6 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -131,25 +130,15 @@ var ( } ) -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Client.Timeout.Duration) - assert.Equal(t, defaultMaxQueues, job.MaxQueues) - assert.Equal(t, defaultMaxTopics, job.MaxTopics) -} - func TestActiveMQ_Init(t *testing.T) { job := New() // NG case - assert.False(t, job.Init()) + assert.Error(t, job.Init()) // OK case job.Webadmin = "webadmin" - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) } @@ -170,8 +159,8 @@ func TestActiveMQ_Check(t *testing.T) { job.HTTP.Request = web.Request{URL: ts.URL} job.Webadmin = "webadmin" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) } func TestActiveMQ_Charts(t *testing.T) { @@ -203,8 +192,8 @@ func TestActiveMQ_Collect(t *testing.T) { job.HTTP.Request = web.Request{URL: ts.URL} job.Webadmin = "webadmin" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) cases := []struct { expected map[string]int64 @@ -310,8 +299,8 @@ func TestActiveMQ_404(t *testing.T) { job.Webadmin = "webadmin" job.HTTP.Request = web.Request{URL: ts.URL} - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestActiveMQ_InvalidData(t 
*testing.T) { @@ -324,6 +313,6 @@ func TestActiveMQ_InvalidData(t *testing.T) { mod.Webadmin = "webadmin" mod.HTTP.Request = web.Request{URL: ts.URL} - require.True(t, mod.Init()) - assert.False(t, mod.Check()) + require.NoError(t, mod.Init()) + assert.Error(t, mod.Check()) } diff --git a/modules/activemq/apiclient.go b/modules/activemq/apiclient.go index 6835fd5aa..04b397bbd 100644 --- a/modules/activemq/apiclient.go +++ b/modules/activemq/apiclient.go @@ -104,7 +104,7 @@ func (a *apiClient) getTopics() (*topics, error) { return &topics, nil } -func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { +func (a *apiClient) doRequestOK(req *http.Request) (*http.Response, error) { resp, err := a.httpClient.Do(req) if err != nil { return resp, fmt.Errorf("error on request to %s : %v", req.URL, err) @@ -117,7 +117,7 @@ func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { return resp, err } -func (a apiClient) createRequest(urlPath string) (*http.Request, error) { +func (a *apiClient) createRequest(urlPath string) (*http.Request, error) { req := a.request.Copy() u, err := url.Parse(req.URL) if err != nil { diff --git a/modules/activemq/collect.go b/modules/activemq/collect.go new file mode 100644 index 000000000..0dbaf5544 --- /dev/null +++ b/modules/activemq/collect.go @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package activemq + +import ( + "fmt" + "strings" +) + +const ( + keyQueues = "queues" + keyTopics = "topics" + keyAdvisory = "Advisory" +) + +var nameReplacer = strings.NewReplacer(".", "_", " ", "") + +func (a *ActiveMQ) collect() (map[string]int64, error) { + metrics := make(map[string]int64) + + var ( + queues *queues + topics *topics + err error + ) + + if queues, err = a.apiClient.getQueues(); err != nil { + return nil, err + } + + if topics, err = a.apiClient.getTopics(); err != nil { + return nil, err + } + + a.processQueues(queues, metrics) + a.processTopics(topics, metrics) + + return metrics, nil +} + +func (a *ActiveMQ) processQueues(queues *queues, metrics map[string]int64) { + var ( + count = len(a.activeQueues) + updated = make(map[string]bool) + unp int + ) + + for _, q := range queues.Items { + if strings.Contains(q.Name, keyAdvisory) { + continue + } + + if !a.activeQueues[q.Name] { + if a.MaxQueues != 0 && count > a.MaxQueues { + unp++ + continue + } + + if !a.filterQueues(q.Name) { + continue + } + + a.activeQueues[q.Name] = true + a.addQueueTopicCharts(q.Name, keyQueues) + } + + rname := nameReplacer.Replace(q.Name) + + metrics["queues_"+rname+"_consumers"] = q.Stats.ConsumerCount + metrics["queues_"+rname+"_enqueued"] = q.Stats.EnqueueCount + metrics["queues_"+rname+"_dequeued"] = q.Stats.DequeueCount + metrics["queues_"+rname+"_unprocessed"] = q.Stats.EnqueueCount - q.Stats.DequeueCount + + updated[q.Name] = true + } + + for name := range a.activeQueues { + if !updated[name] { + delete(a.activeQueues, name) + a.removeQueueTopicCharts(name, keyQueues) + } + } + + if unp > 0 { + a.Debugf("%d queues were unprocessed due to max_queues limit (%d)", unp, a.MaxQueues) + } +} + +func (a *ActiveMQ) processTopics(topics *topics, metrics map[string]int64) { + var ( + count = len(a.activeTopics) + updated = make(map[string]bool) + unp int + ) + + for _, t := range topics.Items { + if strings.Contains(t.Name, keyAdvisory) { + continue + } + + if !a.activeTopics[t.Name] { + if a.MaxTopics != 0 && count > a.MaxTopics { + unp++ + continue + } + + if !a.filterTopics(t.Name) { + continue + } + + a.activeTopics[t.Name] 
= true + a.addQueueTopicCharts(t.Name, keyTopics) + } + + rname := nameReplacer.Replace(t.Name) + + metrics["topics_"+rname+"_consumers"] = t.Stats.ConsumerCount + metrics["topics_"+rname+"_enqueued"] = t.Stats.EnqueueCount + metrics["topics_"+rname+"_dequeued"] = t.Stats.DequeueCount + metrics["topics_"+rname+"_unprocessed"] = t.Stats.EnqueueCount - t.Stats.DequeueCount + + updated[t.Name] = true + } + + for name := range a.activeTopics { + if !updated[name] { + // TODO: delete after timeout? + delete(a.activeTopics, name) + a.removeQueueTopicCharts(name, keyTopics) + } + } + + if unp > 0 { + a.Debugf("%d topics were unprocessed due to max_topics limit (%d)", unp, a.MaxTopics) + } +} + +func (a *ActiveMQ) filterQueues(line string) bool { + if a.queuesFilter == nil { + return true + } + return a.queuesFilter.MatchString(line) +} + +func (a *ActiveMQ) filterTopics(line string) bool { + if a.topicsFilter == nil { + return true + } + return a.topicsFilter.MatchString(line) +} + +func (a *ActiveMQ) addQueueTopicCharts(name, typ string) { + rname := nameReplacer.Replace(name) + + charts := charts.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, typ, rname) + chart.Title = fmt.Sprintf(chart.Title, name) + chart.Fam = typ + + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, typ, rname) + } + } + + _ = a.charts.Add(*charts...) + +} + +func (a *ActiveMQ) removeQueueTopicCharts(name, typ string) { + rname := nameReplacer.Replace(name) + + chart := a.charts.Get(fmt.Sprintf("%s_%s_messages", typ, rname)) + chart.MarkRemove() + chart.MarkNotCreated() + + chart = a.charts.Get(fmt.Sprintf("%s_%s_unprocessed_messages", typ, rname)) + chart.MarkRemove() + chart.MarkNotCreated() + + chart = a.charts.Get(fmt.Sprintf("%s_%s_consumers", typ, rname)) + chart.MarkRemove() + chart.MarkNotCreated() +} diff --git a/modules/activemq/config_schema.json b/modules/activemq/config_schema.json index abefb5d2f..a9f723c65 100644 --- a/modules/activemq/config_schema.json +++ b/modules/activemq/config_schema.json @@ -1,75 +1,78 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/activemq job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "webadmin": { - "type": "string" - }, - "max_queues": { - "type": "integer" - }, - "max_topics": { - "type": "integer" - }, - "queues_filter": { - "type": "string" - }, - "topics_filter": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/activemq job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "webadmin": { "type": "string" + }, + "max_queues": { + "type": "integer" + }, + "max_topics": { + "type": "integer" + }, + "queues_filter": { + "type": "string" + }, + "topics_filter": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + 
"type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "tls_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url", + "webadmin" + ] }, - "required": [ - "name", - "url", - "webadmin" - ] + "uiSchema": {} } diff --git a/modules/activemq/init.go b/modules/activemq/init.go new file mode 100644 index 000000000..0467f628c --- /dev/null +++ b/modules/activemq/init.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package activemq + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +func (a *ActiveMQ) validateConfig() error { + if a.URL == "" { + return errors.New("url not set") + } + if a.Webadmin == "" { + return errors.New("webadmin root path set") + } + return nil +} + +func (a *ActiveMQ) initQueuesFiler() (matcher.Matcher, error) { + if a.QueuesFilter == "" { + return nil, nil + } + return matcher.NewSimplePatternsMatcher(a.QueuesFilter) +} + +func (a *ActiveMQ) initTopicsFilter() (matcher.Matcher, error) { + if a.TopicsFilter == "" { + return nil, nil + } + return matcher.NewSimplePatternsMatcher(a.TopicsFilter) +} diff --git a/modules/apache/apache.go b/modules/apache/apache.go index 8b117463d..bbfd2445f 100644 --- a/modules/apache/apache.go +++ b/modules/apache/apache.go @@ -4,6 +4,7 @@ package apache import ( _ "embed" + "errors" "net/http" "sync" "time" @@ -30,7 +31,7 @@ func New() *Apache { URL: "http://127.0.0.1/server-status?auto", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, }, }, @@ -54,26 +55,40 @@ type Apache struct { once *sync.Once } -func (a *Apache) Init() bool { - if err := a.verifyConfig(); err != nil { +func (a *Apache) Configuration() any { + return a.Config +} + +func (a *Apache) Init() error { + if err := a.validateConfig(); err != nil { a.Errorf("config validation: %v", err) - return false + return err } httpClient, err := a.initHTTPClient() if err != nil { a.Errorf("init HTTP client: %v", err) - return false + return err } a.httpClient = httpClient a.Debugf("using URL %s", a.URL) - a.Debugf("using timeout: %s", a.Timeout.Duration) - return true + a.Debugf("using timeout: %s", a.Timeout) + + return nil } -func (a *Apache) Check() bool { - return len(a.Collect()) > 0 +func (a *Apache) Check() error { + mx, err := a.collect() + if err != nil { + a.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (a *Apache) Charts() *module.Charts { diff --git a/modules/apache/apache_test.go b/modules/apache/apache_test.go index a507113f3..9b9ec5575 100644 --- a/modules/apache/apache_test.go +++ b/modules/apache/apache_test.go @@ -66,9 +66,9 @@ func TestApache_Init(t *testing.T) { apache.Config = test.config if test.wantFail { - assert.False(t, apache.Init()) + assert.Error(t, apache.Init()) } else { - assert.True(t, apache.Init()) + assert.NoError(t, apache.Init()) } }) } @@ -115,9 +115,9 @@ func TestApache_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, apache.Check()) + assert.Error(t, apache.Check()) } else { - 
assert.True(t, apache.Check()) + assert.NoError(t, apache.Check()) } }) } @@ -255,7 +255,7 @@ func caseMPMEventSimpleStatus(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -268,7 +268,7 @@ func caseMPMEventExtendedStatus(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -281,7 +281,7 @@ func caseMPMPreforkExtendedStatus(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -294,7 +294,7 @@ func caseLighttpdResponse(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -307,7 +307,7 @@ func caseInvalidDataResponse(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -316,7 +316,7 @@ func caseConnectionRefused(t *testing.T) (*Apache, func()) { t.Helper() apache := New() apache.URL = "http://127.0.0.1:65001/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, func() {} } @@ -329,7 +329,7 @@ func case404(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } diff --git a/modules/apache/config_schema.json b/modules/apache/config_schema.json index 81ece2b67..186ff56c5 100644 --- a/modules/apache/config_schema.json +++ b/modules/apache/config_schema.json @@ -1,59 +1,83 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/apache job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nginx job configuration schema.", + "type": "object", + "properties": { + "url": { + "default": "http://127.0.0.1/server-status?auto", + "title": "URL", + "description": "The URL of the Apache status page to monitor.", + "type": "string" + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "minimum": 1, + "type": "integer" + }, + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string" + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", "type": "string" + }, + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy Username", + "description": "The username for proxy authentication (if 
required).", + "type": "string" + }, + "proxy_password": { + "title": "Proxy Password", + "description": "The password for proxy authentication (if required).", + "type": "string" + }, + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "title": "Not Follow Redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS Certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS Key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + }, + "tls_skip_verify": { + "title": "Skip TLS Verify", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/apache/init.go b/modules/apache/init.go index 355999770..8c4699cc1 100644 --- a/modules/apache/init.go +++ b/modules/apache/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (a Apache) verifyConfig() error { +func (a *Apache) validateConfig() error { if a.URL == "" { return errors.New("url not set") } @@ -20,6 +20,6 @@ func (a Apache) verifyConfig() error { return nil } -func (a Apache) initHTTPClient() (*http.Client, error) { +func (a *Apache) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(a.Client) } diff --git a/modules/bind/bind.go b/modules/bind/bind.go index bcca0204e..db6cde11c 100644 --- a/modules/bind/bind.go +++ b/modules/bind/bind.go @@ -4,8 +4,8 @@ package bind import ( _ "embed" - "fmt" - "strings" + "errors" + "net/http" "time" "github.com/netdata/go.d.plugin/pkg/matcher" @@ -24,286 +24,111 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1:8653/json/v1" - defaultHTTPTimeout = time.Second * 2 -) - -// New creates Bind with default values. func New() *Bind { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &Bind{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8653/json/v1", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, }, }, - } - - return &Bind{ - Config: config, charts: &Charts{}, } } -type bindAPIClient interface { - serverStats() (*serverStats, error) -} - -// Config is the Bind module configuration. type Config struct { web.HTTP `yaml:",inline"` PermitView string `yaml:"permit_view"` } -// Bind Bind module. -type Bind struct { - module.Base - Config `yaml:",inline"` +type ( + Bind struct { + module.Base + Config `yaml:",inline"` - bindAPIClient - permitView matcher.Matcher - charts *Charts -} + charts *Charts -// Cleanup makes cleanup. -func (Bind) Cleanup() {} + permitView matcher.Matcher + + httpClient *http.Client + bindAPIClient + } -// Init makes initialization. 
-func (b *Bind) Init() bool { - if b.URL == "" { - b.Error("URL not set") - return false + bindAPIClient interface { + serverStats() (*serverStats, error) } +) + +func (b *Bind) Configuration() any { + return b.Config +} - client, err := web.NewHTTPClient(b.Client) +func (b *Bind) Init() error { + if err := b.validateConfig(); err != nil { + b.Errorf("config verification: %v", err) + return err + } + + pvm, err := b.initPermitViewMatcher() if err != nil { - b.Errorf("error on creating http client : %v", err) - return false + b.Error(err) + return err + } + if pvm != nil { + b.permitView = pvm } - switch { - case strings.HasSuffix(b.URL, "/xml/v3"): // BIND 9.9+ - b.bindAPIClient = newXML3Client(client, b.Request) - case strings.HasSuffix(b.URL, "/json/v1"): // BIND 9.10+ - b.bindAPIClient = newJSONClient(client, b.Request) - default: - b.Errorf("URL %s is wrong, supported endpoints: `/xml/v3`, `/json/v1`", b.URL) - return false + httpClient, err := web.NewHTTPClient(b.Client) + if err != nil { + b.Errorf("creating http client : %v", err) + return err } + b.httpClient = httpClient - if b.PermitView != "" { - m, err := matcher.NewSimplePatternsMatcher(b.PermitView) - if err != nil { - b.Errorf("error on creating permitView matcher : %v", err) - return false - } - b.permitView = matcher.WithCache(m) + bindClient, err := b.initBindApiClient(httpClient) + if err != nil { + b.Error(err) + return err } + b.bindAPIClient = bindClient - return true + return nil } -// Check makes check. -func (b *Bind) Check() bool { - return len(b.Collect()) > 0 +func (b *Bind) Check() error { + mx, err := b.collect() + if err != nil { + b.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } -// Charts creates Charts. -func (b Bind) Charts() *Charts { +func (b *Bind) Charts() *Charts { return b.charts } -// Collect collects metrics. 
func (b *Bind) Collect() map[string]int64 { - metrics := make(map[string]int64) + mx, err := b.collect() - s, err := b.serverStats() if err != nil { b.Error(err) return nil } - b.collectServerStats(metrics, s) - return metrics + return mx } -func (b *Bind) collectServerStats(metrics map[string]int64, stats *serverStats) { - var chart *Chart - - for k, v := range stats.NSStats { - var ( - algo = module.Incremental - dimName = k - chartID string - ) - switch { - default: - continue - case k == "RecursClients": - dimName = "clients" - chartID = keyRecursiveClients - algo = module.Absolute - case k == "Requestv4": - dimName = "IPv4" - chartID = keyReceivedRequests - case k == "Requestv6": - dimName = "IPv6" - chartID = keyReceivedRequests - case k == "QryFailure": - dimName = "failures" - chartID = keyQueryFailures - case k == "QryUDP": - dimName = "UDP" - chartID = keyProtocolsQueries - case k == "QryTCP": - dimName = "TCP" - chartID = keyProtocolsQueries - case k == "QrySuccess": - dimName = "queries" - chartID = keyQueriesSuccess - case strings.HasSuffix(k, "QryRej"): - chartID = keyQueryFailuresDetail - case strings.HasPrefix(k, "Qry"): - chartID = keyQueriesAnalysis - case strings.HasPrefix(k, "Update"): - chartID = keyReceivedUpdates - } - - if !b.charts.Has(chartID) { - _ = b.charts.Add(charts[chartID].Copy()) - } - - chart = b.charts.Get(chartID) - - if !chart.HasDim(k) { - _ = chart.AddDim(&Dim{ID: k, Name: dimName, Algo: algo}) - chart.MarkNotCreated() - } - - delete(stats.NSStats, k) - metrics[k] = v - } - - for _, v := range []struct { - item map[string]int64 - chartID string - }{ - {item: stats.NSStats, chartID: keyNSStats}, - {item: stats.OpCodes, chartID: keyInOpCodes}, - {item: stats.QTypes, chartID: keyInQTypes}, - {item: stats.SockStats, chartID: keyInSockStats}, - } { - if len(v.item) == 0 { - continue - } - - if !b.charts.Has(v.chartID) { - _ = b.charts.Add(charts[v.chartID].Copy()) - } - - chart = b.charts.Get(v.chartID) - - for key, val := range v.item { - if !chart.HasDim(key) { - _ = chart.AddDim(&Dim{ID: key, Algo: module.Incremental}) - chart.MarkNotCreated() - } - - metrics[key] = val - } - } - - if !(b.permitView != nil && len(stats.Views) > 0) { - return - } - - for name, view := range stats.Views { - if !b.permitView.MatchString(name) { - continue - } - r := view.Resolver - - delete(r.Stats, "BucketSize") - - for key, val := range r.Stats { - var ( - algo = module.Incremental - dimName = key - chartKey string - ) - - switch { - default: - chartKey = keyResolverStats - case key == "NumFetch": - chartKey = keyResolverNumFetch - dimName = "queries" - algo = module.Absolute - case strings.HasPrefix(key, "QryRTT"): - // TODO: not ordered - chartKey = keyResolverRTT - } - - chartID := fmt.Sprintf(chartKey, name) - - if !b.charts.Has(chartID) { - chart = charts[chartKey].Copy() - chart.ID = chartID - chart.Fam = fmt.Sprintf(chart.Fam, name) - _ = b.charts.Add(chart) - } - - chart = b.charts.Get(chartID) - dimID := fmt.Sprintf("%s_%s", name, key) - - if !chart.HasDim(dimID) { - _ = chart.AddDim(&Dim{ID: dimID, Name: dimName, Algo: algo}) - chart.MarkNotCreated() - } - - metrics[dimID] = val - } - - if len(r.QTypes) > 0 { - chartID := fmt.Sprintf(keyResolverInQTypes, name) - - if !b.charts.Has(chartID) { - chart = charts[keyResolverInQTypes].Copy() - chart.ID = chartID - chart.Fam = fmt.Sprintf(chart.Fam, name) - _ = b.charts.Add(chart) - } - - chart = b.charts.Get(chartID) - - for key, val := range r.QTypes { - dimID := fmt.Sprintf("%s_%s", name, key) - if 
!chart.HasDim(dimID) { - _ = chart.AddDim(&Dim{ID: dimID, Name: key, Algo: module.Incremental}) - chart.MarkNotCreated() - } - metrics[dimID] = val - } - } - - if len(r.CacheStats) > 0 { - chartID := fmt.Sprintf(keyResolverCacheHits, name) - - if !b.charts.Has(chartID) { - chart = charts[keyResolverCacheHits].Copy() - chart.ID = chartID - chart.Fam = fmt.Sprintf(chart.Fam, name) - _ = b.charts.Add(chart) - for _, dim := range chart.Dims { - dim.ID = fmt.Sprintf(dim.ID, name) - } - } - - metrics[name+"_CacheHits"] = r.CacheStats["CacheHits"] - metrics[name+"_CacheMisses"] = r.CacheStats["CacheMisses"] - } +func (b *Bind) Cleanup() { + if b.httpClient != nil { + b.httpClient.CloseIdleConnections() } } diff --git a/modules/bind/bind_test.go b/modules/bind/bind_test.go index 65ff36af0..21b6290b9 100644 --- a/modules/bind/bind_test.go +++ b/modules/bind/bind_test.go @@ -17,28 +17,18 @@ var ( xmlServerData, _ = os.ReadFile("testdata/query-server.xml") ) -func TestNew(t *testing.T) { - job := New() - assert.IsType(t, (*Bind)(nil), job) - assert.NotNil(t, job.charts) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestBind_Cleanup(t *testing.T) { New().Cleanup() } func TestBind_Init(t *testing.T) { // OK job := New() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.bindAPIClient) //NG job = New() job.URL = "" - assert.False(t, job.Init()) - job.URL = defaultURL[:len(defaultURL)-1] - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestBind_Check(t *testing.T) { @@ -54,19 +44,21 @@ func TestBind_Check(t *testing.T) { job := New() job.URL = ts.URL + "/json/v1" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) } func TestBind_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/xml/v3" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } -func TestBind_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } +func TestBind_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} func TestBind_CollectJSON(t *testing.T) { ts := httptest.NewServer( @@ -82,8 +74,8 @@ func TestBind_CollectJSON(t *testing.T) { job.URL = ts.URL + "/json/v1" job.PermitView = "*" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "_default_Queryv4": 4503685324, @@ -259,8 +251,8 @@ func TestBind_CollectXML3(t *testing.T) { job.PermitView = "*" job.URL = ts.URL + "/xml/v3" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "_bind_CookieClientOk": 0, @@ -504,8 +496,8 @@ func TestBind_InvalidData(t *testing.T) { job := New() job.URL = ts.URL + "/json/v1" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestBind_404(t *testing.T) { @@ -514,6 +506,6 @@ func TestBind_404(t *testing.T) { job := New() job.URL = ts.URL + "/json/v1" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/bind/collect.go b/modules/bind/collect.go new file mode 100644 index 000000000..cd10634b0 --- /dev/null +++ b/modules/bind/collect.go @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: 
GPL-3.0-or-later + +package bind + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (b *Bind) collect() (map[string]int64, error) { + mx := make(map[string]int64) + + s, err := b.serverStats() + if err != nil { + return nil, err + } + b.collectServerStats(mx, s) + + return mx, nil +} + +func (b *Bind) collectServerStats(metrics map[string]int64, stats *serverStats) { + var chart *Chart + + for k, v := range stats.NSStats { + var ( + algo = module.Incremental + dimName = k + chartID string + ) + switch { + default: + continue + case k == "RecursClients": + dimName = "clients" + chartID = keyRecursiveClients + algo = module.Absolute + case k == "Requestv4": + dimName = "IPv4" + chartID = keyReceivedRequests + case k == "Requestv6": + dimName = "IPv6" + chartID = keyReceivedRequests + case k == "QryFailure": + dimName = "failures" + chartID = keyQueryFailures + case k == "QryUDP": + dimName = "UDP" + chartID = keyProtocolsQueries + case k == "QryTCP": + dimName = "TCP" + chartID = keyProtocolsQueries + case k == "QrySuccess": + dimName = "queries" + chartID = keyQueriesSuccess + case strings.HasSuffix(k, "QryRej"): + chartID = keyQueryFailuresDetail + case strings.HasPrefix(k, "Qry"): + chartID = keyQueriesAnalysis + case strings.HasPrefix(k, "Update"): + chartID = keyReceivedUpdates + } + + if !b.charts.Has(chartID) { + _ = b.charts.Add(charts[chartID].Copy()) + } + + chart = b.charts.Get(chartID) + + if !chart.HasDim(k) { + _ = chart.AddDim(&Dim{ID: k, Name: dimName, Algo: algo}) + chart.MarkNotCreated() + } + + delete(stats.NSStats, k) + metrics[k] = v + } + + for _, v := range []struct { + item map[string]int64 + chartID string + }{ + {item: stats.NSStats, chartID: keyNSStats}, + {item: stats.OpCodes, chartID: keyInOpCodes}, + {item: stats.QTypes, chartID: keyInQTypes}, + {item: stats.SockStats, chartID: keyInSockStats}, + } { + if len(v.item) == 0 { + continue + } + + if !b.charts.Has(v.chartID) { + _ = b.charts.Add(charts[v.chartID].Copy()) + } + + chart = b.charts.Get(v.chartID) + + for key, val := range v.item { + if !chart.HasDim(key) { + _ = chart.AddDim(&Dim{ID: key, Algo: module.Incremental}) + chart.MarkNotCreated() + } + + metrics[key] = val + } + } + + if !(b.permitView != nil && len(stats.Views) > 0) { + return + } + + for name, view := range stats.Views { + if !b.permitView.MatchString(name) { + continue + } + r := view.Resolver + + delete(r.Stats, "BucketSize") + + for key, val := range r.Stats { + var ( + algo = module.Incremental + dimName = key + chartKey string + ) + + switch { + default: + chartKey = keyResolverStats + case key == "NumFetch": + chartKey = keyResolverNumFetch + dimName = "queries" + algo = module.Absolute + case strings.HasPrefix(key, "QryRTT"): + // TODO: not ordered + chartKey = keyResolverRTT + } + + chartID := fmt.Sprintf(chartKey, name) + + if !b.charts.Has(chartID) { + chart = charts[chartKey].Copy() + chart.ID = chartID + chart.Fam = fmt.Sprintf(chart.Fam, name) + _ = b.charts.Add(chart) + } + + chart = b.charts.Get(chartID) + dimID := fmt.Sprintf("%s_%s", name, key) + + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: dimName, Algo: algo}) + chart.MarkNotCreated() + } + + metrics[dimID] = val + } + + if len(r.QTypes) > 0 { + chartID := fmt.Sprintf(keyResolverInQTypes, name) + + if !b.charts.Has(chartID) { + chart = charts[keyResolverInQTypes].Copy() + chart.ID = chartID + chart.Fam = fmt.Sprintf(chart.Fam, name) + _ = b.charts.Add(chart) + } + + chart = b.charts.Get(chartID) + + for key, 
val := range r.QTypes { + dimID := fmt.Sprintf("%s_%s", name, key) + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: key, Algo: module.Incremental}) + chart.MarkNotCreated() + } + metrics[dimID] = val + } + } + + if len(r.CacheStats) > 0 { + chartID := fmt.Sprintf(keyResolverCacheHits, name) + + if !b.charts.Has(chartID) { + chart = charts[keyResolverCacheHits].Copy() + chart.ID = chartID + chart.Fam = fmt.Sprintf(chart.Fam, name) + _ = b.charts.Add(chart) + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + metrics[name+"_CacheHits"] = r.CacheStats["CacheHits"] + metrics[name+"_CacheMisses"] = r.CacheStats["CacheMisses"] + } + } +} diff --git a/modules/bind/config_schema.json b/modules/bind/config_schema.json index 042f47a1a..6439db1bf 100644 --- a/modules/bind/config_schema.json +++ b/modules/bind/config_schema.json @@ -1,21 +1,24 @@ { - "$id": "https://example.com/person.schema.json", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "title": "Bind collector job configuration", - "type": "object", - "properties": { - "firstName": { - "type": "string", - "description": "The person's first name." - }, - "lastName": { - "type": "string", - "description": "The person's last name." - }, - "age": { - "description": "Age in years which must be equal to or greater than zero.", - "type": "integer", - "minimum": 0 + "jsonSchema": { + "$id": "https://example.com/person.schema.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Bind collector job configuration", + "type": "object", + "properties": { + "firstName": { + "type": "string", + "description": "The person's first name." + }, + "lastName": { + "type": "string", + "description": "The person's last name." + }, + "age": { + "description": "Age in years which must be equal to or greater than zero.", + "type": "integer", + "minimum": 0 + } } - } + }, + "uiSchema": {} } diff --git a/modules/bind/init.go b/modules/bind/init.go new file mode 100644 index 000000000..daffe29bd --- /dev/null +++ b/modules/bind/init.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package bind + +import ( + "errors" + "fmt" + "net/http" + "strings" + + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +func (b *Bind) validateConfig() error { + if b.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (b *Bind) initPermitViewMatcher() (matcher.Matcher, error) { + if b.PermitView == "" { + return nil, nil + } + return matcher.NewSimplePatternsMatcher(b.PermitView) +} + +func (b *Bind) initBindApiClient(httpClient *http.Client) (bindAPIClient, error) { + switch { + case strings.HasSuffix(b.URL, "/xml/v3"): // BIND 9.9+ + return newXML3Client(httpClient, b.Request), nil + case strings.HasSuffix(b.URL, "/json/v1"): // BIND 9.10+ + return newJSONClient(httpClient, b.Request), nil + default: + return nil, fmt.Errorf("URL %s is wrong, supported endpoints: `/xml/v3`, `/json/v1`", b.URL) + } +} diff --git a/modules/cassandra/cassandra.go b/modules/cassandra/cassandra.go index 1e745fbd8..538bce12a 100644 --- a/modules/cassandra/cassandra.go +++ b/modules/cassandra/cassandra.go @@ -4,6 +4,7 @@ package cassandra import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -32,7 +33,7 @@ func New() *Cassandra { URL: "http://127.0.0.1:7072/metrics", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, }, @@ -58,24 +59,37 @@ type Cassandra struct { mx 
*cassandraMetrics } -func (c *Cassandra) Init() bool { +func (c *Cassandra) Configuration() any { + return c.Config +} + +func (c *Cassandra) Init() error { if err := c.validateConfig(); err != nil { c.Errorf("error on validating config: %v", err) - return false + return err } prom, err := c.initPrometheusClient() if err != nil { c.Errorf("error on init prometheus client: %v", err) - return false + return err } c.prom = prom - return true + return nil } -func (c *Cassandra) Check() bool { - return len(c.Collect()) > 0 +func (c *Cassandra) Check() error { + mx, err := c.collect() + if err != nil { + c.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (c *Cassandra) Charts() *module.Charts { @@ -94,4 +108,8 @@ func (c *Cassandra) Collect() map[string]int64 { return mx } -func (c *Cassandra) Cleanup() {} +func (c *Cassandra) Cleanup() { + if c.prom != nil && c.prom.HTTPClient() != nil { + c.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/cassandra/cassandra_test.go b/modules/cassandra/cassandra_test.go index 4425de46e..7ef95b292 100644 --- a/modules/cassandra/cassandra_test.go +++ b/modules/cassandra/cassandra_test.go @@ -55,9 +55,9 @@ func TestCassandra_Init(t *testing.T) { c.Config = test.config if test.wantFail { - assert.False(t, c.Init()) + assert.Error(t, c.Init()) } else { - assert.True(t, c.Init()) + assert.NoError(t, c.Init()) } }) } @@ -90,12 +90,12 @@ func TestCassandra_Check(t *testing.T) { c, cleanup := test.prepare() defer cleanup() - require.True(t, c.Init()) + require.NoError(t, c.Init()) if test.wantFail { - assert.False(t, c.Check()) + assert.Error(t, c.Check()) } else { - assert.True(t, c.Check()) + assert.NoError(t, c.Check()) } }) } @@ -239,7 +239,7 @@ func TestCassandra_Collect(t *testing.T) { c, cleanup := test.prepare() defer cleanup() - require.True(t, c.Init()) + require.NoError(t, c.Init()) mx := c.Collect() diff --git a/modules/cassandra/config_schema.json b/modules/cassandra/config_schema.json index ff22764ec..1922b638f 100644 --- a/modules/cassandra/config_schema.json +++ b/modules/cassandra/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/cassandra job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/cassandra job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": 
"boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/chrony/chrony.go b/modules/chrony/chrony.go index 9f12325b9..0198cd1b9 100644 --- a/modules/chrony/chrony.go +++ b/modules/chrony/chrony.go @@ -4,6 +4,7 @@ package chrony import ( _ "embed" + "errors" "time" "github.com/facebook/time/ntp/chrony" @@ -25,7 +26,7 @@ func New() *Chrony { return &Chrony{ Config: Config{ Address: "127.0.0.1:323", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, charts: charts.Copy(), newClient: newChronyClient, @@ -54,17 +55,30 @@ type ( } ) -func (c *Chrony) Init() bool { +func (c *Chrony) Configuration() any { + return c.Config +} + +func (c *Chrony) Init() error { if err := c.validateConfig(); err != nil { c.Errorf("config validation: %v", err) - return false + return err } - return true + return nil } -func (c *Chrony) Check() bool { - return len(c.Collect()) > 0 +func (c *Chrony) Check() error { + mx, err := c.collect() + if err != nil { + c.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (c *Chrony) Charts() *module.Charts { diff --git a/modules/chrony/chrony_test.go b/modules/chrony/chrony_test.go index a6568b234..95558f0ae 100644 --- a/modules/chrony/chrony_test.go +++ b/modules/chrony/chrony_test.go @@ -35,9 +35,9 @@ func TestChrony_Init(t *testing.T) { c.Config = test.config if test.wantFail { - assert.False(t, c.Init()) + assert.Error(t, c.Init()) } else { - assert.True(t, c.Init()) + assert.NoError(t, c.Init()) } }) } @@ -53,7 +53,7 @@ func TestChrony_Check(t *testing.T) { prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{}) }, }, "tracking: success, activity: fail": { - wantFail: false, + wantFail: true, prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnActivity: true}) }, }, "tracking: fail, activity: success": { @@ -74,12 +74,12 @@ func TestChrony_Check(t *testing.T) { t.Run(name, func(t *testing.T) { c := test.prepare() - require.True(t, c.Init()) + require.NoError(t, c.Init()) if test.wantFail { - assert.False(t, c.Check()) + assert.Error(t, c.Check()) } else { - assert.True(t, c.Check()) + assert.NoError(t, c.Check()) } }) } @@ -100,15 +100,15 @@ func TestChrony_Cleanup(t *testing.T) { }, "after Init": { wantClose: false, - prepare: func(c *Chrony) { c.Init() }, + prepare: func(c *Chrony) { _ = c.Init() }, }, "after Check": { wantClose: true, - prepare: func(c *Chrony) { c.Init(); c.Check() }, + prepare: func(c *Chrony) { _ = c.Init(); _ = c.Check() }, }, "after Collect": { wantClose: true, - prepare: func(c *Chrony) { c.Init(); c.Collect() }, + prepare: func(c *Chrony) { _ = c.Init(); _ = c.Collect() }, }, } @@ -197,7 +197,7 @@ func TestChrony_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { c := test.prepare() - require.True(t, c.Init()) + require.NoError(t, c.Init()) _ = c.Check() collected := c.Collect() diff --git a/modules/chrony/client.go b/modules/chrony/client.go index caa219f3b..e850ff239 100644 --- a/modules/chrony/client.go +++ b/modules/chrony/client.go @@ -10,7 +10,7 @@ import ( ) func newChronyClient(c Config) (chronyClient, error) { - conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration) + conn, err := 
net.DialTimeout("udp", c.Address, c.Timeout.Duration()) if err != nil { return nil, err } diff --git a/modules/chrony/config_schema.json b/modules/chrony/config_schema.json index 105adaa79..863caa1fa 100644 --- a/modules/chrony/config_schema.json +++ b/modules/chrony/config_schema.json @@ -1,23 +1,26 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/chrony job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/chrony job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/chrony/init.go b/modules/chrony/init.go index 70c8916f2..828112c9d 100644 --- a/modules/chrony/init.go +++ b/modules/chrony/init.go @@ -6,7 +6,7 @@ import ( "errors" ) -func (c Chrony) validateConfig() error { +func (c *Chrony) validateConfig() error { if c.Address == "" { return errors.New("empty 'address'") } diff --git a/modules/cockroachdb/cockroachdb.go b/modules/cockroachdb/cockroachdb.go index 0a862f97e..0b9a13689 100644 --- a/modules/cockroachdb/cockroachdb.go +++ b/modules/cockroachdb/cockroachdb.go @@ -13,91 +13,88 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -// DefaultMetricsSampleInterval hard coded to 10 -// https://github.com/cockroachdb/cockroach/blob/d5ffbf76fb4c4ef802836529188e4628476879bd/pkg/server/config.go#L56-L58 -const cockroachDBSamplingInterval = 10 - //go:embed "config_schema.json" var configSchema string +// DefaultMetricsSampleInterval hard coded to 10 +// https://github.com/cockroachdb/cockroach/blob/d5ffbf76fb4c4ef802836529188e4628476879bd/pkg/server/config.go#L56-L58 +const dbSamplingInterval = 10 + func init() { module.Register("cockroachdb", module.Creator{ JobConfigSchema: configSchema, Defaults: module.Defaults{ - UpdateEvery: cockroachDBSamplingInterval, + UpdateEvery: dbSamplingInterval, }, Create: func() module.Module { return New() }, }) } func New() *CockroachDB { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:8080/_status/vars", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &CockroachDB{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8080/_status/vars", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - - return &CockroachDB{ - Config: config, charts: charts.Copy(), } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - UpdateEvery int `yaml:"update_every"` - } - - CockroachDB struct { - module.Base - Config `yaml:",inline"` +type Config struct { + web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every"` +} - prom prometheus.Prometheus - charts *Charts - } -) +type CockroachDB struct { + module.Base + Config `yaml:",inline"` -func (c *CockroachDB) validateConfig() error { - if c.URL == "" { - return errors.New("URL is not set") - } - return nil + prom prometheus.Prometheus + charts *Charts } -func (c *CockroachDB) initClient() error { - client, err := web.NewHTTPClient(c.Client) - if err != nil { - return err - } - - c.prom = prometheus.New(client, c.Request) - return nil +func (c 
*CockroachDB) Configuration() any { + return c.Config } -func (c *CockroachDB) Init() bool { +func (c *CockroachDB) Init() error { if err := c.validateConfig(); err != nil { c.Errorf("error on validating config: %v", err) - return false + return err } - if err := c.initClient(); err != nil { - c.Errorf("error on initializing client: %v", err) - return false + + prom, err := c.initPrometheusClient() + if err != nil { + c.Error(err) + return err } - if c.UpdateEvery < cockroachDBSamplingInterval { + c.prom = prom + + if c.UpdateEvery < dbSamplingInterval { c.Warningf("'update_every'(%d) is lower then CockroachDB default sampling interval (%d)", - c.UpdateEvery, cockroachDBSamplingInterval) + c.UpdateEvery, dbSamplingInterval) } - return true + + return nil } -func (c *CockroachDB) Check() bool { - return len(c.Collect()) > 0 +func (c *CockroachDB) Check() error { + mx, err := c.collect() + if err != nil { + c.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (c *CockroachDB) Charts() *Charts { @@ -116,4 +113,8 @@ func (c *CockroachDB) Collect() map[string]int64 { return mx } -func (CockroachDB) Cleanup() {} +func (c *CockroachDB) Cleanup() { + if c.prom != nil && c.prom.HTTPClient() != nil { + c.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/cockroachdb/cockroachdb_test.go b/modules/cockroachdb/cockroachdb_test.go index 88c307716..f99f60b9b 100644 --- a/modules/cockroachdb/cockroachdb_test.go +++ b/modules/cockroachdb/cockroachdb_test.go @@ -30,36 +30,36 @@ func TestNew(t *testing.T) { func TestCockroachDB_Init(t *testing.T) { cdb := prepareCockroachDB() - assert.True(t, cdb.Init()) + assert.NoError(t, cdb.Init()) } func TestCockroachDB_Init_ReturnsFalseIfConfigURLIsNotSet(t *testing.T) { cdb := prepareCockroachDB() cdb.URL = "" - assert.False(t, cdb.Init()) + assert.Error(t, cdb.Init()) } func TestCockroachDB_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { cdb := prepareCockroachDB() cdb.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, cdb.Init()) + assert.Error(t, cdb.Init()) } func TestCockroachDB_Check(t *testing.T) { cdb, srv := prepareClientServer(t) defer srv.Close() - assert.True(t, cdb.Check()) + assert.NoError(t, cdb.Check()) } func TestCockroachDB_Check_ReturnsFalseIfConnectionRefused(t *testing.T) { cdb := New() cdb.URL = "http://127.0.0.1:38001/metrics" - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) - assert.False(t, cdb.Check()) + assert.Error(t, cdb.Check()) } func TestCockroachDB_Charts(t *testing.T) { @@ -221,7 +221,7 @@ func TestCockroachDB_Collect_ReturnsNilIfNotCockroachDBMetrics(t *testing.T) { func TestCockroachDB_Collect_ReturnsNilIfConnectionRefused(t *testing.T) { cdb := prepareCockroachDB() - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) assert.Nil(t, cdb.Collect()) } @@ -272,7 +272,7 @@ func prepareClientServer(t *testing.T) (*CockroachDB, *httptest.Server) { cdb := New() cdb.URL = ts.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, ts } @@ -286,7 +286,7 @@ func prepareClientServerNotCockroachDBMetricResponse(t *testing.T) (*CockroachDB cdb := New() cdb.URL = ts.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, ts } @@ -300,7 +300,7 @@ func prepareClientServerInvalidDataResponse(t *testing.T) (*CockroachDB, *httpte cdb := New() cdb.URL = ts.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, ts } @@ -314,6 +314,6 @@ func 
prepareClientServerResponse404(t *testing.T) (*CockroachDB, *httptest.Serve cdb := New() cdb.URL = ts.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, ts } diff --git a/modules/cockroachdb/config_schema.json b/modules/cockroachdb/config_schema.json index e732b99f6..0b9940df8 100644 --- a/modules/cockroachdb/config_schema.json +++ b/modules/cockroachdb/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/cockroachdb job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/cockroachdb job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/cockroachdb/init.go b/modules/cockroachdb/init.go new file mode 100644 index 000000000..07986a199 --- /dev/null +++ b/modules/cockroachdb/init.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cockroachdb + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +func (c *CockroachDB) validateConfig() error { + if c.URL == "" { + return errors.New("URL is not set") + } + return nil +} + +func (c *CockroachDB) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(c.Client) + if err != nil { + return nil, err + } + return prometheus.New(client, c.Request), nil +} diff --git a/modules/consul/config_schema.json b/modules/consul/config_schema.json index a71723696..97a527f45 100644 --- a/modules/consul/config_schema.json +++ b/modules/consul/config_schema.json @@ -1,62 +1,65 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/consul job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "acl_token": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - 
"proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/consul job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "acl_token": { + "type": "string" + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/consul/consul.go b/modules/consul/consul.go index ebd10984a..8bf3b8c37 100644 --- a/modules/consul/consul.go +++ b/modules/consul/consul.go @@ -4,6 +4,7 @@ package consul import ( _ "embed" + "errors" "net/http" "sync" "time" @@ -33,7 +34,7 @@ func New() *Consul { Config: Config{ HTTP: web.HTTP{ Request: web.Request{URL: "http://127.0.0.1:8500"}, - Client: web.Client{Timeout: web.Duration{Duration: time.Second * 2}}, + Client: web.Client{Timeout: web.Duration(time.Second * 2)}, }, }, charts: &module.Charts{}, @@ -69,31 +70,44 @@ type Consul struct { checks map[string]bool } -func (c *Consul) Init() bool { +func (c *Consul) Configuration() any { + return c.Config +} + +func (c *Consul) Init() error { if err := c.validateConfig(); err != nil { c.Errorf("config validation: %v", err) - return false + return err } httpClient, err := c.initHTTPClient() if err != nil { c.Errorf("init HTTP client: %v", err) - return false + return err } c.httpClient = httpClient prom, err := c.initPrometheusClient(httpClient) if err != nil { c.Errorf("init Prometheus client: %v", err) - return false + return err } c.prom = prom - return true + return nil } -func (c *Consul) Check() bool { - return len(c.Collect()) > 0 +func (c *Consul) Check() error { + mx, err := c.collect() + if err != nil { + c.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (c *Consul) Charts() *module.Charts { diff --git a/modules/consul/consul_test.go b/modules/consul/consul_test.go index b8f990893..7d68edd91 100644 --- a/modules/consul/consul_test.go +++ b/modules/consul/consul_test.go @@ -78,9 +78,9 @@ func TestConsul_Init(t *testing.T) { consul.Config = test.config if test.wantFail { - assert.False(t, consul.Init()) + assert.Error(t, consul.Init()) } else { - assert.True(t, consul.Init()) + assert.NoError(t, consul.Init()) } }) } @@ -131,9 +131,9 @@ func TestConsul_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, consul.Check()) + assert.Error(t, consul.Check()) } else { - assert.True(t, consul.Check()) + assert.NoError(t, consul.Check()) } }) } @@ -561,7 +561,7 @@ func caseConsulV1143CloudServerResponse(t *testing.T) (*Consul, func()) { consul := New() 
consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -589,7 +589,7 @@ func caseConsulV1132ServerResponse(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -617,7 +617,7 @@ func caseConsulV1132ServerWithHostnameResponse(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -643,7 +643,7 @@ func caseConsulV1132ServerWithDisabledPrometheus(t *testing.T) (*Consul, func()) consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -667,7 +667,7 @@ func caseConsulV1132ClientResponse(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -682,7 +682,7 @@ func caseInvalidDataResponse(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -691,7 +691,7 @@ func caseConnectionRefused(t *testing.T) (*Consul, func()) { t.Helper() consul := New() consul.URL = "http://127.0.0.1:65535/" - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, func() {} } @@ -705,7 +705,7 @@ func case404(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } diff --git a/modules/coredns/config_schema.json b/modules/coredns/config_schema.json index 70b9ef001..8785ace28 100644 --- a/modules/coredns/config_schema.json +++ b/modules/coredns/config_schema.json @@ -1,93 +1,96 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/coredns job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "per_server_stats": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" - } - }, - "excludes": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/coredns job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "per_server_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } } } - } - }, - "per_zone_stats": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" - } - }, - "excludes": { - "type": "array", - "items": { - "type": "string" + }, + "per_zone_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } } } - } - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - 
"headers": { - "type": "object", - "additionalProperties": { + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/coredns/coredns.go b/modules/coredns/coredns.go index 18c92caf3..a8c081ec5 100644 --- a/modules/coredns/coredns.go +++ b/modules/coredns/coredns.go @@ -4,6 +4,7 @@ package coredns import ( _ "embed" + "errors" "time" "github.com/blang/semver/v4" @@ -14,11 +15,6 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -const ( - defaultURL = "http://127.0.0.1:9153/metrics" - defaultHTTPTimeout = time.Second * 2 -) - //go:embed "config_schema.json" var configSchema string @@ -31,18 +27,17 @@ func init() { // New creates CoreDNS with default values. func New() *CoreDNS { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &CoreDNS{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:9153/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, }, }, - } - return &CoreDNS{ - Config: config, charts: summaryCharts.Copy(), collectedServers: make(map[string]bool), collectedZones: make(map[string]bool), @@ -71,48 +66,57 @@ type CoreDNS struct { metricNames requestMetricsNames } -// Cleanup makes cleanup. -func (CoreDNS) Cleanup() {} +func (cd *CoreDNS) Configuration() any { + return cd.Config +} // Init makes initialization. -func (cd *CoreDNS) Init() bool { - if cd.URL == "" { - cd.Error("URL not set") - return false +func (cd *CoreDNS) Init() error { + if err := cd.validateConfig(); err != nil { + cd.Errorf("config validation: %v", err) + return err } - if !cd.PerServerStats.Empty() { - m, err := cd.PerServerStats.Parse() - if err != nil { - cd.Errorf("error on creating 'per_server_stats' matcher : %v", err) - return false - } - cd.perServerMatcher = matcher.WithCache(m) + sm, err := cd.initPerServerMatcher() + if err != nil { + cd.Error(err) + return err } - - if !cd.PerZoneStats.Empty() { - m, err := cd.PerZoneStats.Parse() - if err != nil { - cd.Errorf("error on creating 'per_zone_stats' matcher : %v", err) - return false - } - cd.perZoneMatcher = matcher.WithCache(m) + if sm != nil { + cd.perServerMatcher = sm } - client, err := web.NewHTTPClient(cd.Client) + zm, err := cd.initPerZoneMatcher() if err != nil { - cd.Errorf("error on creating http client : %v", err) - return false + cd.Error(err) + return err + } + if zm != nil { + cd.perZoneMatcher = zm } - cd.prom = prometheus.New(client, cd.Request) + prom, err := cd.initPrometheusClient() + if err != nil { + cd.Error(err) + return err + } + cd.prom = prom - return true + return nil } // Check makes check. 
-func (cd *CoreDNS) Check() bool { - return len(cd.Collect()) > 0 +func (cd *CoreDNS) Check() error { + mx, err := cd.collect() + if err != nil { + cd.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } // Charts creates Charts. @@ -131,3 +135,10 @@ func (cd *CoreDNS) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (cd *CoreDNS) Cleanup() { + if cd.prom != nil && cd.prom.HTTPClient() != nil { + cd.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/coredns/coredns_test.go b/modules/coredns/coredns_test.go index a6b77976a..3056b3b98 100644 --- a/modules/coredns/coredns_test.go +++ b/modules/coredns/coredns_test.go @@ -20,24 +20,18 @@ var ( testNoLoadNoVersion, _ = os.ReadFile("testdata/no_version/no_load.txt") ) -func TestNew(t *testing.T) { - job := New() - - assert.IsType(t, (*CoreDNS)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestCoreDNS_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } func TestCoreDNS_Cleanup(t *testing.T) { New().Cleanup() } -func TestCoreDNS_Init(t *testing.T) { assert.True(t, New().Init()) } +func TestCoreDNS_Init(t *testing.T) { + assert.NoError(t, New().Init()) +} func TestCoreDNS_InitNG(t *testing.T) { job := New() job.URL = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestCoreDNS_Check(t *testing.T) { @@ -60,8 +54,8 @@ func TestCoreDNS_Check(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) }) } } @@ -69,8 +63,8 @@ func TestCoreDNS_Check(t *testing.T) { func TestCoreDNS_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestCoreDNS_Collect(t *testing.T) { @@ -95,8 +89,8 @@ func TestCoreDNS_Collect(t *testing.T) { job.URL = ts.URL + "/metrics" job.PerServerStats.Includes = []string{"glob:*"} job.PerZoneStats.Includes = []string{"glob:*"} - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "coredns.io._request_per_ip_family_v4": 19, @@ -444,8 +438,8 @@ func TestCoreDNS_CollectNoLoad(t *testing.T) { job.URL = ts.URL + "/metrics" job.PerServerStats.Includes = []string{"glob:*"} job.PerZoneStats.Includes = []string{"glob:*"} - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "no_matching_zone_dropped_total": 0, @@ -513,8 +507,8 @@ func TestCoreDNS_InvalidData(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestCoreDNS_404(t *testing.T) { @@ -527,8 +521,8 @@ func TestCoreDNS_404(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestCoreDNS_CollectNoVersion(t *testing.T) { @@ -543,8 +537,8 @@ func TestCoreDNS_CollectNoVersion(t *testing.T) { job.URL = ts.URL + "/metrics" job.PerServerStats.Includes = []string{"glob:*"} job.PerZoneStats.Includes = []string{"glob:*"} - 
require.True(t, job.Init()) - require.False(t, job.Check()) + require.NoError(t, job.Init()) + require.Error(t, job.Check()) assert.Nil(t, job.Collect()) } diff --git a/modules/coredns/init.go b/modules/coredns/init.go new file mode 100644 index 000000000..79d05926d --- /dev/null +++ b/modules/coredns/init.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package coredns + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (cd *CoreDNS) validateConfig() error { + if cd.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (cd *CoreDNS) initPerServerMatcher() (matcher.Matcher, error) { + if cd.PerServerStats.Empty() { + return nil, nil + } + return cd.PerServerStats.Parse() +} + +func (cd *CoreDNS) initPerZoneMatcher() (matcher.Matcher, error) { + if cd.PerZoneStats.Empty() { + return nil, nil + } + return cd.PerZoneStats.Parse() +} + +func (cd *CoreDNS) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(cd.Client) + if err != nil { + return nil, err + } + return prometheus.New(client, cd.Request), nil +} diff --git a/modules/couchbase/config_schema.json b/modules/couchbase/config_schema.json index 307a1261b..a823f5414 100644 --- a/modules/couchbase/config_schema.json +++ b/modules/couchbase/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/couchbase job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/couchbase job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/couchbase/couchbase.go b/modules/couchbase/couchbase.go index b92ec2d76..a6b29f946 100644 --- a/modules/couchbase/couchbase.go +++ b/modules/couchbase/couchbase.go @@ -4,6 +4,7 @@ package couchbase import ( _ "embed" + "errors" "net/http" "time" @@ -32,7 +33,7 @@ func New() *Couchbase { URL: "http://127.0.0.1:8091", }, Client: web.Client{ - Timeout: web.Duration{Duration: 
time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, }, @@ -40,19 +41,22 @@ func New() *Couchbase { } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - } - Couchbase struct { - module.Base - Config `yaml:",inline"` +type Config struct { + web.HTTP `yaml:",inline"` +} - httpClient *http.Client - charts *module.Charts - collectedBuckets map[string]bool - } -) +type Couchbase struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts + collectedBuckets map[string]bool +} + +func (cb *Couchbase) Configuration() any { + return cb.Config +} func (cb *Couchbase) Cleanup() { if cb.httpClient == nil { @@ -61,32 +65,41 @@ func (cb *Couchbase) Cleanup() { cb.httpClient.CloseIdleConnections() } -func (cb *Couchbase) Init() bool { +func (cb *Couchbase) Init() error { err := cb.validateConfig() if err != nil { cb.Errorf("check configuration: %v", err) - return false + return err } httpClient, err := cb.initHTTPClient() if err != nil { cb.Errorf("init HTTP client: %v", err) - return false + return err } cb.httpClient = httpClient charts, err := cb.initCharts() if err != nil { cb.Errorf("init charts: %v", err) - return false + return err } - cb.charts = charts - return true + + return nil } -func (cb *Couchbase) Check() bool { - return len(cb.Collect()) > 0 +func (cb *Couchbase) Check() error { + mx, err := cb.collect() + if err != nil { + cb.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (cb *Couchbase) Charts() *Charts { diff --git a/modules/couchbase/couchbase_test.go b/modules/couchbase/couchbase_test.go index da0fa4e66..565b896ff 100644 --- a/modules/couchbase/couchbase_test.go +++ b/modules/couchbase/couchbase_test.go @@ -67,9 +67,9 @@ func TestCouchbase_Init(t *testing.T) { cb.Config = test.config if test.wantFail { - assert.False(t, cb.Init()) + assert.Error(t, cb.Init()) } else { - assert.True(t, cb.Init()) + assert.NoError(t, cb.Init()) } }) } @@ -103,9 +103,9 @@ func TestCouchbase_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, cb.Check()) + assert.Error(t, cb.Check()) } else { - assert.True(t, cb.Check()) + assert.NoError(t, cb.Check()) } }) } @@ -178,7 +178,7 @@ func prepareCouchbaseV660(t *testing.T) (cb *Couchbase, cleanup func()) { cb = New() cb.URL = srv.URL - require.True(t, cb.Init()) + require.NoError(t, cb.Init()) return cb, srv.Close } @@ -191,7 +191,7 @@ func prepareCouchbaseInvalidData(t *testing.T) (*Couchbase, func()) { })) cb := New() cb.URL = srv.URL - require.True(t, cb.Init()) + require.NoError(t, cb.Init()) return cb, srv.Close } @@ -204,7 +204,7 @@ func prepareCouchbase404(t *testing.T) (*Couchbase, func()) { })) cb := New() cb.URL = srv.URL - require.True(t, cb.Init()) + require.NoError(t, cb.Init()) return cb, srv.Close } @@ -213,7 +213,7 @@ func prepareCouchbaseConnectionRefused(t *testing.T) (*Couchbase, func()) { t.Helper() cb := New() cb.URL = "http://127.0.0.1:38001" - require.True(t, cb.Init()) + require.NoError(t, cb.Init()) return cb, func() {} } diff --git a/modules/couchbase/init.go b/modules/couchbase/init.go index c274ee572..abb330717 100644 --- a/modules/couchbase/init.go +++ b/modules/couchbase/init.go @@ -24,11 +24,11 @@ func (cb *Couchbase) initCharts() (*Charts, error) { return bucketCharts.Copy(), nil } -func (cb Couchbase) initHTTPClient() (*http.Client, error) { +func (cb *Couchbase) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(cb.Client) } -func (cb Couchbase) 
validateConfig() error { +func (cb *Couchbase) validateConfig() error { if cb.URL == "" { return errors.New("URL not set") } diff --git a/modules/couchdb/collect.go b/modules/couchdb/collect.go index 9fd041800..27dd33549 100644 --- a/modules/couchdb/collect.go +++ b/modules/couchdb/collect.go @@ -42,7 +42,7 @@ func (cdb *CouchDB) collect() (map[string]int64, error) { return collected, nil } -func (CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) { +func (cdb *CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) { if !ms.hasNodeStats() { return } @@ -56,7 +56,7 @@ func (CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) { } } -func (CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) { +func (cdb *CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) { if !ms.hasNodeSystem() { return } @@ -68,7 +68,7 @@ func (CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) { collected["peak_msg_queue"] = findMaxMQSize(ms.NodeSystem.MessageQueues) } -func (CouchDB) collectActiveTasks(collected map[string]int64, ms *cdbMetrics) { +func (cdb *CouchDB) collectActiveTasks(collected map[string]int64, ms *cdbMetrics) { collected["active_tasks_indexer"] = 0 collected["active_tasks_database_compaction"] = 0 collected["active_tasks_replication"] = 0 diff --git a/modules/couchdb/config_schema.json b/modules/couchdb/config_schema.json index e3a67e322..7cc1ce9e2 100644 --- a/modules/couchdb/config_schema.json +++ b/modules/couchdb/config_schema.json @@ -1,65 +1,68 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/couchdb job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "node": { - "type": "string" - }, - "databases": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/couchdb job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "node": { + "type": "string" + }, + "databases": { "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/couchdb/couchdb.go b/modules/couchdb/couchdb.go index 3342b7b7f..5fbdc121e 100644 --- 
a/modules/couchdb/couchdb.go +++ b/modules/couchdb/couchdb.go @@ -4,6 +4,7 @@ package couchdb import ( _ "embed" + "errors" "net/http" "strings" "time" @@ -33,7 +34,7 @@ func New() *CouchDB { URL: "http://127.0.0.1:5984", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, Node: "_local", @@ -41,23 +42,25 @@ func New() *CouchDB { } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - Node string `yaml:"node"` - Databases string `yaml:"databases"` - } +type Config struct { + web.HTTP `yaml:",inline"` + Node string `yaml:"node"` + Databases string `yaml:"databases"` +} - CouchDB struct { - module.Base - Config `yaml:",inline"` +type CouchDB struct { + module.Base + Config `yaml:",inline"` - httpClient *http.Client - charts *module.Charts + httpClient *http.Client + charts *module.Charts - databases []string - } -) + databases []string +} + +func (cdb *CouchDB) Configuration() any { + return cdb.Config +} func (cdb *CouchDB) Cleanup() { if cdb.httpClient == nil { @@ -66,11 +69,11 @@ func (cdb *CouchDB) Cleanup() { cdb.httpClient.CloseIdleConnections() } -func (cdb *CouchDB) Init() bool { +func (cdb *CouchDB) Init() error { err := cdb.validateConfig() if err != nil { cdb.Errorf("check configuration: %v", err) - return false + return err } cdb.databases = strings.Fields(cdb.Config.Databases) @@ -78,26 +81,37 @@ func (cdb *CouchDB) Init() bool { httpClient, err := cdb.initHTTPClient() if err != nil { cdb.Errorf("init HTTP client: %v", err) - return false + return err } cdb.httpClient = httpClient charts, err := cdb.initCharts() if err != nil { cdb.Errorf("init charts: %v", err) - return false + return err } cdb.charts = charts - return true + return nil } -func (cdb *CouchDB) Check() bool { +func (cdb *CouchDB) Check() error { if err := cdb.pingCouchDB(); err != nil { cdb.Error(err) - return false + return err + } + + mx, err := cdb.collect() + if err != nil { + cdb.Error(err) + return err } - return len(cdb.Collect()) > 0 + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil } func (cdb *CouchDB) Charts() *Charts { diff --git a/modules/couchdb/couchdb_test.go b/modules/couchdb/couchdb_test.go index 29b5b64af..d61b33e41 100644 --- a/modules/couchdb/couchdb_test.go +++ b/modules/couchdb/couchdb_test.go @@ -79,9 +79,9 @@ func TestCouchDB_Init(t *testing.T) { es.Config = test.config if test.wantFail { - assert.False(t, es.Init()) + assert.Error(t, es.Init()) } else { - assert.True(t, es.Init()) + assert.NoError(t, es.Init()) assert.Equal(t, test.wantNumOfCharts, len(*es.Charts())) } }) @@ -105,9 +105,9 @@ func TestCouchDB_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, cdb.Check()) + assert.Error(t, cdb.Check()) } else { - assert.True(t, cdb.Check()) + assert.NoError(t, cdb.Check()) } }) } @@ -387,7 +387,7 @@ func prepareCouchDB(t *testing.T, createCDB func() *CouchDB) (cdb *CouchDB, clea srv := prepareCouchDBEndpoint() cdb.URL = srv.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, srv.Close } @@ -404,7 +404,7 @@ func prepareCouchDBInvalidData(t *testing.T) (*CouchDB, func()) { })) cdb := New() cdb.URL = srv.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, srv.Close } @@ -417,7 +417,7 @@ func prepareCouchDB404(t *testing.T) (*CouchDB, func()) { })) cdb := New() cdb.URL = srv.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, srv.Close } @@ -426,7 +426,7 @@ func 
prepareCouchDBConnectionRefused(t *testing.T) (*CouchDB, func()) { t.Helper() cdb := New() cdb.URL = "http://127.0.0.1:38001" - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, func() {} } diff --git a/modules/dnsdist/config_schema.json b/modules/dnsdist/config_schema.json index 880190ce2..6f6334d90 100644 --- a/modules/dnsdist/config_schema.json +++ b/modules/dnsdist/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/dnsdist job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/dnsdist job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/dnsdist/dnsdist.go b/modules/dnsdist/dnsdist.go index 0af242534..6fbae240c 100644 --- a/modules/dnsdist/dnsdist.go +++ b/modules/dnsdist/dnsdist.go @@ -4,6 +4,7 @@ package dnsdist import ( _ "embed" + "errors" "net/http" "time" @@ -24,18 +25,6 @@ func init() { }) } -type Config struct { - web.HTTP `yaml:",inline"` -} - -type DNSdist struct { - module.Base - Config `yaml:",inline"` - - httpClient *http.Client - charts *module.Charts -} - func New() *DNSdist { return &DNSdist{ Config: Config{ @@ -44,39 +33,64 @@ func New() *DNSdist { URL: "http://127.0.0.1:8083", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, } } -func (d *DNSdist) Init() bool { +type Config struct { + web.HTTP `yaml:",inline"` +} + +type DNSdist struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts +} + +func (d *DNSdist) Configuration() any { + return d.Config +} + +func (d *DNSdist) Init() error { err := d.validateConfig() if err != nil { d.Errorf("config validation: %v", err) - return false + return err } client, err := d.initHTTPClient() if err != nil { d.Errorf("init HTTP client: %v", err) - return false + return err } d.httpClient = client cs, err := d.initCharts() if err != nil { d.Errorf("init charts: %v", err) - return false + return err } d.charts = cs - return true + return nil } -func (d 
*DNSdist) Check() bool { - return len(d.Collect()) > 0 +func (d *DNSdist) Check() error { + mx, err := d.collect() + if err != nil { + d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (d *DNSdist) Charts() *module.Charts { @@ -100,6 +114,5 @@ func (d *DNSdist) Cleanup() { if d.httpClient == nil { return } - d.httpClient.CloseIdleConnections() } diff --git a/modules/dnsdist/dnsdist_test.go b/modules/dnsdist/dnsdist_test.go index 851d99016..3a3265de2 100644 --- a/modules/dnsdist/dnsdist_test.go +++ b/modules/dnsdist/dnsdist_test.go @@ -68,9 +68,9 @@ func Test_Init(t *testing.T) { ns.Config = test.config if test.wantFail { - assert.False(t, ns.Init()) + assert.Error(t, ns.Init()) } else { - assert.True(t, ns.Init()) + assert.NoError(t, ns.Init()) } }) } @@ -78,7 +78,7 @@ func Test_Init(t *testing.T) { func Test_Charts(t *testing.T) { dist := New() - require.True(t, dist.Init()) + require.NoError(t, dist.Init()) assert.NotNil(t, dist.Charts()) } @@ -113,12 +113,12 @@ func Test_Check(t *testing.T) { t.Run(name, func(t *testing.T) { dist, cleanup := test.prepare() defer cleanup() - require.True(t, dist.Init()) + require.NoError(t, dist.Init()) if test.wantFail { - assert.False(t, dist.Check()) + assert.Error(t, dist.Check()) } else { - assert.True(t, dist.Check()) + assert.NoError(t, dist.Check()) } }) } @@ -181,7 +181,7 @@ func Test_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { dist, cleanup := test.prepare() defer cleanup() - require.True(t, dist.Init()) + require.NoError(t, dist.Init()) collected := dist.Collect() diff --git a/modules/dnsdist/init.go b/modules/dnsdist/init.go index d58891681..41c92edc6 100644 --- a/modules/dnsdist/init.go +++ b/modules/dnsdist/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (d DNSdist) validateConfig() error { +func (d *DNSdist) validateConfig() error { if d.URL == "" { return errors.New("URL not set") } @@ -22,10 +22,10 @@ func (d DNSdist) validateConfig() error { return nil } -func (d DNSdist) initHTTPClient() (*http.Client, error) { +func (d *DNSdist) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(d.Client) } -func (d DNSdist) initCharts() (*module.Charts, error) { +func (d *DNSdist) initCharts() (*module.Charts, error) { return charts.Copy(), nil } diff --git a/modules/dnsmasq/config_schema.json b/modules/dnsmasq/config_schema.json index d08819917..abfa95019 100644 --- a/modules/dnsmasq/config_schema.json +++ b/modules/dnsmasq/config_schema.json @@ -1,26 +1,29 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/dnsmasq job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/dnsmasq job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "protocol": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "protocol": { - "type": "string" - }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/dnsmasq/dnsmasq.go b/modules/dnsmasq/dnsmasq.go index 33e252b09..2e1a814a0 100644 --- a/modules/dnsmasq/dnsmasq.go +++ b/modules/dnsmasq/dnsmasq.go @@ -4,6 +4,7 @@ package dnsmasq import ( 
_ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -27,7 +28,7 @@ func New() *Dnsmasq { Config: Config{ Protocol: "udp", Address: "127.0.0.1:53", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, newDNSClient: func(network string, timeout time.Duration) dnsClient { @@ -61,32 +62,45 @@ type ( } ) -func (d *Dnsmasq) Init() bool { +func (d *Dnsmasq) Configuration() any { + return d.Config +} + +func (d *Dnsmasq) Init() error { err := d.validateConfig() if err != nil { d.Errorf("config validation: %v", err) - return false + return err } client, err := d.initDNSClient() if err != nil { d.Errorf("init DNS client: %v", err) - return false + return err } d.dnsClient = client charts, err := d.initCharts() if err != nil { d.Errorf("init charts: %v", err) - return false + return err } d.charts = charts - return true + return nil } -func (d *Dnsmasq) Check() bool { - return len(d.Collect()) > 0 +func (d *Dnsmasq) Check() error { + mx, err := d.collect() + if err != nil { + d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (d *Dnsmasq) Charts() *module.Charts { @@ -105,4 +119,4 @@ func (d *Dnsmasq) Collect() map[string]int64 { return ms } -func (Dnsmasq) Cleanup() {} +func (d *Dnsmasq) Cleanup() {} diff --git a/modules/dnsmasq/dnsmasq_test.go b/modules/dnsmasq/dnsmasq_test.go index b4f0bb555..4484388cb 100644 --- a/modules/dnsmasq/dnsmasq_test.go +++ b/modules/dnsmasq/dnsmasq_test.go @@ -54,9 +54,9 @@ func TestDnsmasq_Init(t *testing.T) { ns.Config = test.config if test.wantFail { - assert.False(t, ns.Init()) + assert.Error(t, ns.Init()) } else { - assert.True(t, ns.Init()) + assert.NoError(t, ns.Init()) } }) } @@ -83,12 +83,12 @@ func TestDnsmasq_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { dnsmasq := test.prepare() - require.True(t, dnsmasq.Init()) + require.NoError(t, dnsmasq.Init()) if test.wantFail { - assert.False(t, dnsmasq.Check()) + assert.Error(t, dnsmasq.Check()) } else { - assert.True(t, dnsmasq.Check()) + assert.NoError(t, dnsmasq.Check()) } }) } @@ -96,7 +96,7 @@ func TestDnsmasq_Check(t *testing.T) { func TestDnsmasq_Charts(t *testing.T) { dnsmasq := New() - require.True(t, dnsmasq.Init()) + require.NoError(t, dnsmasq.Init()) assert.NotNil(t, dnsmasq.Charts()) } @@ -133,7 +133,7 @@ func TestDnsmasq_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { dnsmasq := test.prepare() - require.True(t, dnsmasq.Init()) + require.NoError(t, dnsmasq.Init()) collected := dnsmasq.Collect() diff --git a/modules/dnsmasq/init.go b/modules/dnsmasq/init.go index 2ce4790ae..9ceb3ead5 100644 --- a/modules/dnsmasq/init.go +++ b/modules/dnsmasq/init.go @@ -9,7 +9,7 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -func (d Dnsmasq) validateConfig() error { +func (d *Dnsmasq) validateConfig() error { if d.Address == "" { return errors.New("'address' parameter not set") } @@ -19,11 +19,11 @@ func (d Dnsmasq) validateConfig() error { return nil } -func (d Dnsmasq) initDNSClient() (dnsClient, error) { - return d.newDNSClient(d.Protocol, d.Timeout.Duration), nil +func (d *Dnsmasq) initDNSClient() (dnsClient, error) { + return d.newDNSClient(d.Protocol, d.Timeout.Duration()), nil } -func (d Dnsmasq) initCharts() (*module.Charts, error) { +func (d *Dnsmasq) initCharts() (*module.Charts, error) { return cacheCharts.Copy(), nil } diff --git a/modules/dnsmasq_dhcp/config_schema.json 
b/modules/dnsmasq_dhcp/config_schema.json index bb9d76813..ff61f027e 100644 --- a/modules/dnsmasq_dhcp/config_schema.json +++ b/modules/dnsmasq_dhcp/config_schema.json @@ -1,23 +1,26 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/dnsmasq_dhcp job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/dnsmasq_dhcp job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "leases_path": { + "type": "string" + }, + "conf_path": { + "type": "string" + }, + "conf_dir": { + "type": "string" + } }, - "leases_path": { - "type": "string" - }, - "conf_path": { - "type": "string" - }, - "conf_dir": { - "type": "string" - } + "required": [ + "name", + "leases_path" + ] }, - "required": [ - "name", - "leases_path" - ] + "uiSchema": {} } diff --git a/modules/dnsmasq_dhcp/dhcp.go b/modules/dnsmasq_dhcp/dhcp.go index ede8a8ee8..81d4fa806 100644 --- a/modules/dnsmasq_dhcp/dhcp.go +++ b/modules/dnsmasq_dhcp/dhcp.go @@ -4,6 +4,7 @@ package dnsmasq_dhcp import ( _ "embed" + "errors" "net" "time" @@ -22,15 +23,13 @@ func init() { } func New() *DnsmasqDHCP { - config := Config{ - // debian defaults - LeasesPath: "/var/lib/misc/dnsmasq.leases", - ConfPath: "/etc/dnsmasq.conf", - ConfDir: "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new", - } - return &DnsmasqDHCP{ - Config: config, + Config: Config{ + // debian defaults + LeasesPath: "/var/lib/misc/dnsmasq.leases", + ConfPath: "/etc/dnsmasq.conf", + ConfDir: "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new", + }, charts: charts.Copy(), parseConfigEvery: time.Minute, cacheDHCPRanges: make(map[string]bool), @@ -63,21 +62,34 @@ type DnsmasqDHCP struct { mx map[string]int64 } -func (d *DnsmasqDHCP) Init() bool { +func (d *DnsmasqDHCP) Configuration() any { + return d.Config +} + +func (d *DnsmasqDHCP) Init() error { if err := d.validateConfig(); err != nil { d.Errorf("config validation: %v", err) - return false + return err } if err := d.checkLeasesPath(); err != nil { d.Errorf("leases path check: %v", err) - return false + return err } - return true + return nil } -func (d *DnsmasqDHCP) Check() bool { - return len(d.Collect()) > 0 +func (d *DnsmasqDHCP) Check() error { + mx, err := d.collect() + if err != nil { + d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (d *DnsmasqDHCP) Charts() *module.Charts { diff --git a/modules/dnsmasq_dhcp/dhcp_test.go b/modules/dnsmasq_dhcp/dhcp_test.go index 9e7693fa9..8de947bc9 100644 --- a/modules/dnsmasq_dhcp/dhcp_test.go +++ b/modules/dnsmasq_dhcp/dhcp_test.go @@ -27,14 +27,14 @@ func TestDnsmasqDHCP_Init(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) } func TestDnsmasqDHCP_InitEmptyLeasesPath(t *testing.T) { job := New() job.LeasesPath = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestDnsmasqDHCP_InitInvalidLeasesPath(t *testing.T) { @@ -42,7 +42,7 @@ func TestDnsmasqDHCP_InitInvalidLeasesPath(t *testing.T) { job.LeasesPath = testLeasesPath job.LeasesPath += "!" 
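Across the modules converted in this changeset (dnsdist, dnsmasq, dnsmasq_dhcp, docker and the rest), Check() settles on one shape: run a single collection, log and propagate any error, and treat an empty result as a failure. A distilled sketch of that shape follows; the collector type and its collect() body are placeholders, not code from this diff, and the real modules log through the module.Base Error helper rather than the standard library logger.

package sketch

import (
	"errors"
	"log"
)

// collector is a stand-in for any of the converted modules; collect() is a
// placeholder for the per-module metric gathering.
type collector struct{}

func (c *collector) collect() (map[string]int64, error) {
	return map[string]int64{"up": 1}, nil // placeholder metrics
}

// Check runs one collection attempt, surfaces the error, and treats
// "no metrics" as a failure, mirroring the bodies added in this diff.
func (c *collector) Check() error {
	mx, err := c.collect()
	if err != nil {
		log.Println(err) // real modules: c.Error(err) via module.Base
		return err
	}
	if len(mx) == 0 {
		return errors.New("no metrics collected")
	}
	return nil
}

Returning an error rather than a bool presumably lets the caller report why a check failed instead of only that it failed.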
- assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestDnsmasqDHCP_InitZeroDHCPRanges(t *testing.T) { @@ -51,7 +51,7 @@ func TestDnsmasqDHCP_InitZeroDHCPRanges(t *testing.T) { job.ConfPath = "testdata/dnsmasq3.conf" job.ConfDir = "" - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) } func TestDnsmasqDHCP_Check(t *testing.T) { @@ -60,8 +60,8 @@ func TestDnsmasqDHCP_Check(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestDnsmasqDHCP_Charts(t *testing.T) { @@ -70,7 +70,7 @@ func TestDnsmasqDHCP_Charts(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.Charts()) } @@ -85,8 +85,8 @@ func TestDnsmasqDHCP_Collect(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "dhcp_range_1230::1-1230::64_allocated_leases": 7, @@ -126,8 +126,8 @@ func TestDnsmasqDHCP_CollectFailedToOpenLeasesPath(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) job.LeasesPath = "" assert.Nil(t, job.Collect()) diff --git a/modules/dnsquery/collect.go b/modules/dnsquery/collect.go index 46104e944..a98e37cad 100644 --- a/modules/dnsquery/collect.go +++ b/modules/dnsquery/collect.go @@ -14,7 +14,7 @@ import ( func (d *DNSQuery) collect() (map[string]int64, error) { if d.dnsClient == nil { - d.dnsClient = d.newDNSClient(d.Network, d.Timeout.Duration) + d.dnsClient = d.newDNSClient(d.Network, d.Timeout.Duration()) } mx := make(map[string]int64) diff --git a/modules/dnsquery/config_schema.json b/modules/dnsquery/config_schema.json index 4a7fa412a..c1ecf7d14 100644 --- a/modules/dnsquery/config_schema.json +++ b/modules/dnsquery/config_schema.json @@ -1,48 +1,51 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/dns_query job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "domains": { - "type": "array", - "items": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/dns_query job configuration schema.", + "type": "object", + "properties": { + "name": { "type": "string" - } - }, - "servers": { - "type": "array", - "items": { + }, + "domains": { + "type": "array", + "items": { + "type": "string" + } + }, + "servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "network": { "type": "string" - } - }, - "network": { - "type": "string" - }, - "record_type": { - "type": "string" - }, - "record_types": { - "type": "array", - "items": { + }, + "record_type": { "type": "string" + }, + "record_types": { + "type": "array", + "items": { + "type": "string" + } + }, + "port": { + "type": "integer" + }, + "timeout": { + "type": [ + "string", + "integer" + ] } }, - "port": { - "type": "integer" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "domains", + "servers" + ] }, - "required": [ - "name", - "domains", - "servers" - ] + "uiSchema": {} } diff --git a/modules/dnsquery/dnsquery.go b/modules/dnsquery/dnsquery.go index dd1cd3c66..48d215bb6 100644 --- 
a/modules/dnsquery/dnsquery.go +++ b/modules/dnsquery/dnsquery.go @@ -28,7 +28,7 @@ func init() { func New() *DNSQuery { return &DNSQuery{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), Network: "udp", RecordTypes: []string{"A"}, Port: 53, @@ -71,31 +71,35 @@ type ( } ) -func (d *DNSQuery) Init() bool { +func (d *DNSQuery) Configuration() any { + return d.Config +} + +func (d *DNSQuery) Init() error { if err := d.verifyConfig(); err != nil { d.Errorf("config validation: %v", err) - return false + return err } rt, err := d.initRecordTypes() if err != nil { d.Errorf("init record type: %v", err) - return false + return err } d.recordTypes = rt charts, err := d.initCharts() if err != nil { d.Errorf("init charts: %v", err) - return false + return err } d.charts = charts - return true + return nil } -func (d *DNSQuery) Check() bool { - return true +func (d *DNSQuery) Check() error { + return nil } func (d *DNSQuery) Charts() *module.Charts { diff --git a/modules/dnsquery/dnsquery_test.go b/modules/dnsquery/dnsquery_test.go index 5ba841731..7dbd06aca 100644 --- a/modules/dnsquery/dnsquery_test.go +++ b/modules/dnsquery/dnsquery_test.go @@ -32,7 +32,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "success when using deprecated record_type": { @@ -43,7 +43,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordType: "A", Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "fail with default": { @@ -58,7 +58,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "fail when servers not set": { @@ -69,7 +69,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "fail when network is invalid": { @@ -80,7 +80,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "gcp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "fail when record_type is invalid": { @@ -91,7 +91,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"B"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, } @@ -102,9 +102,9 @@ func TestDNSQuery_Init(t *testing.T) { dq.Config = test.config if test.wantFail { - assert.False(t, dq.Init()) + assert.Error(t, dq.Init()) } else { - assert.True(t, dq.Init()) + assert.NoError(t, dq.Init()) } }) } @@ -129,12 +129,12 @@ func TestDNSQuery_Check(t *testing.T) { t.Run(name, func(t *testing.T) { dq := test.prepare() - require.True(t, dq.Init()) + require.NoError(t, dq.Init()) if test.wantFail { - assert.False(t, dq.Check()) + assert.Error(t, dq.Check()) } else { - assert.True(t, dq.Check()) + assert.NoError(t, dq.Check()) } }) } @@ -145,7 +145,7 @@ func TestDNSQuery_Charts(t *testing.T) { dq.Domains = []string{"google.com"} dq.Servers = []string{"192.0.2.0", "192.0.2.1"} - require.True(t, dq.Init()) + require.NoError(t, dq.Init()) assert.NotNil(t, dq.Charts()) assert.Len(t, *dq.Charts(), len(dnsChartsTmpl)*len(dq.Servers)) @@ -186,7 +186,7 @@ func TestDNSQuery_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { 
dq := test.prepare() - require.True(t, dq.Init()) + require.NoError(t, dq.Init()) mx := dq.Collect() diff --git a/modules/docker/collect.go b/modules/docker/collect.go index ceda40671..fe4b6b45e 100644 --- a/modules/docker/collect.go +++ b/modules/docker/collect.go @@ -43,7 +43,7 @@ func (d *Docker) collect() (map[string]int64, error) { } func (d *Docker) collectInfo(mx map[string]int64) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration()) defer cancel() info, err := d.client.Info(ctx) @@ -59,7 +59,7 @@ func (d *Docker) collectInfo(mx map[string]int64) error { } func (d *Docker) collectImages(mx map[string]int64) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration()) defer cancel() images, err := d.client.ImageList(ctx, types.ImageListOptions{}) @@ -106,7 +106,7 @@ func (d *Docker) collectContainers(mx map[string]int64) error { for _, status := range containerHealthStatuses { if err := func() error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration()) defer cancel() v, err := d.client.ContainerList(ctx, types.ContainerListOptions{ @@ -191,7 +191,7 @@ func (d *Docker) collectContainers(mx map[string]int64) error { } func (d *Docker) negotiateAPIVersion() { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration()) defer cancel() d.client.NegotiateAPIVersion(ctx) diff --git a/modules/docker/config_schema.json b/modules/docker/config_schema.json index b060da819..9c851bfd7 100644 --- a/modules/docker/config_schema.json +++ b/modules/docker/config_schema.json @@ -1,26 +1,29 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/docker job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/docker job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_container_size": { + "type": "boolean" + } }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "collect_container_size": { - "type": "boolean" - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/docker/docker.go b/modules/docker/docker.go index 1078de2fb..81ee70884 100644 --- a/modules/docker/docker.go +++ b/modules/docker/docker.go @@ -5,6 +5,7 @@ package docker import ( "context" _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -28,7 +29,7 @@ func New() *Docker { return &Docker{ Config: Config{ Address: docker.DefaultDockerHost, - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), CollectContainerSize: false, }, @@ -68,12 +69,25 @@ type ( } ) -func (d *Docker) Init() bool { - return true +func (d *Docker) Configuration() any { + return d.Config } -func (d *Docker) Check() bool { - return len(d.Collect()) > 0 +func (d *Docker) Init() error { + return nil +} + +func (d *Docker) Check() error { + mx, err := d.collect() 
+ if err != nil { + d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (d *Docker) Charts() *module.Charts { diff --git a/modules/docker/docker_test.go b/modules/docker/docker_test.go index 0a3711b4d..083f1400f 100644 --- a/modules/docker/docker_test.go +++ b/modules/docker/docker_test.go @@ -35,9 +35,9 @@ func TestDocker_Init(t *testing.T) { d.Config = test.config if test.wantFail { - assert.False(t, d.Init()) + assert.Error(t, d.Init()) } else { - assert.True(t, d.Init()) + assert.NoError(t, d.Init()) } }) } @@ -58,15 +58,15 @@ func TestDocker_Cleanup(t *testing.T) { }, "after Init": { wantClose: false, - prepare: func(d *Docker) { d.Init() }, + prepare: func(d *Docker) { _ = d.Init() }, }, "after Check": { wantClose: true, - prepare: func(d *Docker) { d.Init(); d.Check() }, + prepare: func(d *Docker) { _ = d.Init(); _ = d.Check() }, }, "after Collect": { wantClose: true, - prepare: func(d *Docker) { d.Init(); d.Collect() }, + prepare: func(d *Docker) { _ = d.Init(); d.Collect() }, }, } @@ -136,12 +136,12 @@ func TestDocker_Check(t *testing.T) { t.Run(name, func(t *testing.T) { d := test.prepare() - require.True(t, d.Init()) + require.NoError(t, d.Init()) if test.wantFail { - assert.False(t, d.Check()) + assert.Error(t, d.Check()) } else { - assert.True(t, d.Check()) + assert.NoError(t, d.Check()) } }) } @@ -666,7 +666,7 @@ func TestDocker_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { d := test.prepare() - require.True(t, d.Init()) + require.NoError(t, d.Init()) mx := d.Collect() diff --git a/modules/docker_engine/config_schema.json b/modules/docker_engine/config_schema.json index 2b8505610..a096a5be0 100644 --- a/modules/docker_engine/config_schema.json +++ b/modules/docker_engine/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/docker_engine job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/docker_engine job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git 
a/modules/docker_engine/docker_engine.go b/modules/docker_engine/docker_engine.go index 7c69daa29..087be298b 100644 --- a/modules/docker_engine/docker_engine.go +++ b/modules/docker_engine/docker_engine.go @@ -24,69 +24,67 @@ func init() { } func New() *DockerEngine { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:9323/metrics", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &DockerEngine{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:9323/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, } - return &DockerEngine{ - Config: config, - } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - } - DockerEngine struct { - module.Base - Config `yaml:",inline"` +type Config struct { + web.HTTP `yaml:",inline"` +} - prom prometheus.Prometheus - isSwarmManager bool - hasContainerStates bool - } -) +type DockerEngine struct { + module.Base + Config `yaml:",inline"` -func (de DockerEngine) validateConfig() error { - if de.URL == "" { - return errors.New("URL is not set") - } - return nil + prom prometheus.Prometheus + isSwarmManager bool + hasContainerStates bool +} + +func (de *DockerEngine) Configuration() any { + return de.Config } -func (de *DockerEngine) initClient() error { - client, err := web.NewHTTPClient(de.Client) +func (de *DockerEngine) Init() error { + if err := de.validateConfig(); err != nil { + de.Errorf("config validation: %v", err) + return err + } + + prom, err := de.initPrometheusClient() if err != nil { + de.Error(err) return err } + de.prom = prom - de.prom = prometheus.New(client, de.Request) return nil } -func (de *DockerEngine) Init() bool { - if err := de.validateConfig(); err != nil { - de.Errorf("config validation: %v", err) - return false - } - if err := de.initClient(); err != nil { - de.Errorf("client initialization: %v", err) - return false +func (de *DockerEngine) Check() error { + mx, err := de.collect() + if err != nil { + de.Error(err) + return err } - return true -} + if len(mx) == 0 { + return errors.New("no metrics collected") -func (de *DockerEngine) Check() bool { - return len(de.Collect()) > 0 + } + return nil } -func (de DockerEngine) Charts() *Charts { +func (de *DockerEngine) Charts() *Charts { cs := charts.Copy() if !de.hasContainerStates { if err := cs.Remove("engine_daemon_container_states_containers"); err != nil { @@ -101,6 +99,7 @@ func (de DockerEngine) Charts() *Charts { if err := cs.Add(*swarmManagerCharts.Copy()...); err != nil { de.Warning(err) } + return cs } @@ -117,4 +116,8 @@ func (de *DockerEngine) Collect() map[string]int64 { return mx } -func (DockerEngine) Cleanup() {} +func (de *DockerEngine) Cleanup() { + if de.prom != nil && de.prom.HTTPClient() != nil { + de.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/docker_engine/docker_engine_test.go b/modules/docker_engine/docker_engine_test.go index 7ffc1ce5e..42ffe4625 100644 --- a/modules/docker_engine/docker_engine_test.go +++ b/modules/docker_engine/docker_engine_test.go @@ -64,9 +64,9 @@ func TestDockerEngine_Init(t *testing.T) { dockerEngine.Config = test.config if test.wantFail { - assert.False(t, dockerEngine.Init()) + assert.Error(t, dockerEngine.Init()) } else { - assert.True(t, dockerEngine.Init()) + assert.NoError(t, dockerEngine.Init()) } }) } @@ -92,9 +92,9 @@ func TestDockerEngine_Check(t *testing.T) { defer srv.Close() if test.wantFail { - assert.False(t, dockerEngine.Check()) + assert.Error(t, 
dockerEngine.Check()) } else { - assert.True(t, dockerEngine.Check()) + assert.NoError(t, dockerEngine.Check()) } }) } @@ -115,7 +115,7 @@ func TestDockerEngine_Charts(t *testing.T) { dockerEngine, srv := test.prepare(t) defer srv.Close() - require.True(t, dockerEngine.Check()) + require.NoError(t, dockerEngine.Check()) assert.Len(t, *dockerEngine.Charts(), test.wantNumCharts) }) } @@ -276,7 +276,7 @@ func prepareClientServerV17050CE(t *testing.T) (*DockerEngine, *httptest.Server) dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -290,7 +290,7 @@ func prepareClientServerV18093CE(t *testing.T) (*DockerEngine, *httptest.Server) dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -304,7 +304,7 @@ func prepareClientServerV18093CESwarm(t *testing.T) (*DockerEngine, *httptest.Se dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -318,7 +318,7 @@ func prepareClientServerNonDockerEngine(t *testing.T) (*DockerEngine, *httptest. dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -332,7 +332,7 @@ func prepareClientServerInvalidData(t *testing.T) (*DockerEngine, *httptest.Serv dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -346,7 +346,7 @@ func prepareClientServer404(t *testing.T) (*DockerEngine, *httptest.Server) { dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -357,7 +357,7 @@ func prepareClientServerConnectionRefused(t *testing.T) (*DockerEngine, *httptes dockerEngine := New() dockerEngine.URL = "http://127.0.0.1:38001/metrics" - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } diff --git a/modules/docker_engine/init.go b/modules/docker_engine/init.go new file mode 100644 index 000000000..b3ceefdea --- /dev/null +++ b/modules/docker_engine/init.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker_engine + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +func (de *DockerEngine) validateConfig() error { + if de.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (de *DockerEngine) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(de.Client) + if err != nil { + return nil, err + } + return prometheus.New(client, de.Request), nil +} diff --git a/modules/dockerhub/config_schema.json b/modules/dockerhub/config_schema.json index 1be293e6f..8873906ae 100644 --- a/modules/dockerhub/config_schema.json +++ b/modules/dockerhub/config_schema.json @@ -1,65 +1,68 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/dockerhub job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "repositories": { - "type": "array", - "items": { - "type": "number" - } - }, - "username": { - 
"type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/dockerhub job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "repositories": { + "type": "array", + "items": { + "type": "number" + } + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "repositories" + ] }, - "required": [ - "name", - "repositories" - ] + "uiSchema": {} } diff --git a/modules/dockerhub/dockerhub.go b/modules/dockerhub/dockerhub.go index 48836a606..82230ddb5 100644 --- a/modules/dockerhub/dockerhub.go +++ b/modules/dockerhub/dockerhub.go @@ -4,6 +4,7 @@ package dockerhub import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -11,13 +12,6 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -const ( - defaultURL = "https://hub.docker.com/v2/repositories" - defaultHTTPTimeout = time.Second * 2 - - defaultUpdateEvery = 5 -) - //go:embed "config_schema.json" var configSchema string @@ -25,7 +19,7 @@ func init() { module.Register("dockerhub", module.Creator{ JobConfigSchema: configSchema, Defaults: module.Defaults{ - UpdateEvery: defaultUpdateEvery, + UpdateEvery: 5, }, Create: func() module.Module { return New() }, }) @@ -33,19 +27,18 @@ func init() { // New creates DockerHub with default values. func New() *DockerHub { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &DockerHub{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "https://hub.docker.com/v2/repositories", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, }, }, } - return &DockerHub{ - Config: config, - } } // Config is the DockerHub module configuration. @@ -61,38 +54,43 @@ type DockerHub struct { client *apiClient } -// Cleanup makes cleanup. -func (DockerHub) Cleanup() {} +func (dh *DockerHub) Configuration() any { + return dh.Config +} // Init makes initialization. 
-func (dh *DockerHub) Init() bool { - if dh.URL == "" { - dh.Error("URL not set") - return false - } - - if len(dh.Repositories) == 0 { - dh.Error("repositories parameter is not set") - return false +func (dh *DockerHub) Init() error { + if err := dh.validateConfig(); err != nil { + dh.Errorf("config validation: %v", err) + return err } - client, err := web.NewHTTPClient(dh.Client) + client, err := dh.initApiClient() if err != nil { - dh.Errorf("error on creating http client : %v", err) - return false + dh.Error(err) + return err } - dh.client = newAPIClient(client, dh.Request) + dh.client = client - return true + return nil } // Check makes check. -func (dh DockerHub) Check() bool { - return len(dh.Collect()) > 0 +func (dh *DockerHub) Check() error { + mx, err := dh.collect() + if err != nil { + dh.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } // Charts creates Charts. -func (dh DockerHub) Charts() *Charts { +func (dh *DockerHub) Charts() *Charts { cs := charts.Copy() addReposToCharts(dh.Repositories, cs) return cs @@ -109,3 +107,10 @@ func (dh *DockerHub) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (dh *DockerHub) Cleanup() { + if dh.client != nil && dh.client.httpClient != nil { + dh.client.httpClient.CloseIdleConnections() + } +} diff --git a/modules/dockerhub/dockerhub_test.go b/modules/dockerhub/dockerhub_test.go index 350af1a53..d5e2626a0 100644 --- a/modules/dockerhub/dockerhub_test.go +++ b/modules/dockerhub/dockerhub_test.go @@ -19,16 +19,6 @@ var ( repo3Data, _ = os.ReadFile("testdata/repo3.txt") ) -func TestNew(t *testing.T) { - job := New() - - assert.IsType(t, (*DockerHub)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) - assert.Len(t, job.Repositories, 0) - assert.Nil(t, job.client) -} - func TestDockerHub_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } func TestDockerHub_Cleanup(t *testing.T) { New().Cleanup() } @@ -36,11 +26,13 @@ func TestDockerHub_Cleanup(t *testing.T) { New().Cleanup() } func TestDockerHub_Init(t *testing.T) { job := New() job.Repositories = []string{"name/repo"} - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.client) } -func TestDockerHub_InitNG(t *testing.T) { assert.False(t, New().Init()) } +func TestDockerHub_InitNG(t *testing.T) { + assert.Error(t, New().Init()) +} func TestDockerHub_Check(t *testing.T) { ts := httptest.NewServer( @@ -60,16 +52,16 @@ func TestDockerHub_Check(t *testing.T) { job := New() job.URL = ts.URL job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestDockerHub_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/metrics" job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestDockerHub_Collect(t *testing.T) { @@ -90,8 +82,8 @@ func TestDockerHub_Collect(t *testing.T) { job := New() job.URL = ts.URL job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "star_count_user1/name1": 45, @@ -127,8 +119,8 @@ func 
TestDockerHub_InvalidData(t *testing.T) { job := New() job.URL = ts.URL job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestDockerHub_404(t *testing.T) { @@ -141,6 +133,6 @@ func TestDockerHub_404(t *testing.T) { job := New() job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/dockerhub/init.go b/modules/dockerhub/init.go new file mode 100644 index 000000000..17f2e712e --- /dev/null +++ b/modules/dockerhub/init.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dockerhub + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (dh *DockerHub) validateConfig() error { + if dh.URL == "" { + return errors.New("url not set") + } + if len(dh.Repositories) == 0 { + return errors.New("repositories not set") + } + return nil +} + +func (dh *DockerHub) initApiClient() (*apiClient, error) { + client, err := web.NewHTTPClient(dh.Client) + if err != nil { + return nil, err + } + return newAPIClient(client, dh.Request), nil +} diff --git a/modules/elasticsearch/config_schema.json b/modules/elasticsearch/config_schema.json index f69eb6e43..686b0f16a 100644 --- a/modules/elasticsearch/config_schema.json +++ b/modules/elasticsearch/config_schema.json @@ -1,74 +1,77 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/elasticsearch job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "cluster_mode": { - "type": "boolean" - }, - "collect_node_stats": { - "type": "boolean" - }, - "collect_cluster_health": { - "type": "boolean" - }, - "collect_cluster_stats": { - "type": "boolean" - }, - "collect_indices_stats": { - "type": "boolean" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/elasticsearch job configuration schema.", + "type": "object", + "properties": { + "name": { "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "cluster_mode": { + "type": "boolean" + }, + "collect_node_stats": { + "type": "boolean" + }, + "collect_cluster_health": { + "type": "boolean" + }, + "collect_cluster_stats": { + "type": "boolean" + }, + "collect_indices_stats": { + "type": "boolean" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": 
{ - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/elasticsearch/elasticsearch.go b/modules/elasticsearch/elasticsearch.go index 4b29a6cc8..2c10a4221 100644 --- a/modules/elasticsearch/elasticsearch.go +++ b/modules/elasticsearch/elasticsearch.go @@ -4,6 +4,7 @@ package elasticsearch import ( _ "embed" + "errors" "net/http" "sync" "time" @@ -34,7 +35,7 @@ func New() *Elasticsearch { URL: "http://127.0.0.1:9200", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, ClusterMode: false, @@ -78,25 +79,38 @@ type Elasticsearch struct { indices map[string]bool } -func (es *Elasticsearch) Init() bool { +func (es *Elasticsearch) Configuration() any { + return es.Config +} + +func (es *Elasticsearch) Init() error { err := es.validateConfig() if err != nil { es.Errorf("check configuration: %v", err) - return false + return err } httpClient, err := es.initHTTPClient() if err != nil { es.Errorf("init HTTP client: %v", err) - return false + return err } es.httpClient = httpClient - return true + return nil } -func (es *Elasticsearch) Check() bool { - return len(es.Collect()) > 0 +func (es *Elasticsearch) Check() error { + mx, err := es.collect() + if err != nil { + es.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (es *Elasticsearch) Charts() *module.Charts { diff --git a/modules/elasticsearch/elasticsearch_test.go b/modules/elasticsearch/elasticsearch_test.go index d4f1628cd..1a6d178cc 100644 --- a/modules/elasticsearch/elasticsearch_test.go +++ b/modules/elasticsearch/elasticsearch_test.go @@ -103,9 +103,9 @@ func TestElasticsearch_Init(t *testing.T) { es.Config = test.config if test.wantFail { - assert.False(t, es.Init()) + assert.Error(t, es.Init()) } else { - assert.True(t, es.Init()) + assert.NoError(t, es.Init()) } }) } @@ -128,9 +128,9 @@ func TestElasticsearch_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, es.Check()) + assert.Error(t, es.Check()) } else { - assert.True(t, es.Check()) + assert.NoError(t, es.Check()) } }) } @@ -666,7 +666,7 @@ func prepareElasticsearch(t *testing.T, createES func() *Elasticsearch) (es *Ela es = createES() es.URL = srv.URL - require.True(t, es.Init()) + require.NoError(t, es.Init()) return es, srv.Close } @@ -683,7 +683,7 @@ func prepareElasticsearchInvalidData(t *testing.T) (*Elasticsearch, func()) { })) es := New() es.URL = srv.URL - require.True(t, es.Init()) + require.NoError(t, es.Init()) return es, srv.Close } @@ -696,7 +696,7 @@ func prepareElasticsearch404(t *testing.T) (*Elasticsearch, func()) { })) es := New() es.URL = srv.URL - require.True(t, es.Init()) + require.NoError(t, es.Init()) return es, srv.Close } @@ -705,7 +705,7 @@ func prepareElasticsearchConnectionRefused(t *testing.T) (*Elasticsearch, func() t.Helper() es := New() es.URL = "http://127.0.0.1:38001" - require.True(t, es.Init()) + require.NoError(t, es.Init()) return es, func() {} } diff --git a/modules/energid/config_schema.json b/modules/energid/config_schema.json index 20f4ec9f8..7251f3137 100644 --- a/modules/energid/config_schema.json +++ b/modules/energid/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/energid job configuration schema.", - "type": "object", - "properties": 
{ - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/energid job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/energid/energid.go b/modules/energid/energid.go index fcffe50d8..875a24557 100644 --- a/modules/energid/energid.go +++ b/modules/energid/energid.go @@ -4,6 +4,7 @@ package energid import ( _ "embed" + "errors" "net/http" "time" @@ -24,18 +25,6 @@ func init() { }) } -type Config struct { - web.HTTP `yaml:",inline"` -} - -type Energid struct { - module.Base - Config `yaml:",inline"` - - httpClient *http.Client - charts *module.Charts -} - func New() *Energid { return &Energid{ Config: Config{ @@ -44,39 +33,64 @@ func New() *Energid { URL: "http://127.0.0.1:9796", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, } } -func (e *Energid) Init() bool { +type Config struct { + web.HTTP `yaml:",inline"` +} + +type Energid struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts +} + +func (e *Energid) Configuration() any { + return e.Config +} + +func (e *Energid) Init() error { err := e.validateConfig() if err != nil { e.Errorf("config validation: %v", err) - return false + return err } client, err := e.initHTTPClient() if err != nil { e.Errorf("init HTTP client: %v", err) - return false + return err } e.httpClient = client cs, err := e.initCharts() if err != nil { e.Errorf("init charts: %v", err) - return false + return err } e.charts = cs - return true + return nil } -func (e *Energid) Check() bool { - return len(e.Collect()) > 0 +func (e *Energid) Check() error { + mx, err := e.collect() + if err != nil { + e.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (e *Energid) Charts() *module.Charts { diff --git a/modules/energid/energid_test.go b/modules/energid/energid_test.go index ab0e2f24e..7054afa49 100644 --- a/modules/energid/energid_test.go +++ b/modules/energid/energid_test.go @@ -78,9 +78,9 @@ func Test_Init(t *testing.T) { energid.Config = test.config if test.wantFail { 
- assert.False(t, energid.Init()) + assert.Error(t, energid.Init()) } else { - assert.True(t, energid.Init()) + assert.NoError(t, energid.Init()) } }) } @@ -88,7 +88,7 @@ func Test_Init(t *testing.T) { func Test_Charts(t *testing.T) { energid := New() - require.True(t, energid.Init()) + require.NoError(t, energid.Init()) assert.NotNil(t, energid.Charts()) } @@ -123,12 +123,12 @@ func Test_Check(t *testing.T) { energid, cleanup := test.prepare() defer cleanup() - require.True(t, energid.Init()) + require.NoError(t, energid.Init()) if test.wantFail { - assert.False(t, energid.Check()) + assert.Error(t, energid.Check()) } else { - assert.True(t, energid.Check()) + assert.NoError(t, energid.Check()) } }) } @@ -173,7 +173,7 @@ func Test_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { energid, cleanup := test.prepare() defer cleanup() - require.True(t, energid.Init()) + require.NoError(t, energid.Init()) collected := energid.Collect() diff --git a/modules/energid/init.go b/modules/energid/init.go index 3b7b7fb9e..ee0c98dfc 100644 --- a/modules/energid/init.go +++ b/modules/energid/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (e Energid) validateConfig() error { +func (e *Energid) validateConfig() error { if e.URL == "" { return errors.New("URL not set") } @@ -22,10 +22,10 @@ func (e Energid) validateConfig() error { return nil } -func (e Energid) initHTTPClient() (*http.Client, error) { +func (e *Energid) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(e.Client) } -func (e Energid) initCharts() (*module.Charts, error) { +func (e *Energid) initCharts() (*module.Charts, error) { return charts.Copy(), nil } diff --git a/modules/envoy/config_schema.json b/modules/envoy/config_schema.json index 48b3c9478..f45dd4457 100644 --- a/modules/envoy/config_schema.json +++ b/modules/envoy/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/envoy job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/envoy job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ 
- "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/envoy/envoy.go b/modules/envoy/envoy.go index de9efa13d..fdf06fab6 100644 --- a/modules/envoy/envoy.go +++ b/modules/envoy/envoy.go @@ -4,6 +4,7 @@ package envoy import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,7 +30,7 @@ func New() *Envoy { URL: "http://127.0.0.1:9091/stats/prometheus", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, }, }, @@ -65,24 +66,37 @@ type Envoy struct { listenerDownstream map[string]bool } -func (e *Envoy) Init() bool { +func (e *Envoy) Configuration() any { + return e.Config +} + +func (e *Envoy) Init() error { if err := e.validateConfig(); err != nil { e.Errorf("config validation: %v", err) - return false + return err } prom, err := e.initPrometheusClient() if err != nil { e.Errorf("init Prometheus client: %v", err) - return false + return err } e.prom = prom - return true + return nil } -func (e *Envoy) Check() bool { - return len(e.Collect()) > 0 +func (e *Envoy) Check() error { + mx, err := e.collect() + if err != nil { + e.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (e *Envoy) Charts() *module.Charts { diff --git a/modules/envoy/envoy_test.go b/modules/envoy/envoy_test.go index 3bdd82cb1..7bf5325ac 100644 --- a/modules/envoy/envoy_test.go +++ b/modules/envoy/envoy_test.go @@ -53,9 +53,9 @@ func TestEnvoy_Init(t *testing.T) { envoy.Config = test.config if test.wantFail { - assert.False(t, envoy.Init()) + assert.Error(t, envoy.Init()) } else { - assert.True(t, envoy.Init()) + assert.NoError(t, envoy.Init()) } }) } @@ -66,7 +66,7 @@ func TestEnvoy_Cleanup(t *testing.T) { envoy := New() assert.NotPanics(t, envoy.Cleanup) - require.True(t, envoy.Init()) + require.NoError(t, envoy.Init()) assert.NotPanics(t, envoy.Cleanup) } @@ -76,7 +76,7 @@ func TestEnvoy_Charts(t *testing.T) { require.Empty(t, *envoy.Charts()) - require.True(t, envoy.Init()) + require.NoError(t, envoy.Init()) _ = envoy.Collect() require.NotEmpty(t, *envoy.Charts()) } @@ -109,12 +109,12 @@ func TestEnvoy_Check(t *testing.T) { envoy, cleanup := test.prepare() defer cleanup() - require.True(t, envoy.Init()) + require.NoError(t, envoy.Init()) if test.wantFail { - assert.False(t, envoy.Check()) + assert.Error(t, envoy.Check()) } else { - assert.True(t, envoy.Check()) + assert.NoError(t, envoy.Check()) } }) } @@ -489,7 +489,7 @@ func TestEnvoy_Collect(t *testing.T) { envoy, cleanup := test.prepare() defer cleanup() - require.True(t, envoy.Init()) + require.NoError(t, envoy.Init()) mx := envoy.Collect() diff --git a/modules/example/config_schema.json b/modules/example/config_schema.json index 852b39b1c..56b83034b 100644 --- a/modules/example/config_schema.json +++ b/modules/example/config_schema.json @@ -1,68 +1,61 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/example job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "charts": { - "type": "object", - "properties": { - "type": { - "type": "string" - }, - "num": { - "type": "integer" - }, - "contexts": { - "type": "integer" - }, - "dimensions": { - "type": "integer" - }, - "labels": { - "type": "integer" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/example job configuration schema.", + "type": "object", + "properties": { + "charts": { + "type": "object", + "properties": { + 
"type": { + "type": "string" + }, + "num": { + "type": "integer", + "minimum": 1, + "default": 1 + }, + "contexts": { + "type": "integer" + }, + "dimensions": { + "type": "integer", + "minimum": 1, + "default": 2 + }, + "labels": { + "type": "integer" + } + }, + "required": [ + "num", + "dimensions" + ] }, - "required": [ - "type", - "num", - "contexts", - "dimensions", - "labels" - ] - }, - "hidden_charts": { - "type": "object", - "properties": { - "type": { - "type": "string" - }, - "num": { - "type": "integer" - }, - "contexts": { - "type": "integer" - }, - "dimensions": { - "type": "integer" - }, - "labels": { - "type": "integer" + "hidden_charts": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "num": { + "type": "integer" + }, + "contexts": { + "type": "integer" + }, + "dimensions": { + "type": "integer" + }, + "labels": { + "type": "integer" + } } - }, - "required": [ - "type", - "num", - "contexts", - "dimensions", - "labels" - ] - } + } + }, + "required": [ + "charts" + ] }, - "required": [ - "name", - "charts" - ] + "uiSchema": {} } diff --git a/modules/example/example.go b/modules/example/example.go index fe24bcc3e..8105da6d9 100644 --- a/modules/example/example.go +++ b/modules/example/example.go @@ -4,6 +4,7 @@ package example import ( _ "embed" + "errors" "math/rand" "github.com/netdata/go.d.plugin/agent/module" @@ -17,7 +18,7 @@ func init() { JobConfigSchema: configSchema, Defaults: module.Defaults{ UpdateEvery: module.UpdateEvery, - AutoDetectionRetry: module.AutoDetectionRetry, + AutoDetectionRetry: 5, Priority: module.Priority, Disabled: true, }, @@ -45,15 +46,15 @@ func New() *Example { type ( Config struct { - Charts ConfigCharts `yaml:"charts"` - HiddenCharts ConfigCharts `yaml:"hidden_charts"` + Charts ConfigCharts `yaml:"charts" json:"charts"` + HiddenCharts ConfigCharts `yaml:"hidden_charts" json:"hidden_charts"` } ConfigCharts struct { - Type string `yaml:"type"` - Num int `yaml:"num"` - Contexts int `yaml:"contexts"` - Dims int `yaml:"dimensions"` - Labels int `yaml:"labels"` + Type string `yaml:"type" json:"type"` + Num int `yaml:"num" json:"num"` + Contexts int `yaml:"contexts" json:"context"` + Dims int `yaml:"dimensions" json:"dimensions"` + Labels int `yaml:"labels" json:"labels"` } ) @@ -66,24 +67,40 @@ type Example struct { collectedDims map[string]bool } -func (e *Example) Init() bool { +func (e *Example) Configuration() any { + return e.Config +} + +func (e *Example) Init() error { err := e.validateConfig() if err != nil { e.Errorf("config validation: %v", err) - return false + return err } charts, err := e.initCharts() if err != nil { e.Errorf("charts init: %v", err) - return false + return err } e.charts = charts - return true + return nil } -func (e *Example) Check() bool { - return len(e.Collect()) > 0 +func (e *Example) Check() error { + if e.Config.Charts.Dims == 5 { + return errors.New("guess what, 5 dimension is not allowed") + } + mx, err := e.collect() + if err != nil { + e.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (e *Example) Charts() *module.Charts { diff --git a/modules/example/example_test.go b/modules/example/example_test.go index 47cc51a2f..36181727d 100644 --- a/modules/example/example_test.go +++ b/modules/example/example_test.go @@ -96,9 +96,9 @@ func TestExample_Init(t *testing.T) { example.Config = test.config if test.wantFail { - assert.False(t, example.Init()) + assert.Error(t, example.Init()) } else { - assert.True(t, 
example.Init()) + assert.NoError(t, example.Init()) } }) } @@ -124,12 +124,12 @@ func TestExample_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { example := test.prepare() - require.True(t, example.Init()) + require.NoError(t, example.Init()) if test.wantFail { - assert.False(t, example.Check()) + assert.Error(t, example.Check()) } else { - assert.True(t, example.Check()) + assert.NoError(t, example.Check()) } }) } @@ -153,7 +153,7 @@ func TestExample_Charts(t *testing.T) { "initialized collector": { prepare: func(t *testing.T) *Example { example := New() - require.True(t, example.Init()) + require.NoError(t, example.Init()) return example }, }, @@ -259,7 +259,7 @@ func TestExample_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { example := test.prepare() - require.True(t, example.Init()) + require.NoError(t, example.Init()) collected := example.Collect() diff --git a/modules/filecheck/collect_dirs.go b/modules/filecheck/collect_dirs.go index 32861c0e0..622cbf76a 100644 --- a/modules/filecheck/collect_dirs.go +++ b/modules/filecheck/collect_dirs.go @@ -14,7 +14,7 @@ import ( func (fc *Filecheck) collectDirs(ms map[string]int64) { curTime := time.Now() - if time.Since(fc.lastDiscoveryDirs) >= fc.DiscoveryEvery.Duration { + if time.Since(fc.lastDiscoveryDirs) >= fc.DiscoveryEvery.Duration() { fc.lastDiscoveryDirs = curTime fc.curDirs = fc.discoveryDirs() fc.updateDirsCharts(fc.curDirs) @@ -54,7 +54,7 @@ func (fc *Filecheck) collectDir(ms map[string]int64, path string, curTime time.T } } -func (fc Filecheck) discoveryDirs() (dirs []string) { +func (fc *Filecheck) discoveryDirs() (dirs []string) { for _, path := range fc.Dirs.Include { if hasMeta(path) { continue diff --git a/modules/filecheck/collect_files.go b/modules/filecheck/collect_files.go index 25568473f..a3dd93ef8 100644 --- a/modules/filecheck/collect_files.go +++ b/modules/filecheck/collect_files.go @@ -14,7 +14,7 @@ import ( func (fc *Filecheck) collectFiles(ms map[string]int64) { curTime := time.Now() - if time.Since(fc.lastDiscoveryFiles) >= fc.DiscoveryEvery.Duration { + if time.Since(fc.lastDiscoveryFiles) >= fc.DiscoveryEvery.Duration() { fc.lastDiscoveryFiles = curTime fc.curFiles = fc.discoveryFiles() fc.updateFilesCharts(fc.curFiles) @@ -47,7 +47,7 @@ func (fc *Filecheck) collectFile(ms map[string]int64, path string, curTime time. 
ms[fileDimID(path, "mtime_ago")] = int64(curTime.Sub(info.ModTime()).Seconds()) } -func (fc Filecheck) discoveryFiles() (files []string) { +func (fc *Filecheck) discoveryFiles() (files []string) { for _, path := range fc.Files.Include { if hasMeta(path) { continue diff --git a/modules/filecheck/config_schema.json b/modules/filecheck/config_schema.json index a6b0efca9..254bd75e3 100644 --- a/modules/filecheck/config_schema.json +++ b/modules/filecheck/config_schema.json @@ -1,75 +1,78 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/filecheck job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "discovery_every": { - "type": [ - "string", - "integer" - ] - }, - "files": { - "type": "object", - "properties": { - "include": { - "type": "array", - "items": { - "type": "string" - } - }, - "exclude": { - "type": "array", - "items": { - "type": "string" - } - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/filecheck job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" }, - "required": [ - "include", - "exclude" - ] - }, - "dirs": { - "type": "object", - "properties": { - "include": { - "type": "array", - "items": { - "type": "string" + "discovery_every": { + "type": [ + "string", + "integer" + ] + }, + "files": { + "type": "object", + "properties": { + "include": { + "type": "array", + "items": { + "type": "string" + } + }, + "exclude": { + "type": "array", + "items": { + "type": "string" + } } }, - "exclude": { - "type": "array", - "items": { - "type": "string" + "required": [ + "include", + "exclude" + ] + }, + "dirs": { + "type": "object", + "properties": { + "include": { + "type": "array", + "items": { + "type": "string" + } + }, + "exclude": { + "type": "array", + "items": { + "type": "string" + } + }, + "collect_dir_size": { + "type": "boolean" } }, - "collect_dir_size": { - "type": "boolean" - } + "required": [ + "include", + "exclude" + ] + } + }, + "oneOf": [ + { + "required": [ + "name", + "files" + ] }, - "required": [ - "include", - "exclude" - ] - } + { + "required": [ + "name", + "dirs" + ] + } + ] }, - "oneOf": [ - { - "required": [ - "name", - "files" - ] - }, - { - "required": [ - "name", - "dirs" - ] - } - ] + "uiSchema": {} } diff --git a/modules/filecheck/filecheck.go b/modules/filecheck/filecheck.go index e1369bc1c..5a3cd1ffb 100644 --- a/modules/filecheck/filecheck.go +++ b/modules/filecheck/filecheck.go @@ -26,7 +26,7 @@ func init() { func New() *Filecheck { return &Filecheck{ Config: Config{ - DiscoveryEvery: web.Duration{Duration: time.Second * 30}, + DiscoveryEvery: web.Duration(time.Second * 30), Files: filesConfig{}, Dirs: dirsConfig{ CollectDirSize: true, @@ -69,30 +69,32 @@ type Filecheck struct { charts *module.Charts } -func (Filecheck) Cleanup() { +func (fc *Filecheck) Configuration() any { + return fc.Config } -func (fc *Filecheck) Init() bool { +func (fc *Filecheck) Init() error { err := fc.validateConfig() if err != nil { fc.Errorf("error on validating config: %v", err) - return false + return err } charts, err := fc.initCharts() if err != nil { fc.Errorf("error on charts initialization: %v", err) - return false + return err } fc.charts = charts fc.Debugf("monitored files: %v", fc.Files.Include) fc.Debugf("monitored dirs: %v", fc.Dirs.Include) - return true + + return nil } -func (fc Filecheck) Check() bool { - return true +func (fc *Filecheck) Check() error { + return nil } func (fc *Filecheck) 
Charts() *module.Charts { @@ -110,3 +112,6 @@ func (fc *Filecheck) Collect() map[string]int64 { } return ms } + +func (fc *Filecheck) Cleanup() { +} diff --git a/modules/filecheck/filecheck_test.go b/modules/filecheck/filecheck_test.go index 5024f6460..e973c26bc 100644 --- a/modules/filecheck/filecheck_test.go +++ b/modules/filecheck/filecheck_test.go @@ -86,9 +86,9 @@ func TestFilecheck_Init(t *testing.T) { fc.Config = test.config if test.wantFail { - assert.False(t, fc.Init()) + assert.Error(t, fc.Init()) } else { - require.True(t, fc.Init()) + require.NoError(t, fc.Init()) assert.Equal(t, test.wantNumOfCharts, len(*fc.Charts())) } }) @@ -111,9 +111,9 @@ func TestFilecheck_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { fc := test.prepare() - require.True(t, fc.Init()) + require.NoError(t, fc.Init()) - assert.True(t, fc.Check()) + assert.NoError(t, fc.Check()) }) } } @@ -226,7 +226,7 @@ func TestFilecheck_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { fc := test.prepare() - require.True(t, fc.Init()) + require.NoError(t, fc.Init()) collected := fc.Collect() diff --git a/modules/filecheck/init.go b/modules/filecheck/init.go index 858e3e503..b2e27459a 100644 --- a/modules/filecheck/init.go +++ b/modules/filecheck/init.go @@ -8,14 +8,14 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -func (fc Filecheck) validateConfig() error { +func (fc *Filecheck) validateConfig() error { if len(fc.Files.Include) == 0 && len(fc.Dirs.Include) == 0 { return errors.New("both 'files->include' and 'dirs->include' are empty") } return nil } -func (fc Filecheck) initCharts() (*module.Charts, error) { +func (fc *Filecheck) initCharts() (*module.Charts, error) { charts := &module.Charts{} if len(fc.Files.Include) > 0 { diff --git a/modules/fluentd/collect.go b/modules/fluentd/collect.go new file mode 100644 index 000000000..14ee6df68 --- /dev/null +++ b/modules/fluentd/collect.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package fluentd + +import "fmt" + +func (f *Fluentd) collect() (map[string]int64, error) { + info, err := f.apiClient.getPluginsInfo() + if err != nil { + return nil, err + } + + mx := make(map[string]int64) + + for _, p := range info.Payload { + // TODO: if p.Category == "input" ? 
+ if !p.hasCategory() && !p.hasBufferQueueLength() && !p.hasBufferTotalQueuedSize() { + continue + } + + if f.permitPlugin != nil && !f.permitPlugin.MatchString(p.ID) { + f.Debugf("plugin id: '%s', type: '%s', category: '%s' denied", p.ID, p.Type, p.Category) + continue + } + + id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) + + if p.hasCategory() { + mx[id+"_retry_count"] = *p.RetryCount + } + if p.hasBufferQueueLength() { + mx[id+"_buffer_queue_length"] = *p.BufferQueueLength + } + if p.hasBufferTotalQueuedSize() { + mx[id+"_buffer_total_queued_size"] = *p.BufferTotalQueuedSize + } + + if !f.activePlugins[id] { + f.activePlugins[id] = true + f.addPluginToCharts(p) + } + + } + + return mx, nil +} + +func (f *Fluentd) addPluginToCharts(p pluginData) { + id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) + + if p.hasCategory() { + chart := f.charts.Get("retry_count") + _ = chart.AddDim(&Dim{ID: id + "_retry_count", Name: p.ID}) + chart.MarkNotCreated() + } + if p.hasBufferQueueLength() { + chart := f.charts.Get("buffer_queue_length") + _ = chart.AddDim(&Dim{ID: id + "_buffer_queue_length", Name: p.ID}) + chart.MarkNotCreated() + } + if p.hasBufferTotalQueuedSize() { + chart := f.charts.Get("buffer_total_queued_size") + _ = chart.AddDim(&Dim{ID: id + "_buffer_total_queued_size", Name: p.ID}) + chart.MarkNotCreated() + } +} diff --git a/modules/fluentd/config_schema.json b/modules/fluentd/config_schema.json index f5bfe3047..f225d7b6c 100644 --- a/modules/fluentd/config_schema.json +++ b/modules/fluentd/config_schema.json @@ -1,62 +1,65 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/fluentd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "permit_plugin_id": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/fluentd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "permit_plugin_id": { + "type": "string" + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/fluentd/fluentd.go b/modules/fluentd/fluentd.go index 5b627b7b4..6d6755122 100644 --- a/modules/fluentd/fluentd.go +++ b/modules/fluentd/fluentd.go @@ -4,7 +4,7 @@ 
package fluentd import ( _ "embed" - "fmt" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/matcher" @@ -23,25 +23,18 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1:24220" - defaultHTTPTimeout = time.Second * 2 -) - // New creates Fluentd with default values. func New() *Fluentd { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, - }, - }} - return &Fluentd{ - Config: config, + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:24220", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, + }}, activePlugins: make(map[string]bool), charts: charts.Copy(), } @@ -63,105 +56,73 @@ type Fluentd struct { charts *Charts } -// Cleanup makes cleanup. -func (Fluentd) Cleanup() {} +func (f *Fluentd) Configuration() any { + return f.Config +} // Init makes initialization. -func (f *Fluentd) Init() bool { - if f.URL == "" { - f.Error("URL not set") - return false +func (f *Fluentd) Init() error { + if err := f.validateConfig(); err != nil { + f.Error(err) + return err } - if f.PermitPlugin != "" { - m, err := matcher.NewSimplePatternsMatcher(f.PermitPlugin) - if err != nil { - f.Errorf("error on creating permit_plugin matcher : %v", err) - return false - } - f.permitPlugin = matcher.WithCache(m) + pm, err := f.initPermitPluginMatcher() + if err != nil { + f.Error(err) + return err + } + if pm != nil { + f.permitPlugin = pm } - client, err := web.NewHTTPClient(f.Client) + client, err := f.initApiClient() if err != nil { - f.Errorf("error on creating client : %v", err) - return false + f.Error(err) + return err } - - f.apiClient = newAPIClient(client, f.Request) + f.apiClient = client f.Debugf("using URL %s", f.URL) - f.Debugf("using timeout: %s", f.Timeout.Duration) + f.Debugf("using timeout: %s", f.Timeout.Duration()) - return true + return nil } // Check makes check. -func (f Fluentd) Check() bool { return len(f.Collect()) > 0 } +func (f *Fluentd) Check() error { + mx, err := f.collect() + if err != nil { + f.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil +} // Charts creates Charts. -func (f Fluentd) Charts() *Charts { return f.charts } +func (f Fluentd) Charts() *Charts { + return f.charts +} // Collect collects metrics. func (f *Fluentd) Collect() map[string]int64 { - info, err := f.apiClient.getPluginsInfo() + mx, err := f.collect() if err != nil { f.Error(err) return nil } - metrics := make(map[string]int64) - - for _, p := range info.Payload { - // TODO: if p.Category == "input" ? 
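The fluentd hunks above follow the interface migration applied throughout this PR: Init and Check return an error instead of a bool, Check runs a single collection and reports "no metrics collected" when nothing comes back, and Configuration exposes the job config. A minimal, self-contained sketch of that lifecycle, using a stand-in type rather than the real module.Base and collect() (names such as demoModule and collectFn are illustrative only):

package main

import (
	"errors"
	"fmt"
)

// demoModule is a stand-in for a go.d module after the migration; no framework
// types are used so the sketch stays self-contained.
type demoModule struct {
	url       string
	collectFn func() (map[string]int64, error) // stand-in for the module's collect()
}

// Configuration exposes the job config, mirroring the new Configuration() any methods.
func (d *demoModule) Configuration() any { return map[string]string{"url": d.url} }

// Init validates the config and returns an error instead of false.
func (d *demoModule) Init() error {
	if d.url == "" {
		return errors.New("url not set")
	}
	return nil
}

// Check collects once and fails when the collection errors or yields nothing.
func (d *demoModule) Check() error {
	mx, err := d.collectFn()
	if err != nil {
		return err
	}
	if len(mx) == 0 {
		return errors.New("no metrics collected")
	}
	return nil
}

func (d *demoModule) Cleanup() {}

func main() {
	m := &demoModule{
		url:       "http://127.0.0.1:24220",
		collectFn: func() (map[string]int64, error) { return map[string]int64{"up": 1}, nil },
	}
	fmt.Println(m.Init(), m.Check()) // <nil> <nil>
}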
- if !p.hasCategory() && !p.hasBufferQueueLength() && !p.hasBufferTotalQueuedSize() { - continue - } - - if f.permitPlugin != nil && !f.permitPlugin.MatchString(p.ID) { - f.Debugf("plugin id: '%s', type: '%s', category: '%s' denied", p.ID, p.Type, p.Category) - continue - } - - id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) - - if p.hasCategory() { - metrics[id+"_retry_count"] = *p.RetryCount - } - if p.hasBufferQueueLength() { - metrics[id+"_buffer_queue_length"] = *p.BufferQueueLength - } - if p.hasBufferTotalQueuedSize() { - metrics[id+"_buffer_total_queued_size"] = *p.BufferTotalQueuedSize - } - - if !f.activePlugins[id] { - f.activePlugins[id] = true - f.addPluginToCharts(p) - } - - } - - return metrics + return mx } -func (f *Fluentd) addPluginToCharts(p pluginData) { - id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) - - if p.hasCategory() { - chart := f.charts.Get("retry_count") - _ = chart.AddDim(&Dim{ID: id + "_retry_count", Name: p.ID}) - chart.MarkNotCreated() - } - if p.hasBufferQueueLength() { - chart := f.charts.Get("buffer_queue_length") - _ = chart.AddDim(&Dim{ID: id + "_buffer_queue_length", Name: p.ID}) - chart.MarkNotCreated() - } - if p.hasBufferTotalQueuedSize() { - chart := f.charts.Get("buffer_total_queued_size") - _ = chart.AddDim(&Dim{ID: id + "_buffer_total_queued_size", Name: p.ID}) - chart.MarkNotCreated() +// Cleanup makes cleanup. +func (f *Fluentd) Cleanup() { + if f.apiClient != nil && f.apiClient.httpClient != nil { + f.apiClient.httpClient.CloseIdleConnections() } } diff --git a/modules/fluentd/fluentd_test.go b/modules/fluentd/fluentd_test.go index 492e2ebaa..51413d4bf 100644 --- a/modules/fluentd/fluentd_test.go +++ b/modules/fluentd/fluentd_test.go @@ -14,25 +14,16 @@ import ( var testDataPlugins, _ = os.ReadFile("testdata/plugins.json") -func TestNew(t *testing.T) { - job := New() - assert.IsType(t, (*Fluentd)(nil), job) - assert.NotNil(t, job.charts) - assert.NotNil(t, job.activePlugins) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestFluentd_Init(t *testing.T) { // OK job := New() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) //NG job = New() job.URL = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestFluentd_Check(t *testing.T) { @@ -45,14 +36,14 @@ func TestFluentd_Check(t *testing.T) { // OK job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) // NG job = New() job.URL = "http://127.0.0.1:38001/api/plugins.json" - require.True(t, job.Init()) - require.False(t, job.Check()) + require.NoError(t, job.Init()) + require.Error(t, job.Check()) } func TestFluentd_Charts(t *testing.T) { @@ -73,8 +64,8 @@ func TestFluentd_Collect(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "output_stdout_stdout_output_retry_count": 0, @@ -97,8 +88,8 @@ func TestFluentd_InvalidData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestFluentd_404(t *testing.T) { @@ -110,6 +101,6 @@ func TestFluentd_404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) 
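Alongside the bool-to-error change, the constructors in these hunks switch from web.Duration{Duration: ...} composite literals to a web.Duration(...) conversion, and call sites read .Duration() instead of the .Duration field. A rough stand-in consistent with those call sites, assuming the new type is simply a named type over time.Duration (the real web.Duration presumably also handles YAML/JSON decoding, which is omitted here):

package main

import (
	"fmt"
	"time"
)

// Duration mimics the shape the new web.Duration call sites imply.
type Duration time.Duration

// Duration returns the wrapped time.Duration, matching the .Duration() calls in the diff.
func (d Duration) Duration() time.Duration { return time.Duration(d) }

func main() {
	timeout := Duration(2 * time.Second) // was: web.Duration{Duration: time.Second * 2}
	fmt.Println(timeout.Duration())      // was: timeout.Duration (field access)
}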
+ assert.Error(t, job.Check()) } diff --git a/modules/fluentd/init.go b/modules/fluentd/init.go new file mode 100644 index 000000000..37627c03d --- /dev/null +++ b/modules/fluentd/init.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package fluentd + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (f *Fluentd) validateConfig() error { + if f.URL == "" { + return errors.New("url not set") + } + + return nil +} + +func (f *Fluentd) initPermitPluginMatcher() (matcher.Matcher, error) { + if f.PermitPlugin == "" { + return nil, nil + } + + return matcher.NewSimplePatternsMatcher(f.PermitPlugin) +} + +func (f *Fluentd) initApiClient() (*apiClient, error) { + client, err := web.NewHTTPClient(f.Client) + if err != nil { + return nil, err + } + + return newAPIClient(client, f.Request), nil +} diff --git a/modules/freeradius/config_schema.json b/modules/freeradius/config_schema.json index b8bd25fa9..3b8554c07 100644 --- a/modules/freeradius/config_schema.json +++ b/modules/freeradius/config_schema.json @@ -1,31 +1,34 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/freeradius job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/freeradius job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "port": { + "type": "integer" + }, + "secret": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "address": { - "type": "string" - }, - "port": { - "type": "integer" - }, - "secret": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "address", + "port", + "secret" + ] }, - "required": [ - "name", - "address", - "port", - "secret" - ] + "uiSchema": {} } diff --git a/modules/freeradius/freeradius.go b/modules/freeradius/freeradius.go index 5897917cf..89fe9a63d 100644 --- a/modules/freeradius/freeradius.go +++ b/modules/freeradius/freeradius.go @@ -24,72 +24,68 @@ func init() { } func New() *FreeRADIUS { - cfg := Config{ - Address: "127.0.0.1", - Port: 18121, - Secret: "adminsecret", - Timeout: web.Duration{Duration: time.Second}, - } return &FreeRADIUS{ - Config: cfg, + Config: Config{ + Address: "127.0.0.1", + Port: 18121, + Secret: "adminsecret", + Timeout: web.Duration(time.Second), + }, } } +type Config struct { + Address string + Port int + Secret string + Timeout web.Duration +} + type ( - client interface { - Status() (*api.Status, error) - } - Config struct { - Address string - Port int - Secret string - Timeout web.Duration - } FreeRADIUS struct { module.Base Config `yaml:",inline"` client } + client interface { + Status() (*api.Status, error) + } ) -func (f FreeRADIUS) validateConfig() error { - if f.Address == "" { - return errors.New("address not set") - } - if f.Port == 0 { - return errors.New("port not set") - } - if f.Secret == "" { - return errors.New("secret not set") - } - return nil +func (f *FreeRADIUS) Configuration() any { + return f.Config } -func (f *FreeRADIUS) initClient() { +func (f *FreeRADIUS) Init() error { + if err := f.validateConfig(); err != nil { + f.Errorf("config validation: %v", err) + return err + } + f.client = api.New(api.Config{ Address: f.Address, Port: f.Port, Secret: f.Secret, - Timeout: f.Timeout.Duration, + Timeout: 
f.Timeout.Duration(), }) + + return nil } -func (f *FreeRADIUS) Init() bool { - err := f.validateConfig() +func (f *FreeRADIUS) Check() error { + mx, err := f.collect() if err != nil { - f.Errorf("error on validating config: %v", err) - return false + f.Error(err) + return err } + if len(mx) == 0 { + return errors.New("no metrics collected") - f.initClient() - return true -} - -func (f FreeRADIUS) Check() bool { - return len(f.Collect()) > 0 + } + return nil } -func (FreeRADIUS) Charts() *Charts { +func (f *FreeRADIUS) Charts() *Charts { return charts.Copy() } @@ -105,4 +101,4 @@ func (f *FreeRADIUS) Collect() map[string]int64 { return mx } -func (FreeRADIUS) Cleanup() {} +func (f *FreeRADIUS) Cleanup() {} diff --git a/modules/freeradius/freeradius_test.go b/modules/freeradius/freeradius_test.go index b9432ec96..79bba7002 100644 --- a/modules/freeradius/freeradius_test.go +++ b/modules/freeradius/freeradius_test.go @@ -19,42 +19,42 @@ func TestNew(t *testing.T) { func TestFreeRADIUS_Init(t *testing.T) { freeRADIUS := New() - assert.True(t, freeRADIUS.Init()) + assert.NoError(t, freeRADIUS.Init()) } func TestFreeRADIUS_Init_ReturnsFalseIfAddressNotSet(t *testing.T) { freeRADIUS := New() freeRADIUS.Address = "" - assert.False(t, freeRADIUS.Init()) + assert.Error(t, freeRADIUS.Init()) } func TestFreeRADIUS_Init_ReturnsFalseIfPortNotSet(t *testing.T) { freeRADIUS := New() freeRADIUS.Port = 0 - assert.False(t, freeRADIUS.Init()) + assert.Error(t, freeRADIUS.Init()) } func TestFreeRADIUS_Init_ReturnsFalseIfSecretNotSet(t *testing.T) { freeRADIUS := New() freeRADIUS.Secret = "" - assert.False(t, freeRADIUS.Init()) + assert.Error(t, freeRADIUS.Init()) } func TestFreeRADIUS_Check(t *testing.T) { freeRADIUS := New() freeRADIUS.client = newOKMockClient() - assert.True(t, freeRADIUS.Check()) + assert.NoError(t, freeRADIUS.Check()) } func TestFreeRADIUS_Check_ReturnsFalseIfClientStatusReturnsError(t *testing.T) { freeRADIUS := New() freeRADIUS.client = newErrorMockClient() - assert.False(t, freeRADIUS.Check()) + assert.Error(t, freeRADIUS.Check()) } func TestFreeRADIUS_Charts(t *testing.T) { diff --git a/modules/freeradius/init.go b/modules/freeradius/init.go new file mode 100644 index 000000000..9c14da0ea --- /dev/null +++ b/modules/freeradius/init.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package freeradius + +import ( + "errors" +) + +func (f *FreeRADIUS) validateConfig() error { + if f.Address == "" { + return errors.New("address not set") + } + if f.Port == 0 { + return errors.New("port not set") + } + if f.Secret == "" { + return errors.New("secret not set") + } + return nil +} diff --git a/modules/geth/config_schema.json b/modules/geth/config_schema.json index 78d3e0abb..9500f63e8 100644 --- a/modules/geth/config_schema.json +++ b/modules/geth/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/geth job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/geth job configuration schema.", + "type": "object", + 
"properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/geth/geth.go b/modules/geth/geth.go index fe6b2bd96..16dca853a 100644 --- a/modules/geth/geth.go +++ b/modules/geth/geth.go @@ -24,68 +24,63 @@ func init() { } func New() *Geth { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:6060/debug/metrics/prometheus", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &Geth{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:6060/debug/metrics/prometheus", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - - return &Geth{ - Config: config, charts: charts.Copy(), } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - } +type Config struct { + web.HTTP `yaml:",inline"` +} - Geth struct { - module.Base - Config `yaml:",inline"` +type Geth struct { + module.Base + Config `yaml:",inline"` - prom prometheus.Prometheus - charts *Charts - } -) + prom prometheus.Prometheus + charts *Charts +} -func (g Geth) validateConfig() error { - if g.URL == "" { - return errors.New("URL is not set") - } - return nil +func (g *Geth) Configuration() any { + return g.Config } -func (g *Geth) initClient() error { - client, err := web.NewHTTPClient(g.Client) +func (g *Geth) Init() error { + if err := g.validateConfig(); err != nil { + g.Errorf("error on validating config: %g", err) + return err + } + + prom, err := g.initPrometheusClient() if err != nil { + g.Error(err) return err } + g.prom = prom - g.prom = prometheus.New(client, g.Request) return nil } -func (g *Geth) Init() bool { - if err := g.validateConfig(); err != nil { - g.Errorf("error on validating config: %g", err) - return false +func (g *Geth) Check() error { + mx, err := g.collect() + if err != nil { + g.Error(err) + return err } - if err := g.initClient(); err != nil { - g.Errorf("error on initializing client: %g", err) - return false + if len(mx) == 0 { + return errors.New("no metrics collected") } - return true -} - -func (g *Geth) Check() bool { - return len(g.Collect()) > 0 + return nil } func (g *Geth) Charts() *Charts { @@ -104,4 +99,8 @@ func (g *Geth) Collect() map[string]int64 { return mx } -func (Geth) Cleanup() {} +func (g *Geth) Cleanup() { + if g.prom != nil && g.prom.HTTPClient() != nil { + g.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/geth/init.go b/modules/geth/init.go new file mode 100644 index 000000000..bf9a81712 --- /dev/null +++ b/modules/geth/init.go @@ -0,0 +1,24 @@ +package geth + +import ( + "errors" + + 
"github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (g *Geth) validateConfig() error { + if g.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (g *Geth) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(g.Client) + if err != nil { + return nil, err + } + + return prometheus.New(client, g.Request), nil +} diff --git a/modules/haproxy/config_schema.json b/modules/haproxy/config_schema.json index 9fa8cd111..633fd6191 100644 --- a/modules/haproxy/config_schema.json +++ b/modules/haproxy/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/haproxy job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/haproxy job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/haproxy/haproxy.go b/modules/haproxy/haproxy.go index ffc936711..5d18d5be3 100644 --- a/modules/haproxy/haproxy.go +++ b/modules/haproxy/haproxy.go @@ -4,6 +4,7 @@ package haproxy import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,7 +30,7 @@ func New() *Haproxy { URL: "http://127.0.0.1:8404/metrics", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -55,24 +56,36 @@ type Haproxy struct { proxies map[string]bool } -func (h *Haproxy) Init() bool { +func (h *Haproxy) Configuration() any { + return h.Config +} + +func (h *Haproxy) Init() error { if err := h.validateConfig(); err != nil { h.Errorf("config validation: %v", err) - return false + return err } prom, err := h.initPrometheusClient() if err != nil { h.Errorf("prometheus client initialization: %v", err) - return false + return err } h.prom = prom - return true + return nil } -func (h *Haproxy) Check() bool { - return len(h.Collect()) > 0 +func (h *Haproxy) Check() error { + mx, err := h.collect() + if err != nil { + h.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + 
return nil } func (h *Haproxy) Charts() *module.Charts { @@ -80,18 +93,20 @@ func (h *Haproxy) Charts() *module.Charts { } func (h *Haproxy) Collect() map[string]int64 { - ms, err := h.collect() + mx, err := h.collect() if err != nil { h.Error(err) return nil } - if len(ms) == 0 { + if len(mx) == 0 { return nil } - return ms + return mx } -func (Haproxy) Cleanup() { - // TODO: close http idle connections +func (h *Haproxy) Cleanup() { + if h.prom != nil && h.prom.HTTPClient() != nil { + h.prom.HTTPClient().CloseIdleConnections() + } } diff --git a/modules/haproxy/haproxy_test.go b/modules/haproxy/haproxy_test.go index c881c19f3..df7c31ed3 100644 --- a/modules/haproxy/haproxy_test.go +++ b/modules/haproxy/haproxy_test.go @@ -62,9 +62,9 @@ func TestHaproxy_Init(t *testing.T) { rdb.Config = test.config if test.wantFail { - assert.False(t, rdb.Init()) + assert.Error(t, rdb.Init()) } else { - assert.True(t, rdb.Init()) + assert.NoError(t, rdb.Init()) } }) } @@ -107,9 +107,9 @@ func TestHaproxy_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, h.Check()) + assert.Error(t, h.Check()) } else { - assert.True(t, h.Check()) + assert.NoError(t, h.Check()) } }) } @@ -185,7 +185,7 @@ func prepareCaseHaproxyV231Metrics(t *testing.T) (*Haproxy, func()) { })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -213,7 +213,7 @@ application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"} })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -226,7 +226,7 @@ func prepareCase404Response(t *testing.T) (*Haproxy, func()) { })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -235,7 +235,7 @@ func prepareCaseConnectionRefused(t *testing.T) (*Haproxy, func()) { t.Helper() h := New() h.URL = "http://127.0.0.1:38001" - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, func() {} } diff --git a/modules/hdfs/collect.go b/modules/hdfs/collect.go index 9879787cd..8d613e074 100644 --- a/modules/hdfs/collect.go +++ b/modules/hdfs/collect.go @@ -11,68 +11,51 @@ import ( "github.com/netdata/go.d.plugin/pkg/stm" ) -type ( - rawData map[string]json.RawMessage - rawJMX struct { - Beans []rawData +func (h *HDFS) collect() (map[string]int64, error) { + var raw rawJMX + err := h.client.doOKWithDecodeJSON(&raw) + if err != nil { + return nil, err } -) - -func (r rawJMX) isEmpty() bool { - return len(r.Beans) == 0 -} -func (r rawJMX) find(f func(rawData) bool) rawData { - for _, v := range r.Beans { - if f(v) { - return v - } + if raw.isEmpty() { + return nil, errors.New("empty response") } - return nil -} - -func (r rawJMX) findJvm() rawData { - f := func(data rawData) bool { return string(data["modelerType"]) == "\"JvmMetrics\"" } - return r.find(f) -} - -func (r rawJMX) findRPCActivity() rawData { - f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"RpcActivityForPort") } - return r.find(f) -} - -func (r rawJMX) findFSNameSystem() rawData { - f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSNamesystem\"" } - return r.find(f) -} -func (r rawJMX) findFSDatasetState() rawData { - f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSDatasetState\"" } - return r.find(f) -} + mx := h.collectRawJMX(raw) -func (r rawJMX) findDataNodeActivity() rawData { - f := func(data rawData) bool { return 
strings.HasPrefix(string(data["modelerType"]), "\"DataNodeActivity") } - return r.find(f) + return stm.ToMap(mx), nil } -func (h *HDFS) collect() (map[string]int64, error) { +func (h *HDFS) determineNodeType() (nodeType, error) { var raw rawJMX err := h.client.doOKWithDecodeJSON(&raw) if err != nil { - return nil, err + return "", err } if raw.isEmpty() { - return nil, errors.New("empty response") + return "", errors.New("empty response") } - mx := h.collectRawJMX(raw) + jvm := raw.findJvm() + if jvm == nil { + return "", errors.New("couldn't find jvm in response") + } - return stm.ToMap(mx), nil + v, ok := jvm["tag.ProcessName"] + if !ok { + return "", errors.New("couldn't find process name in JvmMetrics") + } + + t := nodeType(strings.Trim(string(v), "\"")) + if t == nameNodeType || t == dataNodeType { + return t, nil + } + return "", errors.New("unknown node type") } -func (h HDFS) collectRawJMX(raw rawJMX) *metrics { +func (h *HDFS) collectRawJMX(raw rawJMX) *metrics { var mx metrics switch h.nodeType { default: @@ -85,7 +68,7 @@ func (h HDFS) collectRawJMX(raw rawJMX) *metrics { return &mx } -func (h HDFS) collectNameNode(mx *metrics, raw rawJMX) { +func (h *HDFS) collectNameNode(mx *metrics, raw rawJMX) { err := h.collectJVM(mx, raw) if err != nil { h.Debugf("error on collecting jvm : %v", err) @@ -102,7 +85,7 @@ func (h HDFS) collectNameNode(mx *metrics, raw rawJMX) { } } -func (h HDFS) collectDataNode(mx *metrics, raw rawJMX) { +func (h *HDFS) collectDataNode(mx *metrics, raw rawJMX) { err := h.collectJVM(mx, raw) if err != nil { h.Debugf("error on collecting jvm : %v", err) @@ -124,7 +107,7 @@ func (h HDFS) collectDataNode(mx *metrics, raw rawJMX) { } } -func (h HDFS) collectJVM(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectJVM(mx *metrics, raw rawJMX) error { v := raw.findJvm() if v == nil { return nil @@ -140,7 +123,7 @@ func (h HDFS) collectJVM(mx *metrics, raw rawJMX) error { return nil } -func (h HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error { v := raw.findRPCActivity() if v == nil { return nil @@ -156,7 +139,7 @@ func (h HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error { return nil } -func (h HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error { v := raw.findFSNameSystem() if v == nil { return nil @@ -174,7 +157,7 @@ func (h HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error { return nil } -func (h HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error { v := raw.findFSDatasetState() if v == nil { return nil @@ -193,7 +176,7 @@ func (h HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error { return nil } -func (h HDFS) collectDataNodeActivity(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectDataNodeActivity(mx *metrics, raw rawJMX) error { v := raw.findDataNodeActivity() if v == nil { return nil diff --git a/modules/hdfs/config_schema.json b/modules/hdfs/config_schema.json index 483c49301..6310f60a0 100644 --- a/modules/hdfs/config_schema.json +++ b/modules/hdfs/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/hdfs job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - 
"password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/hdfs job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/hdfs/hdfs.go b/modules/hdfs/hdfs.go index aa0b2efe2..ca33fdb57 100644 --- a/modules/hdfs/hdfs.go +++ b/modules/hdfs/hdfs.go @@ -5,7 +5,6 @@ package hdfs import ( _ "embed" "errors" - "strings" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -31,7 +30,8 @@ func New() *HDFS { URL: "http://127.0.0.1:50070/jmx", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}}, + Timeout: web.Duration(time.Second), + }, }, } @@ -61,72 +61,49 @@ type HDFS struct { client *client } -// Cleanup makes cleanup. -func (HDFS) Cleanup() {} - -func (h HDFS) createClient() (*client, error) { - httpClient, err := web.NewHTTPClient(h.Client) - if err != nil { - return nil, err - } - - return newClient(httpClient, h.Request), nil +func (h *HDFS) Configuration() any { + return h.Config } -func (h HDFS) determineNodeType() (nodeType, error) { - var raw rawJMX - err := h.client.doOKWithDecodeJSON(&raw) - if err != nil { - return "", err - } - - if raw.isEmpty() { - return "", errors.New("empty response") - } - - jvm := raw.findJvm() - if jvm == nil { - return "", errors.New("couldn't find jvm in response") - } - - v, ok := jvm["tag.ProcessName"] - if !ok { - return "", errors.New("couldn't find process name in JvmMetrics") - } - - t := nodeType(strings.Trim(string(v), "\"")) - if t == nameNodeType || t == dataNodeType { - return t, nil +// Init makes initialization. +func (h *HDFS) Init() error { + if err := h.validateConfig(); err != nil { + h.Errorf("config validation: %v", err) + return err } - return "", errors.New("unknown node type") -} -// Init makes initialization. -func (h *HDFS) Init() bool { cl, err := h.createClient() if err != nil { h.Errorf("error on creating client : %v", err) - return false + return err } h.client = cl - return true + return nil } // Check makes check. 
-func (h *HDFS) Check() bool { - t, err := h.determineNodeType() +func (h *HDFS) Check() error { + typ, err := h.determineNodeType() if err != nil { h.Errorf("error on node type determination : %v", err) - return false + return err } - h.nodeType = t + h.nodeType = typ - return len(h.Collect()) > 0 + mx, err := h.collect() + if err != nil { + h.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts returns Charts. -func (h HDFS) Charts() *Charts { +func (h *HDFS) Charts() *Charts { switch h.nodeType { default: return nil @@ -151,3 +128,10 @@ func (h *HDFS) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (h *HDFS) Cleanup() { + if h.client != nil && h.client.httpClient != nil { + h.client.httpClient.CloseIdleConnections() + } +} diff --git a/modules/hdfs/hdfs_test.go b/modules/hdfs/hdfs_test.go index dc5b7cf0e..1870c1f0a 100644 --- a/modules/hdfs/hdfs_test.go +++ b/modules/hdfs/hdfs_test.go @@ -32,14 +32,14 @@ func TestNew(t *testing.T) { func TestHDFS_Init(t *testing.T) { job := New() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) } func TestHDFS_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) { job := New() job.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestHDFS_Check(t *testing.T) { @@ -52,9 +52,9 @@ func TestHDFS_Check(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) assert.NotZero(t, job.nodeType) } @@ -68,9 +68,9 @@ func TestHDFS_CheckDataNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) assert.Equal(t, dataNodeType, job.nodeType) } @@ -84,9 +84,9 @@ func TestHDFS_CheckNameNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) assert.Equal(t, nameNodeType, job.nodeType) } @@ -100,17 +100,17 @@ func TestHDFS_CheckErrorOnNodeTypeDetermination(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.False(t, job.Check()) + assert.Error(t, job.Check()) } func TestHDFS_CheckNoResponse(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/jmx" - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.False(t, job.Check()) + assert.Error(t, job.Check()) } func TestHDFS_Charts(t *testing.T) { @@ -151,8 +151,8 @@ func TestHDFS_CollectDataNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "dna_bytes_read": 80689178, @@ -203,8 +203,8 @@ func TestHDFS_CollectNameNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "fsns_blocks_total": 15, @@ -262,7 +262,7 @@ func TestHDFS_CollectUnknownNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.Panics(t, func() { _ = job.Collect() }) } @@ -270,7 +270,7 @@ func TestHDFS_CollectUnknownNode(t *testing.T) { func TestHDFS_CollectNoResponse(t *testing.T) { job := 
New() job.URL = "http://127.0.0.1:38001/jmx" - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.Nil(t, job.Collect()) } @@ -285,7 +285,7 @@ func TestHDFS_CollectReceiveInvalidResponse(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.Nil(t, job.Collect()) } @@ -300,7 +300,7 @@ func TestHDFS_CollectReceive404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.Nil(t, job.Collect()) } diff --git a/modules/hdfs/init.go b/modules/hdfs/init.go new file mode 100644 index 000000000..2fbcfc32d --- /dev/null +++ b/modules/hdfs/init.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hdfs + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (h *HDFS) validateConfig() error { + if h.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (h *HDFS) createClient() (*client, error) { + httpClient, err := web.NewHTTPClient(h.Client) + if err != nil { + return nil, err + } + + return newClient(httpClient, h.Request), nil +} diff --git a/modules/hdfs/raw_data.go b/modules/hdfs/raw_data.go new file mode 100644 index 000000000..ab434ae17 --- /dev/null +++ b/modules/hdfs/raw_data.go @@ -0,0 +1,51 @@ +package hdfs + +import ( + "encoding/json" + "strings" +) + +type ( + rawData map[string]json.RawMessage + rawJMX struct { + Beans []rawData + } +) + +func (r rawJMX) isEmpty() bool { + return len(r.Beans) == 0 +} + +func (r rawJMX) find(f func(rawData) bool) rawData { + for _, v := range r.Beans { + if f(v) { + return v + } + } + return nil +} + +func (r rawJMX) findJvm() rawData { + f := func(data rawData) bool { return string(data["modelerType"]) == "\"JvmMetrics\"" } + return r.find(f) +} + +func (r rawJMX) findRPCActivity() rawData { + f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"RpcActivityForPort") } + return r.find(f) +} + +func (r rawJMX) findFSNameSystem() rawData { + f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSNamesystem\"" } + return r.find(f) +} + +func (r rawJMX) findFSDatasetState() rawData { + f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSDatasetState\"" } + return r.find(f) +} + +func (r rawJMX) findDataNodeActivity() rawData { + f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"DataNodeActivity") } + return r.find(f) +} diff --git a/modules/httpcheck/config_schema.json b/modules/httpcheck/config_schema.json index d344853f7..638d45bb5 100644 --- a/modules/httpcheck/config_schema.json +++ b/modules/httpcheck/config_schema.json @@ -1,71 +1,74 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/httpcheck job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "accepted_statuses": { - "type": "array", - "items": { - "type": "integer" - } - }, - "response_match": { - "type": "string" - }, - "cookie_file": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": 
"go.d/httpcheck job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "accepted_statuses": { + "type": "array", + "items": { + "type": "integer" + } + }, + "response_match": { "type": "string" + }, + "cookie_file": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/httpcheck/httpcheck.go b/modules/httpcheck/httpcheck.go index abb2c821e..82ed0c7a9 100644 --- a/modules/httpcheck/httpcheck.go +++ b/modules/httpcheck/httpcheck.go @@ -4,6 +4,7 @@ package httpcheck import ( _ "embed" + "errors" "net/http" "regexp" "time" @@ -31,7 +32,7 @@ func New() *HTTPCheck { Config: Config{ HTTP: web.HTTP{ Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, AcceptedStatuses: []int{200}, @@ -73,10 +74,14 @@ type HTTPCheck struct { metrics metrics } -func (hc *HTTPCheck) Init() bool { +func (hc *HTTPCheck) Configuration() any { + return hc.Config +} + +func (hc *HTTPCheck) Init() error { if err := hc.validateConfig(); err != nil { hc.Errorf("config validation: %v", err) - return false + return err } hc.charts = hc.initCharts() @@ -84,21 +89,21 @@ func (hc *HTTPCheck) Init() bool { httpClient, err := hc.initHTTPClient() if err != nil { hc.Errorf("init HTTP client: %v", err) - return false + return err } hc.httpClient = httpClient re, err := hc.initResponseMatchRegexp() if err != nil { hc.Errorf("init response match regexp: %v", err) - return false + return err } hc.reResponse = re hm, err := hc.initHeaderMatch() if err != nil { hc.Errorf("init header match: %v", err) - return false + return err } hc.headerMatch = hm @@ -107,17 +112,25 @@ func (hc *HTTPCheck) Init() bool { } hc.Debugf("using URL %s", hc.URL) - hc.Debugf("using HTTP timeout %s", hc.Timeout.Duration) + hc.Debugf("using HTTP timeout %s", hc.Timeout.Duration()) hc.Debugf("using accepted HTTP statuses %v", hc.AcceptedStatuses) if hc.reResponse != nil { hc.Debugf("using response match regexp %s", hc.reResponse) } - return true + return nil } -func (hc *HTTPCheck) Check() bool { - return len(hc.Collect()) > 0 +func (hc *HTTPCheck) Check() error { + mx, err := hc.collect() + if err != nil { + hc.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (hc *HTTPCheck) Charts() *module.Charts { diff --git a/modules/httpcheck/httpcheck_test.go b/modules/httpcheck/httpcheck_test.go index 9d866e093..6c6f99243 100644 --- a/modules/httpcheck/httpcheck_test.go +++ b/modules/httpcheck/httpcheck_test.go @@ -56,9 +56,9 @@ func TestHTTPCheck_Init(t *testing.T) { httpCheck.Config 
= test.config if test.wantFail { - assert.False(t, httpCheck.Init()) + assert.Error(t, httpCheck.Init()) } else { - assert.True(t, httpCheck.Init()) + assert.NoError(t, httpCheck.Init()) } }) } @@ -80,7 +80,7 @@ func TestHTTPCheck_Charts(t *testing.T) { prepare: func(t *testing.T) *HTTPCheck { httpCheck := New() httpCheck.URL = "http://127.0.0.1:38001" - require.True(t, httpCheck.Init()) + require.NoError(t, httpCheck.Init()) return httpCheck }, @@ -105,7 +105,7 @@ func TestHTTPCheck_Cleanup(t *testing.T) { assert.NotPanics(t, httpCheck.Cleanup) httpCheck.URL = "http://127.0.0.1:38001" - require.True(t, httpCheck.Init()) + require.NoError(t, httpCheck.Init()) assert.NotPanics(t, httpCheck.Cleanup) } @@ -129,12 +129,12 @@ func TestHTTPCheck_Check(t *testing.T) { httpCheck, cleanup := test.prepare() defer cleanup() - require.True(t, httpCheck.Init()) + require.NoError(t, httpCheck.Init()) if test.wantFail { - assert.False(t, httpCheck.Check()) + assert.Error(t, httpCheck.Check()) } else { - assert.True(t, httpCheck.Check()) + assert.NoError(t, httpCheck.Check()) } }) } @@ -438,7 +438,7 @@ func TestHTTPCheck_Collect(t *testing.T) { test.update(httpCheck) } - require.True(t, httpCheck.Init()) + require.NoError(t, httpCheck.Init()) var mx map[string]int64 @@ -475,11 +475,11 @@ func prepareSuccessCase() (*HTTPCheck, func()) { func prepareTimeoutCase() (*HTTPCheck, func()) { httpCheck := New() httpCheck.UpdateEvery = 1 - httpCheck.Timeout.Duration = time.Millisecond * 100 + httpCheck.Timeout = web.Duration(time.Millisecond * 100) srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { - time.Sleep(httpCheck.Timeout.Duration + time.Millisecond*100) + time.Sleep(httpCheck.Timeout.Duration() + time.Millisecond*100) })) httpCheck.URL = srv.URL diff --git a/modules/init.go b/modules/init.go index 9e44cf98a..44688d494 100644 --- a/modules/init.go +++ b/modules/init.go @@ -3,84 +3,84 @@ package modules import ( - _ "github.com/netdata/go.d.plugin/modules/activemq" - _ "github.com/netdata/go.d.plugin/modules/apache" - _ "github.com/netdata/go.d.plugin/modules/bind" - _ "github.com/netdata/go.d.plugin/modules/cassandra" - _ "github.com/netdata/go.d.plugin/modules/chrony" - _ "github.com/netdata/go.d.plugin/modules/cockroachdb" - _ "github.com/netdata/go.d.plugin/modules/consul" - _ "github.com/netdata/go.d.plugin/modules/coredns" - _ "github.com/netdata/go.d.plugin/modules/couchbase" - _ "github.com/netdata/go.d.plugin/modules/couchdb" - _ "github.com/netdata/go.d.plugin/modules/dnsdist" - _ "github.com/netdata/go.d.plugin/modules/dnsmasq" - _ "github.com/netdata/go.d.plugin/modules/dnsmasq_dhcp" - _ "github.com/netdata/go.d.plugin/modules/dnsquery" - _ "github.com/netdata/go.d.plugin/modules/docker" - _ "github.com/netdata/go.d.plugin/modules/docker_engine" - _ "github.com/netdata/go.d.plugin/modules/dockerhub" - _ "github.com/netdata/go.d.plugin/modules/elasticsearch" - _ "github.com/netdata/go.d.plugin/modules/energid" - _ "github.com/netdata/go.d.plugin/modules/envoy" + //_ "github.com/netdata/go.d.plugin/modules/activemq" + //_ "github.com/netdata/go.d.plugin/modules/apache" + //_ "github.com/netdata/go.d.plugin/modules/bind" + //_ "github.com/netdata/go.d.plugin/modules/cassandra" + //_ "github.com/netdata/go.d.plugin/modules/chrony" + //_ "github.com/netdata/go.d.plugin/modules/cockroachdb" + //_ "github.com/netdata/go.d.plugin/modules/consul" + //_ "github.com/netdata/go.d.plugin/modules/coredns" + //_ "github.com/netdata/go.d.plugin/modules/couchbase" + //_ 
"github.com/netdata/go.d.plugin/modules/couchdb" + //_ "github.com/netdata/go.d.plugin/modules/dnsdist" + //_ "github.com/netdata/go.d.plugin/modules/dnsmasq" + //_ "github.com/netdata/go.d.plugin/modules/dnsmasq_dhcp" + //_ "github.com/netdata/go.d.plugin/modules/dnsquery" + //_ "github.com/netdata/go.d.plugin/modules/docker" + //_ "github.com/netdata/go.d.plugin/modules/docker_engine" + //_ "github.com/netdata/go.d.plugin/modules/dockerhub" + //_ "github.com/netdata/go.d.plugin/modules/elasticsearch" + //_ "github.com/netdata/go.d.plugin/modules/energid" + //_ "github.com/netdata/go.d.plugin/modules/envoy" _ "github.com/netdata/go.d.plugin/modules/example" - _ "github.com/netdata/go.d.plugin/modules/filecheck" - _ "github.com/netdata/go.d.plugin/modules/fluentd" - _ "github.com/netdata/go.d.plugin/modules/freeradius" - _ "github.com/netdata/go.d.plugin/modules/geth" - _ "github.com/netdata/go.d.plugin/modules/haproxy" - _ "github.com/netdata/go.d.plugin/modules/hdfs" - _ "github.com/netdata/go.d.plugin/modules/httpcheck" - _ "github.com/netdata/go.d.plugin/modules/isc_dhcpd" - _ "github.com/netdata/go.d.plugin/modules/k8s_kubelet" - _ "github.com/netdata/go.d.plugin/modules/k8s_kubeproxy" - _ "github.com/netdata/go.d.plugin/modules/k8s_state" - _ "github.com/netdata/go.d.plugin/modules/lighttpd" - _ "github.com/netdata/go.d.plugin/modules/logind" - _ "github.com/netdata/go.d.plugin/modules/logstash" - _ "github.com/netdata/go.d.plugin/modules/mongodb" - _ "github.com/netdata/go.d.plugin/modules/mysql" + //_ "github.com/netdata/go.d.plugin/modules/filecheck" + //_ "github.com/netdata/go.d.plugin/modules/fluentd" + //_ "github.com/netdata/go.d.plugin/modules/freeradius" + //_ "github.com/netdata/go.d.plugin/modules/geth" + //_ "github.com/netdata/go.d.plugin/modules/haproxy" + //_ "github.com/netdata/go.d.plugin/modules/hdfs" + //_ "github.com/netdata/go.d.plugin/modules/httpcheck" + //_ "github.com/netdata/go.d.plugin/modules/isc_dhcpd" + //_ "github.com/netdata/go.d.plugin/modules/k8s_kubelet" + //_ "github.com/netdata/go.d.plugin/modules/k8s_kubeproxy" + //_ "github.com/netdata/go.d.plugin/modules/k8s_state" + //_ "github.com/netdata/go.d.plugin/modules/lighttpd" + //_ "github.com/netdata/go.d.plugin/modules/logind" + //_ "github.com/netdata/go.d.plugin/modules/logstash" + //_ "github.com/netdata/go.d.plugin/modules/mongodb" + //_ "github.com/netdata/go.d.plugin/modules/mysql" _ "github.com/netdata/go.d.plugin/modules/nginx" - _ "github.com/netdata/go.d.plugin/modules/nginxplus" - _ "github.com/netdata/go.d.plugin/modules/nginxvts" - _ "github.com/netdata/go.d.plugin/modules/ntpd" - _ "github.com/netdata/go.d.plugin/modules/nvidia_smi" - _ "github.com/netdata/go.d.plugin/modules/nvme" - _ "github.com/netdata/go.d.plugin/modules/openvpn" - _ "github.com/netdata/go.d.plugin/modules/openvpn_status_log" - _ "github.com/netdata/go.d.plugin/modules/pgbouncer" - _ "github.com/netdata/go.d.plugin/modules/phpdaemon" - _ "github.com/netdata/go.d.plugin/modules/phpfpm" - _ "github.com/netdata/go.d.plugin/modules/pihole" - _ "github.com/netdata/go.d.plugin/modules/pika" + //_ "github.com/netdata/go.d.plugin/modules/nginxplus" + //_ "github.com/netdata/go.d.plugin/modules/nginxvts" + //_ "github.com/netdata/go.d.plugin/modules/ntpd" + //_ "github.com/netdata/go.d.plugin/modules/nvidia_smi" + //_ "github.com/netdata/go.d.plugin/modules/nvme" + //_ "github.com/netdata/go.d.plugin/modules/openvpn" + //_ "github.com/netdata/go.d.plugin/modules/openvpn_status_log" + //_ 
"github.com/netdata/go.d.plugin/modules/pgbouncer" + //_ "github.com/netdata/go.d.plugin/modules/phpdaemon" + //_ "github.com/netdata/go.d.plugin/modules/phpfpm" + //_ "github.com/netdata/go.d.plugin/modules/pihole" + //_ "github.com/netdata/go.d.plugin/modules/pika" _ "github.com/netdata/go.d.plugin/modules/ping" - _ "github.com/netdata/go.d.plugin/modules/portcheck" - _ "github.com/netdata/go.d.plugin/modules/postgres" - _ "github.com/netdata/go.d.plugin/modules/powerdns" - _ "github.com/netdata/go.d.plugin/modules/powerdns_recursor" - _ "github.com/netdata/go.d.plugin/modules/prometheus" - _ "github.com/netdata/go.d.plugin/modules/proxysql" - _ "github.com/netdata/go.d.plugin/modules/pulsar" - _ "github.com/netdata/go.d.plugin/modules/rabbitmq" - _ "github.com/netdata/go.d.plugin/modules/redis" - _ "github.com/netdata/go.d.plugin/modules/scaleio" - _ "github.com/netdata/go.d.plugin/modules/snmp" - _ "github.com/netdata/go.d.plugin/modules/solr" - _ "github.com/netdata/go.d.plugin/modules/springboot2" - _ "github.com/netdata/go.d.plugin/modules/squidlog" - _ "github.com/netdata/go.d.plugin/modules/supervisord" - _ "github.com/netdata/go.d.plugin/modules/systemdunits" - _ "github.com/netdata/go.d.plugin/modules/tengine" - _ "github.com/netdata/go.d.plugin/modules/traefik" - _ "github.com/netdata/go.d.plugin/modules/unbound" - _ "github.com/netdata/go.d.plugin/modules/upsd" - _ "github.com/netdata/go.d.plugin/modules/vcsa" - _ "github.com/netdata/go.d.plugin/modules/vernemq" - _ "github.com/netdata/go.d.plugin/modules/vsphere" - _ "github.com/netdata/go.d.plugin/modules/weblog" - _ "github.com/netdata/go.d.plugin/modules/whoisquery" - _ "github.com/netdata/go.d.plugin/modules/windows" - _ "github.com/netdata/go.d.plugin/modules/wireguard" - _ "github.com/netdata/go.d.plugin/modules/x509check" - _ "github.com/netdata/go.d.plugin/modules/zookeeper" + //_ "github.com/netdata/go.d.plugin/modules/portcheck" + //_ "github.com/netdata/go.d.plugin/modules/postgres" + //_ "github.com/netdata/go.d.plugin/modules/powerdns" + //_ "github.com/netdata/go.d.plugin/modules/powerdns_recursor" + //_ "github.com/netdata/go.d.plugin/modules/prometheus" + //_ "github.com/netdata/go.d.plugin/modules/proxysql" + //_ "github.com/netdata/go.d.plugin/modules/pulsar" + //_ "github.com/netdata/go.d.plugin/modules/rabbitmq" + //_ "github.com/netdata/go.d.plugin/modules/redis" + //_ "github.com/netdata/go.d.plugin/modules/scaleio" + //_ "github.com/netdata/go.d.plugin/modules/snmp" + //_ "github.com/netdata/go.d.plugin/modules/solr" + //_ "github.com/netdata/go.d.plugin/modules/springboot2" + //_ "github.com/netdata/go.d.plugin/modules/squidlog" + //_ "github.com/netdata/go.d.plugin/modules/supervisord" + //_ "github.com/netdata/go.d.plugin/modules/systemdunits" + //_ "github.com/netdata/go.d.plugin/modules/tengine" + //_ "github.com/netdata/go.d.plugin/modules/traefik" + //_ "github.com/netdata/go.d.plugin/modules/unbound" + //_ "github.com/netdata/go.d.plugin/modules/upsd" + //_ "github.com/netdata/go.d.plugin/modules/vcsa" + //_ "github.com/netdata/go.d.plugin/modules/vernemq" + //_ "github.com/netdata/go.d.plugin/modules/vsphere" + //_ "github.com/netdata/go.d.plugin/modules/weblog" + //_ "github.com/netdata/go.d.plugin/modules/whoisquery" + //_ "github.com/netdata/go.d.plugin/modules/windows" + //_ "github.com/netdata/go.d.plugin/modules/wireguard" + //_ "github.com/netdata/go.d.plugin/modules/x509check" + //_ "github.com/netdata/go.d.plugin/modules/zookeeper" ) diff --git a/modules/isc_dhcpd/config_schema.json 
b/modules/isc_dhcpd/config_schema.json index ed860cbeb..cddba5ab6 100644 --- a/modules/isc_dhcpd/config_schema.json +++ b/modules/isc_dhcpd/config_schema.json @@ -1,36 +1,39 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/isc_dhcpd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "leases_path": { - "type": "string" - }, - "pools": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/isc_dhcpd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "leases_path": { + "type": "string" + }, + "pools": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "networks": { + "type": "string" + } }, - "networks": { - "type": "string" - } - }, - "required": [ - "name", - "networks" - ] + "required": [ + "name", + "networks" + ] + } } - } + }, + "required": [ + "name", + "leases_path", + "pools" + ] }, - "required": [ - "name", - "leases_path", - "pools" - ] + "uiSchema": {} } diff --git a/modules/isc_dhcpd/init.go b/modules/isc_dhcpd/init.go index 847a4590b..de26499b0 100644 --- a/modules/isc_dhcpd/init.go +++ b/modules/isc_dhcpd/init.go @@ -15,7 +15,7 @@ type ipPool struct { addresses iprange.Pool } -func (d DHCPd) validateConfig() error { +func (d *DHCPd) validateConfig() error { if d.Config.LeasesPath == "" { return errors.New("'lease_path' parameter not set") } @@ -33,7 +33,7 @@ func (d DHCPd) validateConfig() error { return nil } -func (d DHCPd) initPools() ([]ipPool, error) { +func (d *DHCPd) initPools() ([]ipPool, error) { var pools []ipPool for i, cfg := range d.Pools { rs, err := iprange.ParseRanges(cfg.Networks) @@ -50,7 +50,7 @@ func (d DHCPd) initPools() ([]ipPool, error) { return pools, nil } -func (d DHCPd) initCharts(pools []ipPool) (*module.Charts, error) { +func (d *DHCPd) initCharts(pools []ipPool) (*module.Charts, error) { charts := &module.Charts{} if err := charts.Add(activeLeasesTotalChart.Copy()); err != nil { diff --git a/modules/isc_dhcpd/isc_dhcpd.go b/modules/isc_dhcpd/isc_dhcpd.go index e1f4e5764..72ceaca2c 100644 --- a/modules/isc_dhcpd/isc_dhcpd.go +++ b/modules/isc_dhcpd/isc_dhcpd.go @@ -4,6 +4,7 @@ package isc_dhcpd import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -22,6 +23,16 @@ func init() { }) } +func New() *DHCPd { + return &DHCPd{ + Config: Config{ + LeasesPath: "/var/lib/dhcp/dhcpd.leases", + }, + + collected: make(map[string]int64), + } +} + type ( Config struct { LeasesPath string `yaml:"leases_path"` @@ -43,46 +54,47 @@ type DHCPd struct { collected map[string]int64 } -func New() *DHCPd { - return &DHCPd{ - Config: Config{ - LeasesPath: "/var/lib/dhcp/dhcpd.leases", - }, - - collected: make(map[string]int64), - } +func (d *DHCPd) Configuration() any { + return d.Config } -func (DHCPd) Cleanup() {} - -func (d *DHCPd) Init() bool { +func (d *DHCPd) Init() error { err := d.validateConfig() if err != nil { d.Errorf("config validation: %v", err) - return false + return err } pools, err := d.initPools() if err != nil { d.Errorf("ip pools init: %v", err) - return false + return err } d.pools = pools charts, err := d.initCharts(pools) if err != nil { d.Errorf("charts init: %v", err) - return false + return err } d.charts = charts d.Debugf("monitoring leases file: %v", d.Config.LeasesPath) 
d.Debugf("monitoring ip pools: %v", d.Config.Pools) - return true + + return nil } -func (d *DHCPd) Check() bool { - return len(d.Collect()) > 0 +func (d *DHCPd) Check() error { + mx, err := d.collect() + if err != nil { + d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (d *DHCPd) Charts() *module.Charts { @@ -101,3 +113,5 @@ func (d *DHCPd) Collect() map[string]int64 { return mx } + +func (d *DHCPd) Cleanup() {} diff --git a/modules/isc_dhcpd/isc_dhcpd_test.go b/modules/isc_dhcpd/isc_dhcpd_test.go index 72980e469..ab03f3a0b 100644 --- a/modules/isc_dhcpd/isc_dhcpd_test.go +++ b/modules/isc_dhcpd/isc_dhcpd_test.go @@ -67,9 +67,9 @@ func TestDHCPd_Init(t *testing.T) { dhcpd.Config = test.config if test.wantFail { - assert.False(t, dhcpd.Init()) + assert.Error(t, dhcpd.Init()) } else { - assert.True(t, dhcpd.Init()) + assert.NoError(t, dhcpd.Init()) } }) } @@ -91,12 +91,12 @@ func TestDHCPd_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { dhcpd := test.prepare() - require.True(t, dhcpd.Init()) + require.NoError(t, dhcpd.Init()) if test.wantFail { - assert.False(t, dhcpd.Check()) + assert.Error(t, dhcpd.Check()) } else { - assert.True(t, dhcpd.Check()) + assert.NoError(t, dhcpd.Check()) } }) } @@ -108,7 +108,7 @@ func TestDHCPd_Charts(t *testing.T) { dhcpd.Pools = []PoolConfig{ {Name: "name", Networks: "192.0.2.0/24"}, } - require.True(t, dhcpd.Init()) + require.NoError(t, dhcpd.Init()) assert.NotNil(t, dhcpd.Charts()) } @@ -209,7 +209,7 @@ func TestDHCPd_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { dhcpd := test.prepare() - require.True(t, dhcpd.Init()) + require.NoError(t, dhcpd.Init()) collected := dhcpd.Collect() diff --git a/modules/k8s_kubelet/config_schema.json b/modules/k8s_kubelet/config_schema.json index 6e42187f2..2d3c84daa 100644 --- a/modules/k8s_kubelet/config_schema.json +++ b/modules/k8s_kubelet/config_schema.json @@ -1,62 +1,65 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/k8s_kubelet job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "token_path": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/k8s_kubelet job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "token_path": { + "type": "string" + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - 
"tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/k8s_kubelet/init.go b/modules/k8s_kubelet/init.go new file mode 100644 index 000000000..f9fcda8ce --- /dev/null +++ b/modules/k8s_kubelet/init.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubelet + +import ( + "errors" + "os" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (k *Kubelet) validateConfig() error { + if k.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (k *Kubelet) initAuthToken() string { + bs, err := os.ReadFile(k.TokenPath) + if err != nil { + k.Warningf("error on reading service account token from '%s': %v", k.TokenPath, err) + } + return string(bs) +} + +func (k *Kubelet) initPrometheusClient() (prometheus.Prometheus, error) { + httpClient, err := web.NewHTTPClient(k.Client) + if err != nil { + return nil, err + } + + return prometheus.New(httpClient, k.Request), nil +} diff --git a/modules/k8s_kubelet/kubelet.go b/modules/k8s_kubelet/kubelet.go index 7f62c9f30..cb963ce7e 100644 --- a/modules/k8s_kubelet/kubelet.go +++ b/modules/k8s_kubelet/kubelet.go @@ -4,7 +4,7 @@ package k8s_kubelet import ( _ "embed" - "os" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/prometheus" @@ -29,72 +29,80 @@ func init() { // New creates Kubelet with default values. func New() *Kubelet { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:10255/metrics", - Headers: make(map[string]string), - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &Kubelet{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:10255/metrics", + Headers: make(map[string]string), + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, + TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token", }, - TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token", - } - return &Kubelet{ - Config: config, charts: charts.Copy(), collectedVMPlugins: make(map[string]bool), } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - TokenPath string `yaml:"token_path"` - } +type Config struct { + web.HTTP `yaml:",inline"` + TokenPath string `yaml:"token_path"` +} - Kubelet struct { - module.Base - Config `yaml:",inline"` +type Kubelet struct { + module.Base + Config `yaml:",inline"` - prom prometheus.Prometheus - charts *Charts - // volume_manager_total_volumes - collectedVMPlugins map[string]bool - } -) + prom prometheus.Prometheus + charts *Charts + // volume_manager_total_volumes + collectedVMPlugins map[string]bool +} -// Cleanup makes cleanup. -func (Kubelet) Cleanup() {} +func (k *Kubelet) Configuration() any { + return k.Config +} // Init makes initialization. 
-func (k *Kubelet) Init() bool { - b, err := os.ReadFile(k.TokenPath) - if err != nil { - k.Warningf("error on reading service account token from '%s': %v", k.TokenPath, err) - } else { - k.Request.Headers["Authorization"] = "Bearer " + string(b) +func (k *Kubelet) Init() error { + if err := k.validateConfig(); err != nil { + k.Errorf("config validation: %v", err) + return err } - client, err := web.NewHTTPClient(k.Client) + prom, err := k.initPrometheusClient() if err != nil { - k.Errorf("error on creating http client: %v", err) - return false + k.Error(err) + return err + } + k.prom = prom + + if tok := k.initAuthToken(); tok != "" { + k.Request.Headers["Authorization"] = "Bearer " + tok } - k.prom = prometheus.New(client, k.Request) - return true + return nil } // Check makes check. -func (k *Kubelet) Check() bool { - return len(k.Collect()) > 0 +func (k *Kubelet) Check() error { + mx, err := k.collect() + if err != nil { + k.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts creates Charts. -func (k Kubelet) Charts() *Charts { +func (k *Kubelet) Charts() *Charts { return k.charts } @@ -109,3 +117,10 @@ func (k *Kubelet) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (k *Kubelet) Cleanup() { + if k.prom != nil && k.prom.HTTPClient() != nil { + k.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/k8s_kubelet/kubelet_test.go b/modules/k8s_kubelet/kubelet_test.go index a69a0724b..42331b891 100644 --- a/modules/k8s_kubelet/kubelet_test.go +++ b/modules/k8s_kubelet/kubelet_test.go @@ -37,14 +37,14 @@ func TestKubelet_Cleanup(t *testing.T) { } func TestKubelet_Init(t *testing.T) { - assert.True(t, New().Init()) + assert.NoError(t, New().Init()) } func TestKubelet_Init_ReadServiceAccountToken(t *testing.T) { job := New() job.TokenPath = "testdata/token.txt" - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.Equal(t, "Bearer "+string(testTokenData), job.Request.Headers["Authorization"]) } @@ -52,7 +52,7 @@ func TestKubelet_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) { job := New() job.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestKubelet_Check(t *testing.T) { @@ -65,15 +65,15 @@ func TestKubelet_Check(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestKubelet_Check_ConnectionRefused(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestKubelet_Collect(t *testing.T) { @@ -86,8 +86,8 @@ func TestKubelet_Collect(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "apiserver_audit_requests_rejected_total": 0, @@ -185,8 +185,8 @@ func TestKubelet_Collect_ReceiveInvalidResponse(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestKubelet_Collect_Receive404(t *testing.T) { @@ -199,6 +199,6 @@ func TestKubelet_Collect_Receive404(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, 
job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/k8s_kubeproxy/config_schema.json b/modules/k8s_kubeproxy/config_schema.json index c26231397..810d65811 100644 --- a/modules/k8s_kubeproxy/config_schema.json +++ b/modules/k8s_kubeproxy/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/k8s_kubeproxy job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/k8s_kubeproxy job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/k8s_kubeproxy/init.go b/modules/k8s_kubeproxy/init.go new file mode 100644 index 000000000..39b46d353 --- /dev/null +++ b/modules/k8s_kubeproxy/init.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubeproxy + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (kp *KubeProxy) validateConfig() error { + if kp.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (kp *KubeProxy) initPrometheusClient() (prometheus.Prometheus, error) { + httpClient, err := web.NewHTTPClient(kp.Client) + if err != nil { + return nil, err + } + + return prometheus.New(httpClient, kp.Request), nil +} diff --git a/modules/k8s_kubeproxy/kubeproxy.go b/modules/k8s_kubeproxy/kubeproxy.go index a681619c4..f3fe27d84 100644 --- a/modules/k8s_kubeproxy/kubeproxy.go +++ b/modules/k8s_kubeproxy/kubeproxy.go @@ -4,17 +4,12 @@ package k8s_kubeproxy import ( _ "embed" + "errors" "time" + "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/pkg/prometheus" "github.com/netdata/go.d.plugin/pkg/web" - - "github.com/netdata/go.d.plugin/agent/module" -) - -const ( - defaultURL = "http://127.0.0.1:10249/metrics" - defaultHTTPTimeout = time.Second * 2 ) //go:embed "config_schema.json" @@ -33,18 +28,17 @@ func init() { // New creates KubeProxy with default values. 
func New() *KubeProxy { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &KubeProxy{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:10249/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, }, }, - } - return &KubeProxy{ - Config: config, charts: charts.Copy(), } } @@ -63,34 +57,42 @@ type KubeProxy struct { charts *Charts } -// Cleanup makes cleanup. -func (KubeProxy) Cleanup() {} +func (kp *KubeProxy) Configuration() any { + return kp.Config +} // Init makes initialization. -func (kp *KubeProxy) Init() bool { - if kp.URL == "" { - kp.Error("URL not set") - return false +func (kp *KubeProxy) Init() error { + if err := kp.validateConfig(); err != nil { + kp.Errorf("config validation: %v", err) + return err } - client, err := web.NewHTTPClient(kp.Client) + prom, err := kp.initPrometheusClient() if err != nil { - kp.Errorf("error on creating http client : %v", err) - return false + kp.Error(err) + return err } + kp.prom = prom - kp.prom = prometheus.New(client, kp.Request) - - return true + return nil } // Check makes check. -func (kp *KubeProxy) Check() bool { - return len(kp.Collect()) > 0 +func (kp *KubeProxy) Check() error { + mx, err := kp.collect() + if err != nil { + kp.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts creates Charts. -func (kp KubeProxy) Charts() *Charts { +func (kp *KubeProxy) Charts() *Charts { return kp.charts } @@ -105,3 +107,10 @@ func (kp *KubeProxy) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (kp *KubeProxy) Cleanup() { + if kp.prom != nil && kp.prom.HTTPClient() != nil { + kp.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/k8s_kubeproxy/kubeproxy_test.go b/modules/k8s_kubeproxy/kubeproxy_test.go index 4c1831a99..f2346055e 100644 --- a/modules/k8s_kubeproxy/kubeproxy_test.go +++ b/modules/k8s_kubeproxy/kubeproxy_test.go @@ -14,24 +14,22 @@ import ( var testMetrics, _ = os.ReadFile("testdata/metrics.txt") -func TestNew(t *testing.T) { - job := New() - - assert.IsType(t, (*KubeProxy)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +func TestKubeProxy_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) } -func TestKubeProxy_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } - -func TestKubeProxy_Cleanup(t *testing.T) { New().Cleanup() } +func TestKubeProxy_Cleanup(t *testing.T) { + New().Cleanup() +} -func TestKubeProxy_Init(t *testing.T) { assert.True(t, New().Init()) } +func TestKubeProxy_Init(t *testing.T) { + assert.NoError(t, New().Init()) +} func TestKubeProxy_InitNG(t *testing.T) { job := New() job.URL = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestKubeProxy_Check(t *testing.T) { @@ -44,15 +42,15 @@ func TestKubeProxy_Check(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestKubeProxy_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestKubeProxy_Collect(t *testing.T) { @@ -65,8 +63,8 @@ func TestKubeProxy_Collect(t *testing.T) { job := New() 
job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "sync_proxy_rules_count": 2669, @@ -108,8 +106,8 @@ func TestKubeProxy_InvalidData(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestKubeProxy_404(t *testing.T) { @@ -122,6 +120,6 @@ func TestKubeProxy_404(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/k8s_state/config_schema.json b/modules/k8s_state/config_schema.json index 42b6b0fd6..9f5124b98 100644 --- a/modules/k8s_state/config_schema.json +++ b/modules/k8s_state/config_schema.json @@ -1,13 +1,16 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/k8s_state job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/k8s_state job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] }, - "required": [ - "name" - ] + "uiSchema": {} } diff --git a/modules/k8s_state/kube_state.go b/modules/k8s_state/kube_state.go index 3a3046e47..1b0ecb93e 100644 --- a/modules/k8s_state/kube_state.go +++ b/modules/k8s_state/kube_state.go @@ -5,6 +5,8 @@ package k8s_state import ( "context" _ "embed" + "errors" + "fmt" "sync" "time" @@ -38,12 +40,6 @@ func New() *KubeState { } type ( - discoverer interface { - run(ctx context.Context, in chan<- resource) - ready() bool - stopped() bool - } - KubeState struct { module.Base @@ -65,13 +61,23 @@ type ( kubeClusterID string kubeClusterName string } + discoverer interface { + run(ctx context.Context, in chan<- resource) + ready() bool + stopped() bool + } ) -func (ks *KubeState) Init() bool { +func (ks *KubeState) Configuration() any { + // TODO: fix? 
+ return nil +} + +func (ks *KubeState) Init() error { client, err := ks.initClient() if err != nil { ks.Errorf("client initialization: %v", err) - return false + return err } ks.client = client @@ -79,23 +85,25 @@ func (ks *KubeState) Init() bool { ks.discoverer = ks.initDiscoverer(ks.client) - return true + return nil } -func (ks *KubeState) Check() bool { +func (ks *KubeState) Check() error { if ks.client == nil || ks.discoverer == nil { ks.Error("not initialized job") - return false + return errors.New("not initialized") } ver, err := ks.client.Discovery().ServerVersion() if err != nil { - ks.Errorf("failed to connect to the Kubernetes API server: %v", err) - return false + err := fmt.Errorf("failed to connect to K8s API server: %v", err) + ks.Error(err) + return err } ks.Infof("successfully connected to the Kubernetes API server '%s'", ver) - return true + + return nil } func (ks *KubeState) Charts() *module.Charts { @@ -123,7 +131,7 @@ func (ks *KubeState) Cleanup() { c := make(chan struct{}) go func() { defer close(c); ks.wg.Wait() }() - t := time.NewTimer(time.Second * 3) + t := time.NewTimer(time.Second * 5) defer t.Stop() select { diff --git a/modules/k8s_state/kube_state_test.go b/modules/k8s_state/kube_state_test.go index 451028532..7bb4aa426 100644 --- a/modules/k8s_state/kube_state_test.go +++ b/modules/k8s_state/kube_state_test.go @@ -55,9 +55,9 @@ func TestKubeState_Init(t *testing.T) { ks := test.prepare() if test.wantFail { - assert.False(t, ks.Init()) + assert.Error(t, ks.Init()) } else { - assert.True(t, ks.Init()) + assert.NoError(t, ks.Init()) } }) } @@ -90,12 +90,12 @@ func TestKubeState_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { ks := test.prepare() - require.True(t, ks.Init()) + require.NoError(t, ks.Init()) if test.wantFail { - assert.False(t, ks.Check()) + assert.Error(t, ks.Check()) } else { - assert.True(t, ks.Check()) + assert.NoError(t, ks.Check()) } }) } @@ -663,8 +663,8 @@ func TestKubeState_Collect(t *testing.T) { ks := New() ks.newKubeClient = func() (kubernetes.Interface, error) { return test.client, nil } - require.True(t, ks.Init()) - require.True(t, ks.Check()) + require.NoError(t, ks.Init()) + require.NoError(t, ks.Check()) defer ks.Cleanup() for i, executeStep := range test.steps { diff --git a/modules/lighttpd/config_schema.json b/modules/lighttpd/config_schema.json index c1b51d065..cf32aa32f 100644 --- a/modules/lighttpd/config_schema.json +++ b/modules/lighttpd/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/lighttpd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/lighttpd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + 
"proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/lighttpd/init.go b/modules/lighttpd/init.go new file mode 100644 index 000000000..f9f4baf37 --- /dev/null +++ b/modules/lighttpd/init.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package lighttpd + +import ( + "errors" + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (l *Lighttpd) validateConfig() error { + if l.URL == "" { + return errors.New("url not set") + } + if !strings.HasSuffix(l.URL, "?auto") { + return fmt.Errorf("bad URL '%s', should ends in '?auto'", l.URL) + } + return nil +} + +func (l *Lighttpd) initApiClient() (*apiClient, error) { + client, err := web.NewHTTPClient(l.Client) + if err != nil { + return nil, err + } + return newAPIClient(client, l.Request), nil +} diff --git a/modules/lighttpd/lighttpd.go b/modules/lighttpd/lighttpd.go index 2f98a96bf..c1de7be11 100644 --- a/modules/lighttpd/lighttpd.go +++ b/modules/lighttpd/lighttpd.go @@ -4,7 +4,7 @@ package lighttpd import ( _ "embed" - "strings" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -22,24 +22,18 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1/server-status?auto" - defaultHTTPTimeout = time.Second * 2 -) - // New creates Lighttpd with default values. func New() *Lighttpd { - config := Config{ + return &Lighttpd{Config: Config{ HTTP: web.HTTP{ Request: web.Request{ - URL: defaultURL, + URL: "http://127.0.0.1/server-status?auto", }, Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + Timeout: web.Duration(time.Second * 2), }, }, - } - return &Lighttpd{Config: config} + }} } // Config is the Lighttpd module configuration. @@ -53,39 +47,47 @@ type Lighttpd struct { apiClient *apiClient } -// Cleanup makes cleanup. -func (Lighttpd) Cleanup() {} +func (l *Lighttpd) Configuration() any { + return l.Config +} // Init makes initialization. 
-func (l *Lighttpd) Init() bool { - if l.URL == "" { - l.Error("URL not set") - return false +func (l *Lighttpd) Init() error { + if err := l.validateConfig(); err != nil { + l.Errorf("config validation: %v", err) + return err } - if !strings.HasSuffix(l.URL, "?auto") { - l.Errorf("bad URL '%s', should ends in '?auto'", l.URL) - return false - } - - client, err := web.NewHTTPClient(l.Client) + client, err := l.initApiClient() if err != nil { - l.Errorf("error on creating http client : %v", err) - return false + l.Error(err) + return err } - l.apiClient = newAPIClient(client, l.Request) + l.apiClient = client l.Debugf("using URL %s", l.URL) - l.Debugf("using timeout: %s", l.Timeout.Duration) + l.Debugf("using timeout: %s", l.Timeout.Duration()) - return true + return nil } // Check makes check -func (l *Lighttpd) Check() bool { return len(l.Collect()) > 0 } +func (l *Lighttpd) Check() error { + mx, err := l.collect() + if err != nil { + l.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil +} // Charts returns Charts. -func (l Lighttpd) Charts() *Charts { return charts.Copy() } +func (l Lighttpd) Charts() *Charts { + return charts.Copy() +} // Collect collects metrics. func (l *Lighttpd) Collect() map[string]int64 { @@ -98,3 +100,10 @@ func (l *Lighttpd) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (l *Lighttpd) Cleanup() { + if l.apiClient != nil && l.apiClient.httpClient != nil { + l.apiClient.httpClient.CloseIdleConnections() + } +} diff --git a/modules/lighttpd/lighttpd_test.go b/modules/lighttpd/lighttpd_test.go index e6a7b016e..781e2db97 100644 --- a/modules/lighttpd/lighttpd_test.go +++ b/modules/lighttpd/lighttpd_test.go @@ -8,7 +8,6 @@ import ( "os" "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -20,18 +19,10 @@ var ( func TestLighttpd_Cleanup(t *testing.T) { New().Cleanup() } -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestLighttpd_Init(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) } @@ -39,7 +30,7 @@ func TestLighttpd_InitNG(t *testing.T) { job := New() job.URL = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestLighttpd_Check(t *testing.T) { @@ -52,16 +43,16 @@ func TestLighttpd_Check(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestLighttpd_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/server-status?auto" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestLighttpd_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } @@ -76,8 +67,8 @@ func TestLighttpd_Collect(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "scoreboard_waiting": 125, @@ -113,8 +104,8 @@ func TestLighttpd_InvalidData(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - assert.False(t, 
job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestLighttpd_ApacheData(t *testing.T) { @@ -127,8 +118,8 @@ func TestLighttpd_ApacheData(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - require.False(t, job.Check()) + require.NoError(t, job.Init()) + require.Error(t, job.Check()) } func TestLighttpd_404(t *testing.T) { @@ -141,6 +132,6 @@ func TestLighttpd_404(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/logind/config_schema.json b/modules/logind/config_schema.json index b7ad53e9a..3dfe57e36 100644 --- a/modules/logind/config_schema.json +++ b/modules/logind/config_schema.json @@ -1,19 +1,22 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/logind job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/logind job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name" + ] }, - "required": [ - "name" - ] + "uiSchema": {} } diff --git a/modules/logind/logind.go b/modules/logind/logind.go index 456217e9f..7cbb33971 100644 --- a/modules/logind/logind.go +++ b/modules/logind/logind.go @@ -7,6 +7,7 @@ package logind import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,10 +30,10 @@ func init() { func New() *Logind { return &Logind{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, newLogindConn: func(cfg Config) (logindConnection, error) { - return newLogindConnection(cfg.Timeout.Duration) + return newLogindConnection(cfg.Timeout.Duration()) }, charts: charts.Copy(), } @@ -51,12 +52,24 @@ type Logind struct { charts *module.Charts } -func (l *Logind) Init() bool { - return true +func (l *Logind) Configuration() any { + return l.Config } -func (l *Logind) Check() bool { - return len(l.Collect()) > 0 +func (l *Logind) Init() error { + return nil +} + +func (l *Logind) Check() error { + mx, err := l.collect() + if err != nil { + l.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (l *Logind) Charts() *module.Charts { diff --git a/modules/logind/logind_test.go b/modules/logind/logind_test.go index 07b00c168..7aa35672a 100644 --- a/modules/logind/logind_test.go +++ b/modules/logind/logind_test.go @@ -32,9 +32,9 @@ func TestLogind_Init(t *testing.T) { l.Config = test.config if test.wantFail { - assert.False(t, l.Init()) + assert.Error(t, l.Init()) } else { - assert.True(t, l.Init()) + assert.NoError(t, l.Init()) } }) } @@ -55,15 +55,15 @@ func TestLogind_Cleanup(t *testing.T) { }, "after Init": { wantClose: false, - prepare: func(l *Logind) { l.Init() }, + prepare: func(l *Logind) { _ = l.Init() }, }, "after Check": { wantClose: true, - prepare: func(l *Logind) { l.Init(); l.Check() }, + prepare: func(l *Logind) { _ = l.Init(); _ = l.Check() }, }, "after Collect": { wantClose: true, - prepare: func(l *Logind) { l.Init(); l.Collect() }, + prepare: func(l *Logind) { _ = l.Init(); l.Collect() }, }, } @@ -119,13 +119,13 @@ func 
TestLogind_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { l := New() - require.True(t, l.Init()) + require.NoError(t, l.Init()) l.conn = test.prepare() if test.wantFail { - assert.False(t, l.Check()) + assert.Error(t, l.Check()) } else { - assert.True(t, l.Check()) + assert.NoError(t, l.Check()) } }) } @@ -193,7 +193,7 @@ func TestLogind_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { l := New() - require.True(t, l.Init()) + require.NoError(t, l.Init()) l.conn = test.prepare() mx := l.Collect() diff --git a/modules/logstash/config_schema.json b/modules/logstash/config_schema.json index 9e4d59642..59003646d 100644 --- a/modules/logstash/config_schema.json +++ b/modules/logstash/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/logstash job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/logstash job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/logstash/logstash.go b/modules/logstash/logstash.go index 728267294..cd77de3dc 100644 --- a/modules/logstash/logstash.go +++ b/modules/logstash/logstash.go @@ -4,6 +4,7 @@ package logstash import ( _ "embed" + "errors" "net/http" "time" @@ -29,7 +30,7 @@ func New() *Logstash { URL: "http://localhost:9600", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -50,26 +51,39 @@ type Logstash struct { pipelines map[string]bool } -func (l *Logstash) Init() bool { +func (l *Logstash) Configuration() any { + return l.Config +} + +func (l *Logstash) Init() error { if l.URL == "" { l.Error("config validation: 'url' cannot be empty") - return false + return errors.New("url not set") } httpClient, err := web.NewHTTPClient(l.Client) if err != nil { l.Errorf("init HTTP client: %v", err) - return false + return err } l.httpClient = httpClient l.Debugf("using URL %s", l.URL) - l.Debugf("using timeout: %s", l.Timeout.Duration) - return true + l.Debugf("using 
timeout: %s", l.Timeout.Duration()) + + return nil } -func (l *Logstash) Check() bool { - return len(l.Collect()) > 0 +func (l *Logstash) Check() error { + mx, err := l.collect() + if err != nil { + l.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (l *Logstash) Charts() *module.Charts { diff --git a/modules/logstash/logstash_test.go b/modules/logstash/logstash_test.go index 2b5fd32d5..81e86c414 100644 --- a/modules/logstash/logstash_test.go +++ b/modules/logstash/logstash_test.go @@ -52,9 +52,9 @@ func TestLogstash_Init(t *testing.T) { ls.Config = test.config if test.wantFail { - assert.False(t, ls.Init()) + assert.Error(t, ls.Init()) } else { - assert.True(t, ls.Init()) + assert.NoError(t, ls.Init()) } }) } @@ -97,9 +97,9 @@ func TestLogstash_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, ls.Check()) + assert.Error(t, ls.Check()) } else { - assert.True(t, ls.Check()) + assert.NoError(t, ls.Check()) } }) } @@ -202,7 +202,7 @@ func caseValidResponse(t *testing.T) (*Logstash, func()) { })) ls := New() ls.URL = srv.URL - require.True(t, ls.Init()) + require.NoError(t, ls.Init()) return ls, srv.Close } @@ -215,7 +215,7 @@ func caseInvalidDataResponse(t *testing.T) (*Logstash, func()) { })) ls := New() ls.URL = srv.URL - require.True(t, ls.Init()) + require.NoError(t, ls.Init()) return ls, srv.Close } @@ -224,7 +224,7 @@ func caseConnectionRefused(t *testing.T) (*Logstash, func()) { t.Helper() ls := New() ls.URL = "http://127.0.0.1:65001" - require.True(t, ls.Init()) + require.NoError(t, ls.Init()) return ls, func() {} } @@ -237,7 +237,7 @@ func case404(t *testing.T) (*Logstash, func()) { })) ls := New() ls.URL = srv.URL - require.True(t, ls.Init()) + require.NoError(t, ls.Init()) return ls, srv.Close } diff --git a/modules/mongodb/config_schema.json b/modules/mongodb/config_schema.json index 48afef584..50fa53c9a 100644 --- a/modules/mongodb/config_schema.json +++ b/modules/mongodb/config_schema.json @@ -1,23 +1,26 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/mongodb job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/mongodb job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uri": { + "type": "string" + }, + "timeout": { + "type": "number" + }, + "databases": { + "type": "string" + } }, - "uri": { - "type": "string" - }, - "timeout": { - "type": "number" - }, - "databases": { - "type": "string" - } + "required": [ + "name", + "uri" + ] }, - "required": [ - "name", - "uri" - ] + "uiSchema": {} } diff --git a/modules/mongodb/mongodb.go b/modules/mongodb/mongodb.go index 522acbaa0..4b54dc7c5 100644 --- a/modules/mongodb/mongodb.go +++ b/modules/mongodb/mongodb.go @@ -4,6 +4,7 @@ package mongo import ( _ "embed" + "errors" "sync" "time" @@ -68,22 +69,34 @@ type Mongo struct { shards map[string]bool } -func (m *Mongo) Init() bool { +func (m *Mongo) Configuration() any { + return m.Config +} + +func (m *Mongo) Init() error { if err := m.verifyConfig(); err != nil { m.Errorf("config validation: %v", err) - return false + return err } if err := m.initDatabaseSelector(); err != nil { m.Errorf("init database selector: %v", err) - return false + return err } - return true + return nil } -func (m *Mongo) Check() bool { - return len(m.Collect()) > 0 +func (m *Mongo) Check() error { + mx, err 
:= m.collect() + if err != nil { + m.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (m *Mongo) Charts() *module.Charts { diff --git a/modules/mongodb/mongodb_test.go b/modules/mongodb/mongodb_test.go index 37da851ed..766ab8ee5 100644 --- a/modules/mongodb/mongodb_test.go +++ b/modules/mongodb/mongodb_test.go @@ -65,9 +65,9 @@ func TestMongo_Init(t *testing.T) { mongo.Config = test.config if test.wantFail { - assert.False(t, mongo.Init()) + assert.Error(t, mongo.Init()) } else { - assert.True(t, mongo.Init()) + assert.NoError(t, mongo.Init()) } }) } @@ -139,12 +139,12 @@ func TestMongo_Check(t *testing.T) { defer mongo.Cleanup() mongo.conn = test.prepare() - require.True(t, mongo.Init()) + require.NoError(t, mongo.Init()) if test.wantFail { - assert.False(t, mongo.Check()) + assert.Error(t, mongo.Check()) } else { - assert.True(t, mongo.Check()) + assert.NoError(t, mongo.Check()) } }) } @@ -590,7 +590,7 @@ func TestMongo_Collect(t *testing.T) { defer mongo.Cleanup() mongo.conn = test.prepare() - require.True(t, mongo.Init()) + require.NoError(t, mongo.Init()) mx := mongo.Collect() diff --git a/modules/mysql/collect.go b/modules/mysql/collect.go index 3ff0882ad..796ca22ff 100644 --- a/modules/mysql/collect.go +++ b/modules/mysql/collect.go @@ -97,7 +97,7 @@ func (m *MySQL) openConnection() error { db.SetConnMaxLifetime(10 * time.Minute) - ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration()) defer cancel() if err := db.PingContext(ctx); err != nil { @@ -145,7 +145,7 @@ func hasTableOpenCacheOverflowsMetrics(collected map[string]int64) bool { } func (m *MySQL) collectQuery(query string, assign func(column, value string, lineEnd bool)) (duration int64, err error) { - ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration()) defer cancel() s := time.Now() diff --git a/modules/mysql/config_schema.json b/modules/mysql/config_schema.json index 1db919824..f1d4c94b8 100644 --- a/modules/mysql/config_schema.json +++ b/modules/mysql/config_schema.json @@ -1,29 +1,32 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/mysql job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/mysql job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "my.cnf": { + "type": "string" + }, + "update_every": { + "type": "integer" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "dsn": { - "type": "string" - }, - "my.cnf": { - "type": "string" - }, - "update_every": { - "type": "integer" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "dsn" + ] }, - "required": [ - "name", - "dsn" - ] + "uiSchema": {} } diff --git a/modules/mysql/mysql.go b/modules/mysql/mysql.go index c7016098f..90b583491 100644 --- a/modules/mysql/mysql.go +++ b/modules/mysql/mysql.go @@ -5,6 +5,7 @@ package mysql import ( "database/sql" _ "embed" + "errors" "strings" "sync" "time" @@ -31,7 +32,7 @@ func New() *MySQL { return &MySQL{ Config: Config{ DSN: "root@tcp(localhost:3306)/", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, charts: 
baseCharts.Copy(), @@ -92,36 +93,49 @@ type MySQL struct { varPerformanceSchema string } -func (m *MySQL) Init() bool { +func (m *MySQL) Configuration() any { + return m.Config +} + +func (m *MySQL) Init() error { if m.MyCNF != "" { dsn, err := dsnFromFile(m.MyCNF) if err != nil { m.Error(err) - return false + return err } m.DSN = dsn } if m.DSN == "" { - m.Error("DSN not set") - return false + m.Error("dsn not set") + return errors.New("dsn not set") } cfg, err := mysql.ParseDSN(m.DSN) if err != nil { m.Errorf("error on parsing DSN: %v", err) - return false + return err } cfg.Passwd = strings.Repeat("*", len(cfg.Passwd)) m.safeDSN = cfg.FormatDSN() m.Debugf("using DSN [%s]", m.DSN) - return true + + return nil } -func (m *MySQL) Check() bool { - return len(m.Collect()) > 0 +func (m *MySQL) Check() error { + mx, err := m.collect() + if err != nil { + m.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (m *MySQL) Charts() *module.Charts { diff --git a/modules/mysql/mysql_test.go b/modules/mysql/mysql_test.go index 283b13770..5f633b3a3 100644 --- a/modules/mysql/mysql_test.go +++ b/modules/mysql/mysql_test.go @@ -113,9 +113,9 @@ func TestMySQL_Init(t *testing.T) { mySQL.Config = test.config if test.wantFail { - assert.False(t, mySQL.Init()) + assert.Error(t, mySQL.Init()) } else { - assert.True(t, mySQL.Init()) + assert.NoError(t, mySQL.Init()) } }) } @@ -235,14 +235,14 @@ func TestMySQL_Check(t *testing.T) { my.db = db defer func() { _ = db.Close() }() - require.True(t, my.Init()) + require.NoError(t, my.Init()) test.prepareMock(t, mock) if test.wantFail { - assert.False(t, my.Check()) + assert.Error(t, my.Check()) } else { - assert.True(t, my.Check()) + assert.NoError(t, my.Check()) } assert.NoError(t, mock.ExpectationsWereMet()) }) @@ -1607,7 +1607,7 @@ func TestMySQL_Collect(t *testing.T) { my.db = db defer func() { _ = db.Close() }() - require.True(t, my.Init()) + require.NoError(t, my.Init()) for i, step := range test { t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { diff --git a/modules/nginx/config_schema.json b/modules/nginx/config_schema.json index 58a6865da..f79fd8e51 100644 --- a/modules/nginx/config_schema.json +++ b/modules/nginx/config_schema.json @@ -1,59 +1,85 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/nginx job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nginx job configuration schema.", + "type": "object", + "properties": { + "url": { + "default": "http://127.0.0.1/stub_status", + "title": "URL", + "description": "The URL of the NGINX status page to monitor.", + "type": "string" + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "minimum": 1, + "default": 1, + "type": "integer" + }, + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string" + }, + "password": { + "title": "Password", + "description": "The password for basic 
authentication (if required).", "type": "string" + }, + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy Username", + "description": "The username for proxy authentication (if required).", + "type": "string" + }, + "proxy_password": { + "title": "Proxy Password", + "description": "The password for proxy authentication (if required).", + "type": "string" + }, + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "title": "Not Follow Redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS Certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS Key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + }, + "tls_skip_verify": { + "title": "Skip TLS Verify", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": { + } } diff --git a/modules/nginx/nginx.go b/modules/nginx/nginx.go index 9acf1e72b..98a2fb62d 100644 --- a/modules/nginx/nginx.go +++ b/modules/nginx/nginx.go @@ -4,6 +4,7 @@ package nginx import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -21,33 +22,24 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1/stub_status" - defaultHTTPTimeout = time.Second -) - -// New creates Nginx with default values. func New() *Nginx { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &Nginx{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1/stub_status", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 1), + }, }, - }, - } - - return &Nginx{Config: config} + }} } -// Config is the Nginx module configuration. type Config struct { - web.HTTP `yaml:",inline"` + web.HTTP `yaml:",inline" json:",inline"` } -// Nginx nginx module. type Nginx struct { module.Base Config `yaml:",inline"` @@ -55,40 +47,49 @@ type Nginx struct { apiClient *apiClient } -// Cleanup makes cleanup. -func (Nginx) Cleanup() {} +func (n *Nginx) Configuration() any { + return n.Config +} -// Init makes initialization. -func (n *Nginx) Init() bool { +func (n *Nginx) Init() error { if n.URL == "" { n.Error("URL not set") - return false + return errors.New("url not set") } client, err := web.NewHTTPClient(n.Client) if err != nil { n.Error(err) - return false + return err } n.apiClient = newAPIClient(client, n.Request) n.Debugf("using URL %s", n.URL) - n.Debugf("using timeout: %s", n.Timeout.Duration) + n.Debugf("using timeout: %s", n.Timeout) - return true + return nil } -// Check makes check. 
-func (n *Nginx) Check() bool { return len(n.Collect()) > 0 } +func (n *Nginx) Check() error { + mx, err := n.collect() + if err != nil { + n.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil +} -// Charts creates Charts. -func (Nginx) Charts() *Charts { return charts.Copy() } +func (n *Nginx) Charts() *Charts { + return charts.Copy() +} -// Collect collects metrics. func (n *Nginx) Collect() map[string]int64 { mx, err := n.collect() - if err != nil { n.Error(err) return nil @@ -96,3 +97,9 @@ func (n *Nginx) Collect() map[string]int64 { return mx } + +func (n *Nginx) Cleanup() { + if n.apiClient != nil && n.apiClient.httpClient != nil { + n.apiClient.httpClient.CloseIdleConnections() + } +} diff --git a/modules/nginx/nginx_test.go b/modules/nginx/nginx_test.go index ef115482e..b01884c9e 100644 --- a/modules/nginx/nginx_test.go +++ b/modules/nginx/nginx_test.go @@ -8,7 +8,6 @@ import ( "os" "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -18,20 +17,14 @@ var ( testTengineStatusData, _ = os.ReadFile("testdata/tengine-status.txt") ) -func TestNginx_Cleanup(t *testing.T) { New().Cleanup() } - -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +func TestNginx_Cleanup(t *testing.T) { + New().Cleanup() } func TestNginx_Init(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) } @@ -45,19 +38,21 @@ func TestNginx_Check(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestNginx_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/us" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } -func TestNginx_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } +func TestNginx_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} func TestNginx_Collect(t *testing.T) { ts := httptest.NewServer( @@ -69,8 +64,8 @@ func TestNginx_Collect(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "accepts": 36, @@ -95,8 +90,8 @@ func TestNginx_CollectTengine(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "accepts": 1140, @@ -122,8 +117,8 @@ func TestNginx_InvalidData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestNginx_404(t *testing.T) { @@ -136,6 +131,6 @@ func TestNginx_404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/nginxplus/config_schema.json b/modules/nginxplus/config_schema.json index c1457d2d7..173744c14 100644 --- a/modules/nginxplus/config_schema.json +++ b/modules/nginxplus/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": 
"http://json-schema.org/draft-07/schema#", - "title": "go.d/nginxplus job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nginxplus job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/nginxplus/nginxplus.go b/modules/nginxplus/nginxplus.go index ba82242f8..f56106fee 100644 --- a/modules/nginxplus/nginxplus.go +++ b/modules/nginxplus/nginxplus.go @@ -4,6 +4,7 @@ package nginxplus import ( _ "embed" + "errors" "net/http" "time" @@ -29,7 +30,7 @@ func New() *NginxPlus { URL: "http://127.0.0.1", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 1}, + Timeout: web.Duration(time.Second * 1), }, }, }, @@ -72,24 +73,36 @@ type NginxPlus struct { cache *cache } -func (n *NginxPlus) Init() bool { +func (n *NginxPlus) Configuration() any { + return n.Config +} + +func (n *NginxPlus) Init() error { if n.URL == "" { n.Error("config validation: 'url' can not be empty'") - return false + return errors.New("url not set") } client, err := web.NewHTTPClient(n.Client) if err != nil { n.Errorf("init HTTP client: %v", err) - return false + return err } n.httpClient = client - return true + return nil } -func (n *NginxPlus) Check() bool { - return len(n.Collect()) > 0 +func (n *NginxPlus) Check() error { + mx, err := n.collect() + if err != nil { + n.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (n *NginxPlus) Charts() *module.Charts { diff --git a/modules/nginxplus/nginxplus_test.go b/modules/nginxplus/nginxplus_test.go index 7bbe89557..fbc4d671e 100644 --- a/modules/nginxplus/nginxplus_test.go +++ b/modules/nginxplus/nginxplus_test.go @@ -80,9 +80,9 @@ func TestNginxPlus_Init(t *testing.T) { nginx.Config = test.config if test.wantFail { - assert.False(t, nginx.Init()) + assert.Error(t, nginx.Init()) } else { - assert.True(t, nginx.Init()) + assert.NoError(t, nginx.Init()) } }) } @@ -117,9 +117,9 @@ func TestNginxPlus_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, nginx.Check()) + assert.Error(t, 
nginx.Check()) } else { - assert.True(t, nginx.Check()) + assert.NoError(t, nginx.Check()) } }) } @@ -500,7 +500,7 @@ func caseAPI8AllRequestsOK(t *testing.T) (*NginxPlus, func()) { })) nginx := New() nginx.URL = srv.URL - require.True(t, nginx.Init()) + require.NoError(t, nginx.Init()) return nginx, srv.Close } @@ -542,7 +542,7 @@ func caseAPI8AllRequestsExceptStreamOK(t *testing.T) (*NginxPlus, func()) { })) nginx := New() nginx.URL = srv.URL - require.True(t, nginx.Init()) + require.NoError(t, nginx.Init()) return nginx, srv.Close } @@ -555,7 +555,7 @@ func caseInvalidDataResponse(t *testing.T) (*NginxPlus, func()) { })) nginx := New() nginx.URL = srv.URL - require.True(t, nginx.Init()) + require.NoError(t, nginx.Init()) return nginx, srv.Close } @@ -564,7 +564,7 @@ func caseConnectionRefused(t *testing.T) (*NginxPlus, func()) { t.Helper() nginx := New() nginx.URL = "http://127.0.0.1:65001" - require.True(t, nginx.Init()) + require.NoError(t, nginx.Init()) return nginx, func() {} } diff --git a/modules/nginxvts/config_schema.json b/modules/nginxvts/config_schema.json index a4b44429f..46010be3d 100644 --- a/modules/nginxvts/config_schema.json +++ b/modules/nginxvts/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/nginxvts job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nginxvts job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/nginxvts/init.go b/modules/nginxvts/init.go index 7ebf049ab..59896a8ef 100644 --- a/modules/nginxvts/init.go +++ b/modules/nginxvts/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (vts NginxVTS) validateConfig() error { +func (vts *NginxVTS) validateConfig() error { if vts.URL == "" { return errors.New("URL not set") } @@ -21,11 +21,11 @@ func (vts NginxVTS) validateConfig() error { return nil } -func (vts NginxVTS) initHTTPClient() (*http.Client, error) { +func (vts *NginxVTS) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(vts.Client) } -func (vts 
NginxVTS) initCharts() (*module.Charts, error) { +func (vts *NginxVTS) initCharts() (*module.Charts, error) { charts := module.Charts{} if err := charts.Add(*mainCharts.Copy()...); err != nil { diff --git a/modules/nginxvts/nginxvts.go b/modules/nginxvts/nginxvts.go index 1cc3a6014..e0736ba13 100644 --- a/modules/nginxvts/nginxvts.go +++ b/modules/nginxvts/nginxvts.go @@ -4,6 +4,7 @@ package nginxvts import ( _ "embed" + "errors" "net/http" "time" @@ -32,7 +33,7 @@ func New() *NginxVTS { URL: "http://localhost/status/format/json", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -51,6 +52,10 @@ type NginxVTS struct { charts *module.Charts } +func (vts *NginxVTS) Configuration() any { + return vts.Config +} + func (vts *NginxVTS) Cleanup() { if vts.httpClient == nil { return @@ -58,11 +63,11 @@ func (vts *NginxVTS) Cleanup() { vts.httpClient.CloseIdleConnections() } -func (vts *NginxVTS) Init() bool { +func (vts *NginxVTS) Init() error { err := vts.validateConfig() if err != nil { vts.Errorf("check configuration: %v", err) - return false + return err } httpClient, err := vts.initHTTPClient() @@ -74,15 +79,23 @@ func (vts *NginxVTS) Init() bool { charts, err := vts.initCharts() if err != nil { vts.Errorf("init charts: %v", err) - return false + return err } vts.charts = charts - return true + return nil } -func (vts *NginxVTS) Check() bool { - return len(vts.Collect()) > 0 +func (vts *NginxVTS) Check() error { + mx, err := vts.collect() + if err != nil { + vts.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (vts *NginxVTS) Charts() *module.Charts { diff --git a/modules/nginxvts/nginxvts_test.go b/modules/nginxvts/nginxvts_test.go index ef204ad75..6333b1580 100644 --- a/modules/nginxvts/nginxvts_test.go +++ b/modules/nginxvts/nginxvts_test.go @@ -70,9 +70,9 @@ func TestNginxVTS_Init(t *testing.T) { es.Config = test.config if test.wantFail { - assert.False(t, es.Init()) + assert.Error(t, es.Init()) } else { - assert.True(t, es.Init()) + assert.NoError(t, es.Init()) assert.Equal(t, test.wantNumOfCharts, len(*es.Charts())) } }) @@ -96,9 +96,9 @@ func TestNginxVTS_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, vts.Check()) + assert.Error(t, vts.Check()) } else { - assert.True(t, vts.Check()) + assert.NoError(t, vts.Check()) } }) } @@ -197,7 +197,7 @@ func prepareNginxVTS(t *testing.T, createNginxVTS func() *NginxVTS) (vts *NginxV srv := prepareNginxVTSEndpoint() vts.URL = srv.URL - require.True(t, vts.Init()) + require.NoError(t, vts.Init()) return vts, srv.Close } @@ -214,7 +214,7 @@ func prepareNginxVTSInvalidData(t *testing.T) (*NginxVTS, func()) { })) vts := New() vts.URL = srv.URL - require.True(t, vts.Init()) + require.NoError(t, vts.Init()) return vts, srv.Close } @@ -227,7 +227,7 @@ func prepareNginxVTS404(t *testing.T) (*NginxVTS, func()) { })) vts := New() vts.URL = srv.URL - require.True(t, vts.Init()) + require.NoError(t, vts.Init()) return vts, srv.Close } @@ -236,7 +236,7 @@ func prepareNginxVTSConnectionRefused(t *testing.T) (*NginxVTS, func()) { t.Helper() vts := New() vts.URL = "http://127.0.0.1:18080" - require.True(t, vts.Init()) + require.NoError(t, vts.Init()) return vts, func() {} } diff --git a/modules/ntpd/client.go b/modules/ntpd/client.go index 5164c80e8..8e111cd76 100644 --- a/modules/ntpd/client.go +++ b/modules/ntpd/client.go @@ -10,14 +10,14 @@ import ( ) func newNTPClient(c Config) (ntpConn, error) { 
- conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration) + conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration()) if err != nil { return nil, err } client := &ntpClient{ conn: conn, - timeout: c.Timeout.Duration, + timeout: c.Timeout.Duration(), client: &control.NTPClient{Connection: conn}, } diff --git a/modules/ntpd/config_schema.json b/modules/ntpd/config_schema.json index ef360a7f9..5565305ab 100644 --- a/modules/ntpd/config_schema.json +++ b/modules/ntpd/config_schema.json @@ -1,26 +1,29 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/ntpd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/ntpd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_peers": { + "type": "boolean" + } }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "collect_peers": { - "type": "boolean" - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/ntpd/ntpd.go b/modules/ntpd/ntpd.go index 8bbc0ba4f..83be1c090 100644 --- a/modules/ntpd/ntpd.go +++ b/modules/ntpd/ntpd.go @@ -4,6 +4,8 @@ package ntpd import ( _ "embed" + "errors" + "fmt" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -25,7 +27,7 @@ func New() *NTPd { return &NTPd{ Config: Config{ Address: "127.0.0.1:123", - Timeout: web.Duration{Duration: time.Second * 3}, + Timeout: web.Duration(time.Second * 3), CollectPeers: false, }, charts: systemCharts.Copy(), @@ -65,26 +67,38 @@ type ( } ) -func (n *NTPd) Init() bool { +func (n *NTPd) Configuration() any { + return n.Config +} + +func (n *NTPd) Init() error { if n.Address == "" { n.Error("config validation: 'address' can not be empty") - return false + return errors.New("address not set") } txt := "0.0.0.0 127.0.0.0/8" r, err := iprange.ParseRanges(txt) if err != nil { - n.Errorf("error on parse ip range '%s': %v", txt, err) - return false + n.Errorf("error on parsing ip range '%s': %v", txt, err) + return fmt.Errorf("error on parsing ip range '%s': %v", txt, err) } n.peerIPAddrFilter = r - return true + return nil } -func (n *NTPd) Check() bool { - return len(n.Collect()) > 0 +func (n *NTPd) Check() error { + mx, err := n.collect() + if err != nil { + n.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (n *NTPd) Charts() *module.Charts { diff --git a/modules/ntpd/ntpd_test.go b/modules/ntpd/ntpd_test.go index 481d2d7e9..745e7341d 100644 --- a/modules/ntpd/ntpd_test.go +++ b/modules/ntpd/ntpd_test.go @@ -33,9 +33,9 @@ func TestNTPd_Init(t *testing.T) { n.Config = test.config if test.wantFail { - assert.False(t, n.Init()) + assert.Error(t, n.Init()) } else { - assert.True(t, n.Init()) + assert.NoError(t, n.Init()) } }) } @@ -56,15 +56,15 @@ func TestNTPd_Cleanup(t *testing.T) { }, "after Init": { wantClose: false, - prepare: func(n *NTPd) { n.Init() }, + prepare: func(n *NTPd) { _ = n.Init() }, }, "after Check": { wantClose: true, - prepare: func(n *NTPd) { n.Init(); n.Check() }, + prepare: func(n *NTPd) { _ = n.Init(); _ = n.Check() }, }, "after Collect": { wantClose: true, - prepare: func(n *NTPd) { n.Init(); n.Collect() }, + prepare: func(n *NTPd) { _ = 
n.Init(); n.Collect() }, }, } @@ -116,12 +116,12 @@ func TestNTPd_Check(t *testing.T) { t.Run(name, func(t *testing.T) { n := test.prepare() - require.True(t, n.Init()) + require.NoError(t, n.Init()) if test.wantFail { - assert.False(t, n.Check()) + assert.Error(t, n.Check()) } else { - assert.True(t, n.Check()) + assert.NoError(t, n.Check()) } }) } @@ -237,7 +237,7 @@ func TestNTPd_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { n := test.prepare() - require.True(t, n.Init()) + require.NoError(t, n.Init()) _ = n.Check() mx := n.Collect() diff --git a/modules/nvidia_smi/config_schema.json b/modules/nvidia_smi/config_schema.json index fc5b38e08..cf35054a0 100644 --- a/modules/nvidia_smi/config_schema.json +++ b/modules/nvidia_smi/config_schema.json @@ -1,25 +1,28 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/nvidia_smi job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nvidia_smi job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "binary_path": { + "type": "string" + }, + "use_csv_format": { + "type": "boolean" + } }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "binary_path": { - "type": "string" - }, - "use_csv_format": { - "type": "boolean" - } + "required": [ + "name" + ] }, - "required": [ - "name" - ] + "uiSchema": {} } diff --git a/modules/nvidia_smi/exec.go b/modules/nvidia_smi/exec.go index 93e23057b..c4f1e3f2c 100644 --- a/modules/nvidia_smi/exec.go +++ b/modules/nvidia_smi/exec.go @@ -16,7 +16,7 @@ import ( func newNvidiaSMIExec(path string, cfg Config, log *logger.Logger) (*nvidiaSMIExec, error) { return &nvidiaSMIExec{ binPath: path, - timeout: cfg.Timeout.Duration, + timeout: cfg.Timeout.Duration(), Logger: log, }, nil } diff --git a/modules/nvidia_smi/nvidia_smi.go b/modules/nvidia_smi/nvidia_smi.go index 1370b4335..4ad9cce56 100644 --- a/modules/nvidia_smi/nvidia_smi.go +++ b/modules/nvidia_smi/nvidia_smi.go @@ -4,6 +4,7 @@ package nvidia_smi import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -27,7 +28,7 @@ func init() { func New() *NvidiaSMI { return &NvidiaSMI{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 10}, + Timeout: web.Duration(time.Second * 10), UseCSVFormat: true, }, binName: "nvidia-smi", @@ -66,21 +67,33 @@ type ( } ) -func (nv *NvidiaSMI) Init() bool { +func (nv *NvidiaSMI) Configuration() any { + return nv.Config +} + +func (nv *NvidiaSMI) Init() error { if nv.exec == nil { smi, err := nv.initNvidiaSMIExec() if err != nil { nv.Error(err) - return false + return err } nv.exec = smi } - return true + return nil } -func (nv *NvidiaSMI) Check() bool { - return len(nv.Collect()) > 0 +func (nv *NvidiaSMI) Check() error { + mx, err := nv.collect() + if err != nil { + nv.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (nv *NvidiaSMI) Charts() *module.Charts { diff --git a/modules/nvidia_smi/nvidia_smi_test.go b/modules/nvidia_smi/nvidia_smi_test.go index cdd7742fd..bb83642f6 100644 --- a/modules/nvidia_smi/nvidia_smi_test.go +++ b/modules/nvidia_smi/nvidia_smi_test.go @@ -60,9 +60,9 @@ func TestNvidiaSMI_Init(t *testing.T) { test.prepare(nv) if test.wantFail { - assert.False(t, nv.Init()) + assert.Error(t, nv.Init()) } else { - assert.True(t, 
nv.Init()) + assert.NoError(t, nv.Init()) } }) } @@ -118,9 +118,9 @@ func TestNvidiaSMI_Check(t *testing.T) { test.prepare(nv) if test.wantFail { - assert.False(t, nv.Check()) + assert.Error(t, nv.Check()) } else { - assert.True(t, nv.Check()) + assert.NoError(t, nv.Check()) } }) } diff --git a/modules/nvme/config_schema.json b/modules/nvme/config_schema.json index fcd2869d6..2b79acb5b 100644 --- a/modules/nvme/config_schema.json +++ b/modules/nvme/config_schema.json @@ -1,22 +1,25 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/nvme job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nvme job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "binary_path": { + "type": "string" + } }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "binary_path": { - "type": "string" - } + "required": [ + "name" + ] }, - "required": [ - "name" - ] + "uiSchema": {} } diff --git a/modules/nvme/init.go b/modules/nvme/init.go index 70988031c..44ff90f4e 100644 --- a/modules/nvme/init.go +++ b/modules/nvme/init.go @@ -29,7 +29,7 @@ func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) { n.Debug("using ndsudo") return &nvmeCLIExec{ ndsudoPath: ndsudoPath, - timeout: n.Timeout.Duration, + timeout: n.Timeout.Duration(), }, nil } } @@ -51,14 +51,14 @@ func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) { } if sudoPath != "" { - ctx1, cancel1 := context.WithTimeout(context.Background(), n.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(context.Background(), n.Timeout.Duration()) defer cancel1() if _, err := exec.CommandContext(ctx1, sudoPath, "-n", "-v").Output(); err != nil { return nil, fmt.Errorf("can not run sudo on this host: %v", err) } - ctx2, cancel2 := context.WithTimeout(context.Background(), n.Timeout.Duration) + ctx2, cancel2 := context.WithTimeout(context.Background(), n.Timeout.Duration()) defer cancel2() if _, err := exec.CommandContext(ctx2, sudoPath, "-n", "-l", nvmePath).Output(); err != nil { @@ -69,6 +69,6 @@ func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) { return &nvmeCLIExec{ sudoPath: sudoPath, nvmePath: nvmePath, - timeout: n.Timeout.Duration, + timeout: n.Timeout.Duration(), }, nil } diff --git a/modules/nvme/nvme.go b/modules/nvme/nvme.go index d8f86869a..7d1d59bc2 100644 --- a/modules/nvme/nvme.go +++ b/modules/nvme/nvme.go @@ -4,6 +4,7 @@ package nvme import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -27,7 +28,7 @@ func New() *NVMe { return &NVMe{ Config: Config{ BinaryPath: "nvme", - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, charts: &module.Charts{}, devicePaths: make(map[string]bool), @@ -61,24 +62,36 @@ type ( } ) -func (n *NVMe) Init() bool { +func (n *NVMe) Configuration() any { + return n.Config +} + +func (n *NVMe) Init() error { if err := n.validateConfig(); err != nil { n.Errorf("config validation: %v", err) - return false + return err } v, err := n.initNVMeCLIExec() if err != nil { n.Errorf("init nvme-cli exec: %v", err) - return false + return err } n.exec = v - return true + return nil } -func (n *NVMe) Check() bool { - return len(n.Collect()) > 0 +func (n *NVMe) Check() error { + mx, err := n.collect() + if err != nil { + n.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no 
metrics collected") + } + return nil } func (n *NVMe) Charts() *module.Charts { diff --git a/modules/nvme/nvme_test.go b/modules/nvme/nvme_test.go index 26c55182b..7483f9d1a 100644 --- a/modules/nvme/nvme_test.go +++ b/modules/nvme/nvme_test.go @@ -58,9 +58,9 @@ func TestNVMe_Init(t *testing.T) { test.prepare(nv) if test.wantFail { - assert.False(t, nv.Init()) + assert.Error(t, nv.Init()) } else { - assert.True(t, nv.Init()) + assert.NoError(t, nv.Init()) } }) } @@ -104,9 +104,9 @@ func TestNVMe_Check(t *testing.T) { test.prepare(n) if test.wantFail { - assert.False(t, n.Check()) + assert.Error(t, n.Check()) } else { - assert.True(t, n.Check()) + assert.NoError(t, n.Check()) } }) } diff --git a/modules/openvpn/config_schema.json b/modules/openvpn/config_schema.json index db6442db9..230b5e3f8 100644 --- a/modules/openvpn/config_schema.json +++ b/modules/openvpn/config_schema.json @@ -1,52 +1,55 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/openvpn job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "address": { - "type": "string" - }, - "connect_timeout": { - "type": [ - "string", - "integer" - ] - }, - "read_timeout": { - "type": [ - "string", - "integer" - ] - }, - "write_timeout": { - "type": [ - "string", - "integer" - ] - }, - "per_user_stats": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" - } - }, - "excludes": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/openvpn job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "connect_timeout": { + "type": [ + "string", + "integer" + ] + }, + "read_timeout": { + "type": [ + "string", + "integer" + ] + }, + "write_timeout": { + "type": [ + "string", + "integer" + ] + }, + "per_user_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } } } } - } + }, + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/openvpn/init.go b/modules/openvpn/init.go new file mode 100644 index 000000000..843981e48 --- /dev/null +++ b/modules/openvpn/init.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn + +import ( + "github.com/netdata/go.d.plugin/modules/openvpn/client" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/socket" +) + +func (o *OpenVPN) validateConfig() error { + return nil +} + +func (o *OpenVPN) initPerUserMatcher() (matcher.Matcher, error) { + if o.PerUserStats.Empty() { + return nil, nil + } + return o.PerUserStats.Parse() +} + +func (o *OpenVPN) initClient() *client.Client { + config := socket.Config{ + Address: o.Address, + ConnectTimeout: o.ConnectTimeout.Duration(), + ReadTimeout: o.ReadTimeout.Duration(), + WriteTimeout: o.WriteTimeout.Duration(), + } + return &client.Client{Client: socket.New(config)} +} diff --git a/modules/openvpn/openvpn.go b/modules/openvpn/openvpn.go index 0a6ccbb81..6c161a3bd 100644 --- a/modules/openvpn/openvpn.go +++ b/modules/openvpn/openvpn.go @@ -6,19 +6,11 @@ import ( _ "embed" "time" + "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/modules/openvpn/client" 
"github.com/netdata/go.d.plugin/pkg/matcher" "github.com/netdata/go.d.plugin/pkg/socket" "github.com/netdata/go.d.plugin/pkg/web" - - "github.com/netdata/go.d.plugin/agent/module" -) - -const ( - defaultAddress = "127.0.0.1:7505" - defaultConnectTimeout = time.Second * 2 - defaultReadTimeout = time.Second * 2 - defaultWriteTimeout = time.Second * 2 ) //go:embed "config_schema.json" @@ -37,10 +29,10 @@ func init() { // New creates OpenVPN with default values. func New() *OpenVPN { config := Config{ - Address: defaultAddress, - ConnectTimeout: web.Duration{Duration: defaultConnectTimeout}, - ReadTimeout: web.Duration{Duration: defaultReadTimeout}, - WriteTimeout: web.Duration{Duration: defaultWriteTimeout}, + Address: "127.0.0.1:7505", + ConnectTimeout: web.Duration(time.Second * 2), + ReadTimeout: web.Duration(time.Second * 2), + WriteTimeout: web.Duration(time.Second * 2), } return &OpenVPN{ Config: config, @@ -58,61 +50,55 @@ type Config struct { PerUserStats matcher.SimpleExpr `yaml:"per_user_stats"` } -type openVPNClient interface { - socket.Client - Version() (*client.Version, error) - LoadStats() (*client.LoadStats, error) - Users() (client.Users, error) -} - // OpenVPN OpenVPN module. -type OpenVPN struct { - module.Base - Config `yaml:",inline"` - client openVPNClient - charts *Charts - collectedUsers map[string]bool - perUserMatcher matcher.Matcher -} - -// Cleanup makes cleanup. -func (o *OpenVPN) Cleanup() { - if o.client == nil { - return +type ( + OpenVPN struct { + module.Base + Config `yaml:",inline"` + client openVPNClient + charts *Charts + collectedUsers map[string]bool + perUserMatcher matcher.Matcher } - _ = o.client.Disconnect() + openVPNClient interface { + socket.Client + Version() (*client.Version, error) + LoadStats() (*client.LoadStats, error) + Users() (client.Users, error) + } +) + +func (o *OpenVPN) Configuration() any { + return o.Config } // Init makes initialization. -func (o *OpenVPN) Init() bool { - if !o.PerUserStats.Empty() { - m, err := o.PerUserStats.Parse() - if err != nil { - o.Errorf("error on creating per user stats matcher : %v", err) - return false - } - o.perUserMatcher = matcher.WithCache(m) +func (o *OpenVPN) Init() error { + if err := o.validateConfig(); err != nil { + o.Error(err) + return err } - config := socket.Config{ - Address: o.Address, - ConnectTimeout: o.ConnectTimeout.Duration, - ReadTimeout: o.ReadTimeout.Duration, - WriteTimeout: o.WriteTimeout.Duration, + m, err := o.initPerUserMatcher() + if err != nil { + o.Error(err) + return err } - o.client = &client.Client{Client: socket.New(config)} + o.perUserMatcher = m + + o.client = o.initClient() o.Infof("using address: %s, connect timeout: %s, read timeout: %s, write timeout: %s", - o.Address, o.ConnectTimeout.Duration, o.ReadTimeout.Duration, o.WriteTimeout.Duration) + o.Address, o.ConnectTimeout, o.ReadTimeout, o.WriteTimeout) - return true + return nil } // Check makes check. -func (o *OpenVPN) Check() bool { +func (o *OpenVPN) Check() error { if err := o.client.Connect(); err != nil { o.Error(err) - return false + return err } defer func() { _ = o.client.Disconnect() }() @@ -120,11 +106,12 @@ func (o *OpenVPN) Check() bool { if err != nil { o.Error(err) o.Cleanup() - return false + return err } o.Infof("connected to OpenVPN v%d.%d.%d, Management v%d", ver.Major, ver.Minor, ver.Patch, ver.Management) - return true + + return nil } // Charts creates Charts. @@ -142,3 +129,11 @@ func (o *OpenVPN) Collect() map[string]int64 { } return mx } + +// Cleanup makes cleanup. 
+func (o *OpenVPN) Cleanup() { + if o.client == nil { + return + } + _ = o.client.Disconnect() +} diff --git a/modules/openvpn/openvpn_test.go b/modules/openvpn/openvpn_test.go index 02fa1a602..8981f21aa 100644 --- a/modules/openvpn/openvpn_test.go +++ b/modules/openvpn/openvpn_test.go @@ -5,7 +5,6 @@ package openvpn import ( "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/modules/openvpn/client" "github.com/netdata/go.d.plugin/pkg/matcher" "github.com/netdata/go.d.plugin/pkg/socket" @@ -36,28 +35,16 @@ var ( }} ) -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultAddress, job.Address) - assert.Equal(t, defaultConnectTimeout, job.ConnectTimeout.Duration) - assert.Equal(t, defaultReadTimeout, job.ReadTimeout.Duration) - assert.Equal(t, defaultWriteTimeout, job.WriteTimeout.Duration) - assert.NotNil(t, job.charts) - assert.NotNil(t, job.collectedUsers) -} - func TestOpenVPN_Init(t *testing.T) { - assert.True(t, New().Init()) + assert.NoError(t, New().Init()) } func TestOpenVPN_Check(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.client = prepareMockOpenVPNClient() - require.True(t, job.Check()) + require.NoError(t, job.Check()) } func TestOpenVPN_Charts(t *testing.T) { @@ -68,19 +55,19 @@ func TestOpenVPN_Cleanup(t *testing.T) { job := New() assert.NotPanics(t, job.Cleanup) - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.client = prepareMockOpenVPNClient() - require.True(t, job.Check()) + require.NoError(t, job.Check()) job.Cleanup() } func TestOpenVPN_Collect(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.perUserMatcher = matcher.TRUE() job.client = prepareMockOpenVPNClient() - require.True(t, job.Check()) + require.NoError(t, job.Check()) expected := map[string]int64{ "bytes_in": 1, @@ -99,12 +86,12 @@ func TestOpenVPN_Collect(t *testing.T) { func TestOpenVPN_Collect_UNDEFUsername(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.perUserMatcher = matcher.TRUE() cl := prepareMockOpenVPNClient() cl.users = testUsersUNDEF job.client = cl - require.True(t, job.Check()) + require.NoError(t, job.Check()) expected := map[string]int64{ "bytes_in": 1, diff --git a/modules/openvpn_status_log/config_schema.json b/modules/openvpn_status_log/config_schema.json index 904da56c0..23cdc7858 100644 --- a/modules/openvpn_status_log/config_schema.json +++ b/modules/openvpn_status_log/config_schema.json @@ -1,34 +1,37 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/openvpn_status_log job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "log_path": { - "type": "string" - }, - "per_user_stats": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" - } - }, - "excludes": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/openvpn_status_log job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "log_path": { + "type": "string" + }, + "per_user_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } } } } - } + }, + "required": [ + 
"name", + "log_path" + ] }, - "required": [ - "name", - "log_path" - ] + "uiSchema": {} } diff --git a/modules/openvpn_status_log/init.go b/modules/openvpn_status_log/init.go index 9bd34a510..5e1521e5e 100644 --- a/modules/openvpn_status_log/init.go +++ b/modules/openvpn_status_log/init.go @@ -7,14 +7,14 @@ import ( "github.com/netdata/go.d.plugin/pkg/matcher" ) -func (o OpenVPNStatusLog) validateConfig() error { +func (o *OpenVPNStatusLog) validateConfig() error { if o.LogPath == "" { return errors.New("empty 'log_path'") } return nil } -func (o OpenVPNStatusLog) initPerUserStatsMatcher() (matcher.Matcher, error) { +func (o *OpenVPNStatusLog) initPerUserStatsMatcher() (matcher.Matcher, error) { if o.PerUserStats.Empty() { return nil, nil } diff --git a/modules/openvpn_status_log/openvpn.go b/modules/openvpn_status_log/openvpn.go index dc9e7340b..b44969918 100644 --- a/modules/openvpn_status_log/openvpn.go +++ b/modules/openvpn_status_log/openvpn.go @@ -4,6 +4,7 @@ package openvpn_status_log import ( _ "embed" + "errors" "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/pkg/matcher" @@ -20,11 +21,10 @@ func init() { } func New() *OpenVPNStatusLog { - config := Config{ - LogPath: "/var/log/openvpn/status.log", - } return &OpenVPNStatusLog{ - Config: config, + Config: Config{ + LogPath: "/var/log/openvpn/status.log", + }, charts: charts.Copy(), collectedUsers: make(map[string]bool), } @@ -42,34 +42,46 @@ type OpenVPNStatusLog struct { charts *module.Charts - collectedUsers map[string]bool perUserMatcher matcher.Matcher + + collectedUsers map[string]bool } -func (o *OpenVPNStatusLog) Init() bool { +func (o *OpenVPNStatusLog) Configuration() any { + return o.Config +} + +func (o *OpenVPNStatusLog) Init() error { if err := o.validateConfig(); err != nil { o.Errorf("error on validating config: %v", err) - return false + return err } m, err := o.initPerUserStatsMatcher() if err != nil { o.Errorf("error on creating 'per_user_stats' matcher: %v", err) - return false + return err } - if m != nil { o.perUserMatcher = m } - return true + return nil } -func (o *OpenVPNStatusLog) Check() bool { - return len(o.Collect()) > 0 +func (o *OpenVPNStatusLog) Check() error { + mx, err := o.collect() + if err != nil { + o.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } -func (o OpenVPNStatusLog) Charts() *module.Charts { +func (o *OpenVPNStatusLog) Charts() *module.Charts { return o.charts } diff --git a/modules/openvpn_status_log/openvpn_test.go b/modules/openvpn_status_log/openvpn_test.go index d54d27824..f6db50e4a 100644 --- a/modules/openvpn_status_log/openvpn_test.go +++ b/modules/openvpn_status_log/openvpn_test.go @@ -49,9 +49,9 @@ func TestOpenVPNStatusLog_Init(t *testing.T) { ovpn.Config = test.config if test.wantFail { - assert.False(t, ovpn.Init()) + assert.Error(t, ovpn.Init()) } else { - assert.True(t, ovpn.Init()) + assert.NoError(t, ovpn.Init()) } }) } @@ -76,12 +76,12 @@ func TestOpenVPNStatusLog_Check(t *testing.T) { t.Run(name, func(t *testing.T) { ovpn := test.prepare() - require.True(t, ovpn.Init()) + require.NoError(t, ovpn.Init()) if test.wantFail { - assert.False(t, ovpn.Check()) + assert.Error(t, ovpn.Check()) } else { - assert.True(t, ovpn.Check()) + assert.NoError(t, ovpn.Check()) } }) } @@ -114,7 +114,7 @@ func TestOpenVPNStatusLog_Charts(t *testing.T) { t.Run(name, func(t *testing.T) { ovpn := test.prepare() - require.True(t, ovpn.Init()) + require.NoError(t, ovpn.Init()) _ = ovpn.Check() _ = 
ovpn.Collect() @@ -240,7 +240,7 @@ func TestOpenVPNStatusLog_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { ovpn := test.prepare() - require.True(t, ovpn.Init()) + require.NoError(t, ovpn.Init()) _ = ovpn.Check() collected := ovpn.Collect() diff --git a/modules/pgbouncer/collect.go b/modules/pgbouncer/collect.go index 40dbddb9f..c0e4bf2da 100644 --- a/modules/pgbouncer/collect.go +++ b/modules/pgbouncer/collect.go @@ -236,7 +236,7 @@ func (p *PgBouncer) queryVersion() (*semver.Version, error) { p.Debugf("executing query: %v", q) var resp string - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() if err := p.db.QueryRowContext(ctx, q).Scan(&resp); err != nil { return nil, err @@ -281,7 +281,7 @@ func (p *PgBouncer) openConnection() error { } func (p *PgBouncer) collectQuery(query string, assign func(column, value string)) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() rows, err := p.db.QueryContext(ctx, query) if err != nil { diff --git a/modules/pgbouncer/config_schema.json b/modules/pgbouncer/config_schema.json index 16cf22ecb..43442e6c2 100644 --- a/modules/pgbouncer/config_schema.json +++ b/modules/pgbouncer/config_schema.json @@ -1,23 +1,26 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/pgbouncer job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/pgbouncer job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "dsn": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "dsn" + ] }, - "required": [ - "name", - "dsn" - ] + "uiSchema": {} } diff --git a/modules/pgbouncer/pgbouncer.go b/modules/pgbouncer/pgbouncer.go index ebb11327b..a19f8f074 100644 --- a/modules/pgbouncer/pgbouncer.go +++ b/modules/pgbouncer/pgbouncer.go @@ -5,6 +5,7 @@ package pgbouncer import ( "database/sql" _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -27,7 +28,7 @@ func init() { func New() *PgBouncer { return &PgBouncer{ Config: Config{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), DSN: "postgres://postgres:postgres@127.0.0.1:6432/pgbouncer", }, charts: globalCharts.Copy(), @@ -59,18 +60,30 @@ type PgBouncer struct { metrics *metrics } -func (p *PgBouncer) Init() bool { +func (p *PgBouncer) Configuration() any { + return p.Config +} + +func (p *PgBouncer) Init() error { err := p.validateConfig() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } - return true + return nil } -func (p *PgBouncer) Check() bool { - return len(p.Collect()) > 0 +func (p *PgBouncer) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *PgBouncer) Charts() *module.Charts { diff --git a/modules/pgbouncer/pgbouncer_test.go b/modules/pgbouncer/pgbouncer_test.go index e1e0695dd..d6b69abb7 100644 --- a/modules/pgbouncer/pgbouncer_test.go +++ b/modules/pgbouncer/pgbouncer_test.go @@ 
-60,9 +60,9 @@ func TestPgBouncer_Init(t *testing.T) { p.Config = test.config if test.wantFail { - assert.False(t, p.Init()) + assert.Error(t, p.Init()) } else { - assert.True(t, p.Init()) + assert.NoError(t, p.Init()) } }) } @@ -118,14 +118,14 @@ func TestPgBouncer_Check(t *testing.T) { p.db = db defer func() { _ = db.Close() }() - require.True(t, p.Init()) + require.NoError(t, p.Init()) test.prepareMock(t, mock) if test.wantFail { - assert.False(t, p.Check()) + assert.Error(t, p.Check()) } else { - assert.True(t, p.Check()) + assert.NoError(t, p.Check()) } assert.NoError(t, mock.ExpectationsWereMet()) }) @@ -283,7 +283,7 @@ func TestPgBouncer_Collect(t *testing.T) { p.db = db defer func() { _ = db.Close() }() - require.True(t, p.Init()) + require.NoError(t, p.Init()) for i, step := range test { t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { diff --git a/modules/phpdaemon/config_schema.json b/modules/phpdaemon/config_schema.json index c200d437b..511f17429 100644 --- a/modules/phpdaemon/config_schema.json +++ b/modules/phpdaemon/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/phpdaemon job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/phpdaemon job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/phpdaemon/init.go b/modules/phpdaemon/init.go new file mode 100644 index 000000000..d96b23011 --- /dev/null +++ b/modules/phpdaemon/init.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpdaemon + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (p *PHPDaemon) validateConfig() error { + if p.URL == "" { + return errors.New("url not set") + } + if _, err := web.NewHTTPRequest(p.Request); err != nil { + return err + } + return nil +} + +func (p *PHPDaemon) initClient() (*client, error) { + httpClient, err := web.NewHTTPClient(p.Client) + if err != nil { + return nil, err + } + return newAPIClient(httpClient, p.Request), nil +} diff --git a/modules/phpdaemon/phpdaemon.go 
b/modules/phpdaemon/phpdaemon.go index 506892cfe..f02bb13ea 100644 --- a/modules/phpdaemon/phpdaemon.go +++ b/modules/phpdaemon/phpdaemon.go @@ -4,6 +4,7 @@ package phpdaemon import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -21,26 +22,19 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1:8509/FullStatus" - defaultHTTPTimeout = time.Second * 2 -) - // New creates PHPDaemon with default values. func New() *PHPDaemon { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &PHPDaemon{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8509/FullStatus", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, }, }, - } - - return &PHPDaemon{ - Config: config, charts: charts.Copy(), } } @@ -59,48 +53,50 @@ type PHPDaemon struct { charts *Charts } -// Cleanup makes cleanup. -func (PHPDaemon) Cleanup() {} +func (p *PHPDaemon) Configuration() any { + return p.Config +} // Init makes initialization. -func (p *PHPDaemon) Init() bool { - httpClient, err := web.NewHTTPClient(p.Client) - if err != nil { - p.Errorf("error on creating http client : %v", err) - return false +func (p *PHPDaemon) Init() error { + if err := p.validateConfig(); err != nil { + p.Error(err) + return err } - _, err = web.NewHTTPRequest(p.Request) + c, err := p.initClient() if err != nil { - p.Errorf("error on creating http request to %s : %v", p.URL, err) - return false + p.Error(err) + return err } - - p.client = newAPIClient(httpClient, p.Request) + p.client = c p.Debugf("using URL %s", p.URL) - p.Debugf("using timeout: %s", p.Timeout.Duration) + p.Debugf("using timeout: %s", p.Timeout) - return true + return nil } // Check makes check. -func (p *PHPDaemon) Check() bool { - mx := p.Collect() - +func (p *PHPDaemon) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } if len(mx) == 0 { - return false + return errors.New("no metrics collected") } + if _, ok := mx["uptime"]; ok { - // TODO: remove panic - panicIf(p.charts.Add(uptimeChart.Copy())) + _ = p.charts.Add(uptimeChart.Copy()) } - return true + return nil } // Charts creates Charts. -func (p PHPDaemon) Charts() *Charts { return p.charts } +func (p *PHPDaemon) Charts() *Charts { return p.charts } // Collect collects metrics. func (p *PHPDaemon) Collect() map[string]int64 { @@ -114,9 +110,9 @@ func (p *PHPDaemon) Collect() map[string]int64 { return mx } -func panicIf(err error) { - if err == nil { - return +// Cleanup makes cleanup. 
+func (p *PHPDaemon) Cleanup() { + if p.client != nil && p.client.httpClient != nil { + p.client.httpClient.CloseIdleConnections() } - panic(err) } diff --git a/modules/phpdaemon/phpdaemon_test.go b/modules/phpdaemon/phpdaemon_test.go index 0634e6ec4..aea27e54f 100644 --- a/modules/phpdaemon/phpdaemon_test.go +++ b/modules/phpdaemon/phpdaemon_test.go @@ -8,7 +8,6 @@ import ( "os" "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -23,18 +22,10 @@ func Test_testData(t *testing.T) { assert.NotEmpty(t, testFullStatusData) } -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestPHPDaemon_Init(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.client) } @@ -48,15 +39,15 @@ func TestPHPDaemon_Check(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestPHPDaemon_CheckNG(t *testing.T) { job := New() job.URL = testURL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestPHPDaemon_Charts(t *testing.T) { @@ -73,8 +64,8 @@ func TestPHPDaemon_Charts(t *testing.T) { defer ts.Close() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) assert.True(t, job.charts.Has(uptimeChart.ID)) } @@ -92,8 +83,8 @@ func TestPHPDaemon_Collect(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) expected := map[string]int64{ "alive": 350, @@ -121,8 +112,8 @@ func TestPHPDaemon_InvalidData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestPHPDaemon_404(t *testing.T) { @@ -135,6 +126,6 @@ func TestPHPDaemon_404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/phpfpm/config_schema.json b/modules/phpfpm/config_schema.json index a6b0140f3..cdf5aaad6 100644 --- a/modules/phpfpm/config_schema.json +++ b/modules/phpfpm/config_schema.json @@ -1,84 +1,87 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/phpfpm job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "socket": { - "type": "string" - }, - "address": { - "type": "string" - }, - "fcgi_path": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/phpfpm job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + 
}, + "socket": { + "type": "string" + }, + "address": { + "type": "string" + }, + "fcgi_path": { "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "oneOf": [ + { + "required": [ + "name", + "url" + ] + }, + { + "required": [ + "name", + "socket" + ] + }, + { + "required": [ + "name", + "address" + ] + } + ] }, - "oneOf": [ - { - "required": [ - "name", - "url" - ] - }, - { - "required": [ - "name", - "socket" - ] - }, - { - "required": [ - "name", - "address" - ] - } - ] + "uiSchema": {} } diff --git a/modules/phpfpm/init.go b/modules/phpfpm/init.go index 0e764cbe0..5a6694634 100644 --- a/modules/phpfpm/init.go +++ b/modules/phpfpm/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (p Phpfpm) initClient() (client, error) { +func (p *Phpfpm) initClient() (client, error) { if p.Socket != "" { return p.initSocketClient() } @@ -20,32 +20,38 @@ func (p Phpfpm) initClient() (client, error) { if p.URL != "" { return p.initHTTPClient() } + return nil, errors.New("neither 'socket' nor 'url' set") } -func (p Phpfpm) initHTTPClient() (*httpClient, error) { +func (p *Phpfpm) initHTTPClient() (*httpClient, error) { c, err := web.NewHTTPClient(p.Client) if err != nil { return nil, fmt.Errorf("create HTTP client: %v", err) } + p.Debugf("using HTTP client, URL: %s", p.URL) - p.Debugf("using timeout: %s", p.Timeout.Duration) + p.Debugf("using timeout: %s", p.Timeout) + return newHTTPClient(c, p.Request) } -func (p Phpfpm) initSocketClient() (*socketClient, error) { +func (p *Phpfpm) initSocketClient() (*socketClient, error) { if _, err := os.Stat(p.Socket); err != nil { return nil, fmt.Errorf("the socket '%s' does not exist: %v", p.Socket, err) } + p.Debugf("using socket client: %s", p.Socket) - p.Debugf("using timeout: %s", p.Timeout.Duration) + p.Debugf("using timeout: %s", p.Timeout) p.Debugf("using fcgi path: %s", p.FcgiPath) - return newSocketClient(p.Socket, p.Timeout.Duration, p.FcgiPath), nil + + return newSocketClient(p.Socket, p.Timeout.Duration(), p.FcgiPath), nil } -func (p Phpfpm) initTcpClient() (*tcpClient, error) { +func (p *Phpfpm) initTcpClient() (*tcpClient, error) { p.Debugf("using tcp client: %s", p.Address) - p.Debugf("using timeout: %s", p.Timeout.Duration) + p.Debugf("using timeout: %s", p.Timeout) p.Debugf("using fcgi path: %s", p.FcgiPath) - return newTcpClient(p.Address, p.Timeout.Duration, p.FcgiPath), nil + + return newTcpClient(p.Address, p.Timeout.Duration(), p.FcgiPath), nil } diff --git a/modules/phpfpm/phpfpm.go b/modules/phpfpm/phpfpm.go index a61827929..79eee44f7 100644 --- a/modules/phpfpm/phpfpm.go +++ b/modules/phpfpm/phpfpm.go @@ -4,6 +4,7 @@ package phpfpm import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -29,7 +30,7 @@ func New() 
*Phpfpm { URL: "http://127.0.0.1/status?full&json", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, FcgiPath: "/status", @@ -37,36 +38,48 @@ func New() *Phpfpm { } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - Socket string `yaml:"socket"` - Address string `yaml:"address"` - FcgiPath string `yaml:"fcgi_path"` - } - Phpfpm struct { - module.Base - Config `yaml:",inline"` +type Config struct { + web.HTTP `yaml:",inline"` + Socket string `yaml:"socket"` + Address string `yaml:"address"` + FcgiPath string `yaml:"fcgi_path"` +} - client client - } -) +type Phpfpm struct { + module.Base + Config `yaml:",inline"` + + client client +} + +func (p *Phpfpm) Configuration() any { + return p.Config +} -func (p *Phpfpm) Init() bool { +func (p *Phpfpm) Init() error { c, err := p.initClient() if err != nil { p.Errorf("init client: %v", err) - return false + return err } p.client = c - return true + + return nil } -func (p *Phpfpm) Check() bool { - return len(p.Collect()) > 0 +func (p *Phpfpm) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } -func (Phpfpm) Charts() *Charts { +func (p *Phpfpm) Charts() *Charts { return charts.Copy() } @@ -82,4 +95,4 @@ func (p *Phpfpm) Collect() map[string]int64 { return mx } -func (Phpfpm) Cleanup() {} +func (p *Phpfpm) Cleanup() {} diff --git a/modules/phpfpm/phpfpm_test.go b/modules/phpfpm/phpfpm_test.go index 5b9ecd236..4f6cb212e 100644 --- a/modules/phpfpm/phpfpm_test.go +++ b/modules/phpfpm/phpfpm_test.go @@ -38,9 +38,7 @@ func TestNew(t *testing.T) { func TestPhpfpm_Init(t *testing.T) { job := New() - got := job.Init() - - require.True(t, got) + require.NoError(t, job.Init()) assert.NotNil(t, job.client) } @@ -54,30 +52,23 @@ func TestPhpfpm_Check(t *testing.T) { job := New() job.URL = ts.URL - job.Init() - require.True(t, job.Init()) - - got := job.Check() + require.NoError(t, job.Init()) - assert.True(t, got) + assert.NoError(t, job.Check()) } func TestPhpfpm_CheckReturnsFalseOnFailure(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/us" - require.True(t, job.Init()) - - got := job.Check() + require.NoError(t, job.Init()) - assert.False(t, got) + assert.Error(t, job.Check()) } func TestPhpfpm_Charts(t *testing.T) { job := New() - got := job.Charts() - - assert.NotNil(t, got) + assert.NotNil(t, job.Charts()) } func TestPhpfpm_CollectJSON(t *testing.T) { @@ -90,7 +81,7 @@ func TestPhpfpm_CollectJSON(t *testing.T) { job := New() job.URL = ts.URL + "/?json" - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -115,7 +106,7 @@ func TestPhpfpm_CollectJSONFull(t *testing.T) { job := New() job.URL = ts.URL + "/?json" - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -149,7 +140,7 @@ func TestPhpfpm_CollectNoIdleProcessesJSONFull(t *testing.T) { job := New() job.URL = ts.URL + "/?json" - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -174,7 +165,7 @@ func TestPhpfpm_CollectText(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -199,7 +190,7 @@ func TestPhpfpm_CollectTextFull(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -233,11 +224,9 @@ func 
TestPhpfpm_CollectReturnsNothingWhenInvalidData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - - got := job.Collect() + require.NoError(t, job.Init()) - assert.Len(t, got, 0) + assert.Len(t, job.Collect(), 0) } func TestPhpfpm_CollectReturnsNothingWhenEmptyData(t *testing.T) { @@ -250,11 +239,9 @@ func TestPhpfpm_CollectReturnsNothingWhenEmptyData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - - got := job.Collect() + require.NoError(t, job.Init()) - assert.Len(t, got, 0) + assert.Len(t, job.Collect(), 0) } func TestPhpfpm_CollectReturnsNothingWhenBadStatusCode(t *testing.T) { @@ -267,11 +254,9 @@ func TestPhpfpm_CollectReturnsNothingWhenBadStatusCode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - - got := job.Collect() + require.NoError(t, job.Init()) - assert.Len(t, got, 0) + assert.Len(t, job.Collect(), 0) } func TestPhpfpm_Cleanup(t *testing.T) { diff --git a/modules/pihole/config_schema.json b/modules/pihole/config_schema.json index e4c13fa10..ba9c3dd55 100644 --- a/modules/pihole/config_schema.json +++ b/modules/pihole/config_schema.json @@ -1,62 +1,65 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/pihole job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "setup_vars_path": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/pihole job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "setup_vars_path": { + "type": "string" + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/pihole/pihole.go b/modules/pihole/pihole.go index 6aba5cad0..0f2ce7460 100644 --- a/modules/pihole/pihole.go +++ b/modules/pihole/pihole.go @@ -4,6 +4,7 @@ package pihole import ( _ "embed" + "errors" "net/http" "sync" "time" @@ -34,7 +35,8 @@ func New() *Pihole { URL: "http://127.0.0.1", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}}, + Timeout: web.Duration(time.Second * 5), + }, }, SetupVarsPath: "/etc/pihole/setupVars.conf", }, @@ -62,16 +64,20 @@ type Pihole struct { checkVersion bool } -func (p *Pihole) Init() bool { +func (p 
*Pihole) Configuration() any { + return p.Config +} + +func (p *Pihole) Init() error { if err := p.validateConfig(); err != nil { p.Errorf("config validation: %v", err) - return false + return err } httpClient, err := p.initHTTPClient() if err != nil { p.Errorf("init http client: %v", err) - return false + return err } p.httpClient = httpClient @@ -82,11 +88,19 @@ func (p *Pihole) Init() bool { p.Debugf("web password: %s", p.Password) } - return true + return nil } -func (p *Pihole) Check() bool { - return len(p.Collect()) > 0 +func (p *Pihole) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *Pihole) Charts() *module.Charts { diff --git a/modules/pihole/pihole_test.go b/modules/pihole/pihole_test.go index 08ad244a7..65168083e 100644 --- a/modules/pihole/pihole_test.go +++ b/modules/pihole/pihole_test.go @@ -52,9 +52,9 @@ func TestPihole_Init(t *testing.T) { p.Config = test.config if test.wantFail { - assert.False(t, p.Init()) + assert.Error(t, p.Init()) } else { - assert.True(t, p.Init()) + assert.NoError(t, p.Init()) } }) } @@ -85,9 +85,9 @@ func TestPihole_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, p.Check()) + assert.Error(t, p.Check()) } else { - assert.True(t, p.Check()) + assert.NoError(t, p.Check()) } }) } @@ -164,7 +164,7 @@ func caseSuccessWithWebPassword(t *testing.T) (*Pihole, func()) { p.SetupVarsPath = pathSetupVarsOK p.URL = srv.URL - require.True(t, p.Init()) + require.NoError(t, p.Init()) return p, srv.Close } @@ -175,7 +175,7 @@ func caseFailNoWebPassword(t *testing.T) (*Pihole, func()) { p.SetupVarsPath = pathSetupVarsWrong p.URL = srv.URL - require.True(t, p.Init()) + require.NoError(t, p.Init()) return p, srv.Close } @@ -186,7 +186,7 @@ func caseFailUnsupportedVersion(t *testing.T) (*Pihole, func()) { p.SetupVarsPath = pathSetupVarsOK p.URL = srv.URL - require.True(t, p.Init()) + require.NoError(t, p.Init()) return p, srv.Close } diff --git a/modules/pika/config_schema.json b/modules/pika/config_schema.json index d284faaa1..d495c11a9 100644 --- a/modules/pika/config_schema.json +++ b/modules/pika/config_schema.json @@ -1,35 +1,38 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "title": "go.d/pika job configuration schema.", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/pika job configuration schema.", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "tls_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/pika/init.go b/modules/pika/init.go index 2ad3ae8ec..5d9e34451 100644 --- a/modules/pika/init.go +++ b/modules/pika/init.go @@ -35,9 +35,9 @@ func (p Pika) initRedisClient() (*redis.Client, error) { opts.PoolSize = 1 opts.TLSConfig = tlsConfig - opts.DialTimeout = 
p.Timeout.Duration - opts.ReadTimeout = p.Timeout.Duration - opts.WriteTimeout = p.Timeout.Duration + opts.DialTimeout = p.Timeout.Duration() + opts.ReadTimeout = p.Timeout.Duration() + opts.WriteTimeout = p.Timeout.Duration() return redis.NewClient(opts), nil } diff --git a/modules/pika/pika.go b/modules/pika/pika.go index a14a44113..2121850cd 100644 --- a/modules/pika/pika.go +++ b/modules/pika/pika.go @@ -5,6 +5,7 @@ package pika import ( "context" _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,7 +30,7 @@ func New() *Pika { return &Pika{ Config: Config{ Address: "redis://@localhost:9221", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, collectedCommands: make(map[string]bool), @@ -64,32 +65,44 @@ type ( } ) -func (p *Pika) Init() bool { +func (p *Pika) Configuration() any { + return p.Config +} + +func (p *Pika) Init() error { err := p.validateConfig() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } pdb, err := p.initRedisClient() if err != nil { p.Errorf("init redis client: %v", err) - return false + return err } p.pdb = pdb charts, err := p.initCharts() if err != nil { p.Errorf("init charts: %v", err) - return false + return err } p.charts = charts - return true + return nil } -func (p *Pika) Check() bool { - return len(p.Collect()) > 0 +func (p *Pika) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *Pika) Charts() *module.Charts { diff --git a/modules/pika/pika_test.go b/modules/pika/pika_test.go index a564a54ce..df883a64d 100644 --- a/modules/pika/pika_test.go +++ b/modules/pika/pika_test.go @@ -64,9 +64,9 @@ func TestPika_Init(t *testing.T) { pika.Config = test.config if test.wantFail { - assert.False(t, pika.Init()) + assert.Error(t, pika.Init()) } else { - assert.True(t, pika.Init()) + assert.NoError(t, pika.Init()) } }) } @@ -95,9 +95,9 @@ func TestPika_Check(t *testing.T) { pika := test.prepare(t) if test.wantFail { - assert.False(t, pika.Check()) + assert.Error(t, pika.Check()) } else { - assert.True(t, pika.Check()) + assert.NoError(t, pika.Check()) } }) } @@ -105,7 +105,7 @@ func TestPika_Check(t *testing.T) { func TestPika_Charts(t *testing.T) { pika := New() - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) assert.NotNil(t, pika.Charts()) } @@ -114,7 +114,7 @@ func TestPika_Cleanup(t *testing.T) { pika := New() assert.NotPanics(t, pika.Cleanup) - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) m := &mockRedisClient{} pika.pdb = m @@ -195,7 +195,7 @@ func TestPika_Collect(t *testing.T) { func preparePikaV340(t *testing.T) *Pika { pika := New() - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) pika.pdb = &mockRedisClient{ result: v340InfoAll, } @@ -204,7 +204,7 @@ func preparePikaV340(t *testing.T) *Pika { func preparePikaErrorOnInfo(t *testing.T) *Pika { pika := New() - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) pika.pdb = &mockRedisClient{ errOnInfo: true, } @@ -213,7 +213,7 @@ func preparePikaErrorOnInfo(t *testing.T) *Pika { func preparePikaWithRedisMetrics(t *testing.T) *Pika { pika := New() - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) pika.pdb = &mockRedisClient{ result: redisInfoAll, } diff --git a/modules/ping/config_schema.json b/modules/ping/config_schema.json index fe3779bf4..e0d126cfa 100644 --- a/modules/ping/config_schema.json +++ 
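Aside: the timeout changes here (web.Duration{Duration: time.Second} becoming web.Duration(time.Second), and the .Duration field becoming a .Duration() call) imply web.Duration is now a defined type rather than a struct. One definition consistent with both call sites is sketched below; this is not the pkg/web source, and the real type presumably also implements YAML/JSON unmarshalling for values like "1s", which is omitted.

    package sketch

    import "time"

    // Duration sketches the reworked config duration type, assumed to sit directly on time.Duration.
    type Duration time.Duration

    // Duration returns the underlying value, matching call sites such as p.Timeout.Duration().
    func (d Duration) Duration() time.Duration { return time.Duration(d) }

    // String keeps log lines like Debugf("using timeout: %s", d) readable.
    func (d Duration) String() string { return d.Duration().String() }

With this shape, web.Duration(time.Second) in the constructors is an ordinary type conversion, which is why the old composite-literal form had to go.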
b/modules/ping/config_schema.json @@ -1,47 +1,50 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "title": "go.d/ping job configuration schema.", - "properties": { - "name": { - "type": "string" - }, - "update_every": { - "type": "integer", - "minimum": 1 - }, - "hosts": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/ping job configuration schema.", + "properties": { + "update_every": { + "type": "integer", + "default": 1, + "minimum": 1 }, - "minItems": 1 - }, - "network": { - "type": "string", - "enum": [ - "ip", - "ip4", - "ip6" - ] - }, - "privileged": { - "type": "boolean" - }, - "sendPackets": { - "type": "integer", - "minimum": 1 - }, - "interval": { - "type": "integer", - "minimum": 1 + "hosts": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "network": { + "type": "string", + "default": "ip", + "enum": [ + "ip", + "ip4", + "ip6" + ] + }, + "privileged": { + "default": true, + "type": "boolean" + }, + "sendPackets": { + "type": "integer", + "default": 5, + "minimum": 1 + }, + "interval": { + "type": "string", + "default": "1s" + }, + "interface": { + "type": "string" + } }, - "interface": { - "type": "string" - } + "required": [ + "hosts" + ] }, - "required": [ - "name", - "hosts" - ] + "uiSchema": {} } diff --git a/modules/ping/init.go b/modules/ping/init.go index e71aa6c75..62d78c8e6 100644 --- a/modules/ping/init.go +++ b/modules/ping/init.go @@ -31,7 +31,7 @@ func (p *Ping) initProber() (prober, error) { privileged: p.Privileged, packets: p.SendPackets, iface: p.Interface, - interval: p.Interval.Duration, + interval: p.Interval.Duration(), deadline: deadline, } diff --git a/modules/ping/ping.go b/modules/ping/ping.go index 7aa402985..4ef51ce78 100644 --- a/modules/ping/ping.go +++ b/modules/ping/ping.go @@ -4,6 +4,7 @@ package ping import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -32,7 +33,7 @@ func New() *Ping { Network: "ip", Privileged: true, SendPackets: 5, - Interval: web.Duration{Duration: time.Millisecond * 100}, + Interval: web.Duration(time.Millisecond * 100), }, charts: &module.Charts{}, @@ -42,13 +43,13 @@ func New() *Ping { } type Config struct { - UpdateEvery int `yaml:"update_every"` - Hosts []string `yaml:"hosts"` - Network string `yaml:"network"` - Privileged bool `yaml:"privileged"` - SendPackets int `yaml:"packets"` - Interval web.Duration `yaml:"interval"` - Interface string `yaml:"interface"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + Hosts []string `yaml:"hosts" json:"hosts"` + Network string `yaml:"network" json:"network"` + Privileged bool `yaml:"privileged" json:"privileged"` + SendPackets int `yaml:"packets" json:"packets"` + Interval web.Duration `yaml:"interval" json:"interval"` + Interface string `yaml:"interface" json:"interface"` } type ( @@ -68,25 +69,37 @@ type ( } ) -func (p *Ping) Init() bool { +func (p *Ping) Configuration() any { + return p.Config +} + +func (p *Ping) Init() error { err := p.validateConfig() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } pr, err := p.initProber() if err != nil { p.Errorf("init prober: %v", err) - return false + return err } p.prober = pr - return true + return nil } -func (p *Ping) Check() bool { - return len(p.Collect()) > 0 +func (p *Ping) Check() error { + mx, err := p.collect() + if err != nil { + return err + } + if len(mx) == 0 { 
+ return errors.New("no metrics collected") + + } + return nil } func (p *Ping) Charts() *module.Charts { diff --git a/modules/ping/ping_test.go b/modules/ping/ping_test.go index 57958d557..c68b61f70 100644 --- a/modules/ping/ping_test.go +++ b/modules/ping/ping_test.go @@ -39,9 +39,9 @@ func TestPing_Init(t *testing.T) { ping.UpdateEvery = 1 if test.wantFail { - assert.False(t, ping.Init()) + assert.Error(t, ping.Init()) } else { - assert.True(t, ping.Init()) + assert.NoError(t, ping.Init()) } }) } @@ -75,9 +75,9 @@ func TestPing_Check(t *testing.T) { ping := test.prepare(t) if test.wantFail { - assert.False(t, ping.Check()) + assert.Error(t, ping.Check()) } else { - assert.True(t, ping.Check()) + assert.NoError(t, ping.Check()) } }) } @@ -145,7 +145,7 @@ func casePingSuccess(t *testing.T) *Ping { ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober { return &mockProber{} } - require.True(t, ping.Init()) + require.NoError(t, ping.Init()) return ping } @@ -156,7 +156,7 @@ func casePingError(t *testing.T) *Ping { ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober { return &mockProber{errOnPing: true} } - require.True(t, ping.Init()) + require.NoError(t, ping.Init()) return ping } diff --git a/modules/portcheck/collect.go b/modules/portcheck/collect.go index 723c105c3..dab45ec41 100644 --- a/modules/portcheck/collect.go +++ b/modules/portcheck/collect.go @@ -41,7 +41,7 @@ func (pc *PortCheck) collect() (map[string]int64, error) { func (pc *PortCheck) checkPort(p *port) { start := time.Now() - conn, err := pc.dial("tcp", fmt.Sprintf("%s:%d", pc.Host, p.number), pc.Timeout.Duration) + conn, err := pc.dial("tcp", fmt.Sprintf("%s:%d", pc.Host, p.number), pc.Timeout.Duration()) dur := time.Since(start) defer func() { diff --git a/modules/portcheck/config_schema.json b/modules/portcheck/config_schema.json index 8b9515702..2d50cd6eb 100644 --- a/modules/portcheck/config_schema.json +++ b/modules/portcheck/config_schema.json @@ -1,37 +1,40 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/portcheck job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string", - "minLength": 1 - }, - "host": { - "type": "string", - "minLength": 1 - }, - "ports": { - "type": "array", - "items": { - "type": "integer", - "minimum": 1 + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/portcheck job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string", + "minLength": 1 + }, + "host": { + "type": "string", + "minLength": 1 + }, + "ports": { + "type": "array", + "items": { + "type": "integer", + "minimum": 1 + }, + "minItems": 1 }, - "minItems": 1 + "timeout": { + "type": [ + "string", + "integer" + ], + "minLength": 1, + "minimum": 1, + "description": "The timeout duration, in seconds. Must be at least 1." + } }, - "timeout": { - "type": [ - "string", - "integer" - ], - "minLength": 1, - "minimum": 1, - "description": "The timeout duration, in seconds. Must be at least 1." 
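Aside: checkPort above dials through pc.dial with the configured timeout, and the tests further down swap that dialer out (job.dial = testDial(nil)). A hypothetical stub matching portcheck's dialFunc signature could look like this; the names and the use of net.Pipe are my own choices for illustration, not the module's actual test helper.

    package sketch

    import (
        "net"
        "time"
    )

    // testDial builds a stand-in for portcheck's dialFunc
    // (func(network, address string, timeout time.Duration) (net.Conn, error)).
    // A non-nil failWith makes every port look closed; otherwise an in-memory
    // connection is returned so the port counts as open.
    func testDial(failWith error) func(network, address string, timeout time.Duration) (net.Conn, error) {
        return func(network, address string, timeout time.Duration) (net.Conn, error) {
            if failWith != nil {
                return nil, failWith
            }
            conn, _ := net.Pipe() // the peer end is discarded; enough for a unit test
            return conn, nil
        }
    }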
- } + "required": [ + "name", + "host", + "ports" + ] }, - "required": [ - "name", - "host", - "ports" - ] + "uiSchema": {} } diff --git a/modules/portcheck/init.go b/modules/portcheck/init.go index d5c2ebb55..23825620b 100644 --- a/modules/portcheck/init.go +++ b/modules/portcheck/init.go @@ -4,10 +4,21 @@ package portcheck import ( "errors" + "net" + "time" "github.com/netdata/go.d.plugin/agent/module" ) +type dialFunc func(network, address string, timeout time.Duration) (net.Conn, error) + +type port struct { + number int + state checkState + inState int + latency int +} + func (pc *PortCheck) validateConfig() error { if pc.Host == "" { return errors.New("'host' parameter not set") @@ -29,3 +40,10 @@ func (pc *PortCheck) initCharts() (*module.Charts, error) { return &charts, nil } + +func (pc *PortCheck) initPorts() (ports []*port) { + for _, p := range pc.Ports { + ports = append(ports, &port{number: p}) + } + return ports +} diff --git a/modules/portcheck/portcheck.go b/modules/portcheck/portcheck.go index c7e2c0b9d..1e81c3bad 100644 --- a/modules/portcheck/portcheck.go +++ b/modules/portcheck/portcheck.go @@ -27,7 +27,7 @@ func init() { func New() *PortCheck { return &PortCheck{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, dial: net.DialTimeout, } @@ -39,15 +39,6 @@ type Config struct { Timeout web.Duration `yaml:"timeout"` } -type dialFunc func(network, address string, timeout time.Duration) (net.Conn, error) - -type port struct { - number int - state checkState - inState int - latency int -} - type PortCheck struct { module.Base Config `yaml:",inline"` @@ -58,32 +49,34 @@ type PortCheck struct { ports []*port } -func (pc *PortCheck) Init() bool { +func (pc *PortCheck) Configuration() any { + return pc.Config +} + +func (pc *PortCheck) Init() error { if err := pc.validateConfig(); err != nil { pc.Errorf("config validation: %v", err) - return false + return err } charts, err := pc.initCharts() if err != nil { pc.Errorf("init charts: %v", err) - return false + return err } pc.charts = charts - for _, p := range pc.Ports { - pc.ports = append(pc.ports, &port{number: p}) - } + pc.ports = pc.initPorts() pc.Debugf("using host: %s", pc.Host) pc.Debugf("using ports: %v", pc.Ports) pc.Debugf("using TCP connection timeout: %s", pc.Timeout) - return true + return nil } -func (pc *PortCheck) Check() bool { - return true +func (pc *PortCheck) Check() error { + return nil } func (pc *PortCheck) Charts() *module.Charts { diff --git a/modules/portcheck/portcheck_test.go b/modules/portcheck/portcheck_test.go index 2e242cbbb..28af96800 100644 --- a/modules/portcheck/portcheck_test.go +++ b/modules/portcheck/portcheck_test.go @@ -25,21 +25,21 @@ func TestPortCheck_Init(t *testing.T) { job.Host = "127.0.0.1" job.Ports = []int{39001, 39002} - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.Len(t, job.ports, 2) } func TestPortCheck_InitNG(t *testing.T) { job := New() - assert.False(t, job.Init()) + assert.Error(t, job.Init()) job.Host = "127.0.0.1" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) job.Ports = []int{39001, 39002} - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) } func TestPortCheck_Check(t *testing.T) { - assert.True(t, New().Check()) + assert.NoError(t, New().Check()) } func TestPortCheck_Cleanup(t *testing.T) { @@ -50,7 +50,7 @@ func TestPortCheck_Charts(t *testing.T) { job := New() job.Ports = []int{1, 2} job.Host = "localhost" - require.True(t, job.Init()) + require.NoError(t, 
job.Init()) assert.Len(t, *job.Charts(), len(chartsTmpl)*len(job.Ports)) } @@ -61,8 +61,8 @@ func TestPortCheck_Collect(t *testing.T) { job.Ports = []int{39001, 39002} job.UpdateEvery = 5 job.dial = testDial(nil) - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) copyLatency := func(dst, src map[string]int64) { for k := range dst { diff --git a/modules/postgres/collect.go b/modules/postgres/collect.go index f66e956a3..b43e2806e 100644 --- a/modules/postgres/collect.go +++ b/modules/postgres/collect.go @@ -132,7 +132,7 @@ func (p *Postgres) openPrimaryConnection() (*sql.DB, error) { db.SetMaxIdleConns(1) db.SetConnMaxLifetime(10 * time.Minute) - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() if err := db.PingContext(ctx); err != nil { @@ -162,7 +162,7 @@ func (p *Postgres) openSecondaryConnection(dbname string) (*sql.DB, string, erro db.SetMaxIdleConns(1) db.SetConnMaxLifetime(10 * time.Minute) - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() if err := db.PingContext(ctx); err != nil { diff --git a/modules/postgres/config_schema.json b/modules/postgres/config_schema.json index 98a8616b7..d0374cd8b 100644 --- a/modules/postgres/config_schema.json +++ b/modules/postgres/config_schema.json @@ -1,44 +1,47 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/postgres job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "dsn": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "collect_databases_matching": { - "type": "string" - }, - "transaction_time_histogram": { - "type": "array", - "items": { - "type": "number" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/postgres job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_databases_matching": { + "type": "string" + }, + "transaction_time_histogram": { + "type": "array", + "items": { + "type": "number" + } + }, + "query_time_histogram": { + "type": "array", + "items": { + "type": "number" + } + }, + "max_db_tables": { + "type": "integer" + }, + "max_db_indexes": { + "type": "integer" } }, - "query_time_histogram": { - "type": "array", - "items": { - "type": "number" - } - }, - "max_db_tables": { - "type": "integer" - }, - "max_db_indexes": { - "type": "integer" - } + "required": [ + "name", + "dsn" + ] }, - "required": [ - "name", - "dsn" - ] + "uiSchema": {} } diff --git a/modules/postgres/do_query.go b/modules/postgres/do_query.go index ea134ec5f..3b90be0d7 100644 --- a/modules/postgres/do_query.go +++ b/modules/postgres/do_query.go @@ -8,14 +8,14 @@ import ( ) func (p *Postgres) doQueryRow(query string, v any) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() return p.db.QueryRowContext(ctx, query).Scan(v) } func (p *Postgres) doDBQueryRow(db *sql.DB, query string, v any) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := 
context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() return db.QueryRowContext(ctx, query).Scan(v) @@ -26,7 +26,7 @@ func (p *Postgres) doQuery(query string, assign func(column, value string, rowEn } func (p *Postgres) doDBQuery(db *sql.DB, query string, assign func(column, value string, rowEnd bool)) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() rows, err := db.QueryContext(ctx, query) diff --git a/modules/postgres/postgres.go b/modules/postgres/postgres.go index a1dabf9d3..b3a0b7579 100644 --- a/modules/postgres/postgres.go +++ b/modules/postgres/postgres.go @@ -5,6 +5,7 @@ package postgres import ( "database/sql" _ "embed" + "errors" "sync" "time" @@ -30,7 +31,7 @@ func init() { func New() *Postgres { return &Postgres{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), DSN: "postgres://postgres:postgres@127.0.0.1:5432/postgres", XactTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10}, QueryTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10}, @@ -99,28 +100,40 @@ type ( } ) -func (p *Postgres) Init() bool { +func (p *Postgres) Configuration() any { + return p.Config +} + +func (p *Postgres) Init() error { err := p.validateConfig() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } sr, err := p.initDBSelector() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } p.dbSr = sr p.mx.xactTimeHist = metrics.NewHistogramWithRangeBuckets(p.XactTimeHistogram) p.mx.queryTimeHist = metrics.NewHistogramWithRangeBuckets(p.QueryTimeHistogram) - return true + return nil } -func (p *Postgres) Check() bool { - return len(p.Collect()) > 0 +func (p *Postgres) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *Postgres) Charts() *module.Charts { diff --git a/modules/postgres/postgres_test.go b/modules/postgres/postgres_test.go index a41c11235..616098526 100644 --- a/modules/postgres/postgres_test.go +++ b/modules/postgres/postgres_test.go @@ -128,9 +128,9 @@ func TestPostgres_Init(t *testing.T) { pg.Config = test.config if test.wantFail { - assert.False(t, pg.Init()) + assert.Error(t, pg.Init()) } else { - assert.True(t, pg.Init()) + assert.NoError(t, pg.Init()) } }) } @@ -233,14 +233,14 @@ func TestPostgres_Check(t *testing.T) { pg.db = db defer func() { _ = db.Close() }() - require.True(t, pg.Init()) + require.NoError(t, pg.Init()) test.prepareMock(t, pg, mock) if test.wantFail { - assert.False(t, pg.Check()) + assert.Error(t, pg.Check()) } else { - assert.True(t, pg.Check()) + assert.NoError(t, pg.Check()) } assert.NoError(t, mock.ExpectationsWereMet()) }) @@ -669,7 +669,7 @@ func TestPostgres_Collect(t *testing.T) { pg.db = db defer func() { _ = db.Close() }() - require.True(t, pg.Init()) + require.NoError(t, pg.Init()) for i, step := range test { t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { diff --git a/modules/powerdns/authoritativens.go b/modules/powerdns/authoritativens.go index 07b7fdbcf..f9bc72c4a 100644 --- a/modules/powerdns/authoritativens.go +++ b/modules/powerdns/authoritativens.go @@ -4,6 +4,7 @@ package powerdns import ( _ "embed" + "errors" "net/http" "time" @@ -29,7 +30,7 @@ func New() *AuthoritativeNS { URL: "http://127.0.0.1:8081", }, Client: web.Client{ - Timeout: 
web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -48,32 +49,44 @@ type AuthoritativeNS struct { charts *module.Charts } -func (ns *AuthoritativeNS) Init() bool { +func (ns *AuthoritativeNS) Configuration() any { + return ns.Config +} + +func (ns *AuthoritativeNS) Init() error { err := ns.validateConfig() if err != nil { ns.Errorf("config validation: %v", err) - return false + return err } client, err := ns.initHTTPClient() if err != nil { ns.Errorf("init HTTP client: %v", err) - return false + return err } ns.httpClient = client cs, err := ns.initCharts() if err != nil { ns.Errorf("init charts: %v", err) - return false + return err } ns.charts = cs - return true + return nil } -func (ns *AuthoritativeNS) Check() bool { - return len(ns.Collect()) > 0 +func (ns *AuthoritativeNS) Check() error { + mx, err := ns.collect() + if err != nil { + ns.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (ns *AuthoritativeNS) Charts() *module.Charts { diff --git a/modules/powerdns/authoritativens_test.go b/modules/powerdns/authoritativens_test.go index 71e5c6dc4..637f251c4 100644 --- a/modules/powerdns/authoritativens_test.go +++ b/modules/powerdns/authoritativens_test.go @@ -70,9 +70,9 @@ func TestRecursor_Init(t *testing.T) { ns.Config = test.config if test.wantFail { - assert.False(t, ns.Init()) + assert.Error(t, ns.Init()) } else { - assert.True(t, ns.Init()) + assert.NoError(t, ns.Init()) } }) } @@ -108,12 +108,12 @@ func TestRecursor_Check(t *testing.T) { t.Run(name, func(t *testing.T) { recursor, cleanup := test.prepare() defer cleanup() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) if test.wantFail { - assert.False(t, recursor.Check()) + assert.Error(t, recursor.Check()) } else { - assert.True(t, recursor.Check()) + assert.NoError(t, recursor.Check()) } }) } @@ -121,7 +121,7 @@ func TestRecursor_Check(t *testing.T) { func TestRecursor_Charts(t *testing.T) { recursor := New() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) assert.NotNil(t, recursor.Charts()) } @@ -236,7 +236,7 @@ func TestRecursor_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { ns, cleanup := test.prepare() defer cleanup() - require.True(t, ns.Init()) + require.NoError(t, ns.Init()) collected := ns.Collect() diff --git a/modules/powerdns/config_schema.json b/modules/powerdns/config_schema.json index 93f8e72a2..8d6d5d9ac 100644 --- a/modules/powerdns/config_schema.json +++ b/modules/powerdns/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/powerdns job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/powerdns job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": 
"string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/powerdns/init.go b/modules/powerdns/init.go index a577db773..aefdc5cb9 100644 --- a/modules/powerdns/init.go +++ b/modules/powerdns/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (ns AuthoritativeNS) validateConfig() error { +func (ns *AuthoritativeNS) validateConfig() error { if ns.URL == "" { return errors.New("URL not set") } @@ -20,10 +20,10 @@ func (ns AuthoritativeNS) validateConfig() error { return nil } -func (ns AuthoritativeNS) initHTTPClient() (*http.Client, error) { +func (ns *AuthoritativeNS) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(ns.Client) } -func (ns AuthoritativeNS) initCharts() (*module.Charts, error) { +func (ns *AuthoritativeNS) initCharts() (*module.Charts, error) { return charts.Copy(), nil } diff --git a/modules/powerdns_recursor/config_schema.json b/modules/powerdns_recursor/config_schema.json index fcd19e150..23d0bdc31 100644 --- a/modules/powerdns_recursor/config_schema.json +++ b/modules/powerdns_recursor/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/powerdns_recursor job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/powerdns_recursor job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git 
a/modules/powerdns_recursor/recursor.go b/modules/powerdns_recursor/recursor.go index cd052ba6d..cd3d63b20 100644 --- a/modules/powerdns_recursor/recursor.go +++ b/modules/powerdns_recursor/recursor.go @@ -4,6 +4,7 @@ package powerdns_recursor import ( _ "embed" + "errors" "net/http" "time" @@ -29,7 +30,7 @@ func New() *Recursor { URL: "http://127.0.0.1:8081", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -48,32 +49,44 @@ type Recursor struct { charts *module.Charts } -func (r *Recursor) Init() bool { +func (r *Recursor) Configuration() any { + return r.Config +} + +func (r *Recursor) Init() error { err := r.validateConfig() if err != nil { r.Errorf("config validation: %v", err) - return false + return err } client, err := r.initHTTPClient() if err != nil { r.Errorf("init HTTP client: %v", err) - return false + return err } r.httpClient = client cs, err := r.initCharts() if err != nil { r.Errorf("init charts: %v", err) - return false + return err } r.charts = cs - return true + return nil } -func (r *Recursor) Check() bool { - return len(r.Collect()) > 0 +func (r *Recursor) Check() error { + mx, err := r.collect() + if err != nil { + r.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (r *Recursor) Charts() *module.Charts { diff --git a/modules/powerdns_recursor/recursor_test.go b/modules/powerdns_recursor/recursor_test.go index 4ef3c2d08..64b6e36e3 100644 --- a/modules/powerdns_recursor/recursor_test.go +++ b/modules/powerdns_recursor/recursor_test.go @@ -70,9 +70,9 @@ func TestRecursor_Init(t *testing.T) { recursor.Config = test.config if test.wantFail { - assert.False(t, recursor.Init()) + assert.Error(t, recursor.Init()) } else { - assert.True(t, recursor.Init()) + assert.NoError(t, recursor.Init()) } }) } @@ -108,12 +108,12 @@ func TestRecursor_Check(t *testing.T) { t.Run(name, func(t *testing.T) { recursor, cleanup := test.prepare() defer cleanup() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) if test.wantFail { - assert.False(t, recursor.Check()) + assert.Error(t, recursor.Check()) } else { - assert.True(t, recursor.Check()) + assert.NoError(t, recursor.Check()) } }) } @@ -121,7 +121,7 @@ func TestRecursor_Check(t *testing.T) { func TestRecursor_Charts(t *testing.T) { recursor := New() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) assert.NotNil(t, recursor.Charts()) } @@ -271,7 +271,7 @@ func TestRecursor_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { recursor, cleanup := test.prepare() defer cleanup() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) collected := recursor.Collect() diff --git a/modules/prometheus/config_schema.json b/modules/prometheus/config_schema.json index 60261d542..8103344e5 100644 --- a/modules/prometheus/config_schema.json +++ b/modules/prometheus/config_schema.json @@ -1,113 +1,116 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/prometheus job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "selector": { - "type": "object", - "properties": { - "allow": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/prometheus job configuration schema.", + "type": "object", + 
"properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "selector": { + "type": "object", + "properties": { + "allow": { + "type": "array", + "items": { + "type": "string" + } + }, + "deny": { + "type": "array", + "items": { + "type": "string" + } } }, - "deny": { - "type": "array", - "items": { - "type": "string" - } - } + "required": [ + "allow", + "deny" + ] }, - "required": [ - "allow", - "deny" - ] - }, - "fallback_type": { - "type": "object", - "properties": { - "counter": { - "type": "array", - "items": { - "type": "string" + "fallback_type": { + "type": "object", + "properties": { + "counter": { + "type": "array", + "items": { + "type": "string" + } + }, + "gauge": { + "type": "array", + "items": { + "type": "string" + } } }, - "gauge": { - "type": "array", - "items": { - "type": "string" - } + "required": [ + "counter", + "gauge" + ] + }, + "bearer_token": { + "type": "string" + }, + "expected_prefix": { + "type": "string" + }, + "max_time_series": { + "type": "integer" + }, + "max_time_series_per_metric": { + "type": "integer" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" } }, - "required": [ - "counter", - "gauge" - ] - }, - "bearer_token": { - "type": "string" - }, - "expected_prefix": { - "type": "string" - }, - "max_time_series": { - "type": "integer" - }, - "max_time_series_per_metric": { - "type": "integer" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/prometheus/prometheus.go b/modules/prometheus/prometheus.go index 32a91e5c2..84f960902 100644 --- a/modules/prometheus/prometheus.go +++ b/modules/prometheus/prometheus.go @@ -4,6 +4,7 @@ package prometheus import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -31,7 +32,7 @@ func New() *Prometheus { Config: Config{ HTTP: web.HTTP{ Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 10}, + Timeout: web.Duration(time.Second * 10), }, }, MaxTS: 2000, @@ -74,38 +75,50 @@ type Prometheus struct { } } -func (p *Prometheus) Init() bool { +func (p *Prometheus) Configuration() any { + return p.Config +} + +func (p *Prometheus) Init() error { if err := p.validateConfig(); err != nil { p.Errorf("validating config: %v", err) - return false + return err } prom, err := p.initPrometheusClient() if err != nil { p.Errorf("init prometheus client: %v", err) - return false + return err } p.prom = prom m, err := p.initFallbackTypeMatcher(p.FallbackType.Counter) if err != nil { 
p.Errorf("init counter fallback type matcher: %v", err) - return false + return err } p.fallbackType.counter = m m, err = p.initFallbackTypeMatcher(p.FallbackType.Gauge) if err != nil { p.Errorf("init counter fallback type matcher: %v", err) - return false + return err } p.fallbackType.gauge = m - return true + return nil } -func (p *Prometheus) Check() bool { - return len(p.Collect()) > 0 +func (p *Prometheus) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *Prometheus) Charts() *module.Charts { @@ -124,4 +137,8 @@ func (p *Prometheus) Collect() map[string]int64 { return mx } -func (p *Prometheus) Cleanup() {} +func (p *Prometheus) Cleanup() { + if p.prom != nil && p.prom.HTTPClient() != nil { + p.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/prometheus/prometheus_test.go b/modules/prometheus/prometheus_test.go index 95bf55bd2..837041026 100644 --- a/modules/prometheus/prometheus_test.go +++ b/modules/prometheus/prometheus_test.go @@ -44,9 +44,9 @@ func TestPrometheus_Init(t *testing.T) { prom.Config = test.config if test.wantFail { - assert.False(t, prom.Init()) + assert.Error(t, prom.Init()) } else { - assert.True(t, prom.Init()) + assert.NoError(t, prom.Init()) } }) } @@ -57,7 +57,7 @@ func TestPrometheus_Cleanup(t *testing.T) { prom := New() prom.URL = "http://127.0.0.1" - require.True(t, prom.Init()) + require.NoError(t, prom.Init()) assert.NotPanics(t, prom.Cleanup) } @@ -169,12 +169,12 @@ test_counter_no_meta_metric_1_total{label1="value2"} 11 prom, cleanup := test.prepare() defer cleanup() - require.True(t, prom.Init()) + require.NoError(t, prom.Init()) if test.wantFail { - assert.False(t, prom.Check()) + assert.Error(t, prom.Check()) } else { - assert.True(t, prom.Check()) + assert.NoError(t, prom.Check()) } }) } @@ -558,7 +558,7 @@ test_gauge_no_meta_metric_1{label1="value2"} 12 defer srv.Close() prom.URL = srv.URL - require.True(t, prom.Init()) + require.NoError(t, prom.Init()) for num, step := range test.steps { t.Run(fmt.Sprintf("step num %d ('%s')", num+1, step.desc), func(t *testing.T) { diff --git a/modules/proxysql/collect.go b/modules/proxysql/collect.go index cc35fc02d..dfc559a97 100644 --- a/modules/proxysql/collect.go +++ b/modules/proxysql/collect.go @@ -225,14 +225,14 @@ func (p *ProxySQL) openConnection() error { } func (p *ProxySQL) doQueryRow(query string, v any) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() return p.db.QueryRowContext(ctx, query).Scan(v) } func (p *ProxySQL) doQuery(query string, assign func(column, value string, rowEnd bool)) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() rows, err := p.db.QueryContext(ctx, query) diff --git a/modules/proxysql/config_schema.json b/modules/proxysql/config_schema.json index 5fab79bc7..2c904a570 100644 --- a/modules/proxysql/config_schema.json +++ b/modules/proxysql/config_schema.json @@ -1,26 +1,29 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/proxysql job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/proxysql job 
configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "my.cnf": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "dsn": { - "type": "string" - }, - "my.cnf": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "dsn" + ] }, - "required": [ - "name", - "dsn" - ] + "uiSchema": {} } diff --git a/modules/proxysql/proxysql.go b/modules/proxysql/proxysql.go index d52c36efd..c7eaffaf6 100644 --- a/modules/proxysql/proxysql.go +++ b/modules/proxysql/proxysql.go @@ -5,6 +5,7 @@ package proxysql import ( "database/sql" _ "embed" + "errors" _ "github.com/go-sql-driver/mysql" "sync" "time" @@ -27,7 +28,7 @@ func New() *ProxySQL { return &ProxySQL{ Config: Config{ DSN: "stats:stats@tcp(127.0.0.1:6032)/", - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, charts: baseCharts.Copy(), @@ -46,32 +47,43 @@ type Config struct { Timeout web.Duration `yaml:"timeout"` } -type ( - ProxySQL struct { - module.Base - Config `yaml:",inline"` +type ProxySQL struct { + module.Base + Config `yaml:",inline"` - db *sql.DB + db *sql.DB - charts *module.Charts + charts *module.Charts - once *sync.Once - cache *cache - } -) + once *sync.Once + cache *cache +} -func (p *ProxySQL) Init() bool { +func (p *ProxySQL) Configuration() any { + return p.Config +} + +func (p *ProxySQL) Init() error { if p.DSN == "" { - p.Error("'dsn' not set") - return false + p.Error("dsn not set") + return errors.New("dsn not set") } p.Debugf("using DSN [%s]", p.DSN) - return true + + return nil } -func (p *ProxySQL) Check() bool { - return len(p.Collect()) > 0 +func (p *ProxySQL) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *ProxySQL) Charts() *module.Charts { diff --git a/modules/proxysql/proxysql_test.go b/modules/proxysql/proxysql_test.go index ec31c4d85..d5d30aa07 100644 --- a/modules/proxysql/proxysql_test.go +++ b/modules/proxysql/proxysql_test.go @@ -62,9 +62,9 @@ func TestProxySQL_Init(t *testing.T) { proxySQL.Config = test.config if test.wantFail { - assert.False(t, proxySQL.Init()) + assert.Error(t, proxySQL.Init()) } else { - assert.True(t, proxySQL.Init()) + assert.NoError(t, proxySQL.Init()) } }) } @@ -165,14 +165,14 @@ func TestProxySQL_Check(t *testing.T) { proxySQL.db = db defer func() { _ = db.Close() }() - require.True(t, proxySQL.Init()) + require.NoError(t, proxySQL.Init()) test.prepareMock(t, mock) if test.wantFail { - assert.False(t, proxySQL.Check()) + assert.Error(t, proxySQL.Check()) } else { - assert.True(t, proxySQL.Check()) + assert.NoError(t, proxySQL.Check()) } assert.NoError(t, mock.ExpectationsWereMet()) }) @@ -1152,7 +1152,7 @@ func TestProxySQL_Collect(t *testing.T) { my.db = db defer func() { _ = db.Close() }() - require.True(t, my.Init()) + require.NoError(t, my.Init()) for i, step := range test { t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { diff --git a/modules/pulsar/cache.go b/modules/pulsar/cache.go new file mode 100644 index 000000000..7f113bf86 --- /dev/null +++ b/modules/pulsar/cache.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pulsar + +func newCache() *cache { + return &cache{ + namespaces: make(map[namespace]bool), + topics: make(map[topic]bool), + } +} + +type ( + namespace struct{ name string } + topic 
struct{ namespace, name string } + cache struct { + namespaces map[namespace]bool + topics map[topic]bool + } +) diff --git a/modules/pulsar/config_schema.json b/modules/pulsar/config_schema.json index 083eb0b98..fc69b1d55 100644 --- a/modules/pulsar/config_schema.json +++ b/modules/pulsar/config_schema.json @@ -1,76 +1,79 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/pulsar job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "topic_filter": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" - } - }, - "excludes": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/pulsar job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "topic_filter": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } } } - } - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/pulsar/init.go b/modules/pulsar/init.go new file mode 100644 index 000000000..d1302bd01 --- /dev/null +++ b/modules/pulsar/init.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pulsar + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (p *Pulsar) validateConfig() error { + if p.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (p *Pulsar) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(p.Client) + if err != nil { + return nil, err + } + + return prometheus.New(client, p.Request), nil +} + +func (p *Pulsar) initTopicFilerMatcher() (matcher.Matcher, error) { + if p.TopicFiler.Empty() { + return matcher.TRUE(), nil + } + return p.TopicFiler.Parse() +} diff --git a/modules/pulsar/pulsar.go b/modules/pulsar/pulsar.go index 8b0ce9101..f553a4bc2 100644 --- a/modules/pulsar/pulsar.go +++ b/modules/pulsar/pulsar.go @@ -29,22 +29,21 @@ func init() { } func New() 
*Pulsar { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:8080/metrics", + return &Pulsar{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8080/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + TopicFiler: matcher.SimpleExpr{ + Includes: nil, + Excludes: []string{"*"}, }, }, - TopicFiler: matcher.SimpleExpr{ - Includes: nil, - Excludes: []string{"*"}, - }, - } - return &Pulsar{ - Config: config, once: &sync.Once{}, charts: summaryCharts.Copy(), nsCharts: namespaceCharts.Copy(), @@ -54,90 +53,62 @@ func New() *Pulsar { } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - TopicFiler matcher.SimpleExpr `yaml:"topic_filter"` - } - - Pulsar struct { - module.Base - Config `yaml:",inline"` - - prom prometheus.Prometheus - topicFilter matcher.Matcher - cache *cache - curCache *cache - once *sync.Once - charts *Charts - nsCharts *Charts - topicChartsMapping map[string]string - } - - namespace struct{ name string } - topic struct{ namespace, name string } - cache struct { - namespaces map[namespace]bool - topics map[topic]bool - } -) +type Config struct { + web.HTTP `yaml:",inline"` + TopicFiler matcher.SimpleExpr `yaml:"topic_filter"` +} -func newCache() *cache { - return &cache{ - namespaces: make(map[namespace]bool), - topics: make(map[topic]bool), - } +type Pulsar struct { + module.Base + Config `yaml:",inline"` + + prom prometheus.Prometheus + topicFilter matcher.Matcher + cache *cache + curCache *cache + once *sync.Once + charts *Charts + nsCharts *Charts + topicChartsMapping map[string]string } -func (p Pulsar) validateConfig() error { - if p.URL == "" { - return errors.New("URL is not set") - } - return nil +func (p *Pulsar) Configuration() any { + return p.Config } -func (p *Pulsar) initClient() error { - client, err := web.NewHTTPClient(p.Client) - if err != nil { +func (p *Pulsar) Init() error { + if err := p.validateConfig(); err != nil { + p.Errorf("config validation: %v", err) return err } - p.prom = prometheus.New(client, p.Request) - return nil -} - -func (p *Pulsar) initTopicFiler() error { - if p.TopicFiler.Empty() { - p.topicFilter = matcher.TRUE() - return nil + prom, err := p.initPrometheusClient() + if err != nil { + p.Error(err) + return err } + p.prom = prom - m, err := p.TopicFiler.Parse() + m, err := p.initTopicFilerMatcher() if err != nil { + p.Error(err) return err } p.topicFilter = m + return nil } -func (p *Pulsar) Init() bool { - if err := p.validateConfig(); err != nil { - p.Errorf("config validation: %v", err) - return false - } - if err := p.initClient(); err != nil { - p.Errorf("client initializing: %v", err) - return false +func (p *Pulsar) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err } - if err := p.initTopicFiler(); err != nil { - p.Errorf("topic filer initialization: %v", err) - return false + if len(mx) == 0 { + return errors.New("no metrics collected") } - return true -} - -func (p *Pulsar) Check() bool { - return len(p.Collect()) > 0 + return nil } func (p *Pulsar) Charts() *Charts { @@ -156,4 +127,8 @@ func (p *Pulsar) Collect() map[string]int64 { return mx } -func (Pulsar) Cleanup() {} +func (p *Pulsar) Cleanup() { + if p.prom != nil && p.prom.HTTPClient() != nil { + p.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/pulsar/pulsar_test.go b/modules/pulsar/pulsar_test.go index 3bf9468b6..418e4102d 100644 --- 
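Aside: pulsar's filter setup now lives in init.go and falls back to matcher.TRUE() when topic_filter is empty. A sketch of how such a matcher is typically consumed at collect time is below, assuming matcher.Matcher exposes MatchString as it does elsewhere in the plugin; the function itself is illustrative, the module's real filtering sits in its collect code.

    package sketch

    import "github.com/netdata/go.d.plugin/pkg/matcher"

    // filterTopics: with matcher.TRUE() as the fallback, an empty topic_filter
    // keeps every topic; otherwise only matching names survive.
    func filterTopics(m matcher.Matcher, names []string) []string {
        var keep []string
        for _, name := range names {
            if m.MatchString(name) {
                keep = append(keep, name)
            }
        }
        return keep
    }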
a/modules/pulsar/pulsar_test.go +++ b/modules/pulsar/pulsar_test.go @@ -71,9 +71,9 @@ func TestPulsar_Init(t *testing.T) { pulsar.Config = test.config if test.wantFail { - assert.False(t, pulsar.Init()) + assert.Error(t, pulsar.Init()) } else { - assert.True(t, pulsar.Init()) + assert.NoError(t, pulsar.Init()) } }) } @@ -102,9 +102,9 @@ func TestPulsar_Check(t *testing.T) { defer srv.Close() if test.wantFail { - assert.False(t, pulsar.Check()) + assert.Error(t, pulsar.Check()) } else { - assert.True(t, pulsar.Check()) + assert.NoError(t, pulsar.Check()) } }) } @@ -225,7 +225,7 @@ func prepareClientServerStdV250Namespaces(t *testing.T) (*Pulsar, *httptest.Serv pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -239,7 +239,7 @@ func prepareClientServerStdV250Topics(t *testing.T) (*Pulsar, *httptest.Server) pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -267,7 +267,7 @@ func prepareClientServersDynamicStdV250Topics(t *testing.T) (*Pulsar, *httptest. pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -281,7 +281,7 @@ func prepareClientServerNonPulsar(t *testing.T) (*Pulsar, *httptest.Server) { pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -295,7 +295,7 @@ func prepareClientServerInvalidData(t *testing.T) (*Pulsar, *httptest.Server) { pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -309,7 +309,7 @@ func prepareClientServer404(t *testing.T) (*Pulsar, *httptest.Server) { pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -320,7 +320,7 @@ func prepareClientServerConnectionRefused(t *testing.T) (*Pulsar, *httptest.Serv pulsar := New() pulsar.URL = "http://127.0.0.1:38001/metrics" - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } diff --git a/modules/rabbitmq/config_schema.json b/modules/rabbitmq/config_schema.json index ad9f0e7b0..60e702748 100644 --- a/modules/rabbitmq/config_schema.json +++ b/modules/rabbitmq/config_schema.json @@ -1,62 +1,65 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/rabbitmq job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "collect_queues_metrics": { - "type": "boolean" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/rabbitmq job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_queues_metrics": { + "type": "boolean" + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + 
"type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/rabbitmq/rabbitmq.go b/modules/rabbitmq/rabbitmq.go index 59fe4b153..44fa3dc1a 100644 --- a/modules/rabbitmq/rabbitmq.go +++ b/modules/rabbitmq/rabbitmq.go @@ -4,6 +4,7 @@ package rabbitmq import ( _ "embed" + "errors" "net/http" "time" @@ -31,7 +32,7 @@ func New() *RabbitMQ { Password: "guest", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, CollectQueues: false, @@ -66,27 +67,39 @@ type ( } ) -func (r *RabbitMQ) Init() bool { +func (r *RabbitMQ) Configuration() any { + return r.Config +} + +func (r *RabbitMQ) Init() error { if r.URL == "" { r.Error("'url' can not be empty") - return false + return errors.New("url not set") } client, err := web.NewHTTPClient(r.Client) if err != nil { r.Errorf("init HTTP client: %v", err) - return false + return err } r.httpClient = client r.Debugf("using URL %s", r.URL) - r.Debugf("using timeout: %s", r.Timeout.Duration) + r.Debugf("using timeout: %s", r.Timeout) - return true + return nil } -func (r *RabbitMQ) Check() bool { - return len(r.Collect()) > 0 +func (r *RabbitMQ) Check() error { + mx, err := r.collect() + if err != nil { + r.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (r *RabbitMQ) Charts() *module.Charts { diff --git a/modules/rabbitmq/rabbitmq_test.go b/modules/rabbitmq/rabbitmq_test.go index c365726aa..caed7c126 100644 --- a/modules/rabbitmq/rabbitmq_test.go +++ b/modules/rabbitmq/rabbitmq_test.go @@ -58,9 +58,9 @@ func TestRabbitMQ_Init(t *testing.T) { rabbit.Config = test.config if test.wantFail { - assert.False(t, rabbit.Init()) + assert.Error(t, rabbit.Init()) } else { - assert.True(t, rabbit.Init()) + assert.NoError(t, rabbit.Init()) } }) } @@ -74,7 +74,7 @@ func TestRabbitMQ_Cleanup(t *testing.T) { assert.NotPanics(t, New().Cleanup) rabbit := New() - require.True(t, rabbit.Init()) + require.NoError(t, rabbit.Init()) assert.NotPanics(t, rabbit.Cleanup) } @@ -94,12 +94,12 @@ func TestRabbitMQ_Check(t *testing.T) { rabbit, cleanup := test.prepare() defer cleanup() - require.True(t, rabbit.Init()) + require.NoError(t, rabbit.Init()) if test.wantFail { - assert.False(t, rabbit.Check()) + assert.Error(t, rabbit.Check()) } else { - assert.True(t, rabbit.Check()) + assert.NoError(t, rabbit.Check()) } }) } @@ -285,7 +285,7 @@ func TestRabbitMQ_Collect(t *testing.T) { rabbit, cleanup := test.prepare() defer cleanup() - require.True(t, rabbit.Init()) + require.NoError(t, rabbit.Init()) mx := rabbit.Collect() diff --git a/modules/redis/config_schema.json b/modules/redis/config_schema.json index ed25da9de..a4ab1919b 100644 --- a/modules/redis/config_schema.json +++ b/modules/redis/config_schema.json @@ -1,44 +1,47 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/redis job configuration schema.", - "type": "object", - 
"properties": { - "name": { - "type": "string" - }, - "address": { - "type": "string" - }, - "password": { - "type": "string" - }, - "username": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "ping_samples": { - "type": "integer" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "tls_skip_verify": { - "type": "boolean" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/redis job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "password": { + "type": "string" + }, + "username": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "ping_samples": { + "type": "integer" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/redis/init.go b/modules/redis/init.go index ffed274c3..072febb17 100644 --- a/modules/redis/init.go +++ b/modules/redis/init.go @@ -42,9 +42,9 @@ func (r *Redis) initRedisClient() (*redis.Client, error) { opts.PoolSize = 1 opts.TLSConfig = tlsConfig - opts.DialTimeout = r.Timeout.Duration - opts.ReadTimeout = r.Timeout.Duration - opts.WriteTimeout = r.Timeout.Duration + opts.DialTimeout = r.Timeout.Duration() + opts.ReadTimeout = r.Timeout.Duration() + opts.WriteTimeout = r.Timeout.Duration() return redis.NewClient(opts), nil } diff --git a/modules/redis/redis.go b/modules/redis/redis.go index 2117cc2ce..96be5e303 100644 --- a/modules/redis/redis.go +++ b/modules/redis/redis.go @@ -5,6 +5,7 @@ package redis import ( "context" _ "embed" + "errors" "sync" "time" @@ -31,7 +32,7 @@ func New() *Redis { return &Redis{ Config: Config{ Address: "redis://@localhost:6379", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), PingSamples: 5, }, @@ -79,32 +80,44 @@ type ( } ) -func (r *Redis) Init() bool { +func (r *Redis) Configuration() any { + return r.Config +} + +func (r *Redis) Init() error { err := r.validateConfig() if err != nil { r.Errorf("config validation: %v", err) - return false + return err } rdb, err := r.initRedisClient() if err != nil { r.Errorf("init redis client: %v", err) - return false + return err } r.rdb = rdb charts, err := r.initCharts() if err != nil { r.Errorf("init charts: %v", err) - return false + return err } r.charts = charts - return true + return nil } -func (r *Redis) Check() bool { - return len(r.Collect()) > 0 +func (r *Redis) Check() error { + mx, err := r.collect() + if err != nil { + r.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (r *Redis) Charts() *module.Charts { diff --git a/modules/redis/redis_test.go b/modules/redis/redis_test.go index 9ee2f54f0..d9a35ad57 100644 --- a/modules/redis/redis_test.go +++ b/modules/redis/redis_test.go @@ -65,9 +65,9 @@ func TestRedis_Init(t *testing.T) { rdb.Config = test.config if test.wantFail { - assert.False(t, rdb.Init()) + assert.Error(t, rdb.Init()) } else { - assert.True(t, rdb.Init()) + assert.NoError(t, rdb.Init()) } }) } @@ -96,9 +96,9 @@ func TestRedis_Check(t *testing.T) { rdb := test.prepare(t) if test.wantFail { - assert.False(t, rdb.Check()) + assert.Error(t, rdb.Check()) 
} else { - assert.True(t, rdb.Check()) + assert.NoError(t, rdb.Check()) } }) } @@ -106,7 +106,7 @@ func TestRedis_Check(t *testing.T) { func TestRedis_Charts(t *testing.T) { rdb := New() - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) assert.NotNil(t, rdb.Charts()) } @@ -115,7 +115,7 @@ func TestRedis_Cleanup(t *testing.T) { rdb := New() assert.NotPanics(t, rdb.Cleanup) - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) m := &mockRedisClient{} rdb.rdb = m @@ -308,7 +308,7 @@ func TestRedis_Collect(t *testing.T) { func prepareRedisV609(t *testing.T) *Redis { rdb := New() - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) rdb.rdb = &mockRedisClient{ result: v609InfoAll, } @@ -317,7 +317,7 @@ func prepareRedisV609(t *testing.T) *Redis { func prepareRedisErrorOnInfo(t *testing.T) *Redis { rdb := New() - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) rdb.rdb = &mockRedisClient{ errOnInfo: true, } @@ -326,7 +326,7 @@ func prepareRedisErrorOnInfo(t *testing.T) *Redis { func prepareRedisWithPikaMetrics(t *testing.T) *Redis { rdb := New() - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) rdb.rdb = &mockRedisClient{ result: pikaInfoAll, } diff --git a/modules/scaleio/collect_sdc.go b/modules/scaleio/collect_sdc.go index 495b1a031..be05f5c33 100644 --- a/modules/scaleio/collect_sdc.go +++ b/modules/scaleio/collect_sdc.go @@ -4,7 +4,7 @@ package scaleio import "github.com/netdata/go.d.plugin/modules/scaleio/client" -func (s ScaleIO) collectSdc(ss map[string]client.SdcStatistics) map[string]sdcMetrics { +func (s *ScaleIO) collectSdc(ss map[string]client.SdcStatistics) map[string]sdcMetrics { ms := make(map[string]sdcMetrics, len(ss)) for id, stats := range ss { diff --git a/modules/scaleio/collect_storage_pool.go b/modules/scaleio/collect_storage_pool.go index 7a41b66bd..dcaf01950 100644 --- a/modules/scaleio/collect_storage_pool.go +++ b/modules/scaleio/collect_storage_pool.go @@ -4,7 +4,7 @@ package scaleio import "github.com/netdata/go.d.plugin/modules/scaleio/client" -func (s ScaleIO) collectStoragePool(ss map[string]client.StoragePoolStatistics) map[string]storagePoolMetrics { +func (s *ScaleIO) collectStoragePool(ss map[string]client.StoragePoolStatistics) map[string]storagePoolMetrics { ms := make(map[string]storagePoolMetrics, len(ss)) for id, stats := range ss { diff --git a/modules/scaleio/collect_system.go b/modules/scaleio/collect_system.go index 6806e1969..e28fcee6c 100644 --- a/modules/scaleio/collect_system.go +++ b/modules/scaleio/collect_system.go @@ -4,7 +4,7 @@ package scaleio import "github.com/netdata/go.d.plugin/modules/scaleio/client" -func (ScaleIO) collectSystem(ss client.SystemStatistics) systemMetrics { +func (s *ScaleIO) collectSystem(ss client.SystemStatistics) systemMetrics { var sm systemMetrics collectSystemCapacity(&sm, ss) collectSystemWorkload(&sm, ss) diff --git a/modules/scaleio/config_schema.json b/modules/scaleio/config_schema.json index 66230acc9..d3140b9fb 100644 --- a/modules/scaleio/config_schema.json +++ b/modules/scaleio/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/scaleio job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - 
}, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/scaleio job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/scaleio/scaleio.go b/modules/scaleio/scaleio.go index 05bb03c5b..f400ef0c2 100644 --- a/modules/scaleio/scaleio.go +++ b/modules/scaleio/scaleio.go @@ -4,6 +4,7 @@ package scaleio import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/modules/scaleio/client" @@ -24,28 +25,27 @@ func init() { // New creates ScaleIO with default values. func New() *ScaleIO { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "https://127.0.0.1", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &ScaleIO{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "https://127.0.0.1", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - return &ScaleIO{ - Config: config, charts: systemCharts.Copy(), charted: make(map[string]bool), } } +type Config struct { + web.HTTP `yaml:",inline"` +} + type ( - // Config is the ScaleIO module configuration. - Config struct { - web.HTTP `yaml:",inline"` - } // ScaleIO ScaleIO module. ScaleIO struct { module.Base @@ -65,32 +65,45 @@ type ( } ) +func (s *ScaleIO) Configuration() any { + return s.Config +} + // Init makes initialization. -func (s *ScaleIO) Init() bool { +func (s *ScaleIO) Init() error { if s.Username == "" || s.Password == "" { s.Error("username and password aren't set") - return false + return errors.New("username and password aren't set") } c, err := client.New(s.Client, s.Request) if err != nil { s.Errorf("error on creating ScaleIO client: %v", err) - return false + return err } s.client = c s.Debugf("using URL %s", s.URL) - s.Debugf("using timeout: %s", s.Timeout.Duration) - return true + s.Debugf("using timeout: %s", s.Timeout) + + return nil } // Check makes check. -func (s *ScaleIO) Check() bool { +func (s *ScaleIO) Check() error { if err := s.client.Login(); err != nil { s.Error(err) - return false + return err + } + mx, err := s.collect() + if err != nil { + s.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") } - return len(s.Collect()) > 0 + return nil } // Charts returns Charts. 
diff --git a/modules/scaleio/scaleio_test.go b/modules/scaleio/scaleio_test.go index 5547b174b..3443bd518 100644 --- a/modules/scaleio/scaleio_test.go +++ b/modules/scaleio/scaleio_test.go @@ -34,10 +34,10 @@ func TestScaleIO_Init(t *testing.T) { scaleIO.Username = "username" scaleIO.Password = "password" - assert.True(t, scaleIO.Init()) + assert.NoError(t, scaleIO.Init()) } func TestScaleIO_Init_UsernameAndPasswordNotSet(t *testing.T) { - assert.False(t, New().Init()) + assert.Error(t, New().Init()) } func TestScaleIO_Init_ErrorOnCreatingClientWrongTLSCA(t *testing.T) { @@ -46,24 +46,24 @@ func TestScaleIO_Init_ErrorOnCreatingClientWrongTLSCA(t *testing.T) { job.Password = "password" job.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestScaleIO_Check(t *testing.T) { srv, _, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) + require.NoError(t, scaleIO.Init()) - assert.True(t, scaleIO.Check()) + assert.NoError(t, scaleIO.Check()) } func TestScaleIO_Check_ErrorOnLogin(t *testing.T) { srv, mock, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) + require.NoError(t, scaleIO.Init()) mock.Password = "new password" - assert.False(t, scaleIO.Check()) + assert.Error(t, scaleIO.Check()) } func TestScaleIO_Charts(t *testing.T) { @@ -73,8 +73,8 @@ func TestScaleIO_Charts(t *testing.T) { func TestScaleIO_Cleanup(t *testing.T) { srv, _, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) - require.True(t, scaleIO.Check()) + require.NoError(t, scaleIO.Init()) + require.NoError(t, scaleIO.Check()) scaleIO.Cleanup() assert.False(t, scaleIO.client.LoggedIn()) @@ -83,8 +83,8 @@ func TestScaleIO_Cleanup(t *testing.T) { func TestScaleIO_Collect(t *testing.T) { srv, _, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) - require.True(t, scaleIO.Check()) + require.NoError(t, scaleIO.Init()) + require.NoError(t, scaleIO.Check()) expected := map[string]int64{ "sdc_6076fd0f00000000_bandwidth_read": 0, @@ -297,8 +297,8 @@ func TestScaleIO_Collect(t *testing.T) { func TestScaleIO_Collect_ConnectionRefused(t *testing.T) { srv, _, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) - require.True(t, scaleIO.Check()) + require.NoError(t, scaleIO.Init()) + require.NoError(t, scaleIO.Check()) scaleIO.client.Request.URL = "http://127.0.0.1:38001" assert.Nil(t, scaleIO.Collect()) diff --git a/modules/snmp/config_schema.json b/modules/snmp/config_schema.json index dd4e9c3ca..ff9e7ea02 100644 --- a/modules/snmp/config_schema.json +++ b/modules/snmp/config_schema.json @@ -1,188 +1,191 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "update_every": { - "type": "integer" - }, - "hostname": { - "type": "string" - }, - "community": { - "type": "string" - }, - "user": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "level": { - "type": "string", - "enum": [ - "none", - "authNoPriv", - "authPriv" - ] - }, - "auth_proto": { - "type": "string", - "enum": [ - "none", - "md5", - "sha", - "sha224", - "sha256", - "sha384", - "sha512" - ] - }, - "auth_key": { - "type": "string" - }, - "priv_proto": { - "type": "string", - "enum": [ - "none", - "des", - "aes", - "aes192", - "aes256", - "aes192c" - ] - }, - "priv_key": { - "type": "string" - } + "jsonSchema": { + "$schema": 
"http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "name": { + "type": "string" }, - "required": [ - "name", - "level", - "auth_proto", - "auth_key", - "priv_proto", - "priv_key" - ] - }, - "options": { - "type": "object", - "properties": { - "port": { - "type": "integer" - }, - "retries": { - "type": "integer" - }, - "timeout": { - "type": "integer" - }, - "version": { - "type": "string", - "enum": [ - "1", - "2", - "3" - ] - }, - "max_request_size": { - "type": "integer" - } + "update_every": { + "type": "integer" }, - "required": [ - "port", - "retries", - "timeout", - "version", - "max_request_size" - ] - }, - "charts": { - "type": "array", - "items": { + "hostname": { + "type": "string" + }, + "community": { + "type": "string" + }, + "user": { "type": "object", "properties": { - "id": { + "name": { "type": "string" }, - "title": { - "type": "string" + "level": { + "type": "string", + "enum": [ + "none", + "authNoPriv", + "authPriv" + ] }, - "units": { - "type": "string" + "auth_proto": { + "type": "string", + "enum": [ + "none", + "md5", + "sha", + "sha224", + "sha256", + "sha384", + "sha512" + ] }, - "family": { + "auth_key": { "type": "string" }, - "type": { + "priv_proto": { + "type": "string", + "enum": [ + "none", + "des", + "aes", + "aes192", + "aes256", + "aes192c" + ] + }, + "priv_key": { "type": "string" + } + }, + "required": [ + "name", + "level", + "auth_proto", + "auth_key", + "priv_proto", + "priv_key" + ] + }, + "options": { + "type": "object", + "properties": { + "port": { + "type": "integer" }, - "priority": { + "retries": { "type": "integer" }, - "multiply_range": { - "type": "array", - "items": { - "type": "integer" - } + "timeout": { + "type": "integer" }, - "dimensions": { - "type": "array", - "items": { - "type": "object", - "properties": { - "oid": { - "type": "string" - }, - "name": { - "type": "string" - }, - "algorithm": { - "type": "string", - "enum": [ - "absolute", - "incremental" - ] - }, - "multiplier": { - "type": "integer" - }, - "divisor": { - "type": "integer" - } - }, - "required": [ - "oid", - "name", - "algorithm", - "multiplier", - "divisor" - ] - } + "version": { + "type": "string", + "enum": [ + "1", + "2", + "3" + ] + }, + "max_request_size": { + "type": "integer" } }, "required": [ - "id", - "title", - "units", - "family", - "type", - "priority", - "multiply_range", - "dimensions" + "port", + "retries", + "timeout", + "version", + "max_request_size" ] + }, + "charts": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "title": { + "type": "string" + }, + "units": { + "type": "string" + }, + "family": { + "type": "string" + }, + "type": { + "type": "string" + }, + "priority": { + "type": "integer" + }, + "multiply_range": { + "type": "array", + "items": { + "type": "integer" + } + }, + "dimensions": { + "type": "array", + "items": { + "type": "object", + "properties": { + "oid": { + "type": "string" + }, + "name": { + "type": "string" + }, + "algorithm": { + "type": "string", + "enum": [ + "absolute", + "incremental" + ] + }, + "multiplier": { + "type": "integer" + }, + "divisor": { + "type": "integer" + } + }, + "required": [ + "oid", + "name", + "algorithm", + "multiplier", + "divisor" + ] + } + } + }, + "required": [ + "id", + "title", + "units", + "family", + "type", + "priority", + "multiply_range", + "dimensions" + ] + } } - } + }, + "required": [ + "name", + "update_every", + "hostname", + "community", + "user", + "options", + "charts" + ] }, - 
"required": [ - "name", - "update_every", - "hostname", - "community", - "user", - "options", - "charts" - ] + "uiSchema": {} } diff --git a/modules/snmp/init.go b/modules/snmp/init.go index 802430936..5802d6682 100644 --- a/modules/snmp/init.go +++ b/modules/snmp/init.go @@ -12,7 +12,7 @@ import ( var newSNMPClient = gosnmp.NewHandler -func (s SNMP) validateConfig() error { +func (s *SNMP) validateConfig() error { if len(s.ChartsInput) == 0 { return errors.New("'charts' are required but not set") } @@ -35,7 +35,7 @@ func (s SNMP) validateConfig() error { return nil } -func (s SNMP) initSNMPClient() (gosnmp.Handler, error) { +func (s *SNMP) initSNMPClient() (gosnmp.Handler, error) { client := newSNMPClient() if client.SetTarget(s.Hostname); client.Target() == "" { @@ -96,7 +96,7 @@ func (s SNMP) initSNMPClient() (gosnmp.Handler, error) { return client, nil } -func (s SNMP) initOIDs() (oids []string) { +func (s *SNMP) initOIDs() (oids []string) { for _, c := range *s.charts { for _, d := range c.Dims { oids = append(oids, d.ID) diff --git a/modules/snmp/snmp.go b/modules/snmp/snmp.go index 7aa933f64..102b73b64 100644 --- a/modules/snmp/snmp.go +++ b/modules/snmp/snmp.go @@ -4,6 +4,7 @@ package snmp import ( _ "embed" + "errors" "fmt" "strings" @@ -104,17 +105,21 @@ type SNMP struct { oids []string } -func (s *SNMP) Init() bool { +func (s *SNMP) Configuration() any { + return s.Config +} + +func (s *SNMP) Init() error { err := s.validateConfig() if err != nil { s.Errorf("config validation: %v", err) - return false + return err } snmpClient, err := s.initSNMPClient() if err != nil { s.Errorf("SNMP client initialization: %v", err) - return false + return err } s.Info(snmpClientConnInfo(snmpClient)) @@ -122,24 +127,32 @@ func (s *SNMP) Init() bool { err = snmpClient.Connect() if err != nil { s.Errorf("SNMP client connect: %v", err) - return false + return err } s.snmpClient = snmpClient charts, err := newCharts(s.ChartsInput) if err != nil { s.Errorf("Population of charts failed: %v", err) - return false + return err } s.charts = charts s.oids = s.initOIDs() - return true + return nil } -func (s *SNMP) Check() bool { - return len(s.Collect()) > 0 +func (s *SNMP) Check() error { + mx, err := s.collect() + if err != nil { + s.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (s *SNMP) Charts() *module.Charts { diff --git a/modules/snmp/snmp_test.go b/modules/snmp/snmp_test.go index 9f1ef0e90..f888a7e5f 100644 --- a/modules/snmp/snmp_test.go +++ b/modules/snmp/snmp_test.go @@ -107,9 +107,9 @@ func TestSNMP_Init(t *testing.T) { snmp := test.prepareSNMP() if test.wantFail { - assert.False(t, snmp.Init()) + assert.Error(t, snmp.Init()) } else { - assert.True(t, snmp.Init()) + assert.NoError(t, snmp.Init()) } }) } @@ -209,12 +209,12 @@ func TestSNMP_Check(t *testing.T) { defaultMockExpects(mockSNMP) snmp := test.prepareSNMP(mockSNMP) - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) if test.wantFail { - assert.False(t, snmp.Check()) + assert.Error(t, snmp.Check()) } else { - assert.True(t, snmp.Check()) + assert.NoError(t, snmp.Check()) } }) } @@ -311,7 +311,7 @@ func TestSNMP_Collect(t *testing.T) { defaultMockExpects(mockSNMP) snmp := test.prepareSNMP(mockSNMP) - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) collected := snmp.Collect() @@ -328,7 +328,7 @@ func TestSNMP_Cleanup(t *testing.T) { prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { snmp := New() snmp.Config = prepareV2Config() - 
require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) m.EXPECT().Close().Times(1) @@ -339,7 +339,7 @@ func TestSNMP_Cleanup(t *testing.T) { prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { snmp := New() snmp.Config = prepareV2Config() - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) snmp.snmpClient = nil return snmp @@ -371,7 +371,7 @@ func TestSNMP_Charts(t *testing.T) { prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { snmp := New() snmp.Config = prepareV2Config() - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) return snmp }, @@ -381,7 +381,7 @@ func TestSNMP_Charts(t *testing.T) { prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { snmp := New() snmp.Config = prepareConfigWithIndexRange(prepareV2Config, 0, 9) - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) return snmp }, diff --git a/modules/solr/collect.go b/modules/solr/collect.go new file mode 100644 index 000000000..70f90fdc0 --- /dev/null +++ b/modules/solr/collect.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package solr + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + minSupportedVersion = 6.4 + coresHandlersURLPath = "/solr/admin/metrics" + coresHandlersURLQuery = "group=core&prefix=UPDATE,QUERY&wt=json" + infoSystemURLPath = "/solr/admin/info/system" + infoSystemURLQuery = "wt=json" +) + +type infoSystem struct { + Lucene struct { + Version string `json:"solr-spec-version"` + } +} + +func (s *Solr) collect() (map[string]int64, error) { + req, err := createRequest(s.Request, coresHandlersURLPath, coresHandlersURLQuery) + if err != nil { + s.Errorf("error on creating http request : %v", err) + return nil, err + } + + resp, err := s.doRequest(req) + if err != nil { + s.Errorf("error on request to %s : %s", req.URL, err) + return nil, err + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + s.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) + return nil, err + } + + mx, err := s.parse(resp) + if err != nil { + s.Errorf("error on parse response from %s : %s", req.URL, err) + return nil, err + } + + return mx, nil +} + +func (s *Solr) doRequest(req *http.Request) (*http.Response, error) { + return s.client.Do(req) +} + +func (s *Solr) getVersion() error { + req, err := createRequest(s.Request, infoSystemURLPath, infoSystemURLQuery) + if err != nil { + return fmt.Errorf("error on creating http request : %v", err) + } + + resp, err := s.doRequest(req) + if err != nil { + return fmt.Errorf("error on request to %s : %s", req.URL, err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) + } + + var info infoSystem + + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + return fmt.Errorf("error on decode response from %s : %s", req.URL, err) + } + + var idx int + + if idx = strings.LastIndex(info.Lucene.Version, "."); idx == -1 { + return fmt.Errorf("error on parsing version '%s': bad format", info.Lucene.Version) + } + + if s.version, err = strconv.ParseFloat(info.Lucene.Version[:idx], 64); err != nil { + return fmt.Errorf("error on parsing version '%s' : %s", info.Lucene.Version, err) + } + + return nil +} + +func createRequest(req web.Request, urlPath, urlQuery string) (*http.Request, error) { + r := req.Copy() + u, err := url.Parse(r.URL) + if err != nil { + return 
nil, err + } + + u.Path = urlPath + u.RawQuery = urlQuery + r.URL = u.String() + return web.NewHTTPRequest(r) +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/modules/solr/config_schema.json b/modules/solr/config_schema.json index 66dde58bf..de7320cbb 100644 --- a/modules/solr/config_schema.json +++ b/modules/solr/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/solr job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/solr job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/solr/solr.go b/modules/solr/solr.go index 57f2d7083..d2526f89a 100644 --- a/modules/solr/solr.go +++ b/modules/solr/solr.go @@ -4,13 +4,9 @@ package solr import ( _ "embed" - "encoding/json" + "errors" "fmt" - "io" "net/http" - "net/url" - "strconv" - "strings" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -28,40 +24,21 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1:8983" - defaultHTTPTimeout = time.Second -) - -const ( - minSupportedVersion = 6.4 - coresHandlersURLPath = "/solr/admin/metrics" - coresHandlersURLQuery = "group=core&prefix=UPDATE,QUERY&wt=json" - infoSystemURLPath = "/solr/admin/info/system" - infoSystemURLQuery = "wt=json" -) - -type infoSystem struct { - Lucene struct { - Version string `json:"solr-spec-version"` - } -} - // New creates Solr with default values func New() *Solr { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &Solr{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8983", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - return &Solr{ - Config: config, cores: make(map[string]bool), + charts: &Charts{}, } } @@ -81,132 +58,61 @@ type Solr struct { charts *Charts } -func (s *Solr) doRequest(req *http.Request) 
(*http.Response, error) { - return s.client.Do(req) +func (s *Solr) Configuration() any { + return s.Config } -// Cleanup makes cleanup -func (Solr) Cleanup() {} - // Init makes initialization -func (s *Solr) Init() bool { +func (s *Solr) Init() error { if s.URL == "" { - s.Error("URL not set") - return false + s.Error("url not set") + return errors.New("url not set") } client, err := web.NewHTTPClient(s.Client) if err != nil { s.Error(err) - return false + return err } - s.client = client - return true + + return nil } // Check makes check -func (s *Solr) Check() bool { +func (s *Solr) Check() error { if err := s.getVersion(); err != nil { s.Error(err) - return false + return err } if s.version < minSupportedVersion { s.Errorf("unsupported Solr version : %.1f", s.version) - return false + return fmt.Errorf("unsupported Solr version : %.1f", s.version) } - return true + return nil } // Charts creates Charts func (s *Solr) Charts() *Charts { - s.charts = &Charts{} - return s.charts } // Collect collects metrics func (s *Solr) Collect() map[string]int64 { - req, err := createRequest(s.Request, coresHandlersURLPath, coresHandlersURLQuery) - if err != nil { - s.Errorf("error on creating http request : %v", err) - return nil - } - - resp, err := s.doRequest(req) + mx, err := s.collect() if err != nil { - s.Errorf("error on request to %s : %s", req.URL, err) - return nil - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - s.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - return nil - } - - metrics, err := s.parse(resp) - if err != nil { - s.Errorf("error on parse response from %s : %s", req.URL, err) + s.Error(err) return nil } - return metrics + return mx } -func (s *Solr) getVersion() error { - req, err := createRequest(s.Request, infoSystemURLPath, infoSystemURLQuery) - if err != nil { - return fmt.Errorf("error on creating http request : %v", err) - } - - resp, err := s.doRequest(req) - if err != nil { - return fmt.Errorf("error on request to %s : %s", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - } - - var info infoSystem - - if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { - return fmt.Errorf("error on decode response from %s : %s", req.URL, err) - } - - var idx int - - if idx = strings.LastIndex(info.Lucene.Version, "."); idx == -1 { - return fmt.Errorf("error on parsing version '%s': bad format", info.Lucene.Version) - } - - if s.version, err = strconv.ParseFloat(info.Lucene.Version[:idx], 64); err != nil { - return fmt.Errorf("error on parsing version '%s' : %s", info.Lucene.Version, err) - } - - return nil -} - -func createRequest(req web.Request, urlPath, urlQuery string) (*http.Request, error) { - r := req.Copy() - u, err := url.Parse(r.URL) - if err != nil { - return nil, err - } - - u.Path = urlPath - u.RawQuery = urlQuery - r.URL = u.String() - return web.NewHTTPRequest(r) -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() +// Cleanup makes cleanup +func (s *Solr) Cleanup() { + if s.client != nil { + s.client.CloseIdleConnections() } } diff --git a/modules/solr/solr_test.go b/modules/solr/solr_test.go index f545adeb0..1e811fa1c 100644 --- a/modules/solr/solr_test.go +++ b/modules/solr/solr_test.go @@ -9,7 +9,6 @@ import ( "os" "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" ) @@ -23,18 +22,10 @@ func version(v string) string { return format(`{ "lucene":{ "solr-spec-version":"%s"}}`, v) } -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Client.Timeout.Duration) -} - func TestSolr_Init(t *testing.T) { job := New() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.client) } @@ -51,8 +42,8 @@ func TestSolr_Check(t *testing.T) { })) job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestSolr_Check_UnsupportedVersion(t *testing.T) { @@ -69,9 +60,9 @@ func TestSolr_Check_UnsupportedVersion(t *testing.T) { job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.False(t, job.Check()) + assert.Error(t, job.Check()) } func TestSolr_Charts(t *testing.T) { @@ -100,8 +91,8 @@ func TestSolr_CollectV6(t *testing.T) { job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) require.NotNil(t, job.Charts()) expected := map[string]int64{ @@ -189,8 +180,8 @@ func TestSolr_CollectV7(t *testing.T) { job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) require.NotNil(t, job.Charts()) expected := map[string]int64{ @@ -269,6 +260,6 @@ func TestSolr_Collect_404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/springboot2/collect.go b/modules/springboot2/collect.go new file mode 100644 index 000000000..56a919e61 --- /dev/null +++ b/modules/springboot2/collect.go @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package springboot2 + +import ( + "strings" + + mtx "github.com/netdata/go.d.plugin/pkg/metrics" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/stm" +) + +type metrics struct { + Uptime mtx.Gauge `stm:"uptime,1000"` + + ThreadsDaemon mtx.Gauge `stm:"threads_daemon"` + Threads mtx.Gauge `stm:"threads"` + + Resp1xx mtx.Counter `stm:"resp_1xx"` + Resp2xx mtx.Counter `stm:"resp_2xx"` + Resp3xx mtx.Counter `stm:"resp_3xx"` + Resp4xx mtx.Counter `stm:"resp_4xx"` + Resp5xx mtx.Counter `stm:"resp_5xx"` + + HeapUsed heap `stm:"heap_used"` + HeapCommitted heap `stm:"heap_committed"` + + MemFree mtx.Gauge `stm:"mem_free"` +} + +type heap struct { + Eden mtx.Gauge `stm:"eden"` + Survivor mtx.Gauge `stm:"survivor"` + Old mtx.Gauge `stm:"old"` +} + +func (s *SpringBoot2) collect() (map[string]int64, error) { + rawMetrics, err := s.prom.ScrapeSeries() + if err != nil { + return nil, err + } + + var m metrics + + // uptime + m.Uptime.Set(rawMetrics.FindByName("process_uptime_seconds").Max()) + + // response + s.gatherResponse(rawMetrics, &m) + + // threads + m.ThreadsDaemon.Set(rawMetrics.FindByNames("jvm_threads_daemon", "jvm_threads_daemon_threads").Max()) + m.Threads.Set(rawMetrics.FindByNames("jvm_threads_live", "jvm_threads_live_threads").Max()) + + // heap memory + gatherHeap(rawMetrics.FindByName("jvm_memory_used_bytes"), &m.HeapUsed) + gatherHeap(rawMetrics.FindByName("jvm_memory_committed_bytes"), &m.HeapCommitted) + m.MemFree.Set(m.HeapCommitted.Sum() - m.HeapUsed.Sum()) + + return 
stm.ToMap(m), nil +} + +func gatherHeap(rawMetrics prometheus.Series, m *heap) { + for _, metric := range rawMetrics { + id := metric.Labels.Get("id") + value := metric.Value + switch { + case strings.Contains(id, "Eden"): + m.Eden.Set(value) + case strings.Contains(id, "Survivor"): + m.Survivor.Set(value) + case strings.Contains(id, "Old") || strings.Contains(id, "Tenured"): + m.Old.Set(value) + } + } +} + +func (s *SpringBoot2) gatherResponse(rawMetrics prometheus.Series, m *metrics) { + for _, metric := range rawMetrics.FindByName("http_server_requests_seconds_count") { + if s.uriFilter != nil { + uri := metric.Labels.Get("uri") + if !s.uriFilter.MatchString(uri) { + continue + } + } + + status := metric.Labels.Get("status") + if status == "" { + continue + } + value := metric.Value + switch status[0] { + case '1': + m.Resp1xx.Add(value) + case '2': + m.Resp2xx.Add(value) + case '3': + m.Resp3xx.Add(value) + case '4': + m.Resp4xx.Add(value) + case '5': + m.Resp5xx.Add(value) + } + } +} + +func (h heap) Sum() float64 { + return h.Eden.Value() + h.Survivor.Value() + h.Old.Value() +} diff --git a/modules/springboot2/config_schema.json b/modules/springboot2/config_schema.json index 008a8bb2d..ea8e22c7d 100644 --- a/modules/springboot2/config_schema.json +++ b/modules/springboot2/config_schema.json @@ -1,76 +1,79 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/springboot2 job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "uri_filter": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" - } - }, - "excludes": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/springboot2 job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "uri_filter": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } } } - } - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/springboot2/init.go b/modules/springboot2/init.go new file mode 100644 
index 000000000..79945d344 --- /dev/null +++ b/modules/springboot2/init.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package springboot2 + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +func (s *SpringBoot2) validateConfig() error { + if s.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (s *SpringBoot2) initUriFilter() (matcher.Matcher, error) { + if s.URIFilter.Empty() { + return matcher.TRUE(), nil + } + return s.URIFilter.Parse() +} + +func (s *SpringBoot2) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(s.Client) + if err != nil { + return nil, err + } + return prometheus.New(client, s.Request), nil + +} diff --git a/modules/springboot2/springboot2.go b/modules/springboot2/springboot2.go index cff9d9c07..3f8b066b4 100644 --- a/modules/springboot2/springboot2.go +++ b/modules/springboot2/springboot2.go @@ -4,17 +4,13 @@ package springboot2 import ( _ "embed" - "strings" + "errors" "time" + "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/pkg/matcher" - - mtx "github.com/netdata/go.d.plugin/pkg/metrics" "github.com/netdata/go.d.plugin/pkg/prometheus" - "github.com/netdata/go.d.plugin/pkg/stm" "github.com/netdata/go.d.plugin/pkg/web" - - "github.com/netdata/go.d.plugin/agent/module" ) //go:embed "config_schema.json" @@ -27,164 +23,97 @@ func init() { }) } -const ( - defaultHTTPTimeout = time.Second -) - // New returns SpringBoot2 instance with default values func New() *SpringBoot2 { return &SpringBoot2{ - HTTP: web.HTTP{ - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + Config: Config{ + HTTP: web.HTTP{ + Client: web.Client{ + Timeout: web.Duration(time.Second * 1), + }, }, }, + charts: charts.Copy(), } } +type Config struct { + web.HTTP `yaml:",inline" json:",inline"` + URIFilter matcher.SimpleExpr `yaml:"uri_filter"` +} + // SpringBoot2 Spring boot 2 module type SpringBoot2 struct { module.Base + Config `yaml:",inline"` - web.HTTP `yaml:",inline"` - URIFilter matcher.SimpleExpr `yaml:"uri_filter"` + charts *module.Charts uriFilter matcher.Matcher prom prometheus.Prometheus } -type metrics struct { - Uptime mtx.Gauge `stm:"uptime,1000"` - - ThreadsDaemon mtx.Gauge `stm:"threads_daemon"` - Threads mtx.Gauge `stm:"threads"` - - Resp1xx mtx.Counter `stm:"resp_1xx"` - Resp2xx mtx.Counter `stm:"resp_2xx"` - Resp3xx mtx.Counter `stm:"resp_3xx"` - Resp4xx mtx.Counter `stm:"resp_4xx"` - Resp5xx mtx.Counter `stm:"resp_5xx"` - - HeapUsed heap `stm:"heap_used"` - HeapCommitted heap `stm:"heap_committed"` - - MemFree mtx.Gauge `stm:"mem_free"` -} - -type heap struct { - Eden mtx.Gauge `stm:"eden"` - Survivor mtx.Gauge `stm:"survivor"` - Old mtx.Gauge `stm:"old"` +func (s *SpringBoot2) Configuration() any { + return s.Config } -// Cleanup Cleanup -func (SpringBoot2) Cleanup() {} - // Init makes initialization -func (s *SpringBoot2) Init() bool { - client, err := web.NewHTTPClient(s.Client) +func (s *SpringBoot2) Init() error { + if err := s.validateConfig(); err != nil { + s.Error(err) + return err + } + + uf, err := s.initUriFilter() if err != nil { s.Error(err) - return false + return err } - s.uriFilter, err = s.URIFilter.Parse() - if err != nil && err != matcher.ErrEmptyExpr { + s.uriFilter = uf + + prom, err := s.initPrometheusClient() + if err != nil { s.Error(err) - return false + return err } - s.prom = 
prometheus.New(client, s.Request) - return true + s.prom = prom + + return nil } // Check makes check -func (s *SpringBoot2) Check() bool { - rawMetrics, err := s.prom.ScrapeSeries() +func (s *SpringBoot2) Check() error { + mx, err := s.collect() if err != nil { - s.Warning(err) - return false + s.Error(err) + return err } - jvmMemory := rawMetrics.FindByName("jvm_memory_used_bytes") - - return len(jvmMemory) > 0 + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts creates Charts -func (SpringBoot2) Charts() *Charts { - return charts.Copy() +func (s *SpringBoot2) Charts() *Charts { + return s.charts } // Collect collects metrics func (s *SpringBoot2) Collect() map[string]int64 { - rawMetrics, err := s.prom.ScrapeSeries() + mx, err := s.collect() if err != nil { + s.Error(err) return nil } - var m metrics - - // uptime - m.Uptime.Set(rawMetrics.FindByName("process_uptime_seconds").Max()) - - // response - s.gatherResponse(rawMetrics, &m) - - // threads - m.ThreadsDaemon.Set(rawMetrics.FindByNames("jvm_threads_daemon", "jvm_threads_daemon_threads").Max()) - m.Threads.Set(rawMetrics.FindByNames("jvm_threads_live", "jvm_threads_live_threads").Max()) - - // heap memory - gatherHeap(rawMetrics.FindByName("jvm_memory_used_bytes"), &m.HeapUsed) - gatherHeap(rawMetrics.FindByName("jvm_memory_committed_bytes"), &m.HeapCommitted) - m.MemFree.Set(m.HeapCommitted.Sum() - m.HeapUsed.Sum()) - - return stm.ToMap(m) + return mx } -func gatherHeap(rawMetrics prometheus.Series, m *heap) { - for _, metric := range rawMetrics { - id := metric.Labels.Get("id") - value := metric.Value - switch { - case strings.Contains(id, "Eden"): - m.Eden.Set(value) - case strings.Contains(id, "Survivor"): - m.Survivor.Set(value) - case strings.Contains(id, "Old") || strings.Contains(id, "Tenured"): - m.Old.Set(value) - } - } -} - -func (s *SpringBoot2) gatherResponse(rawMetrics prometheus.Series, m *metrics) { - for _, metric := range rawMetrics.FindByName("http_server_requests_seconds_count") { - if s.uriFilter != nil { - uri := metric.Labels.Get("uri") - if !s.uriFilter.MatchString(uri) { - continue - } - } - - status := metric.Labels.Get("status") - if status == "" { - continue - } - value := metric.Value - switch status[0] { - case '1': - m.Resp1xx.Add(value) - case '2': - m.Resp2xx.Add(value) - case '3': - m.Resp3xx.Add(value) - case '4': - m.Resp4xx.Add(value) - case '5': - m.Resp5xx.Add(value) - } +// Cleanup Cleanup +func (s *SpringBoot2) Cleanup() { + if s.prom != nil && s.prom.HTTPClient() != nil { + s.prom.HTTPClient().CloseIdleConnections() } } - -func (h heap) Sum() float64 { - return h.Eden.Value() + h.Survivor.Value() + h.Old.Value() -} diff --git a/modules/springboot2/springboot2_test.go b/modules/springboot2/springboot2_test.go index 7198498d5..457d44c74 100644 --- a/modules/springboot2/springboot2_test.go +++ b/modules/springboot2/springboot2_test.go @@ -28,8 +28,8 @@ func TestSpringboot2_Collect(t *testing.T) { defer ts.Close() job1 := New() job1.HTTP.Request.URL = ts.URL + "/actuator/prometheus" - assert.True(t, job1.Init()) - assert.True(t, job1.Check()) + assert.NoError(t, job1.Init()) + assert.NoError(t, job1.Check()) assert.EqualValues( t, map[string]int64{ @@ -54,8 +54,8 @@ func TestSpringboot2_Collect(t *testing.T) { job2 := New() job2.HTTP.Request.URL = ts.URL + "/actuator/prometheus2" - assert.True(t, job2.Init()) - assert.True(t, job2.Check()) + assert.NoError(t, job2.Init()) + assert.NoError(t, job2.Check()) assert.EqualValues( t, map[string]int64{ @@ -87,9 
+87,9 @@ func TestSpringboot2_404(t *testing.T) { job := New() job.HTTP.Request.URL = ts.URL + "/actuator/prometheus" - job.Init() + _ = job.Init() - assert.False(t, job.Check()) + assert.Error(t, job.Check()) job.Cleanup() } diff --git a/modules/squidlog/collect.go b/modules/squidlog/collect.go index 20d3f86e8..bafa6d4cc 100644 --- a/modules/squidlog/collect.go +++ b/modules/squidlog/collect.go @@ -14,7 +14,7 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -func (s SquidLog) logPanicStackIfAny() { +func (s *SquidLog) logPanicStackIfAny() { err := recover() if err == nil { return diff --git a/modules/squidlog/config_schema.json b/modules/squidlog/config_schema.json index dcf439c70..72b01958e 100644 --- a/modules/squidlog/config_schema.json +++ b/modules/squidlog/config_schema.json @@ -1,101 +1,104 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/squid_log job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "parser": { - "type": "object", - "properties": { - "log_type": { - "type": "string" - }, - "csv_config": { - "type": "object", - "properties": { - "fields_per_record": { - "type": "integer" - }, - "delimiter": { - "type": "string" - }, - "trim_leading_space": { - "type": "boolean" - }, - "format": { - "type": "string" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/squid_log job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "parser": { + "type": "object", + "properties": { + "log_type": { + "type": "string" }, - "required": [ - "fields_per_record", - "delimiter", - "trim_leading_space", - "format" - ] - }, - "ltsv_config": { - "type": "object", - "properties": { - "field_delimiter": { - "type": "string" - }, - "value_delimiter": { - "type": "string" - }, - "mapping": { - "type": "object", - "additionalProperties": { + "csv_config": { + "type": "object", + "properties": { + "fields_per_record": { + "type": "integer" + }, + "delimiter": { + "type": "string" + }, + "trim_leading_space": { + "type": "boolean" + }, + "format": { "type": "string" } - } + }, + "required": [ + "fields_per_record", + "delimiter", + "trim_leading_space", + "format" + ] }, - "required": [ - "field_delimiter", - "value_delimiter", - "mapping" - ] - }, - "regexp_config": { - "type": "object", - "properties": { - "pattern": { - "type": "string" - } + "ltsv_config": { + "type": "object", + "properties": { + "field_delimiter": { + "type": "string" + }, + "value_delimiter": { + "type": "string" + }, + "mapping": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "field_delimiter", + "value_delimiter", + "mapping" + ] }, - "required": [ - "pattern" - ] - }, - "json_config": { - "type": "object", - "properties": { - "mapping": { - "type": "object", - "additionalProperties": { + "regexp_config": { + "type": "object", + "properties": { + "pattern": { "type": "string" } - } + }, + "required": [ + "pattern" + ] }, - "required": [ - "mapping" - ] - } + "json_config": { + "type": "object", + "properties": { + "mapping": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "mapping" + ] + } + }, + "required": [ + "log_type" + ] }, - "required": [ - "log_type" - ] - }, - "path": { - "type": "string" + "path": { + "type": "string" + }, + "exclude_path": { + "type": "string" + } }, - "exclude_path": { - "type": "string" - } + "required": [ + 
"name", + "path" + ] }, - "required": [ - "name", - "path" - ] + "uiSchema": {} } diff --git a/modules/squidlog/squidlog.go b/modules/squidlog/squidlog.go index 704bc9627..3d07c3f0c 100644 --- a/modules/squidlog/squidlog.go +++ b/modules/squidlog/squidlog.go @@ -20,68 +20,70 @@ func init() { } func New() *SquidLog { - cfg := logs.ParserConfig{ - LogType: logs.TypeCSV, - CSV: logs.CSVConfig{ - FieldsPerRecord: -1, - Delimiter: " ", - TrimLeadingSpace: true, - Format: "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type", - CheckField: checkCSVFormatField, - }, - } return &SquidLog{ Config: Config{ Path: "/var/log/squid/access.log", ExcludePath: "*.gz", - Parser: cfg, + Parser: logs.ParserConfig{ + LogType: logs.TypeCSV, + CSV: logs.CSVConfig{ + FieldsPerRecord: -1, + Delimiter: " ", + TrimLeadingSpace: true, + Format: "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type", + CheckField: checkCSVFormatField, + }, + }, }, } } -type ( - Config struct { - Parser logs.ParserConfig `yaml:",inline"` - Path string `yaml:"path"` - ExcludePath string `yaml:"exclude_path"` - } +type Config struct { + Parser logs.ParserConfig `yaml:",inline"` + Path string `yaml:"path"` + ExcludePath string `yaml:"exclude_path"` +} - SquidLog struct { - module.Base - Config `yaml:",inline"` +type SquidLog struct { + module.Base + Config `yaml:",inline"` - file *logs.Reader - parser logs.Parser - line *logLine + file *logs.Reader + parser logs.Parser + line *logLine - mx *metricsData - charts *module.Charts - } -) + mx *metricsData + charts *module.Charts +} -func (s *SquidLog) Init() bool { +func (s *SquidLog) Configuration() any { + return s.Config +} + +func (s *SquidLog) Init() error { s.line = newEmptyLogLine() s.mx = newMetricsData() - return true + return nil } -func (s *SquidLog) Check() bool { +func (s *SquidLog) Check() error { // Note: these inits are here to make auto-detection retry working if err := s.createLogReader(); err != nil { s.Warning("check failed: ", err) - return false + return err } if err := s.createParser(); err != nil { s.Warning("check failed: ", err) - return false + return err } if err := s.createCharts(s.line); err != nil { s.Warning("check failed: ", err) - return false + return err } - return true + + return nil } func (s *SquidLog) Charts() *module.Charts { diff --git a/modules/squidlog/squidlog_test.go b/modules/squidlog/squidlog_test.go index c6d818bf9..cbf6114ee 100644 --- a/modules/squidlog/squidlog_test.go +++ b/modules/squidlog/squidlog_test.go @@ -30,7 +30,7 @@ func TestNew(t *testing.T) { func TestSquidLog_Init(t *testing.T) { squidlog := New() - assert.True(t, squidlog.Init()) + assert.NoError(t, squidlog.Init()) } func TestSquidLog_Check(t *testing.T) { @@ -40,18 +40,18 @@ func TestSquidLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) { squid := New() defer squid.Cleanup() squid.Path = "testdata/not_exists.log" - require.True(t, squid.Init()) + require.NoError(t, squid.Init()) - assert.False(t, squid.Check()) + assert.Error(t, squid.Check()) } func TestSquid_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) { squid := New() defer squid.Cleanup() squid.Path = "testdata/unknown.log" - require.True(t, squid.Init()) + require.NoError(t, squid.Init()) - assert.False(t, squid.Check()) + assert.Error(t, squid.Check()) } func TestSquid_Check_ErrorOnCreatingParserZeroKnownFields(t *testing.T) { @@ -59,9 +59,9 @@ func TestSquid_Check_ErrorOnCreatingParserZeroKnownFields(t *testing.T) { defer 
squid.Cleanup() squid.Path = "testdata/access.log" squid.Parser.CSV.Format = "$one $two" - require.True(t, squid.Init()) + require.NoError(t, squid.Init()) - assert.False(t, squid.Check()) + assert.Error(t, squid.Check()) } func TestSquidLog_Charts(t *testing.T) { @@ -280,8 +280,8 @@ func prepareSquidCollect(t *testing.T) *SquidLog { t.Helper() squid := New() squid.Path = "testdata/access.log" - require.True(t, squid.Init()) - require.True(t, squid.Check()) + require.NoError(t, squid.Init()) + require.NoError(t, squid.Check()) defer squid.Cleanup() p, err := logs.NewCSVParser(squid.Parser.CSV, bytes.NewReader(nativeFormatAccessLog)) diff --git a/modules/supervisord/config_schema.json b/modules/supervisord/config_schema.json index d3617c94a..d7c48f7bc 100644 --- a/modules/supervisord/config_schema.json +++ b/modules/supervisord/config_schema.json @@ -1,21 +1,24 @@ { - "$id": "https://example.com/person.schema.json", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "title": "Supervisord collector job configuration", - "type": "object", - "properties": { - "firstName": { - "type": "string", - "description": "The person's first name." - }, - "lastName": { - "type": "string", - "description": "The person's last name." - }, - "age": { - "description": "Age in years which must be equal to or greater than zero.", - "type": "integer", - "minimum": 0 + "jsonSchema": { + "$id": "https://example.com/person.schema.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Supervisord collector job configuration", + "type": "object", + "properties": { + "firstName": { + "type": "string", + "description": "The person's first name." + }, + "lastName": { + "type": "string", + "description": "The person's last name." + }, + "age": { + "description": "Age in years which must be equal to or greater than zero.", + "type": "integer", + "minimum": 0 + } } - } + }, + "uiSchema": {} } diff --git a/modules/supervisord/init.go b/modules/supervisord/init.go index 0c5285c3b..1c401bcd6 100644 --- a/modules/supervisord/init.go +++ b/modules/supervisord/init.go @@ -10,14 +10,14 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (s Supervisord) verifyConfig() error { +func (s *Supervisord) verifyConfig() error { if s.URL == "" { return errors.New("'url' not set") } return nil } -func (s Supervisord) initSupervisorClient() (supervisorClient, error) { +func (s *Supervisord) initSupervisorClient() (supervisorClient, error) { u, err := url.Parse(s.URL) if err != nil { return nil, fmt.Errorf("parse 'url': %v (%s)", err, s.URL) diff --git a/modules/supervisord/supervisord.go b/modules/supervisord/supervisord.go index 1c9994710..31ab8d943 100644 --- a/modules/supervisord/supervisord.go +++ b/modules/supervisord/supervisord.go @@ -4,6 +4,7 @@ package supervisord import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -25,7 +26,7 @@ func New() *Supervisord { Config: Config{ URL: "http://127.0.0.1:9001/RPC2", Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, @@ -55,25 +56,37 @@ type ( } ) -func (s *Supervisord) Init() bool { +func (s *Supervisord) Configuration() any { + return s.Config +} + +func (s *Supervisord) Init() error { err := s.verifyConfig() if err != nil { s.Errorf("verify config: %v", err) - return false + return err } client, err := s.initSupervisorClient() if err != nil { s.Errorf("init supervisord client: %v", err) - return false + return err } s.client = client - return true + 
return nil } -func (s *Supervisord) Check() bool { - return len(s.Collect()) > 0 +func (s *Supervisord) Check() error { + mx, err := s.collect() + if err != nil { + s.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (s *Supervisord) Charts() *module.Charts { diff --git a/modules/supervisord/supervisord_test.go b/modules/supervisord/supervisord_test.go index 23ef1ff0c..725c0fe29 100644 --- a/modules/supervisord/supervisord_test.go +++ b/modules/supervisord/supervisord_test.go @@ -38,9 +38,9 @@ func TestSupervisord_Init(t *testing.T) { supvr.Config = test.config if test.wantFail { - assert.False(t, supvr.Init()) + assert.Error(t, supvr.Init()) } else { - assert.True(t, supvr.Init()) + assert.NoError(t, supvr.Init()) } }) } @@ -69,9 +69,9 @@ func TestSupervisord_Check(t *testing.T) { defer supvr.Cleanup() if test.wantFail { - assert.False(t, supvr.Check()) + assert.Error(t, supvr.Check()) } else { - assert.True(t, supvr.Check()) + assert.NoError(t, supvr.Check()) } }) } @@ -79,7 +79,7 @@ func TestSupervisord_Check(t *testing.T) { func TestSupervisord_Charts(t *testing.T) { supvr := New() - require.True(t, supvr.Init()) + require.NoError(t, supvr.Init()) assert.NotNil(t, supvr.Charts()) } @@ -88,7 +88,7 @@ func TestSupervisord_Cleanup(t *testing.T) { supvr := New() assert.NotPanics(t, supvr.Cleanup) - require.True(t, supvr.Init()) + require.NoError(t, supvr.Init()) m := &mockSupervisorClient{} supvr.client = m @@ -188,21 +188,21 @@ func ensureCollectedProcessesAddedToCharts(t *testing.T, supvr *Supervisord) { func prepareSupervisordSuccessOnGetAllProcessInfo(t *testing.T) *Supervisord { supvr := New() - require.True(t, supvr.Init()) + require.NoError(t, supvr.Init()) supvr.client = &mockSupervisorClient{} return supvr } func prepareSupervisordZeroProcessesOnGetAllProcessInfo(t *testing.T) *Supervisord { supvr := New() - require.True(t, supvr.Init()) + require.NoError(t, supvr.Init()) supvr.client = &mockSupervisorClient{returnZeroProcesses: true} return supvr } func prepareSupervisordErrorOnGetAllProcessInfo(t *testing.T) *Supervisord { supvr := New() - require.True(t, supvr.Init()) + require.NoError(t, supvr.Init()) supvr.client = &mockSupervisorClient{errOnGetAllProcessInfo: true} return supvr } diff --git a/modules/systemdunits/collect.go b/modules/systemdunits/collect.go index 2843a4230..eb596605f 100644 --- a/modules/systemdunits/collect.go +++ b/modules/systemdunits/collect.go @@ -148,7 +148,7 @@ func (s *SystemdUnits) getSystemdVersion(conn systemdConnection) (int, error) { } func (s *SystemdUnits) getLoadedUnits(conn systemdConnection) ([]dbus.UnitStatus, error) { - ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration()) defer cancel() s.Debugf("calling function 'ListUnits'") @@ -169,7 +169,7 @@ func (s *SystemdUnits) getLoadedUnits(conn systemdConnection) ([]dbus.UnitStatus } func (s *SystemdUnits) getLoadedUnitsByPatterns(conn systemdConnection) ([]dbus.UnitStatus, error) { - ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration()) defer cancel() s.Debugf("calling function 'ListUnitsByPatterns'") diff --git a/modules/systemdunits/config_schema.json b/modules/systemdunits/config_schema.json index 5a9df2571..b01f0af19 100644 --- a/modules/systemdunits/config_schema.json +++ b/modules/systemdunits/config_schema.json @@ 
-1,27 +1,30 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/systemdunits job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "include": { - "type": "array", - "items": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/systemdunits job configuration schema.", + "type": "object", + "properties": { + "name": { "type": "string" }, - "minItems": 1 + "include": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "include" + ] }, - "required": [ - "name", - "include" - ] -} \ No newline at end of file + "uiSchema": {} +} diff --git a/modules/systemdunits/systemdunits.go b/modules/systemdunits/systemdunits.go index 3593b531e..503b06ff3 100644 --- a/modules/systemdunits/systemdunits.go +++ b/modules/systemdunits/systemdunits.go @@ -7,6 +7,7 @@ package systemdunits import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -33,7 +34,7 @@ func New() *SystemdUnits { Include: []string{ "*.service", }, - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, charts: &module.Charts{}, @@ -61,27 +62,40 @@ type SystemdUnits struct { charts *module.Charts } -func (s *SystemdUnits) Init() bool { +func (s *SystemdUnits) Configuration() any { + return s.Config +} + +func (s *SystemdUnits) Init() error { err := s.validateConfig() if err != nil { s.Errorf("config validation: %v", err) - return false + return err } sr, err := s.initSelector() if err != nil { s.Errorf("init selector: %v", err) - return false + return err } s.sr = sr s.Debugf("unit names patterns: %v", s.Include) s.Debugf("timeout: %s", s.Timeout) - return true + + return nil } -func (s *SystemdUnits) Check() bool { - return len(s.Collect()) > 0 +func (s *SystemdUnits) Check() error { + mx, err := s.collect() + if err != nil { + s.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (s *SystemdUnits) Charts() *module.Charts { @@ -89,15 +103,15 @@ func (s *SystemdUnits) Charts() *module.Charts { } func (s *SystemdUnits) Collect() map[string]int64 { - ms, err := s.collect() + mx, err := s.collect() if err != nil { s.Error(err) } - if len(ms) == 0 { + if len(mx) == 0 { return nil } - return ms + return mx } func (s *SystemdUnits) Cleanup() { diff --git a/modules/systemdunits/systemdunits_test.go b/modules/systemdunits/systemdunits_test.go index baa9ed46a..606708bd5 100644 --- a/modules/systemdunits/systemdunits_test.go +++ b/modules/systemdunits/systemdunits_test.go @@ -48,9 +48,9 @@ func TestSystemdUnits_Init(t *testing.T) { systemd.Config = test.config if test.wantFail { - assert.False(t, systemd.Init()) + assert.Error(t, systemd.Init()) } else { - assert.True(t, systemd.Init()) + assert.NoError(t, systemd.Init()) } }) } @@ -115,12 +115,12 @@ func TestSystemdUnits_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { systemd := test.prepare() - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) if test.wantFail { - assert.False(t, systemd.Check()) + assert.Error(t, systemd.Check()) } else { - assert.True(t, systemd.Check()) + assert.NoError(t, systemd.Check()) } }) } @@ -128,7 +128,7 @@ func TestSystemdUnits_Check(t *testing.T) { func TestSystemdUnits_Charts(t *testing.T) { 
systemd := New() - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) assert.NotNil(t, systemd.Charts()) } @@ -138,7 +138,7 @@ func TestSystemdUnits_Cleanup(t *testing.T) { client := prepareOKClient(230) systemd.client = client - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) require.NotNil(t, systemd.Collect()) conn := systemd.conn systemd.Cleanup() @@ -681,7 +681,7 @@ func TestSystemdUnits_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { systemd := test.prepare() - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) var collected map[string]int64 @@ -702,7 +702,7 @@ func TestSystemdUnits_connectionReuse(t *testing.T) { systemd.Include = []string{"*"} client := prepareOKClient(230) systemd.client = client - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) var collected map[string]int64 for i := 0; i < 10; i++ { diff --git a/modules/tengine/config_schema.json b/modules/tengine/config_schema.json index 30958bb1b..8f3a5c167 100644 --- a/modules/tengine/config_schema.json +++ b/modules/tengine/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/tengine job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/tengine job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/tengine/tengine.go b/modules/tengine/tengine.go index 169b390ab..6c80d085c 100644 --- a/modules/tengine/tengine.go +++ b/modules/tengine/tengine.go @@ -4,6 +4,7 @@ package tengine import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -21,24 +22,21 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1/us" - defaultHTTPTimeout = time.Second * 2 -) - // New creates Tengine with default values. 
func New() *Tengine { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &Tengine{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1/us", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, }, }, + charts: charts.Copy(), } - return &Tengine{Config: config} } // Config is the Tengine module configuration. @@ -51,40 +49,52 @@ type Tengine struct { module.Base Config `yaml:",inline"` + charts *module.Charts + apiClient *apiClient } -// Cleanup makes cleanup. -func (Tengine) Cleanup() {} +func (t *Tengine) Configuration() any { + return t.Config +} // Init makes initialization. -func (t *Tengine) Init() bool { +func (t *Tengine) Init() error { if t.URL == "" { - t.Error("URL not set") - return false + t.Error("url not set") + return errors.New("url not set") } client, err := web.NewHTTPClient(t.Client) if err != nil { t.Errorf("error on creating http client : %v", err) - return false + return err } t.apiClient = newAPIClient(client, t.Request) t.Debugf("using URL: %s", t.URL) - t.Debugf("using timeout: %s", t.Timeout.Duration) - return true + t.Debugf("using timeout: %s", t.Timeout) + + return nil } // Check makes check -func (t *Tengine) Check() bool { - return len(t.Collect()) > 0 +func (t *Tengine) Check() error { + mx, err := t.collect() + if err != nil { + t.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts returns Charts. -func (t Tengine) Charts() *module.Charts { - return charts.Copy() +func (t *Tengine) Charts() *module.Charts { + return t.charts } // Collect collects metrics. @@ -98,3 +108,10 @@ func (t *Tengine) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. 
+func (t *Tengine) Cleanup() { + if t.apiClient != nil && t.apiClient.httpClient != nil { + t.apiClient.httpClient.CloseIdleConnections() + } +} diff --git a/modules/tengine/tengine_test.go b/modules/tengine/tengine_test.go index 04fe5f9e7..74a8801cb 100644 --- a/modules/tengine/tengine_test.go +++ b/modules/tengine/tengine_test.go @@ -8,7 +8,6 @@ import ( "os" "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -17,20 +16,14 @@ var ( testStatusData, _ = os.ReadFile("testdata/status.txt") ) -func TestTengine_Cleanup(t *testing.T) { New().Cleanup() } - -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +func TestTengine_Cleanup(t *testing.T) { + New().Cleanup() } func TestTengine_Init(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) } @@ -44,16 +37,16 @@ func TestTengine_Check(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestTengine_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/us" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestTengine_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } @@ -68,8 +61,8 @@ func TestTengine_Collect(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "bytes_in": 5944, @@ -116,8 +109,8 @@ func TestTengine_InvalidData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestTengine_404(t *testing.T) { @@ -130,6 +123,6 @@ func TestTengine_404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/traefik/config_schema.json b/modules/traefik/config_schema.json index 0596ef83b..1a957604e 100644 --- a/modules/traefik/config_schema.json +++ b/modules/traefik/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/traefik job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/traefik job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + 
"proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/traefik/traefik.go b/modules/traefik/traefik.go index a121b0236..9f2ea1463 100644 --- a/modules/traefik/traefik.go +++ b/modules/traefik/traefik.go @@ -4,6 +4,7 @@ package traefik import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,7 +30,7 @@ func New() *Traefik { URL: "http://127.0.0.1:8082/metrics", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -73,24 +74,36 @@ type ( } ) -func (t *Traefik) Init() bool { +func (t *Traefik) Configuration() any { + return t.Config +} + +func (t *Traefik) Init() error { if err := t.validateConfig(); err != nil { t.Errorf("config validation: %v", err) - return false + return err } prom, err := t.initPrometheusClient() if err != nil { t.Errorf("prometheus client initialization: %v", err) - return false + return err } t.prom = prom - return true + return nil } -func (t *Traefik) Check() bool { - return len(t.Collect()) > 0 +func (t *Traefik) Check() error { + mx, err := t.collect() + if err != nil { + t.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (t *Traefik) Charts() *module.Charts { diff --git a/modules/traefik/traefik_test.go b/modules/traefik/traefik_test.go index c5804b672..b577bba7b 100644 --- a/modules/traefik/traefik_test.go +++ b/modules/traefik/traefik_test.go @@ -62,9 +62,9 @@ func TestTraefik_Init(t *testing.T) { rdb.Config = test.config if test.wantFail { - assert.False(t, rdb.Init()) + assert.Error(t, rdb.Init()) } else { - assert.True(t, rdb.Init()) + assert.NoError(t, rdb.Init()) } }) } @@ -107,9 +107,9 @@ func TestTraefik_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, tk.Check()) + assert.Error(t, tk.Check()) } else { - assert.True(t, tk.Check()) + assert.NoError(t, tk.Check()) } }) } @@ -255,7 +255,7 @@ func prepareCaseTraefikV221Metrics(t *testing.T) (*Traefik, func()) { })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -292,7 +292,7 @@ traefik_entrypoint_request_duration_seconds_count{code="300",entrypoint="web",me })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -320,7 +320,7 @@ application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"} })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -333,7 +333,7 @@ func prepareCase404Response(t *testing.T) (*Traefik, func()) { })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -342,7 +342,7 @@ func prepareCaseConnectionRefused(t *testing.T) (*Traefik, func()) { t.Helper() h := New() h.URL = "http://127.0.0.1:38001" - require.True(t, 
h.Init()) + require.NoError(t, h.Init()) return h, func() {} } diff --git a/modules/unbound/config_schema.json b/modules/unbound/config_schema.json index 290905ac0..b2baa1253 100644 --- a/modules/unbound/config_schema.json +++ b/modules/unbound/config_schema.json @@ -1,44 +1,47 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/unbound job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "conf_path": { - "type": "string" - }, - "cumulative_stats": { - "type": "boolean" - }, - "use_tls": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "tls_skip_verify": { - "type": "boolean" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/unbound job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "conf_path": { + "type": "string" + }, + "cumulative_stats": { + "type": "boolean" + }, + "use_tls": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/unbound/init.go b/modules/unbound/init.go index 6ae9543f3..bca49f027 100644 --- a/modules/unbound/init.go +++ b/modules/unbound/init.go @@ -87,9 +87,9 @@ func (u *Unbound) initClient() (err error) { u.client = socket.New(socket.Config{ Address: u.Address, - ConnectTimeout: u.Timeout.Duration, - ReadTimeout: u.Timeout.Duration, - WriteTimeout: u.Timeout.Duration, + ConnectTimeout: u.Timeout.Duration(), + ReadTimeout: u.Timeout.Duration(), + WriteTimeout: u.Timeout.Duration(), TLSConf: tlsCfg, }) return nil diff --git a/modules/unbound/unbound.go b/modules/unbound/unbound.go index 625ef75cd..a61bf41e9 100644 --- a/modules/unbound/unbound.go +++ b/modules/unbound/unbound.go @@ -4,6 +4,7 @@ package unbound import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/socket" @@ -24,60 +25,59 @@ func init() { } func New() *Unbound { - config := Config{ - Address: "127.0.0.1:8953", - ConfPath: "/etc/unbound/unbound.conf", - Timeout: web.Duration{Duration: time.Second}, - Cumulative: false, - UseTLS: true, - TLSConfig: tlscfg.TLSConfig{ - TLSCert: "/etc/unbound/unbound_control.pem", - TLSKey: "/etc/unbound/unbound_control.key", - InsecureSkipVerify: true, - }, - } - return &Unbound{ - Config: config, + Config: Config{ + Address: "127.0.0.1:8953", + ConfPath: "/etc/unbound/unbound.conf", + Timeout: web.Duration(time.Second), + Cumulative: false, + UseTLS: true, + TLSConfig: tlscfg.TLSConfig{ + TLSCert: "/etc/unbound/unbound_control.pem", + TLSKey: "/etc/unbound/unbound_control.key", + InsecureSkipVerify: true, + }, + }, curCache: newCollectCache(), cache: newCollectCache(), } } -type ( - Config struct { - Address string `yaml:"address"` - ConfPath string `yaml:"conf_path"` - Timeout web.Duration `yaml:"timeout"` - Cumulative bool `yaml:"cumulative_stats"` - UseTLS bool `yaml:"use_tls"` - tlscfg.TLSConfig `yaml:",inline"` - } - Unbound struct { - module.Base - Config `yaml:",inline"` +type Config struct { + Address string 
`yaml:"address"` + ConfPath string `yaml:"conf_path"` + Timeout web.Duration `yaml:"timeout"` + Cumulative bool `yaml:"cumulative_stats"` + UseTLS bool `yaml:"use_tls"` + tlscfg.TLSConfig `yaml:",inline"` +} - client socket.Client - cache collectCache - curCache collectCache +type Unbound struct { + module.Base + Config `yaml:",inline"` - prevCacheMiss float64 // needed for cumulative mode - extChartsCreated bool + client socket.Client + cache collectCache + curCache collectCache - charts *module.Charts - } -) + prevCacheMiss float64 // needed for cumulative mode + extChartsCreated bool -func (Unbound) Cleanup() {} + charts *module.Charts +} + +func (u *Unbound) Configuration() any { + return u.Config +} -func (u *Unbound) Init() bool { +func (u *Unbound) Init() error { if enabled := u.initConfig(); !enabled { - return false + return errors.New("remote control is disabled in the configuration file") } if err := u.initClient(); err != nil { u.Errorf("creating client: %v", err) - return false + return err } u.charts = charts(u.Cumulative) @@ -86,14 +86,23 @@ func (u *Unbound) Init() bool { if u.UseTLS { u.Debugf("using tls_skip_verify: %v, tls_key: %s, tls_cert: %s", u.InsecureSkipVerify, u.TLSKey, u.TLSCert) } - return true + + return nil } -func (u *Unbound) Check() bool { - return len(u.Collect()) > 0 +func (u *Unbound) Check() error { + mx, err := u.collect() + if err != nil { + u.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } -func (u Unbound) Charts() *module.Charts { +func (u *Unbound) Charts() *module.Charts { return u.charts } @@ -108,3 +117,9 @@ func (u *Unbound) Collect() map[string]int64 { } return mx } + +func (u *Unbound) Cleanup() { + if u.client != nil { + _ = u.client.Disconnect() + } +} diff --git a/modules/unbound/unbound_test.go b/modules/unbound/unbound_test.go index fabea299d..05a86a412 100644 --- a/modules/unbound/unbound_test.go +++ b/modules/unbound/unbound_test.go @@ -55,7 +55,7 @@ func TestNew(t *testing.T) { func TestUnbound_Init(t *testing.T) { unbound := nonTLSUnbound() - assert.True(t, unbound.Init()) + assert.NoError(t, unbound.Init()) } func TestUnbound_Init_SetEverythingFromUnboundConf(t *testing.T) { @@ -74,7 +74,7 @@ func TestUnbound_Init_SetEverythingFromUnboundConf(t *testing.T) { }, } - assert.True(t, unbound.Init()) + assert.NoError(t, unbound.Init()) assert.Equal(t, expectedConfig, unbound.Config) } @@ -82,37 +82,37 @@ func TestUnbound_Init_DisabledInUnboundConf(t *testing.T) { unbound := nonTLSUnbound() unbound.ConfPath = "testdata/unbound_disabled.conf" - assert.False(t, unbound.Init()) + assert.Error(t, unbound.Init()) } func TestUnbound_Init_HandleEmptyConfig(t *testing.T) { unbound := nonTLSUnbound() unbound.ConfPath = "testdata/unbound_empty.conf" - assert.True(t, unbound.Init()) + assert.NoError(t, unbound.Init()) } func TestUnbound_Init_HandleNonExistentConfig(t *testing.T) { unbound := nonTLSUnbound() unbound.ConfPath = "testdata/unbound_non_existent.conf" - assert.True(t, unbound.Init()) + assert.NoError(t, unbound.Init()) } func TestUnbound_Check(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: commonStatsData, err: false} - assert.True(t, unbound.Check()) + assert.NoError(t, unbound.Check()) } func TestUnbound_Check_ErrorDuringScrapingUnbound(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = 
mockUnboundClient{err: true} - assert.False(t, unbound.Check()) + assert.Error(t, unbound.Check()) } func TestUnbound_Cleanup(t *testing.T) { @@ -121,14 +121,14 @@ func TestUnbound_Cleanup(t *testing.T) { func TestUnbound_Charts(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) assert.NotNil(t, unbound.Charts()) } func TestUnbound_Collect(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: commonStatsData, err: false} collected := unbound.Collect() @@ -138,7 +138,7 @@ func TestUnbound_Collect(t *testing.T) { func TestUnbound_Collect_ExtendedStats(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: extStatsData, err: false} collected := unbound.Collect() @@ -158,7 +158,7 @@ func TestUnbound_Collect_LifeCycleCumulativeExtendedStats(t *testing.T) { unbound := nonTLSUnbound() unbound.Cumulative = true - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) ubClient := &mockUnboundClient{err: false} unbound.client = ubClient @@ -186,7 +186,7 @@ func TestUnbound_Collect_LifeCycleResetExtendedStats(t *testing.T) { unbound := nonTLSUnbound() unbound.Cumulative = false - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) ubClient := &mockUnboundClient{err: false} unbound.client = ubClient @@ -204,7 +204,7 @@ func TestUnbound_Collect_LifeCycleResetExtendedStats(t *testing.T) { func TestUnbound_Collect_EmptyResponse(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: []byte{}, err: false} assert.Nil(t, unbound.Collect()) @@ -212,7 +212,7 @@ func TestUnbound_Collect_EmptyResponse(t *testing.T) { func TestUnbound_Collect_ErrorResponse(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: []byte("error unknown command 'unknown'"), err: false} assert.Nil(t, unbound.Collect()) @@ -220,7 +220,7 @@ func TestUnbound_Collect_ErrorResponse(t *testing.T) { func TestUnbound_Collect_ErrorOnSend(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{err: true} assert.Nil(t, unbound.Collect()) @@ -228,7 +228,7 @@ func TestUnbound_Collect_ErrorOnSend(t *testing.T) { func TestUnbound_Collect_ErrorOnParseBadSyntax(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) data := strings.Repeat("zk_avg_latency 0\nzk_min_latency 0\nzk_mix_latency 0\n", 10) unbound.client = mockUnboundClient{data: []byte(data), err: false} diff --git a/modules/upsd/client.go b/modules/upsd/client.go index be0148bc5..cf67acdf6 100644 --- a/modules/upsd/client.go +++ b/modules/upsd/client.go @@ -29,9 +29,9 @@ type upsUnit struct { func newUpsdConn(conf Config) upsdConn { return &upsdClient{conn: socket.New(socket.Config{ - ConnectTimeout: conf.Timeout.Duration, - ReadTimeout: conf.Timeout.Duration, - WriteTimeout: conf.Timeout.Duration, + ConnectTimeout: conf.Timeout.Duration(), + ReadTimeout: conf.Timeout.Duration(), + WriteTimeout: conf.Timeout.Duration(), Address: conf.Address, })} } diff --git a/modules/upsd/config_schema.json b/modules/upsd/config_schema.json index 49fc85354..cd53073d8 
100644 --- a/modules/upsd/config_schema.json +++ b/modules/upsd/config_schema.json @@ -1,29 +1,32 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/upsd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/upsd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "address": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/upsd/upsd.go b/modules/upsd/upsd.go index ebe0f36bc..5836fffdd 100644 --- a/modules/upsd/upsd.go +++ b/modules/upsd/upsd.go @@ -3,6 +3,7 @@ package upsd import ( + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -19,7 +20,7 @@ func New() *Upsd { return &Upsd{ Config: Config{ Address: "127.0.0.1:3493", - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, newUpsdConn: newUpsdConn, charts: &module.Charts{}, @@ -56,17 +57,29 @@ type ( } ) -func (u *Upsd) Init() bool { +func (u *Upsd) Configuration() any { + return u.Config +} + +func (u *Upsd) Init() error { if u.Address == "" { u.Error("config: 'address' not set") - return false + return errors.New("address not set") } - return true + return nil } -func (u *Upsd) Check() bool { - return len(u.Collect()) > 0 +func (u *Upsd) Check() error { + mx, err := u.collect() + if err != nil { + u.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (u *Upsd) Charts() *module.Charts { diff --git a/modules/upsd/upsd_test.go b/modules/upsd/upsd_test.go index 74c8626f1..dfe6ea563 100644 --- a/modules/upsd/upsd_test.go +++ b/modules/upsd/upsd_test.go @@ -19,7 +19,7 @@ func TestUpsd_Cleanup(t *testing.T) { mock := prepareMockConnOK() upsd.newUpsdConn = func(Config) upsdConn { return mock } - require.True(t, upsd.Init()) + require.NoError(t, upsd.Init()) _ = upsd.Collect() require.NotPanics(t, upsd.Cleanup) assert.True(t, mock.calledDisconnect) @@ -46,9 +46,9 @@ func TestUpsd_Init(t *testing.T) { upsd.Config = test.config if test.wantFail { - assert.False(t, upsd.Init()) + assert.Error(t, upsd.Init()) } else { - assert.True(t, upsd.Init()) + assert.NoError(t, upsd.Init()) } }) } @@ -92,12 +92,12 @@ func TestUpsd_Check(t *testing.T) { upsd := test.prepareUpsd() upsd.newUpsdConn = func(Config) upsdConn { return test.prepareMock() } - require.True(t, upsd.Init()) + require.NoError(t, upsd.Init()) if test.wantFail { - assert.False(t, upsd.Check()) + assert.Error(t, upsd.Check()) } else { - assert.True(t, upsd.Check()) + assert.NoError(t, upsd.Check()) } }) } @@ -105,7 +105,7 @@ func TestUpsd_Check(t *testing.T) { func TestUpsd_Charts(t *testing.T) { upsd := New() - require.True(t, upsd.Init()) + require.NoError(t, upsd.Init()) assert.NotNil(t, upsd.Charts()) } @@ -225,7 +225,7 @@ func TestUpsd_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { upsd := test.prepareUpsd() - require.True(t, upsd.Init()) + require.NoError(t, upsd.Init()) mock := test.prepareMock() 
upsd.newUpsdConn = func(Config) upsdConn { return mock } diff --git a/modules/vcsa/config_schema.json b/modules/vcsa/config_schema.json index aab0647ab..dc56fd9e4 100644 --- a/modules/vcsa/config_schema.json +++ b/modules/vcsa/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/vcsa job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/vcsa job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/vcsa/vcsa.go b/modules/vcsa/vcsa.go index ccac96f3a..82bdef978 100644 --- a/modules/vcsa/vcsa.go +++ b/modules/vcsa/vcsa.go @@ -4,6 +4,7 @@ package vcsa import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -29,7 +30,7 @@ func New() *VCSA { Config: Config{ HTTP: web.HTTP{ Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, }, @@ -66,33 +67,47 @@ type ( } ) -func (vc *VCSA) Init() bool { +func (vc *VCSA) Configuration() any { + return vc.Config +} + +func (vc *VCSA) Init() error { if err := vc.validateConfig(); err != nil { vc.Error(err) - return false + return err } c, err := vc.initHealthClient() if err != nil { vc.Errorf("error on creating health client : %vc", err) - return false + return err } vc.client = c vc.Debugf("using URL %s", vc.URL) - vc.Debugf("using timeout: %s", vc.Timeout.Duration) + vc.Debugf("using timeout: %s", vc.Timeout) - return true + return nil } -func (vc *VCSA) Check() bool { +func (vc *VCSA) Check() error { err := vc.client.Login() if err != nil { vc.Error(err) - return false + return err + } + + mx, err := vc.collect() + if err != nil { + vc.Error(err) + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") } - return len(vc.Collect()) > 0 + return nil } func (vc *VCSA) Charts() *module.Charts { diff --git a/modules/vcsa/vcsa_test.go b/modules/vcsa/vcsa_test.go index 86185bfa2..7ecade8e3 100644 --- a/modules/vcsa/vcsa_test.go +++ b/modules/vcsa/vcsa_test.go @@ -27,54 +27,54 @@ func TestNew(t *testing.T) { 
func TestVCSA_Init(t *testing.T) { job := testNewVCSA() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.client) } func TestVCenter_InitErrorOnValidatingInitParameters(t *testing.T) { job := New() - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestVCenter_InitErrorOnCreatingClient(t *testing.T) { job := testNewVCSA() job.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestVCenter_Check(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.client = &mockVCenterHealthClient{} - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) } func TestVCenter_CheckErrorOnLogin(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.client = &mockVCenterHealthClient{ login: func() error { return errors.New("login mock error") }, } - assert.False(t, job.Check()) + assert.Error(t, job.Check()) } func TestVCenter_CheckEnsureLoggedIn(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{} job.client = mock - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) assert.True(t, mock.loginCalls == 1) } func TestVCenter_Cleanup(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{} job.client = mock job.Cleanup() @@ -94,7 +94,7 @@ func TestVCenter_Charts(t *testing.T) { func TestVCenter_Collect(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{} job.client = mock @@ -153,7 +153,7 @@ func TestVCenter_Collect(t *testing.T) { func TestVCenter_CollectEnsurePingIsCalled(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{} job.client = mock job.Collect() @@ -163,7 +163,7 @@ func TestVCenter_CollectEnsurePingIsCalled(t *testing.T) { func TestVCenter_CollectErrorOnPing(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{ ping: func() error { return errors.New("ping mock error") }, } @@ -174,7 +174,7 @@ func TestVCenter_CollectErrorOnPing(t *testing.T) { func TestVCenter_CollectErrorOnHealthCalls(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{ applMgmt: func() (string, error) { return "", errors.New("applMgmt mock error") }, databaseStorage: func() (string, error) { return "", errors.New("databaseStorage mock error") }, diff --git a/modules/vernemq/config_schema.json b/modules/vernemq/config_schema.json index f21bab451..49cb3eb64 100644 --- a/modules/vernemq/config_schema.json +++ b/modules/vernemq/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/vernemq job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": 
"http://json-schema.org/draft-07/schema#", + "title": "go.d/vernemq job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/vernemq/init.go b/modules/vernemq/init.go new file mode 100644 index 000000000..573b736ed --- /dev/null +++ b/modules/vernemq/init.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vernemq + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (v *VerneMQ) validateConfig() error { + if v.URL == "" { + return errors.New("url is not set") + } + return nil +} + +func (v *VerneMQ) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(v.Client) + if err != nil { + return nil, err + } + + return prometheus.New(client, v.Request), nil +} diff --git a/modules/vernemq/vernemq.go b/modules/vernemq/vernemq.go index d86f3b118..bfce2868d 100644 --- a/modules/vernemq/vernemq.go +++ b/modules/vernemq/vernemq.go @@ -24,29 +24,27 @@ func init() { } func New() *VerneMQ { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:8888/metrics", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &VerneMQ{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8888/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - - return &VerneMQ{ - Config: config, charts: charts.Copy(), cache: make(cache), } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - } +type Config struct { + web.HTTP `yaml:",inline"` +} +type ( VerneMQ struct { module.Base Config `yaml:",inline"` @@ -61,37 +59,36 @@ type ( func (c cache) hasP(v string) bool { ok := c[v]; c[v] = true; return ok } -func (v VerneMQ) validateConfig() error { - if v.URL == "" { - return errors.New("URL is not set") - } - return nil +func (v *VerneMQ) Configuration() any { + return v.Config } -func (v *VerneMQ) initClient() error { - client, err := web.NewHTTPClient(v.Client) +func (v *VerneMQ) Init() error { + if err := v.validateConfig(); err != nil { + v.Errorf("error on validating config: %v", err) + return err + } + + prom, err := v.initPrometheusClient() if err != nil { + v.Error(err) return err } + v.prom = prom - v.prom = prometheus.New(client, v.Request) return nil } -func (v *VerneMQ) Init() bool { - if err := v.validateConfig(); err != nil { - v.Errorf("error on validating config: %v", err) - return false +func (v *VerneMQ) Check() error { + mx, err := v.collect() + if err != 
nil { + v.Error(err) + return err } - if err := v.initClient(); err != nil { - v.Errorf("error on initializing client: %v", err) - return false + if len(mx) == 0 { + return errors.New("no metrics collected") } - return true -} - -func (v *VerneMQ) Check() bool { - return len(v.Collect()) > 0 + return nil } func (v *VerneMQ) Charts() *Charts { @@ -110,4 +107,8 @@ func (v *VerneMQ) Collect() map[string]int64 { return mx } -func (VerneMQ) Cleanup() {} +func (v *VerneMQ) Cleanup() { + if v.prom != nil && v.prom.HTTPClient() != nil { + v.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/vernemq/vernemq_test.go b/modules/vernemq/vernemq_test.go index 5f07553cd..a86c20568 100644 --- a/modules/vernemq/vernemq_test.go +++ b/modules/vernemq/vernemq_test.go @@ -29,43 +29,43 @@ func TestNew(t *testing.T) { func TestVerneMQ_Init(t *testing.T) { verneMQ := prepareVerneMQ() - assert.True(t, verneMQ.Init()) + assert.NoError(t, verneMQ.Init()) } func TestVerneMQ_Init_ReturnsFalseIfURLIsNotSet(t *testing.T) { verneMQ := prepareVerneMQ() verneMQ.URL = "" - assert.False(t, verneMQ.Init()) + assert.Error(t, verneMQ.Init()) } func TestVerneMQ_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { verneMQ := prepareVerneMQ() verneMQ.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, verneMQ.Init()) + assert.Error(t, verneMQ.Init()) } func TestVerneMQ_Check(t *testing.T) { verneMQ, srv := prepareClientServerV1101(t) defer srv.Close() - assert.True(t, verneMQ.Check()) + assert.NoError(t, verneMQ.Check()) } func TestVerneMQ_Check_ReturnsFalseIfConnectionRefused(t *testing.T) { verneMQ := prepareVerneMQ() - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) - assert.False(t, verneMQ.Check()) + assert.Error(t, verneMQ.Check()) } func TestVerneMQ_Check_ReturnsFalseIfMetricsAreNotVerneMQ(t *testing.T) { verneMQ, srv := prepareClientServerNotVerneMQ(t) defer srv.Close() - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) - assert.False(t, verneMQ.Check()) + assert.Error(t, verneMQ.Check()) } func TestVerneMQ_Charts(t *testing.T) { @@ -87,7 +87,7 @@ func TestVerneMQ_Collect(t *testing.T) { func TestVerneMQ_Collect_ReturnsNilIfConnectionRefused(t *testing.T) { verneMQ := prepareVerneMQ() - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) assert.Nil(t, verneMQ.Collect()) } @@ -145,7 +145,7 @@ func prepareClientServerV1101(t *testing.T) (*VerneMQ, *httptest.Server) { verneMQ := New() verneMQ.URL = ts.URL - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) return verneMQ, ts } @@ -159,7 +159,7 @@ func prepareClientServerNotVerneMQ(t *testing.T) (*VerneMQ, *httptest.Server) { verneMQ := New() verneMQ.URL = ts.URL - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) return verneMQ, ts } @@ -173,7 +173,7 @@ func prepareClientServerInvalid(t *testing.T) (*VerneMQ, *httptest.Server) { verneMQ := New() verneMQ.URL = ts.URL - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) return verneMQ, ts } @@ -187,7 +187,7 @@ func prepareClientServerResponse404(t *testing.T) (*VerneMQ, *httptest.Server) { verneMQ := New() verneMQ.URL = ts.URL - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) return verneMQ, ts } diff --git a/modules/vsphere/config_schema.json b/modules/vsphere/config_schema.json index 68bd55e1e..b1f716472 100644 --- a/modules/vsphere/config_schema.json +++ b/modules/vsphere/config_schema.json @@ -1,77 +1,80 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - 
"title": "go.d/vsphere job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "discovery_interval": { - "type": [ - "string", - "integer" - ] - }, - "host_include": { - "type": "array", - "items": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/vsphere job configuration schema.", + "type": "object", + "properties": { + "name": { "type": "string" - } - }, - "vm_include": { - "type": "array", - "items": { + }, + "url": { "type": "string" - } - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "discovery_interval": { + "type": [ + "string", + "integer" + ] + }, + "host_include": { + "type": "array", + "items": { + "type": "string" + } + }, + "vm_include": { + "type": "array", + "items": { + "type": "string" + } + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/vsphere/discover.go b/modules/vsphere/discover.go index 65555a73b..1ea0a4d6e 100644 --- a/modules/vsphere/discover.go +++ b/modules/vsphere/discover.go @@ -14,7 +14,7 @@ func (vs *VSphere) goDiscovery() { vs.Errorf("error on discovering : %v", err) } } - vs.discoveryTask = newTask(job, vs.DiscoveryInterval.Duration) + vs.discoveryTask = newTask(job, vs.DiscoveryInterval.Duration()) } func (vs *VSphere) discoverOnce() error { diff --git a/modules/vsphere/init.go b/modules/vsphere/init.go index a0f966220..c17029a6c 100644 --- a/modules/vsphere/init.go +++ b/modules/vsphere/init.go @@ -30,7 +30,7 @@ func (vs *VSphere) initClient() (*client.Client, error) { URL: vs.URL, User: vs.Username, Password: vs.Password, - Timeout: vs.Timeout.Duration, + Timeout: vs.Timeout.Duration(), TLSConfig: vs.Client.TLSConfig, } return client.New(config) diff --git a/modules/vsphere/vsphere.go b/modules/vsphere/vsphere.go index d7af8a495..b5a063a18 100644 --- a/modules/vsphere/vsphere.go +++ b/modules/vsphere/vsphere.go @@ -29,20 +29,18 @@ func init() { } func New() *VSphere { - config := Config{ - HTTP: web.HTTP{ - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 20}, + return &VSphere{ + Config: Config{ + HTTP: web.HTTP{ + Client: web.Client{ + Timeout: web.Duration(time.Second * 20), + }, }, + DiscoveryInterval: web.Duration(time.Minute * 5), + HostsInclude: []string{"/*"}, + VMsInclude: []string{"/*"}, }, - DiscoveryInterval: web.Duration{Duration: 
time.Minute * 5}, - HostsInclude: []string{"/*"}, - VMsInclude: []string{"/*"}, - } - - return &VSphere{ - collectionLock: new(sync.RWMutex), - Config: config, + collectionLock: &sync.RWMutex{}, charts: &module.Charts{}, discoveredHosts: make(map[string]int), discoveredVMs: make(map[string]int), @@ -83,39 +81,41 @@ type ( } ) -func (vs *VSphere) Init() bool { +func (vs *VSphere) Configuration() any { + return vs.Config +} + +func (vs *VSphere) Init() error { if err := vs.validateConfig(); err != nil { vs.Errorf("error on validating config: %v", err) - return false + return err } vsClient, err := vs.initClient() if err != nil { vs.Errorf("error on creating vsphere client: %v", err) - return false + return err } - err = vs.initDiscoverer(vsClient) - if err != nil { + if err := vs.initDiscoverer(vsClient); err != nil { vs.Errorf("error on creating vsphere discoverer: %v", err) - return false + return err } vs.initScraper(vsClient) - err = vs.discoverOnce() - if err != nil { + if err := vs.discoverOnce(); err != nil { vs.Errorf("error on discovering: %v", err) - return false + return err } vs.goDiscovery() - return true + return nil } -func (vs *VSphere) Check() bool { - return true +func (vs *VSphere) Check() error { + return nil } func (vs *VSphere) Charts() *module.Charts { diff --git a/modules/vsphere/vsphere_test.go b/modules/vsphere/vsphere_test.go index 97c23d5ba..746082bb9 100644 --- a/modules/vsphere/vsphere_test.go +++ b/modules/vsphere/vsphere_test.go @@ -10,6 +10,7 @@ import ( "github.com/netdata/go.d.plugin/modules/vsphere/discover" "github.com/netdata/go.d.plugin/modules/vsphere/match" rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + "github.com/netdata/go.d.plugin/pkg/web" "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" @@ -28,7 +29,7 @@ func TestVSphere_Init(t *testing.T) { vSphere, _, teardown := prepareVSphereSim(t) defer teardown() - assert.True(t, vSphere.Init()) + assert.NoError(t, vSphere.Init()) assert.NotNil(t, vSphere.discoverer) assert.NotNil(t, vSphere.scraper) assert.NotNil(t, vSphere.resources) @@ -41,7 +42,7 @@ func TestVSphere_Init_ReturnsFalseIfURLNotSet(t *testing.T) { defer teardown() vSphere.URL = "" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfUsernameNotSet(t *testing.T) { @@ -49,7 +50,7 @@ func TestVSphere_Init_ReturnsFalseIfUsernameNotSet(t *testing.T) { defer teardown() vSphere.Username = "" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfPasswordNotSet(t *testing.T) { @@ -57,7 +58,7 @@ func TestVSphere_Init_ReturnsFalseIfPasswordNotSet(t *testing.T) { defer teardown() vSphere.Password = "" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { @@ -65,7 +66,7 @@ func TestVSphere_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { defer teardown() vSphere.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfConnectionRefused(t *testing.T) { @@ -73,7 +74,7 @@ func TestVSphere_Init_ReturnsFalseIfConnectionRefused(t *testing.T) { defer teardown() vSphere.URL = "http://127.0.0.1:32001" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfInvalidHostVMIncludeFormat(t *testing.T) { @@ -81,16 +82,16 @@ func TestVSphere_Init_ReturnsFalseIfInvalidHostVMIncludeFormat(t 
*testing.T) { defer teardown() vSphere.HostsInclude = match.HostIncludes{"invalid"} - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) vSphere.HostsInclude = vSphere.HostsInclude[:0] vSphere.VMsInclude = match.VMIncludes{"invalid"} - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Check(t *testing.T) { - assert.NotNil(t, New().Check()) + assert.NoError(t, New().Check()) } func TestVSphere_Charts(t *testing.T) { @@ -101,7 +102,7 @@ func TestVSphere_Cleanup(t *testing.T) { vSphere, _, teardown := prepareVSphereSim(t) defer teardown() - require.True(t, vSphere.Init()) + require.NoError(t, vSphere.Init()) vSphere.Cleanup() time.Sleep(time.Second) @@ -117,7 +118,7 @@ func TestVSphere_Collect(t *testing.T) { vSphere, model, teardown := prepareVSphereSim(t) defer teardown() - require.True(t, vSphere.Init()) + require.NoError(t, vSphere.Init()) vSphere.scraper = mockScraper{vSphere.scraper} @@ -332,8 +333,8 @@ func TestVSphere_Collect_RemoveHostsVMsInRuntime(t *testing.T) { vSphere, _, teardown := prepareVSphereSim(t) defer teardown() - require.True(t, vSphere.Init()) - require.True(t, vSphere.Check()) + require.NoError(t, vSphere.Init()) + require.NoError(t, vSphere.Check()) okHostID := "host-50" okVMID := "vm-64" @@ -387,9 +388,9 @@ func TestVSphere_Collect_Run(t *testing.T) { vSphere, model, teardown := prepareVSphereSim(t) defer teardown() - vSphere.DiscoveryInterval.Duration = time.Second * 2 - require.True(t, vSphere.Init()) - require.True(t, vSphere.Check()) + vSphere.DiscoveryInterval = web.Duration(time.Second * 2) + require.NoError(t, vSphere.Init()) + require.NoError(t, vSphere.Check()) runs := 20 for i := 0; i < runs; i++ { diff --git a/modules/weblog/config_schema.json b/modules/weblog/config_schema.json index 82b6c358c..371ae3854 100644 --- a/modules/weblog/config_schema.json +++ b/modules/weblog/config_schema.json @@ -1,208 +1,211 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/web_log job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "parser": { - "type": "object", - "properties": { - "log_type": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/web_log job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "parser": { + "type": "object", + "properties": { + "log_type": { + "type": "string" + }, + "csv_config": { + "type": "object", + "properties": { + "fields_per_record": { + "type": "integer" + }, + "delimiter": { + "type": "string" + }, + "trim_leading_space": { + "type": "boolean" + }, + "format": { + "type": "string" + } + }, + "required": [ + "fields_per_record", + "delimiter", + "trim_leading_space", + "format" + ] + }, + "ltsv_config": { + "type": "object", + "properties": { + "field_delimiter": { + "type": "string" + }, + "value_delimiter": { + "type": "string" + }, + "mapping": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "field_delimiter", + "value_delimiter", + "mapping" + ] + }, + "regexp_config": { + "type": "object", + "properties": { + "pattern": { + "type": "string" + } + }, + "required": [ + "pattern" + ] + }, + "json_config": { + "type": "object", + "properties": { + "mapping": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "mapping" + ] + } }, - "csv_config": { + "required": [ + "log_type" + ] + }, + "path": { + 
"type": "string" + }, + "exclude_path": { + "type": "string" + }, + "url_patterns": { + "type": "array", + "items": { "type": "object", "properties": { - "fields_per_record": { - "type": "integer" - }, - "delimiter": { + "name": { "type": "string" }, - "trim_leading_space": { - "type": "boolean" - }, - "format": { + "match": { "type": "string" } }, "required": [ - "fields_per_record", - "delimiter", - "trim_leading_space", - "format" + "name", + "match" ] - }, - "ltsv_config": { + } + }, + "custom_fields": { + "type": "array", + "items": { "type": "object", "properties": { - "field_delimiter": { - "type": "string" - }, - "value_delimiter": { + "name": { "type": "string" }, - "mapping": { - "type": "object", - "additionalProperties": { - "type": "string" + "patterns": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "match": { + "type": "string" + } + }, + "required": [ + "name", + "match" + ] } } }, "required": [ - "field_delimiter", - "value_delimiter", - "mapping" + "name", + "patterns" ] - }, - "regexp_config": { + } + }, + "custom_time_fields": { + "type": "array", + "items": { "type": "object", "properties": { - "pattern": { + "name": { "type": "string" + }, + "histogram": { + "type": "array", + "items": { + "type": "number" + } } }, "required": [ - "pattern" + "name", + "histogram" ] - }, - "json_config": { + } + }, + "custom_numeric_fields": { + "type": "array", + "items": { "type": "object", "properties": { - "mapping": { - "type": "object", - "additionalProperties": { - "type": "string" - } + "name": { + "type": "string" + }, + "units": { + "type": "string" + }, + "multiplier": { + "type": "integer" + }, + "divisor": { + "type": "integer" } }, "required": [ - "mapping" + "name", + "units", + "multiplier", + "divisor" ] } }, - "required": [ - "log_type" - ] - }, - "path": { - "type": "string" - }, - "exclude_path": { - "type": "string" - }, - "url_patterns": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "match": { - "type": "string" - } - }, - "required": [ - "name", - "match" - ] - } - }, - "custom_fields": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "patterns": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "match": { - "type": "string" - } - }, - "required": [ - "name", - "match" - ] - } - } - }, - "required": [ - "name", - "patterns" - ] - } - }, - "custom_time_fields": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "histogram": { - "type": "array", - "items": { - "type": "number" - } - } - }, - "required": [ - "name", - "histogram" - ] - } - }, - "custom_numeric_fields": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "units": { - "type": "string" - }, - "multiplier": { - "type": "integer" - }, - "divisor": { - "type": "integer" - } - }, - "required": [ - "name", - "units", - "multiplier", - "divisor" - ] - } - }, - "histogram": { - "type": "array", - "items": { - "type": "number" + "histogram": { + "type": "array", + "items": { + "type": "number" + } + }, + "group_response_codes": { + "type": "boolean" } }, - "group_response_codes": { - "type": "boolean" - } + "required": [ + "name", + "path" + ] }, - "required": [ - "name", - "path" - ] + "uiSchema": {} } diff --git a/modules/weblog/weblog.go 
b/modules/weblog/weblog.go index 27bf43f9a..4e5375840 100644 --- a/modules/weblog/weblog.go +++ b/modules/weblog/weblog.go @@ -92,20 +92,24 @@ type WebLog struct { mx *metricsData } -func (w *WebLog) Init() bool { +func (w *WebLog) Configuration() any { + return w.Config +} + +func (w *WebLog) Init() error { if err := w.createURLPatterns(); err != nil { w.Errorf("init failed: %v", err) - return false + return err } if err := w.createCustomFields(); err != nil { w.Errorf("init failed: %v", err) - return false + return err } if err := w.createCustomTimeFields(); err != nil { w.Errorf("init failed: %v", err) - return false + return err } if err := w.createCustomNumericFields(); err != nil { @@ -115,26 +119,27 @@ func (w *WebLog) Init() bool { w.createLogLine() w.mx = newMetricsData(w.Config) - return true + return nil } -func (w *WebLog) Check() bool { +func (w *WebLog) Check() error { // Note: these inits are here to make auto-detection retry working if err := w.createLogReader(); err != nil { w.Warning("check failed: ", err) - return false + return err } if err := w.createParser(); err != nil { w.Warning("check failed: ", err) - return false + return err } if err := w.createCharts(w.line); err != nil { w.Warning("check failed: ", err) - return false + return err } - return true + + return nil } func (w *WebLog) Charts() *module.Charts { diff --git a/modules/weblog/weblog_test.go b/modules/weblog/weblog_test.go index 6195d2e49..e65a46e21 100644 --- a/modules/weblog/weblog_test.go +++ b/modules/weblog/weblog_test.go @@ -42,48 +42,48 @@ func TestNew(t *testing.T) { func TestWebLog_Init(t *testing.T) { weblog := New() - assert.True(t, weblog.Init()) + assert.NoError(t, weblog.Init()) } func TestWebLog_Init_ErrorOnCreatingURLPatterns(t *testing.T) { weblog := New() weblog.URLPatterns = []userPattern{{Match: "* !*"}} - assert.False(t, weblog.Init()) + assert.Error(t, weblog.Init()) } func TestWebLog_Init_ErrorOnCreatingCustomFields(t *testing.T) { weblog := New() weblog.CustomFields = []customField{{Patterns: []userPattern{{Name: "p1", Match: "* !*"}}}} - assert.False(t, weblog.Init()) + assert.Error(t, weblog.Init()) } func TestWebLog_Check(t *testing.T) { weblog := New() defer weblog.Cleanup() weblog.Path = "testdata/common.log" - require.True(t, weblog.Init()) + require.NoError(t, weblog.Init()) - assert.True(t, weblog.Check()) + assert.NoError(t, weblog.Check()) } func TestWebLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) { weblog := New() defer weblog.Cleanup() weblog.Path = "testdata/not_exists.log" - require.True(t, weblog.Init()) + require.NoError(t, weblog.Init()) - assert.False(t, weblog.Check()) + assert.Error(t, weblog.Check()) } func TestWebLog_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) { weblog := New() defer weblog.Cleanup() weblog.Path = "testdata/custom.log" - require.True(t, weblog.Init()) + require.NoError(t, weblog.Init()) - assert.False(t, weblog.Check()) + assert.Error(t, weblog.Check()) } func TestWebLog_Check_ErrorOnCreatingParserEmptyLine(t *testing.T) { @@ -92,17 +92,17 @@ func TestWebLog_Check_ErrorOnCreatingParserEmptyLine(t *testing.T) { weblog.Path = "testdata/custom.log" weblog.Parser.LogType = logs.TypeCSV weblog.Parser.CSV.Format = "$one $two" - require.True(t, weblog.Init()) + require.NoError(t, weblog.Init()) - assert.False(t, weblog.Check()) + assert.Error(t, weblog.Check()) } func TestWebLog_Charts(t *testing.T) { weblog := New() defer weblog.Cleanup() weblog.Path = "testdata/common.log" - require.True(t, weblog.Init()) - require.True(t, 
weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) assert.NotNil(t, weblog.Charts()) } @@ -1187,8 +1187,8 @@ func prepareWebLogCollectFull(t *testing.T) *WebLog { } weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testFullLog)) @@ -1230,8 +1230,8 @@ func prepareWebLogCollectCommon(t *testing.T) *WebLog { weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCommonLog)) @@ -1282,8 +1282,8 @@ func prepareWebLogCollectCustom(t *testing.T) *WebLog { } weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomLog)) @@ -1328,8 +1328,8 @@ func prepareWebLogCollectCustomTimeFields(t *testing.T) *WebLog { } weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomTimeFieldLog)) @@ -1374,8 +1374,8 @@ func prepareWebLogCollectCustomNumericFields(t *testing.T) *WebLog { } weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomTimeFieldLog)) @@ -1424,8 +1424,8 @@ func prepareWebLogCollectIISFields(t *testing.T) *WebLog { weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testIISLog)) diff --git a/modules/whoisquery/config_schema.json b/modules/whoisquery/config_schema.json index 9f5131789..f33325900 100644 --- a/modules/whoisquery/config_schema.json +++ b/modules/whoisquery/config_schema.json @@ -1,29 +1,32 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/whoisquery job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/whoisquery job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "source": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "days_until_expiration_warning": { + "type": "integer" + }, + "days_until_expiration_critical": { + "type": "integer" + } }, - "source": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "days_until_expiration_warning": { - "type": "integer" - }, - "days_until_expiration_critical": { - "type": "integer" - } + "required": [ + "name", + "source" + ] }, - "required": [ - "name", - "source" - ] + "uiSchema": {} } diff --git a/modules/whoisquery/provider.go b/modules/whoisquery/provider.go 
index 71318dd81..032f979f4 100644 --- a/modules/whoisquery/provider.go +++ b/modules/whoisquery/provider.go @@ -23,7 +23,7 @@ type fromNet struct { func newProvider(config Config) (provider, error) { domain := config.Source client := whois.NewClient() - client.SetTimeout(config.Timeout.Duration) + client.SetTimeout(config.Timeout.Duration()) return &fromNet{ domainAddress: domain, diff --git a/modules/whoisquery/whoisquery.go b/modules/whoisquery/whoisquery.go index 6265b4fb6..e803dbf99 100644 --- a/modules/whoisquery/whoisquery.go +++ b/modules/whoisquery/whoisquery.go @@ -4,6 +4,7 @@ package whoisquery import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -26,7 +27,7 @@ func init() { func New() *WhoisQuery { return &WhoisQuery{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), DaysUntilWarn: 90, DaysUntilCrit: 30, }, @@ -49,26 +50,38 @@ type WhoisQuery struct { prov provider } -func (w *WhoisQuery) Init() bool { +func (w *WhoisQuery) Configuration() any { + return w.Config +} + +func (w *WhoisQuery) Init() error { if err := w.validateConfig(); err != nil { w.Errorf("config validation: %v", err) - return false + return err } prov, err := w.initProvider() if err != nil { w.Errorf("init whois provider: %v", err) - return false + return err } w.prov = prov w.charts = w.initCharts() - return true + return nil } -func (w *WhoisQuery) Check() bool { - return len(w.Collect()) > 0 +func (w *WhoisQuery) Check() error { + mx, err := w.collect() + if err != nil { + w.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (w *WhoisQuery) Charts() *module.Charts { diff --git a/modules/whoisquery/whoisquery_test.go b/modules/whoisquery/whoisquery_test.go index 1f3c827bd..0fe6c01c8 100644 --- a/modules/whoisquery/whoisquery_test.go +++ b/modules/whoisquery/whoisquery_test.go @@ -17,7 +17,7 @@ func TestWhoisQuery_Cleanup(t *testing.T) { func TestWhoisQuery_Charts(t *testing.T) { whoisquery := New() whoisquery.Source = "example.com" - require.True(t, whoisquery.Init()) + require.NoError(t, whoisquery.Init()) assert.NotNil(t, whoisquery.Charts()) } @@ -45,9 +45,9 @@ func TestWhoisQuery_Init(t *testing.T) { whoisquery.Config = test.config if test.err { - assert.False(t, whoisquery.Init()) + assert.Error(t, whoisquery.Init()) } else { - require.True(t, whoisquery.Init()) + require.NoError(t, whoisquery.Init()) var typeOK bool if test.providerType == net { @@ -64,20 +64,20 @@ func TestWhoisQuery_Check(t *testing.T) { whoisquery := New() whoisquery.prov = &mockProvider{remTime: 12345.678} - assert.True(t, whoisquery.Check()) + assert.NoError(t, whoisquery.Check()) } func TestWhoisQuery_Check_ReturnsFalseOnProviderError(t *testing.T) { whoisquery := New() whoisquery.prov = &mockProvider{err: true} - assert.False(t, whoisquery.Check()) + assert.Error(t, whoisquery.Check()) } func TestWhoisQuery_Collect(t *testing.T) { whoisquery := New() whoisquery.Source = "example.com" - require.True(t, whoisquery.Init()) + require.NoError(t, whoisquery.Init()) whoisquery.prov = &mockProvider{remTime: 12345} collected := whoisquery.Collect() @@ -96,7 +96,7 @@ func TestWhoisQuery_Collect(t *testing.T) { func TestWhoisQuery_Collect_ReturnsNilOnProviderError(t *testing.T) { whoisquery := New() whoisquery.Source = "example.com" - require.True(t, whoisquery.Init()) + require.NoError(t, whoisquery.Init()) whoisquery.prov = &mockProvider{err: true} assert.Nil(t, whoisquery.Collect()) 
diff --git a/modules/windows/config_schema.json b/modules/windows/config_schema.json index 1668dd905..cb1c2991b 100644 --- a/modules/windows/config_schema.json +++ b/modules/windows/config_schema.json @@ -1,59 +1,62 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/windows job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/windows job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": {} } diff --git a/modules/windows/init.go b/modules/windows/init.go index 34cf83672..51c3c4266 100644 --- a/modules/windows/init.go +++ b/modules/windows/init.go @@ -4,7 +4,6 @@ package windows import ( "errors" - "net/http" "github.com/netdata/go.d.plugin/pkg/prometheus" "github.com/netdata/go.d.plugin/pkg/web" @@ -17,10 +16,10 @@ func (w *Windows) validateConfig() error { return nil } -func (w *Windows) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(w.Client) -} - -func (w *Windows) initPrometheusClient(client *http.Client) (prometheus.Prometheus, error) { +func (w *Windows) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(w.Client) + if err != nil { + return nil, err + } return prometheus.New(client, w.Request), nil } diff --git a/modules/windows/windows.go b/modules/windows/windows.go index e405887e0..aacc73718 100644 --- a/modules/windows/windows.go +++ b/modules/windows/windows.go @@ -4,7 +4,7 @@ package windows import ( _ "embed" - "net/http" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -30,7 +30,7 @@ func New() *Windows { Config: Config{ HTTP: web.HTTP{ Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, }, @@ -80,8 +80,7 @@ type ( doCheck bool - httpClient *http.Client - prom prometheus.Prometheus + prom prometheus.Prometheus cache cache } @@ -116,31 +115,36 @@ type ( } ) -func (w *Windows) Init() bool { +func (w *Windows) Configuration() any { + return w.Config +} + +func (w *Windows) Init() error { if err := w.validateConfig(); err != nil { 
w.Errorf("config validation: %v", err) - return false - } - - httpClient, err := w.initHTTPClient() - if err != nil { - w.Errorf("init HTTP client: %v", err) - return false + return err } - w.httpClient = httpClient - prom, err := w.initPrometheusClient(w.httpClient) + prom, err := w.initPrometheusClient() if err != nil { w.Errorf("init prometheus clients: %v", err) - return false + return err } w.prom = prom - return true + return nil } -func (w *Windows) Check() bool { - return len(w.Collect()) > 0 +func (w *Windows) Check() error { + mx, err := w.collect() + if err != nil { + w.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (w *Windows) Charts() *module.Charts { @@ -160,7 +164,7 @@ func (w *Windows) Collect() map[string]int64 { } func (w *Windows) Cleanup() { - if w.httpClient != nil { - w.httpClient.CloseIdleConnections() + if w.prom != nil && w.prom.HTTPClient() != nil { + w.prom.HTTPClient().CloseIdleConnections() } } diff --git a/modules/windows/windows_test.go b/modules/windows/windows_test.go index b98e40de6..8e837a56b 100644 --- a/modules/windows/windows_test.go +++ b/modules/windows/windows_test.go @@ -57,9 +57,9 @@ func TestWindows_Init(t *testing.T) { win.Config = test.config if test.wantFail { - assert.False(t, win.Init()) + assert.Error(t, win.Init()) } else { - assert.True(t, win.Init()) + assert.NoError(t, win.Init()) } }) } @@ -92,12 +92,12 @@ func TestWindows_Check(t *testing.T) { win, cleanup := test.prepare() defer cleanup() - require.True(t, win.Init()) + require.NoError(t, win.Init()) if test.wantFail { - assert.False(t, win.Check()) + assert.Error(t, win.Check()) } else { - assert.True(t, win.Check()) + assert.NoError(t, win.Check()) } }) } @@ -789,7 +789,7 @@ func TestWindows_Collect(t *testing.T) { win, cleanup := test.prepare() defer cleanup() - require.True(t, win.Init()) + require.NoError(t, win.Init()) mx := win.Collect() diff --git a/modules/wireguard/config_schema.json b/modules/wireguard/config_schema.json index c6d6c261f..27481ae49 100644 --- a/modules/wireguard/config_schema.json +++ b/modules/wireguard/config_schema.json @@ -1,13 +1,16 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/wireguard job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/wireguard job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] }, - "required": [ - "name" - ] + "uiSchema": {} } diff --git a/modules/wireguard/wireguard.go b/modules/wireguard/wireguard.go index 6587dce3c..7114baae5 100644 --- a/modules/wireguard/wireguard.go +++ b/modules/wireguard/wireguard.go @@ -4,6 +4,7 @@ package wireguard import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -32,9 +33,14 @@ func New() *WireGuard { } } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` +} + type ( WireGuard struct { module.Base + Config `yaml:",inline"` charts *module.Charts @@ -53,12 +59,24 @@ type ( } ) -func (w *WireGuard) Init() bool { - return true +func (w *WireGuard) Configuration() any { + return w.Config } -func (w *WireGuard) Check() bool { - return len(w.Collect()) > 0 +func (w *WireGuard) Init() error { + return nil +} + +func (w *WireGuard) Check() error { + mx, err := w.collect() + if err != nil { + w.Error(err) + return err + } + if 
len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (w *WireGuard) Charts() *module.Charts { diff --git a/modules/wireguard/wireguard_test.go b/modules/wireguard/wireguard_test.go index 5e6434dcc..9be84824d 100644 --- a/modules/wireguard/wireguard_test.go +++ b/modules/wireguard/wireguard_test.go @@ -17,7 +17,7 @@ import ( ) func TestWireGuard_Init(t *testing.T) { - assert.True(t, New().Init()) + assert.NoError(t, New().Init()) } func TestWireGuard_Charts(t *testing.T) { @@ -114,13 +114,13 @@ func TestWireGuard_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { w := New() - require.True(t, w.Init()) + require.NoError(t, w.Init()) test.prepare(w) if test.wantFail { - assert.False(t, w.Check()) + assert.Error(t, w.Check()) } else { - assert.True(t, w.Check()) + assert.NoError(t, w.Check()) } }) } @@ -411,7 +411,7 @@ func TestWireGuard_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { w := New() - require.True(t, w.Init()) + require.NoError(t, w.Init()) m := &mockClient{} w.client = m diff --git a/modules/x509check/config_schema.json b/modules/x509check/config_schema.json index 5194715ae..ebc5ef9bc 100644 --- a/modules/x509check/config_schema.json +++ b/modules/x509check/config_schema.json @@ -1,54 +1,57 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "title": "go.d/x509check job configuration schema.", - "properties": { - "name": { - "type": "string" - }, - "source": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "tlscfg": { - "type": "object", - "properties": { - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/x509check job configuration schema.", + "properties": { + "name": { + "type": "string" + }, + "source": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "tlscfg": { + "type": "object", + "properties": { + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } }, - "tls_skip_verify": { - "type": "boolean" - } + "required": [ + "tls_ca", + "tls_cert", + "tls_key" + ] }, - "required": [ - "tls_ca", - "tls_cert", - "tls_key" - ] - }, - "days_until_expiration_warning": { - "type": "integer" - }, - "days_until_expiration_critical": { - "type": "integer" + "days_until_expiration_warning": { + "type": "integer" + }, + "days_until_expiration_critical": { + "type": "integer" + }, + "check_revocation_status": { + "type": "boolean" + } }, - "check_revocation_status": { - "type": "boolean" - } + "required": [ + "name", + "source" + ] }, - "required": [ - "name", - "source" - ] + "uiSchema": {} } diff --git a/modules/x509check/provider.go b/modules/x509check/provider.go index c5ac4d711..86d10176c 100644 --- a/modules/x509check/provider.go +++ b/modules/x509check/provider.go @@ -59,10 +59,10 @@ func newProvider(config Config) (provider, error) { if sourceURL.Scheme == "https" { sourceURL.Scheme = "tcp" } - return &fromNet{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration}, nil + return &fromNet{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration()}, nil case "smtp": sourceURL.Scheme = "tcp" - return &fromSMTP{url: sourceURL, tlsConfig: tlsCfg, timeout: 
config.Timeout.Duration}, nil + return &fromSMTP{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration()}, nil default: return nil, fmt.Errorf("unsupported scheme '%s'", sourceURL) } diff --git a/modules/x509check/x509check.go b/modules/x509check/x509check.go index ed3a10b2f..89b93a265 100644 --- a/modules/x509check/x509check.go +++ b/modules/x509check/x509check.go @@ -4,6 +4,7 @@ package x509check import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/tlscfg" @@ -30,7 +31,7 @@ func init() { func New() *X509Check { return &X509Check{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), DaysUntilWarn: 14, DaysUntilCritical: 7, }, @@ -53,26 +54,38 @@ type X509Check struct { prov provider } -func (x *X509Check) Init() bool { +func (x *X509Check) Configuration() any { + return x.Config +} + +func (x *X509Check) Init() error { if err := x.validateConfig(); err != nil { x.Errorf("config validation: %v", err) - return false + return err } prov, err := x.initProvider() if err != nil { x.Errorf("certificate provider init: %v", err) - return false + return err } x.prov = prov x.charts = x.initCharts() - return true + return nil } -func (x *X509Check) Check() bool { - return len(x.Collect()) > 0 +func (x *X509Check) Check() error { + mx, err := x.collect() + if err != nil { + x.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (x *X509Check) Charts() *module.Charts { diff --git a/modules/x509check/x509check_test.go b/modules/x509check/x509check_test.go index 2c628af0a..bb82bd616 100644 --- a/modules/x509check/x509check_test.go +++ b/modules/x509check/x509check_test.go @@ -20,7 +20,7 @@ func TestX509Check_Cleanup(t *testing.T) { func TestX509Check_Charts(t *testing.T) { x509Check := New() x509Check.Source = "https://example.com" - require.True(t, x509Check.Init()) + require.NoError(t, x509Check.Init()) assert.NotNil(t, x509Check.Charts()) } @@ -70,9 +70,9 @@ func TestX509Check_Init(t *testing.T) { x509Check.Config = test.config if test.err { - assert.False(t, x509Check.Init()) + assert.Error(t, x509Check.Init()) } else { - require.True(t, x509Check.Init()) + require.NoError(t, x509Check.Init()) var typeOK bool switch test.providerType { @@ -94,20 +94,20 @@ func TestX509Check_Check(t *testing.T) { x509Check := New() x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}} - assert.True(t, x509Check.Check()) + assert.NoError(t, x509Check.Check()) } func TestX509Check_Check_ReturnsFalseOnProviderError(t *testing.T) { x509Check := New() x509Check.prov = &mockProvider{err: true} - assert.False(t, x509Check.Check()) + assert.Error(t, x509Check.Check()) } func TestX509Check_Collect(t *testing.T) { x509Check := New() x509Check.Source = "https://example.com" - require.True(t, x509Check.Init()) + require.NoError(t, x509Check.Init()) x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}} collected := x509Check.Collect() diff --git a/modules/zookeeper/collect.go b/modules/zookeeper/collect.go index 97d6f3e6c..86491e1b1 100644 --- a/modules/zookeeper/collect.go +++ b/modules/zookeeper/collect.go @@ -14,10 +14,12 @@ func (z *Zookeeper) collect() (map[string]int64, error) { func (z *Zookeeper) collectMntr() (map[string]int64, error) { const command = "mntr" + lines, err := z.fetch("mntr") if err != nil { return nil, err } + switch len(lines) { case 0: return nil, fmt.Errorf("'%s' command returned empty response", command) @@ -27,6 +29,7 @@ func (z 
*Zookeeper) collectMntr() (map[string]int64, error) { } mx := make(map[string]int64) + for _, line := range lines { parts := strings.Fields(line) if len(parts) != 2 || !strings.HasPrefix(parts[0], "zk_") { @@ -56,6 +59,7 @@ func (z *Zookeeper) collectMntr() (map[string]int64, error) { if len(mx) == 0 { return nil, fmt.Errorf("'%s' command: failed to parse response", command) } + return mx, nil } diff --git a/modules/zookeeper/config_schema.json b/modules/zookeeper/config_schema.json index 259987aba..9b113d5fe 100644 --- a/modules/zookeeper/config_schema.json +++ b/modules/zookeeper/config_schema.json @@ -1,38 +1,41 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "title": "go.d/zookeeper job configuration schema.", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/zookeeper job configuration schema.", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "use_tls": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "use_tls": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": {} } diff --git a/modules/zookeeper/fetcher.go b/modules/zookeeper/fetcher.go index 7c3aae0ea..cd9eed90d 100644 --- a/modules/zookeeper/fetcher.go +++ b/modules/zookeeper/fetcher.go @@ -39,9 +39,12 @@ func (c *zookeeperFetcher) fetch(command string) (rows []string, err error) { if err != nil { return nil, err } + return rows, nil } +func (c *zookeeperFetcher) disconnect() {} + func isZKLine(line []byte) bool { return bytes.HasPrefix(line, []byte("zk_")) } diff --git a/modules/zookeeper/init.go b/modules/zookeeper/init.go new file mode 100644 index 000000000..d865a0949 --- /dev/null +++ b/modules/zookeeper/init.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package zookeeper + +import ( + "crypto/tls" + "errors" + "fmt" + + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/netdata/go.d.plugin/pkg/tlscfg" +) + +func (z *Zookeeper) verifyConfig() error { + if z.Address == "" { + return errors.New("address not set") + } + return nil +} + +func (z *Zookeeper) initZookeeperFetcher() (fetcher, error) { + var tlsConf *tls.Config + var err error + + if z.UseTLS { + tlsConf, err = tlscfg.NewTLSConfig(z.TLSConfig) + if err != nil { + return nil, fmt.Errorf("creating tls config : %v", err) + } + } + + sock := socket.New(socket.Config{ + Address: z.Address, + ConnectTimeout: z.Timeout.Duration(), + ReadTimeout: z.Timeout.Duration(), + WriteTimeout: z.Timeout.Duration(), + TLSConf: tlsConf, + }) + + return &zookeeperFetcher{Client: sock}, nil +} diff --git a/modules/zookeeper/zookeeper.go b/modules/zookeeper/zookeeper.go index 29ab1f858..f96933831 100644 --- a/modules/zookeeper/zookeeper.go +++ b/modules/zookeeper/zookeeper.go @@ -3,12 +3,10 @@ package zookeeper import ( - "crypto/tls" _ "embed" - "fmt" + "errors" "time" - "github.com/netdata/go.d.plugin/pkg/socket" 
"github.com/netdata/go.d.plugin/pkg/tlscfg" "github.com/netdata/go.d.plugin/pkg/web" @@ -25,6 +23,16 @@ func init() { }) } +// New creates Zookeeper with default values. +func New() *Zookeeper { + return &Zookeeper{ + Config: Config{ + Address: "127.0.0.1:2181", + Timeout: web.Duration(time.Second), + UseTLS: false, + }} +} + // Config is the Zookeeper module configuration. type Config struct { Address string @@ -33,68 +41,55 @@ type Config struct { tlscfg.TLSConfig `yaml:",inline"` } -// New creates Zookeeper with default values. -func New() *Zookeeper { - config := Config{ - Address: "127.0.0.1:2181", - Timeout: web.Duration{Duration: time.Second}, - UseTLS: false, - } - return &Zookeeper{Config: config} -} - -type fetcher interface { - fetch(command string) ([]string, error) -} - // Zookeeper Zookeeper module. -type Zookeeper struct { - module.Base - fetcher - Config `yaml:",inline"` -} +type ( + Zookeeper struct { + module.Base + Config `yaml:",inline"` -// Cleanup makes cleanup. -func (Zookeeper) Cleanup() {} - -func (z *Zookeeper) createZookeeperFetcher() (err error) { - var tlsConf *tls.Config - if z.UseTLS { - tlsConf, err = tlscfg.NewTLSConfig(z.TLSConfig) - if err != nil { - return fmt.Errorf("error on creating tls config : %v", err) - } + fetcher + } + fetcher interface { + fetch(command string) ([]string, error) } +) - sock := socket.New(socket.Config{ - Address: z.Address, - ConnectTimeout: z.Timeout.Duration, - ReadTimeout: z.Timeout.Duration, - WriteTimeout: z.Timeout.Duration, - TLSConf: tlsConf, - }) - z.fetcher = &zookeeperFetcher{Client: sock} - return nil +func (z *Zookeeper) Configuration() any { + return z.Config } // Init makes initialization. -func (z *Zookeeper) Init() bool { - err := z.createZookeeperFetcher() +func (z *Zookeeper) Init() error { + if err := z.verifyConfig(); err != nil { + z.Error(err) + return err + } + + f, err := z.initZookeeperFetcher() if err != nil { z.Error(err) - return false + return err } + z.fetcher = f - return true + return nil } // Check makes check. -func (z *Zookeeper) Check() bool { - return len(z.Collect()) > 0 +func (z *Zookeeper) Check() error { + mx, err := z.collect() + if err != nil { + z.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts creates Charts. -func (Zookeeper) Charts() *Charts { +func (z *Zookeeper) Charts() *Charts { return charts.Copy() } @@ -110,3 +105,6 @@ func (z *Zookeeper) Collect() map[string]int64 { } return mx } + +// Cleanup makes cleanup. 
+func (z *Zookeeper) Cleanup() {} diff --git a/modules/zookeeper/zookeeper_test.go b/modules/zookeeper/zookeeper_test.go index 13f3632c2..8aaac2ed1 100644 --- a/modules/zookeeper/zookeeper_test.go +++ b/modules/zookeeper/zookeeper_test.go @@ -32,7 +32,7 @@ func TestNew(t *testing.T) { func TestZookeeper_Init(t *testing.T) { job := New() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.fetcher) } @@ -41,23 +41,23 @@ func TestZookeeper_InitErrorOnCreatingTLSConfig(t *testing.T) { job.UseTLS = true job.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestZookeeper_Check(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{data: testMntrData} - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) } func TestZookeeper_CheckErrorOnFetch(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{err: true} - assert.False(t, job.Check()) + assert.Error(t, job.Check()) } func TestZookeeper_Charts(t *testing.T) { @@ -70,7 +70,7 @@ func TestZookeeper_Cleanup(t *testing.T) { func TestZookeeper_Collect(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{data: testMntrData} expected := map[string]int64{ @@ -98,7 +98,7 @@ func TestZookeeper_Collect(t *testing.T) { func TestZookeeper_CollectMntrNotInWhiteList(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{data: testMntrNotInWhiteListData} assert.Nil(t, job.Collect()) @@ -106,7 +106,7 @@ func TestZookeeper_CollectMntrNotInWhiteList(t *testing.T) { func TestZookeeper_CollectMntrEmptyResponse(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{} assert.Nil(t, job.Collect()) @@ -114,7 +114,7 @@ func TestZookeeper_CollectMntrEmptyResponse(t *testing.T) { func TestZookeeper_CollectMntrInvalidData(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{data: []byte("hello \nand good buy\n")} assert.Nil(t, job.Collect()) @@ -122,7 +122,7 @@ func TestZookeeper_CollectMntrInvalidData(t *testing.T) { func TestZookeeper_CollectMntrReceiveError(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{err: true} assert.Nil(t, job.Collect()) diff --git a/pkg/tlscfg/config.go b/pkg/tlscfg/config.go index 26051e486..60e152e0f 100644 --- a/pkg/tlscfg/config.go +++ b/pkg/tlscfg/config.go @@ -12,16 +12,16 @@ import ( // TLSConfig represents the standard client TLS configuration. type TLSConfig struct { // TLSCA specifies the certificate authority to use when verifying server certificates. - TLSCA string `yaml:"tls_ca"` + TLSCA string `yaml:"tls_ca" json:"tls_ca"` // TLSCert specifies tls certificate file. - TLSCert string `yaml:"tls_cert"` + TLSCert string `yaml:"tls_cert" json:"tls_cert"` // TLSKey specifies tls key file. - TLSKey string `yaml:"tls_key"` + TLSKey string `yaml:"tls_key" json:"tls_key"` // InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
- InsecureSkipVerify bool `yaml:"tls_skip_verify"` + InsecureSkipVerify bool `yaml:"tls_skip_verify" json:"tls_skip_verify"` } // NewTLSConfig creates a tls.Config, may be nil without an error if TLS is not configured. diff --git a/pkg/web/client.go b/pkg/web/client.go index ae3ecd462..eb0e3c30c 100644 --- a/pkg/web/client.go +++ b/pkg/web/client.go @@ -21,18 +21,18 @@ var ErrRedirectAttempted = errors.New("redirect") type Client struct { // Timeout specifies a time limit for requests made by this Client. // Default (zero value) is no timeout. Must be set before http.Client creation. - Timeout Duration `yaml:"timeout"` + Timeout Duration `yaml:"timeout" json:"timeout"` // NotFollowRedirect specifies the policy for handling redirects. // Default (zero value) is std http package default policy (stop after 10 consecutive requests). - NotFollowRedirect bool `yaml:"not_follow_redirects"` + NotFollowRedirect bool `yaml:"not_follow_redirects" json:"not_follow_redirect"` // ProxyURL specifies the URL of the proxy to use. An empty string means use the environment variables // HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof) to get the URL. - ProxyURL string `yaml:"proxy_url"` + ProxyURL string `yaml:"proxy_url" json:"proxy_url"` // TLSConfig specifies the TLS configuration. - tlscfg.TLSConfig `yaml:",inline"` + tlscfg.TLSConfig `yaml:",inline" json:",inline"` } // NewHTTPClient returns a new *http.Client given a Client configuration and an error if any. @@ -48,17 +48,17 @@ func NewHTTPClient(cfg Client) (*http.Client, error) { } } - d := &net.Dialer{Timeout: cfg.Timeout.Duration} + d := &net.Dialer{Timeout: cfg.Timeout.Duration()} transport := &http.Transport{ Proxy: proxyFunc(cfg.ProxyURL), TLSClientConfig: tlsConfig, DialContext: d.DialContext, - TLSHandshakeTimeout: cfg.Timeout.Duration, + TLSHandshakeTimeout: cfg.Timeout.Duration(), } return &http.Client{ - Timeout: cfg.Timeout.Duration, + Timeout: cfg.Timeout.Duration(), Transport: transport, CheckRedirect: redirectFunc(cfg.NotFollowRedirect), }, nil diff --git a/pkg/web/client_test.go b/pkg/web/client_test.go index e11d6ce47..ead1486c3 100644 --- a/pkg/web/client_test.go +++ b/pkg/web/client_test.go @@ -12,7 +12,7 @@ import ( func TestNewHTTPClient(t *testing.T) { client, _ := NewHTTPClient(Client{ - Timeout: Duration{Duration: time.Second * 5}, + Timeout: Duration(time.Second * 5), NotFollowRedirect: true, ProxyURL: "http://127.0.0.1:3128", }) diff --git a/pkg/web/duration.go b/pkg/web/duration.go index ced991f91..03003e24c 100644 --- a/pkg/web/duration.go +++ b/pkg/web/duration.go @@ -3,15 +3,14 @@ package web import ( + "encoding/json" "fmt" "strconv" "time" ) // Duration is a time.Duration wrapper. -type Duration struct { - Duration time.Duration -} +type Duration time.Duration // UnmarshalYAML implements yaml.Unmarshaler. 
func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { @@ -22,18 +21,32 @@ func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { } if v, err := time.ParseDuration(s); err == nil { - d.Duration = v + *d = Duration(v) return nil } if v, err := strconv.ParseInt(s, 10, 64); err == nil { - d.Duration = time.Duration(v) * time.Second + *d = Duration(time.Duration(v) * time.Second) return nil } if v, err := strconv.ParseFloat(s, 64); err == nil { - d.Duration = time.Duration(v) * time.Second + *d = Duration(time.Duration(v) * time.Second) return nil } return fmt.Errorf("unparsable duration format '%s'", s) } -func (d Duration) String() string { return d.Duration.String() } +func (d Duration) Duration() time.Duration { + return time.Duration(d) +} + +func (d Duration) String() string { + return d.Duration().String() +} + +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +func (d Duration) MarshalYAML() (any, error) { + return d.String(), nil +} diff --git a/pkg/web/request.go b/pkg/web/request.go index 5740da6d1..3db08f734 100644 --- a/pkg/web/request.go +++ b/pkg/web/request.go @@ -14,30 +14,30 @@ import ( // Supported configuration file formats: YAML. type Request struct { // URL specifies the URL to access. - URL string `yaml:"url"` + URL string `yaml:"url" json:"url"` // Body specifies the HTTP request body to be sent by the client. - Body string `yaml:"body"` + Body string `yaml:"body" json:"body"` // Method specifies the HTTP method (GET, POST, PUT, etc.). An empty string means GET. - Method string `yaml:"method"` + Method string `yaml:"method" json:"method"` // Headers specifies the HTTP request header fields to be sent by the client. - Headers map[string]string `yaml:"headers"` + Headers map[string]string `yaml:"headers" json:"headers"` // Username specifies the username for basic HTTP authentication. - Username string `yaml:"username"` + Username string `yaml:"username" json:"username"` // Password specifies the password for basic HTTP authentication. - Password string `yaml:"password"` + Password string `yaml:"password" json:"password"` // ProxyUsername specifies the username for basic HTTP authentication. // It is used to authenticate a user agent to a proxy server. - ProxyUsername string `yaml:"proxy_username"` + ProxyUsername string `yaml:"proxy_username" json:"proxy_username"` // ProxyPassword specifies the password for basic HTTP authentication. // It is used to authenticate a user agent to a proxy server. - ProxyPassword string `yaml:"proxy_password"` + ProxyPassword string `yaml:"proxy_password" json:"proxy_password"` } // Copy makes a full copy of the Request. diff --git a/pkg/web/web.go b/pkg/web/web.go index e2a7098ba..07cef4839 100644 --- a/pkg/web/web.go +++ b/pkg/web/web.go @@ -6,6 +6,6 @@ package web // This structure intended to be part of the module configuration. // Supported configuration file formats: YAML. 
type HTTP struct { - Request `yaml:",inline"` - Client `yaml:",inline"` + Request `yaml:",inline" json:",inline"` + Client `yaml:",inline" json:",inline"` } diff --git a/qq.json b/qq.json new file mode 100644 index 000000000..42a509038 --- /dev/null +++ b/qq.json @@ -0,0 +1,211 @@ +{ + "version": 1, + "tree": { + "/collectors/example": { + "go.d:collector:example:jobs": { + "type": "template", + "status": "accepted", + "cmds": [ + "schema", + "add", + "test", + "enable", + "disable" + ], + "access": { + "view": [ + "signed-in", + "same-space", + "view-config" + ], + "edit": [ + "signed-in", + "same-space", + "edit-config" + ] + }, + "source_type": "internal", + "source": "internal", + "sync": false, + "user_disabled": false, + "restart_required": false, + "plugin_rejected": false, + "payload": { + "available": false + }, + "saves": 0, + "created_ut": 1707566196766292, + "modified_ut": 1707566196766292 + }, + "go.d:collector:example:jobs:example1": { + "type": "job", + "template": "go.d:collector:example:jobs", + "status": "running", + "cmds": [ + "get", + "schema", + "update", + "remove", + "enable", + "disable", + "restart" + ], + "access": { + "view": [ + "signed-in", + "same-space", + "view-config" + ], + "edit": [ + "signed-in", + "same-space", + "edit-config" + ] + }, + "source_type": "dyncfg", + "source": "method=api,role=god,permissions=0x7ff,ip=10.20.4.44", + "sync": false, + "user_disabled": false, + "restart_required": false, + "plugin_rejected": false, + "payload": { + "available": true, + "status": "running", + "source_type": "dyncfg", + "source": "method=api,role=god,permissions=0x7ff,ip=10.20.4.44", + "created_ut": 1707499290351471, + "modified_ut": 1707499381852340, + "content_type": "text/yaml", + "content_length": 36 + }, + "saves": 3, + "created_ut": 1707499290351471, + "modified_ut": 1707566196766742 + } + }, + "/collectors/nginx": { + "go.d:collector:nginx:jobs": { + "type": "template", + "status": "accepted", + "cmds": [ + "schema", + "add", + "test", + "enable", + "disable" + ], + "access": { + "view": [ + "signed-in", + "same-space", + "view-config" + ], + "edit": [ + "signed-in", + "same-space", + "edit-config" + ] + }, + "source_type": "internal", + "source": "internal", + "sync": false, + "user_disabled": false, + "restart_required": false, + "plugin_rejected": false, + "payload": { + "available": false + }, + "saves": 0, + "created_ut": 1707566196766267, + "modified_ut": 1707566196766267 + }, + "go.d:collector:nginx:jobs:local": { + "type": "job", + "template": "go.d:collector:nginx:jobs", + "status": "failed", + "cmds": [ + "get", + "schema", + "update", + "enable", + "disable", + "restart" + ], + "access": { + "view": [ + "signed-in", + "same-space", + "view-config" + ], + "edit": [ + "signed-in", + "same-space", + "edit-config" + ] + }, + "source_type": "stock", + "source": "/opt/netdata/usr/lib/netdata/conf.d/go.d/nginx.conf", + "sync": false, + "user_disabled": false, + "restart_required": false, + "plugin_rejected": false, + "payload": { + "available": false + }, + "saves": 0, + "created_ut": 1707566196767193, + "modified_ut": 1707566196767193 + } + }, + "/collectors/ping": { + "go.d:collector:ping:jobs": { + "type": "template", + "status": "accepted", + "cmds": [ + "schema", + "add", + "test", + "enable", + "disable" + ], + "access": { + "view": [ + "signed-in", + "same-space", + "view-config" + ], + "edit": [ + "signed-in", + "same-space", + "edit-config" + ] + }, + "source_type": "internal", + "source": "internal", + "sync": false, + "user_disabled": 
false, + "restart_required": false, + "plugin_rejected": false, + "payload": { + "available": false + }, + "saves": 0, + "created_ut": 1707566196766285, + "modified_ut": 1707566196766285 + } + } + }, + "attention": { + "degraded": true, + "restart_required": 0, + "plugin_rejected": 0, + "status_failed": 1, + "status_incomplete": 0 + }, + "agent": { + "mg": "7e25c574-c36e-11ee-9a4d-e6007f1f06b0", + "nd": "17d875a1-27a8-4177-ba11-0409b363e712", + "nm": "pve-deb-work", + "now": 1707566210 + } +} \ No newline at end of file
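
The hunks above share one migration pattern: collector Init and Check return error instead of bool, each collector exposes its configuration through Configuration() any, and web.Duration is now a time.Duration alias read through its Duration() accessor. The following sketch is illustrative only and not part of the patch; it shows that contract on a hypothetical "example" collector, with the module interface details assumed from the hunks above and the field names and defaults invented for illustration.

// Illustrative sketch, not part of the patch. Assumes the module contract
// implied by the hunks above: Init/Check return error, Configuration returns
// the config value, and web.Duration wraps time.Duration.
package example

import (
	"errors"
	"time"

	"github.com/netdata/go.d.plugin/agent/module"
	"github.com/netdata/go.d.plugin/pkg/web"
)

type Config struct {
	Address string       `yaml:"address" json:"address"`
	Timeout web.Duration `yaml:"timeout" json:"timeout"`
}

type Example struct {
	module.Base
	Config `yaml:",inline"`

	charts *module.Charts
}

func New() *Example {
	return &Example{
		Config: Config{
			Address: "127.0.0.1:9000",              // hypothetical default
			Timeout: web.Duration(time.Second * 2), // a value now, not a struct literal
		},
	}
}

func (e *Example) Configuration() any { return e.Config }

func (e *Example) Init() error {
	if e.Address == "" {
		// config errors are returned to the caller, not just logged
		return errors.New("address not set")
	}
	return nil
}

func (e *Example) Check() error {
	mx, err := e.collect()
	if err != nil {
		e.Error(err)
		return err
	}
	if len(mx) == 0 {
		return errors.New("no metrics collected")
	}
	return nil
}

func (e *Example) Charts() *module.Charts { return e.charts }

func (e *Example) Collect() map[string]int64 {
	mx, err := e.collect()
	if err != nil {
		e.Error(err)
	}
	return mx
}

func (e *Example) Cleanup() {}

// collect is a stub standing in for a real data source.
func (e *Example) collect() (map[string]int64, error) {
	_ = e.Timeout.Duration() // time.Duration obtained via the new accessor
	return map[string]int64{"up": 1}, nil
}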